mirror of
https://github.com/chatmail/relay.git
synced 2026-05-10 16:04:37 +00:00
Compare commits
47 Commits
j4n/docker
...
quota_expi
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
fb80f23cfd | ||
|
|
0aa08b7413 | ||
|
|
14dfabf2ff | ||
|
|
0a77b3339b | ||
|
|
001d8c80fc | ||
|
|
1e376f7945 | ||
|
|
1ae92e0639 | ||
|
|
56386c231b | ||
|
|
2bdfecff72 | ||
|
|
cef739e3b3 | ||
|
|
3d128d3c64 | ||
|
|
79f68342f4 | ||
|
|
54863453c2 | ||
|
|
74326a8c54 | ||
|
|
59e5dea597 | ||
|
|
d7d89d66c1 | ||
|
|
00d723bd6e | ||
|
|
c257bfca4b | ||
|
|
82c9831369 | ||
|
|
b835318ce9 | ||
|
|
b4a46d23e6 | ||
|
|
c6d9d27a84 | ||
|
|
4521f03c99 | ||
|
|
c78859aec6 | ||
|
|
98bd5944cc | ||
|
|
e8933c455f | ||
|
|
d3a483c403 | ||
|
|
e687120d96 | ||
|
|
7409bd3452 | ||
|
|
1a34172487 | ||
|
|
38246ca8ea | ||
|
|
2635ac7e6d | ||
|
|
4fabfb31f8 | ||
|
|
36478dbfcf | ||
|
|
ff541b81ea | ||
|
|
ed9b4092a8 | ||
|
|
1b8ad3ca12 | ||
|
|
f85d304e65 | ||
|
|
4d1856d8f1 | ||
|
|
ae2ab52aa9 | ||
|
|
d0c396538b | ||
|
|
78a4e28408 | ||
|
|
2432d4f498 | ||
|
|
31301abb42 | ||
|
|
6b4edd8502 | ||
|
|
9c467ab3e8 | ||
|
|
774350778b |
@@ -1,18 +0,0 @@
|
||||
data/
|
||||
venv/
|
||||
__pycache__
|
||||
*.pyc
|
||||
*.orig
|
||||
*.ini
|
||||
.pytest_cache
|
||||
.env
|
||||
|
||||
# Slim build context — .git/ alone can be 100s of MB
|
||||
.git
|
||||
.github/
|
||||
docs/
|
||||
tests/
|
||||
|
||||
# Exclude markdown files but keep www/src/*.md (used by WebsiteDeployer)
|
||||
*.md
|
||||
!www/**/*.md
|
||||
45
.github/workflows/ci.yaml
vendored
45
.github/workflows/ci.yaml
vendored
@@ -1,21 +1,32 @@
|
||||
name: CI
|
||||
name: Run unit-tests and container-based deploy+test verification
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
# Triggers when a PR is merged into main or a direct push occurs
|
||||
push:
|
||||
branches: [ "main" ]
|
||||
|
||||
# Triggers for any PR (and its subsequent commits) targeting the main branch
|
||||
pull_request:
|
||||
branches: [ "main" ]
|
||||
|
||||
# Newest push wins: Prevents multiple runs from clashing and wasting runner efforts
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
|
||||
jobs:
|
||||
tox:
|
||||
name: isolated chatmaild tests
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v6
|
||||
# Checkout pull request HEAD commit instead of merge commit
|
||||
# Otherwise `test_deployed_state` will be unhappy.
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- name: download filtermail
|
||||
run: curl -L https://github.com/chatmail/filtermail/releases/download/v0.5.2/filtermail-x86_64 -o /usr/local/bin/filtermail && chmod +x /usr/local/bin/filtermail
|
||||
run: curl -L https://github.com/chatmail/filtermail/releases/download/v0.6.1/filtermail-x86_64 -o /usr/local/bin/filtermail && chmod +x /usr/local/bin/filtermail
|
||||
- name: run chatmaild tests
|
||||
working-directory: chatmaild
|
||||
run: pipx run tox
|
||||
@@ -24,7 +35,9 @@ jobs:
|
||||
name: deploy-chatmail tests
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
|
||||
- name: initenv
|
||||
run: scripts/initenv.sh
|
||||
@@ -38,5 +51,23 @@ jobs:
|
||||
- name: run deploy-chatmail offline tests
|
||||
run: pytest --pyargs cmdeploy
|
||||
|
||||
# all other cmdeploy commands require a staging server
|
||||
# see https://github.com/deltachat/chatmail/issues/100
|
||||
lxc-test:
|
||||
name: LXC deploy and test
|
||||
uses: chatmail/cmlxc/.github/workflows/lxc-test.yml@v0.10.0
|
||||
with:
|
||||
cmlxc_commands: |
|
||||
cmlxc init
|
||||
# single cmdeploy relay test
|
||||
cmlxc -v deploy-cmdeploy --source ./repo cm0
|
||||
cmlxc -v test-mini cm0
|
||||
cmlxc -v test-cmdeploy cm0
|
||||
|
||||
# cross cmdeploy relay test
|
||||
cmlxc -v deploy-cmdeploy --source ./repo --ipv4-only cm1
|
||||
cmlxc -v test-cmdeploy cm0 cm1
|
||||
|
||||
# cross cmdeploy/madmail relay tests
|
||||
cmlxc -v deploy-madmail mad0
|
||||
cmlxc -v test-cmdeploy cm0 mad0
|
||||
cmlxc -v test-mini cm0 mad0
|
||||
cmlxc -v test-mini mad0 cm0
|
||||
|
||||
375
.github/workflows/deploy.yaml
vendored
375
.github/workflows/deploy.yaml
vendored
@@ -1,375 +0,0 @@
|
||||
name: Deploy
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- j4n/docker-pr
|
||||
pull_request:
|
||||
paths-ignore:
|
||||
- 'scripts/**'
|
||||
- '**/README.md'
|
||||
- 'CHANGELOG.md'
|
||||
- 'LICENSE'
|
||||
|
||||
env:
|
||||
REGISTRY: ghcr.io
|
||||
IMAGE_NAME: ${{ github.repository }}
|
||||
|
||||
jobs:
|
||||
build-docker:
|
||||
name: Build Docker image
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
outputs:
|
||||
image: ${{ steps.image-ref.outputs.image }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Login to GHCR
|
||||
if: github.event_name == 'push'
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata (tags, labels)
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
tags: |
|
||||
# Tagged releases: v1.2.3 -> :1.2.3, :1.2, :latest
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
# Branch pushes: foo/docker-pr -> :foo-docker-pr
|
||||
type=ref,event=branch
|
||||
# Always: :sha-<hash>
|
||||
type=sha
|
||||
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: docker/chatmail_relay.dockerfile
|
||||
push: ${{ github.event_name == 'push' }}
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
build-args: |
|
||||
GIT_HASH=${{ github.sha }}
|
||||
|
||||
- name: Output image reference
|
||||
id: image-ref
|
||||
run: |
|
||||
SHORT_SHA=$(echo "${{ github.sha }}" | cut -c1-7)
|
||||
IMAGE="${{ env.REGISTRY }}/$(echo "${{ env.IMAGE_NAME }}" | tr '[:upper:]' '[:lower:]'):sha-${SHORT_SHA}"
|
||||
echo "image=${IMAGE}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
deploy:
|
||||
name: Deploy to ${{ matrix.host }}
|
||||
needs: build-docker
|
||||
# dont do the regular tests on this branch
|
||||
if: >-
|
||||
!cancelled() && (
|
||||
github.event_name == 'push' ||
|
||||
(github.event_name == 'pull_request' && !startsWith(github.head_ref, 'j4n/'))
|
||||
)
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- host: staging2.testrun.org
|
||||
acme_dir: acme
|
||||
dkim_dir: dkimkeys
|
||||
zone_file: staging.testrun.org-default.zone
|
||||
disable_ipv6: false
|
||||
add_ssh_keys: true
|
||||
- host: staging-ipv4.testrun.org
|
||||
acme_dir: acme-ipv4
|
||||
dkim_dir: dkimkeys-ipv4
|
||||
zone_file: staging-ipv4.testrun.org-default.zone
|
||||
disable_ipv6: true
|
||||
add_ssh_keys: false
|
||||
environment:
|
||||
name: ${{ matrix.host }}
|
||||
url: https://${{ matrix.host }}/
|
||||
concurrency: ${{ matrix.host }}
|
||||
steps:
|
||||
# --- Common setup ---
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: prepare SSH and save ACME/DKIM
|
||||
env:
|
||||
HOST: ${{ matrix.host }}
|
||||
ACME_DIR: ${{ matrix.acme_dir }}
|
||||
DKIM_DIR: ${{ matrix.dkim_dir }}
|
||||
ZONE: ${{ matrix.zone_file }}
|
||||
run: |
|
||||
mkdir ~/.ssh
|
||||
echo "${{ secrets.STAGING_SSH_KEY }}" >> ~/.ssh/id_ed25519
|
||||
chmod 600 ~/.ssh/id_ed25519
|
||||
ssh-keyscan ${HOST} > ~/.ssh/known_hosts
|
||||
# save previous acme & dkim state (trailing slash = copy contents)
|
||||
rsync -avz root@${HOST}:/var/lib/acme/ ${ACME_DIR}/ || true
|
||||
rsync -avz root@${HOST}:/etc/dkimkeys/ ${DKIM_DIR}/ || true
|
||||
# backup to ns.testrun.org if contents are useful
|
||||
if [ -f ${DKIM_DIR}/opendkim.private ]; then
|
||||
rsync -avz -e "ssh -o StrictHostKeyChecking=accept-new" ${DKIM_DIR}/ root@ns.testrun.org:/tmp/${DKIM_DIR}/ || true
|
||||
fi
|
||||
if [ "$(ls -A ${ACME_DIR}/certs 2>/dev/null)" ]; then
|
||||
rsync -avz -e "ssh -o StrictHostKeyChecking=accept-new" ${ACME_DIR}/ root@ns.testrun.org:/tmp/${ACME_DIR}/ || true
|
||||
fi
|
||||
# make sure CAA record isn't set
|
||||
scp -o StrictHostKeyChecking=accept-new .github/workflows/${ZONE} root@ns.testrun.org:/etc/nsd/${HOST}.zone
|
||||
ssh root@ns.testrun.org sed -i '/CAA/d' /etc/nsd/${HOST}.zone
|
||||
ssh root@ns.testrun.org nsd-checkzone ${HOST} /etc/nsd/${HOST}.zone
|
||||
ssh root@ns.testrun.org systemctl reload nsd
|
||||
|
||||
- name: rebuild VPS
|
||||
env:
|
||||
SERVER_ID: ${{ matrix.host == 'staging2.testrun.org' && secrets.STAGING_SERVER_ID || secrets.STAGING_IPV4_SERVER_ID }}
|
||||
run: |
|
||||
curl -X POST \
|
||||
-H "Authorization: Bearer ${{ secrets.HETZNER_API_TOKEN }}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"image":"debian-12"}' \
|
||||
"https://api.hetzner.cloud/v1/servers/${SERVER_ID}/actions/rebuild"
|
||||
|
||||
- run: scripts/initenv.sh
|
||||
- name: append venv/bin to PATH
|
||||
run: echo venv/bin >>$GITHUB_PATH
|
||||
|
||||
- name: wait for VPS rebuild
|
||||
id: wait-for-vps
|
||||
env:
|
||||
HOST: ${{ matrix.host }}
|
||||
run: |
|
||||
rm ~/.ssh/known_hosts
|
||||
while ! ssh -o ConnectTimeout=180 -o StrictHostKeyChecking=accept-new root@${HOST} id -u ; do sleep 1 ; done
|
||||
|
||||
- name: restore ACME/DKIM
|
||||
env:
|
||||
HOST: ${{ matrix.host }}
|
||||
ACME_DIR: ${{ matrix.acme_dir }}
|
||||
DKIM_DIR: ${{ matrix.dkim_dir }}
|
||||
run: |
|
||||
# download from ns.testrun.org
|
||||
rsync -e "ssh -o StrictHostKeyChecking=accept-new" -avz root@ns.testrun.org:/tmp/${ACME_DIR}/ acme-restore/ || true
|
||||
rsync -avz root@ns.testrun.org:/tmp/${DKIM_DIR}/ dkimkeys-restore/ || true
|
||||
# restore to VPS
|
||||
rsync -avz acme-restore/ root@${HOST}:/var/lib/acme/ || true
|
||||
rsync -avz dkimkeys-restore/ root@${HOST}:/etc/dkimkeys/ || true
|
||||
ssh root@${HOST} chown root:root -R /var/lib/acme || true
|
||||
|
||||
- name: bare offline tests
|
||||
if: github.ref == 'refs/heads/main' || github.event_name == 'pull_request'
|
||||
run: pytest --pyargs cmdeploy
|
||||
|
||||
- name: bare deploy
|
||||
if: github.ref == 'refs/heads/main' || github.event_name == 'pull_request'
|
||||
env:
|
||||
HOST: ${{ matrix.host }}
|
||||
DISABLE_IPV6: ${{ matrix.disable_ipv6 }}
|
||||
run: |
|
||||
ssh root@${HOST} 'apt update && apt install -y git python3.11-venv python3-dev gcc'
|
||||
ssh root@${HOST} 'git clone https://github.com/chatmail/relay'
|
||||
ssh root@${HOST} "cd relay && git checkout ${{ github.head_ref || github.ref_name }}"
|
||||
ssh root@${HOST} 'cd relay && scripts/initenv.sh'
|
||||
ssh root@${HOST} "cd relay && scripts/cmdeploy init ${HOST}"
|
||||
if [ "${DISABLE_IPV6}" = "true" ]; then
|
||||
ssh root@${HOST} "sed -i 's#disable_ipv6 = False#disable_ipv6 = True#' relay/chatmail.ini"
|
||||
fi
|
||||
ssh root@${HOST} "sed -i 's/#\s*mtail_address/mtail_address/' relay/chatmail.ini"
|
||||
ssh root@${HOST} "cd relay && scripts/cmdeploy run --verbose --skip-dns-check --ssh-host localhost"
|
||||
|
||||
- name: bare DNS
|
||||
if: github.ref == 'refs/heads/main' || github.event_name == 'pull_request'
|
||||
env:
|
||||
HOST: ${{ matrix.host }}
|
||||
ZONE: ${{ matrix.zone_file }}
|
||||
run: |
|
||||
ssh root@${HOST} chown opendkim:opendkim -R /etc/dkimkeys
|
||||
ssh root@${HOST} "cd relay && scripts/cmdeploy dns --zonefile staging-generated.zone --ssh-host localhost"
|
||||
ssh root@${HOST} cat relay/staging-generated.zone >> .github/workflows/${ZONE}
|
||||
cat .github/workflows/${ZONE}
|
||||
scp .github/workflows/${ZONE} root@ns.testrun.org:/etc/nsd/${HOST}.zone
|
||||
ssh root@ns.testrun.org nsd-checkzone ${HOST} /etc/nsd/${HOST}.zone
|
||||
ssh root@ns.testrun.org systemctl reload nsd
|
||||
|
||||
- name: bare integration tests
|
||||
if: github.ref == 'refs/heads/main' || github.event_name == 'pull_request'
|
||||
env:
|
||||
HOST: ${{ matrix.host }}
|
||||
run: ssh root@${HOST} "cd relay && CHATMAIL_DOMAIN2=ci-chatmail.testrun.org scripts/cmdeploy test --slow --ssh-host localhost"
|
||||
|
||||
- name: bare final DNS check
|
||||
if: github.ref == 'refs/heads/main' || github.event_name == 'pull_request'
|
||||
env:
|
||||
HOST: ${{ matrix.host }}
|
||||
run: ssh root@${HOST} "cd relay && scripts/cmdeploy dns -v --ssh-host localhost"
|
||||
|
||||
# --- Docker deploy (push only, runs even if bare failed) ---
|
||||
|
||||
- name: stop bare services
|
||||
if: >-
|
||||
!cancelled() && github.event_name == 'push'
|
||||
&& steps.wait-for-vps.outcome == 'success'
|
||||
env:
|
||||
HOST: ${{ matrix.host }}
|
||||
run: |
|
||||
ssh root@${HOST} 'systemctl stop postfix dovecot nginx opendkim unbound filtermail doveauth chatmail-metadata iroh-relay mtail fcgiwrap acmetool 2>/dev/null || true'
|
||||
|
||||
- name: install Docker on VPS
|
||||
if: >-
|
||||
!cancelled() && github.event_name == 'push'
|
||||
&& steps.wait-for-vps.outcome == 'success'
|
||||
env:
|
||||
HOST: ${{ matrix.host }}
|
||||
run: |
|
||||
ssh root@${HOST} 'apt-get update && apt-get install -y ca-certificates curl'
|
||||
ssh root@${HOST} 'install -m 0755 -d /etc/apt/keyrings'
|
||||
ssh root@${HOST} 'curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc && chmod a+r /etc/apt/keyrings/docker.asc'
|
||||
ssh root@${HOST} 'echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian $(. /etc/os-release && echo $VERSION_CODENAME) stable" > /etc/apt/sources.list.d/docker.list'
|
||||
ssh root@${HOST} 'apt-get update && apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin'
|
||||
|
||||
- name: prepare Docker bind mounts
|
||||
if: >-
|
||||
!cancelled() && github.event_name == 'push'
|
||||
&& steps.wait-for-vps.outcome == 'success'
|
||||
env:
|
||||
HOST: ${{ matrix.host }}
|
||||
run: |
|
||||
ssh root@${HOST} 'mkdir -p /srv/chatmail/certs /srv/chatmail/dkim'
|
||||
ssh root@${HOST} 'cp -a /var/lib/acme/. /srv/chatmail/certs/ && cp -a /etc/dkimkeys/. /srv/chatmail/dkim/' || true
|
||||
|
||||
- name: generate and upload chatmail.ini
|
||||
if: >-
|
||||
!cancelled() && github.event_name == 'push'
|
||||
&& steps.wait-for-vps.outcome == 'success'
|
||||
env:
|
||||
HOST: ${{ matrix.host }}
|
||||
run: |
|
||||
cmdeploy init ${HOST}
|
||||
sed -i 's/#\s*mtail_address/mtail_address/' chatmail.ini
|
||||
scp chatmail.ini root@${HOST}:/srv/chatmail/chatmail.ini
|
||||
|
||||
- name: deploy with Docker
|
||||
if: >-
|
||||
!cancelled() && github.event_name == 'push'
|
||||
&& steps.wait-for-vps.outcome == 'success'
|
||||
env:
|
||||
HOST: ${{ matrix.host }}
|
||||
run: |
|
||||
GHCR_IMAGE="${{ needs.build-docker.outputs.image }}"
|
||||
rsync -avz --exclude='.git' --exclude='venv' --exclude='__pycache__' ./ root@${HOST}:/srv/chatmail/relay/
|
||||
# Login to GHCR on VPS and pull pre-built image
|
||||
echo "${{ secrets.GITHUB_TOKEN }}" | ssh root@${HOST} 'docker login ghcr.io -u ${{ github.actor }} --password-stdin'
|
||||
ssh root@${HOST} "docker pull ${GHCR_IMAGE}"
|
||||
ssh root@${HOST} "cd /srv/chatmail/relay && CHATMAIL_IMAGE=${GHCR_IMAGE} MAIL_DOMAIN=${HOST} docker compose -f docker-compose.yaml -f docker/docker-compose.ci.yaml up -d"
|
||||
|
||||
- name: wait for container healthy
|
||||
if: >-
|
||||
!cancelled() && github.event_name == 'push'
|
||||
&& steps.wait-for-vps.outcome == 'success'
|
||||
env:
|
||||
HOST: ${{ matrix.host }}
|
||||
run: |
|
||||
# Stream journald inside the container
|
||||
ssh root@${HOST} 'docker exec chatmail journalctl -f --no-pager' &
|
||||
LOG_PID=$!
|
||||
trap "kill $LOG_PID 2>/dev/null || true" EXIT
|
||||
for i in $(seq 1 60); do
|
||||
status=$(ssh root@${HOST} 'docker inspect --format={{.State.Health.Status}} chatmail 2>/dev/null' || echo "missing")
|
||||
echo " [$i/60] status=$status"
|
||||
if [ "$status" = "healthy" ]; then
|
||||
echo "Container is healthy."
|
||||
exit 0
|
||||
fi
|
||||
if [ "$status" = "unhealthy" ]; then
|
||||
echo "Container is unhealthy!"
|
||||
break
|
||||
fi
|
||||
sleep 5
|
||||
done
|
||||
echo "Container did not become healthy."
|
||||
kill $LOG_PID 2>/dev/null || true
|
||||
echo "--- failed units ---"
|
||||
ssh root@${HOST} 'docker exec chatmail systemctl --failed --no-pager' || true
|
||||
echo "--- service logs ---"
|
||||
ssh root@${HOST} 'docker exec chatmail journalctl -u dovecot -u postfix -u nginx -u unbound --no-pager -n 50' || true
|
||||
echo "--- listening ports ---"
|
||||
ssh root@${HOST} 'docker exec chatmail ss -tlnp' || true
|
||||
echo "--- chatmail.ini ---"
|
||||
ssh root@${HOST} 'docker exec chatmail cat /etc/chatmail/chatmail.ini' || true
|
||||
exit 1
|
||||
|
||||
- name: show container state
|
||||
if: >-
|
||||
!cancelled() && github.event_name == 'push'
|
||||
&& steps.wait-for-vps.outcome == 'success'
|
||||
env:
|
||||
HOST: ${{ matrix.host }}
|
||||
run: |
|
||||
echo "--- listening ports ---"
|
||||
ssh root@${HOST} 'docker exec chatmail ss -tlnp'
|
||||
echo "--- chatmail.ini ---"
|
||||
ssh root@${HOST} 'docker exec chatmail cat /etc/chatmail/chatmail.ini'
|
||||
|
||||
- name: Docker offline tests
|
||||
if: >-
|
||||
!cancelled() && github.event_name == 'push'
|
||||
&& steps.wait-for-vps.outcome == 'success'
|
||||
run: CHATMAIL_DOCKER=chatmail pytest --pyargs cmdeploy
|
||||
|
||||
- name: Docker DNS
|
||||
if: >-
|
||||
!cancelled() && github.event_name == 'push'
|
||||
&& steps.wait-for-vps.outcome == 'success'
|
||||
env:
|
||||
HOST: ${{ matrix.host }}
|
||||
ZONE: ${{ matrix.zone_file }}
|
||||
run: |
|
||||
# Reset zone file in case bare DNS already appended to it
|
||||
git checkout .github/workflows/${ZONE}
|
||||
ssh root@${HOST} 'docker exec chatmail chown opendkim:opendkim -R /etc/dkimkeys'
|
||||
ssh root@${HOST} 'docker exec chatmail cmdeploy dns --ssh-host @local --zonefile /opt/chatmail/staging.zone --verbose'
|
||||
ssh root@${HOST} 'docker cp chatmail:/opt/chatmail/staging.zone /tmp/staging.zone'
|
||||
scp root@${HOST}:/tmp/staging.zone staging-generated.zone
|
||||
cat staging-generated.zone >> .github/workflows/${ZONE}
|
||||
cat .github/workflows/${ZONE}
|
||||
scp .github/workflows/${ZONE} root@ns.testrun.org:/etc/nsd/${HOST}.zone
|
||||
ssh root@ns.testrun.org nsd-checkzone ${HOST} /etc/nsd/${HOST}.zone
|
||||
ssh root@ns.testrun.org systemctl reload nsd
|
||||
|
||||
- name: Docker integration tests
|
||||
if: >-
|
||||
!cancelled() && github.event_name == 'push'
|
||||
&& steps.wait-for-vps.outcome == 'success'
|
||||
run: CHATMAIL_DOCKER=chatmail CHATMAIL_DOMAIN2=ci-chatmail.testrun.org cmdeploy test --slow
|
||||
|
||||
- name: Docker final DNS check
|
||||
if: >-
|
||||
!cancelled() && github.event_name == 'push'
|
||||
&& steps.wait-for-vps.outcome == 'success'
|
||||
env:
|
||||
HOST: ${{ matrix.host }}
|
||||
run: ssh root@${HOST} 'docker exec chatmail cmdeploy dns -v --ssh-host @local'
|
||||
|
||||
# --- Cleanup ---
|
||||
|
||||
- name: add SSH keys
|
||||
if: >-
|
||||
!cancelled() && matrix.add_ssh_keys
|
||||
&& steps.wait-for-vps.outcome == 'success'
|
||||
run: ssh root@${{ matrix.host }} 'curl -s https://github.com/hpk42.keys https://github.com/j4n.keys >> .ssh/authorized_keys'
|
||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -164,9 +164,3 @@ cython_debug/
|
||||
#.idea/
|
||||
|
||||
chatmail.zone
|
||||
|
||||
# docker
|
||||
/data/
|
||||
/custom/
|
||||
docker-compose.override.yaml
|
||||
.env
|
||||
|
||||
19
CHANGELOG.md
19
CHANGELOG.md
@@ -1,5 +1,24 @@
|
||||
# Changelog for chatmail deployment
|
||||
|
||||
## Unreleased
|
||||
|
||||
### Features
|
||||
|
||||
- Automated per-user quota-keeping.
|
||||
Replace daily timer-based message expire script
|
||||
with Dovecot quota-warning-triggered cleanup (`chatmail-quota-expire`).
|
||||
When a user reaches 90% of their mailbox quota
|
||||
Dovecot calls the new script which removes the largest and oldest messages
|
||||
until usage drops below 80%.
|
||||
The daily `chatmail-expire` timer now only handles deletion
|
||||
of inactive user mailboxes.
|
||||
|
||||
After upgrading, run the following once to clean up
|
||||
mailboxes that are already over quota::
|
||||
|
||||
/usr/local/lib/chatmaild/venv/bin/chatmail-quota-expire \
|
||||
400 /home/vmail/mail/YOURDOMAIN --sweep
|
||||
|
||||
## 1.9.0 2025-12-18
|
||||
|
||||
### Documentation
|
||||
|
||||
@@ -6,10 +6,7 @@ build-backend = "setuptools.build_meta"
|
||||
name = "chatmaild"
|
||||
version = "0.3"
|
||||
dependencies = [
|
||||
"aiosmtpd",
|
||||
"iniconfig",
|
||||
"deltachat-rpc-server",
|
||||
"deltachat-rpc-client",
|
||||
"filelock",
|
||||
"requests",
|
||||
"crypt-r >= 3.13.1 ; python_version >= '3.11'",
|
||||
@@ -24,8 +21,8 @@ where = ['src']
|
||||
[project.scripts]
|
||||
doveauth = "chatmaild.doveauth:main"
|
||||
chatmail-metadata = "chatmaild.metadata:main"
|
||||
chatmail-metrics = "chatmaild.metrics:main"
|
||||
chatmail-expire = "chatmaild.expire:main"
|
||||
chatmail-expire = "chatmaild.expire_inactive_users:main"
|
||||
chatmail-quota-expire = "chatmaild.quota_expire:main"
|
||||
chatmail-fsreport = "chatmaild.fsreport:main"
|
||||
lastlogin = "chatmaild.lastlogin:main"
|
||||
turnserver = "chatmaild.turnserver:main"
|
||||
@@ -71,6 +68,7 @@ commands =
|
||||
deps = pytest
|
||||
pdbpp
|
||||
pytest-localserver
|
||||
aiosmtpd
|
||||
execnet
|
||||
commands = pytest -v -rsXx {posargs}
|
||||
"""
|
||||
|
||||
@@ -25,8 +25,6 @@ class Config:
|
||||
self.max_user_send_burst_size = int(params.get("max_user_send_burst_size", 10))
|
||||
self.max_mailbox_size = params["max_mailbox_size"]
|
||||
self.max_message_size = int(params.get("max_message_size", "31457280"))
|
||||
self.delete_mails_after = params["delete_mails_after"]
|
||||
self.delete_large_after = params["delete_large_after"]
|
||||
self.delete_inactive_users_after = int(params["delete_inactive_users_after"])
|
||||
self.username_min_length = int(params["username_min_length"])
|
||||
self.username_max_length = int(params["username_max_length"])
|
||||
@@ -38,6 +36,9 @@ class Config:
|
||||
self.filtermail_smtp_port_incoming = int(
|
||||
params.get("filtermail_smtp_port_incoming", "10081")
|
||||
)
|
||||
self.filtermail_http_port_incoming = int(
|
||||
params.get("filtermail_http_port_incoming", "10082")
|
||||
)
|
||||
self.postfix_reinject_port = int(params.get("postfix_reinject_port", "10025"))
|
||||
self.postfix_reinject_port_incoming = int(
|
||||
params.get("postfix_reinject_port_incoming", "10026")
|
||||
@@ -92,6 +93,11 @@ class Config:
|
||||
# old unused option (except for first migration from sqlite to maildir store)
|
||||
self.passdb_path = Path(params.get("passdb_path", "/home/vmail/passdb.sqlite"))
|
||||
|
||||
@property
|
||||
def max_mailbox_size_mb(self):
|
||||
"""Return max_mailbox_size as an integer in megabytes."""
|
||||
return parse_size_mb(self.max_mailbox_size)
|
||||
|
||||
def _getbytefile(self):
|
||||
return open(self._inipath, "rb")
|
||||
|
||||
@@ -105,6 +111,16 @@ class Config:
|
||||
return User(maildir, addr, password_path, uid="vmail", gid="vmail")
|
||||
|
||||
|
||||
def parse_size_mb(limit):
|
||||
"""Parse a size string like ``500M`` or ``2G`` and return megabytes."""
|
||||
value = limit.strip().upper().rstrip("B")
|
||||
if value.endswith("G"):
|
||||
return int(value[:-1]) * 1024
|
||||
if value.endswith("M"):
|
||||
return int(value[:-1])
|
||||
return int(value)
|
||||
|
||||
|
||||
def write_initial_config(inipath, mail_domain, overrides):
|
||||
"""Write out default config file, using the specified config value overrides."""
|
||||
content = get_default_config_content(mail_domain, **overrides)
|
||||
|
||||
@@ -1,8 +1,11 @@
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
import filelock
|
||||
|
||||
try:
|
||||
import crypt_r
|
||||
except ImportError:
|
||||
@@ -13,6 +16,7 @@ from .dictproxy import DictProxy
|
||||
from .migrate_db import migrate_from_db_to_maildir
|
||||
|
||||
NOCREATE_FILE = "/etc/chatmail-nocreate"
|
||||
VALID_LOCALPART_RE = re.compile(r"^[a-z0-9._-]+$")
|
||||
|
||||
|
||||
def encrypt_password(password: str):
|
||||
@@ -52,6 +56,10 @@ def is_allowed_to_create(config: Config, user, cleartext_password) -> bool:
|
||||
)
|
||||
return False
|
||||
|
||||
if not VALID_LOCALPART_RE.match(localpart):
|
||||
logging.warning("localpart %r contains invalid characters", localpart)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
@@ -140,8 +148,13 @@ class AuthDictProxy(DictProxy):
|
||||
if not is_allowed_to_create(self.config, addr, cleartext_password):
|
||||
return
|
||||
|
||||
user.set_password(encrypt_password(cleartext_password))
|
||||
print(f"Created address: {addr}", file=sys.stderr)
|
||||
lock = filelock.FileLock(str(user.password_path) + ".lock", timeout=5)
|
||||
with lock:
|
||||
userdata = user.get_userdb_dict()
|
||||
if userdata:
|
||||
return userdata
|
||||
user.set_password(encrypt_password(cleartext_password))
|
||||
print(f"Created address: {addr}", file=sys.stderr)
|
||||
return user.get_userdb_dict()
|
||||
|
||||
|
||||
|
||||
@@ -115,11 +115,8 @@ class Expiry:
|
||||
cutoff_without_login = (
|
||||
self.now - int(self.config.delete_inactive_users_after) * 86400
|
||||
)
|
||||
cutoff_mails = self.now - int(self.config.delete_mails_after) * 86400
|
||||
cutoff_large_mails = self.now - int(self.config.delete_large_after) * 86400
|
||||
|
||||
self.all_mboxes += 1
|
||||
changed = False
|
||||
if mbox.last_login and mbox.last_login < cutoff_without_login:
|
||||
self.remove_mailbox(mbox.basedir)
|
||||
return
|
||||
@@ -131,25 +128,10 @@ class Expiry:
|
||||
print_info(f"checking mailbox {date.strftime('%b %d')} {mboxname}")
|
||||
else:
|
||||
print_info(f"checking mailbox (no last_login) {mboxname}")
|
||||
self.all_files += len(mbox.messages)
|
||||
for message in mbox.messages:
|
||||
if message.mtime < cutoff_mails:
|
||||
self.remove_file(message.path, mtime=message.mtime)
|
||||
elif message.size > 200000 and message.mtime < cutoff_large_mails:
|
||||
# we only remove noticed large files (not unnoticed ones in new/)
|
||||
parts = message.path.split("/")
|
||||
if len(parts) >= 2 and parts[-2] == "cur":
|
||||
self.remove_file(message.path, mtime=message.mtime)
|
||||
else:
|
||||
continue
|
||||
changed = True
|
||||
if changed:
|
||||
self.remove_file(f"{mbox.basedir}/maildirsize")
|
||||
|
||||
def get_summary(self):
|
||||
return (
|
||||
f"Removed {self.del_mboxes} out of {self.all_mboxes} mailboxes "
|
||||
f"and {self.del_files} out of {self.all_files} files in existing mailboxes "
|
||||
f"in {time.time() - self.start:2.2f} seconds"
|
||||
)
|
||||
|
||||
|
||||
@@ -23,12 +23,6 @@ max_mailbox_size = 500M
|
||||
# maximum message size for an e-mail in bytes
|
||||
max_message_size = 31457280
|
||||
|
||||
# days after which mails are unconditionally deleted
|
||||
delete_mails_after = 20
|
||||
|
||||
# days after which large messages (>200k) are unconditionally deleted
|
||||
delete_large_after = 7
|
||||
|
||||
# days after which users without a successful login are deleted (database and mails)
|
||||
delete_inactive_users_after = 90
|
||||
|
||||
|
||||
@@ -101,7 +101,11 @@ class MetadataDictProxy(DictProxy):
|
||||
# Handle `GETMETADATA "" /shared/vendor/deltachat/irohrelay`
|
||||
return f"O{self.iroh_relay}\n"
|
||||
elif keyname == "vendor/vendor.dovecot/pvt/server/vendor/deltachat/turn":
|
||||
res = turn_credentials()
|
||||
try:
|
||||
res = turn_credentials()
|
||||
except Exception:
|
||||
logging.exception("failed to get TURN credentials")
|
||||
return "N\n"
|
||||
port = 3478
|
||||
return f"O{self.turn_hostname}:{port}:{res}\n"
|
||||
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def main(vmail_dir=None):
|
||||
if vmail_dir is None:
|
||||
vmail_dir = sys.argv[1]
|
||||
|
||||
accounts = 0
|
||||
ci_accounts = 0
|
||||
|
||||
for path in Path(vmail_dir).iterdir():
|
||||
if not path.joinpath("cur").is_dir():
|
||||
continue
|
||||
accounts += 1
|
||||
if path.name[:3] in ("ci-", "ac_"):
|
||||
ci_accounts += 1
|
||||
|
||||
print("# HELP total number of accounts")
|
||||
print("# TYPE accounts gauge")
|
||||
print(f"accounts {accounts}")
|
||||
print("# HELP number of CI accounts")
|
||||
print("# TYPE ci_accounts gauge")
|
||||
print(f"ci_accounts {ci_accounts}")
|
||||
print("# HELP number of non-CI accounts")
|
||||
print("# TYPE nonci_accounts gauge")
|
||||
print(f"nonci_accounts {accounts - ci_accounts}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -2,8 +2,8 @@
|
||||
|
||||
"""CGI script for creating new accounts."""
|
||||
|
||||
import ipaddress
|
||||
import json
|
||||
import random
|
||||
import secrets
|
||||
import string
|
||||
from urllib.parse import quote
|
||||
@@ -15,13 +15,25 @@ ALPHANUMERIC = string.ascii_lowercase + string.digits
|
||||
ALPHANUMERIC_PUNCT = string.ascii_letters + string.digits + string.punctuation
|
||||
|
||||
|
||||
def wrap_ip(host):
|
||||
if host.startswith("[") and host.endswith("]"):
|
||||
return host
|
||||
try:
|
||||
ipaddress.ip_address(host)
|
||||
return f"[{host}]"
|
||||
except ValueError:
|
||||
return host
|
||||
|
||||
|
||||
def create_newemail_dict(config: Config):
|
||||
user = "".join(random.choices(ALPHANUMERIC, k=config.username_max_length))
|
||||
user = "".join(
|
||||
secrets.choice(ALPHANUMERIC) for _ in range(config.username_max_length)
|
||||
)
|
||||
password = "".join(
|
||||
secrets.choice(ALPHANUMERIC_PUNCT)
|
||||
for _ in range(config.password_min_length + 3)
|
||||
)
|
||||
return dict(email=f"{user}@{config.mail_domain}", password=f"{password}")
|
||||
return dict(email=f"{user}@{wrap_ip(config.mail_domain)}", password=f"{password}")
|
||||
|
||||
|
||||
def create_dclogin_url(email, password):
|
||||
|
||||
152
chatmaild/src/chatmaild/quota_expire.py
Normal file
152
chatmaild/src/chatmaild/quota_expire.py
Normal file
@@ -0,0 +1,152 @@
|
||||
"""
|
||||
Remove messages from a mailbox to meet a size target.
|
||||
|
||||
Dovecot calls this script when a user's quota is near its limit.
|
||||
Files are scored by ``size * age`` so that large, old messages
|
||||
are removed first.
|
||||
|
||||
Usage::
|
||||
|
||||
quota_expire <target_mb> <mailbox_path>
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from argparse import ArgumentParser
|
||||
from collections import namedtuple
|
||||
from stat import S_ISREG
|
||||
|
||||
FileEntry = namedtuple("FileEntry", ("path", "mtime", "size"))
|
||||
|
||||
|
||||
def _get_file_entry(path):
|
||||
try:
|
||||
st = os.stat(path)
|
||||
except FileNotFoundError:
|
||||
return None
|
||||
if not S_ISREG(st.st_mode):
|
||||
return None
|
||||
return FileEntry(path, st.st_mtime, st.st_size)
|
||||
|
||||
|
||||
def _listdir(path):
|
||||
try:
|
||||
return os.listdir(path)
|
||||
except FileNotFoundError:
|
||||
return []
|
||||
|
||||
|
||||
def scan_mailbox_messages(mailbox_dir):
|
||||
messages = []
|
||||
for sub in ("cur", "new", "tmp"):
|
||||
subdir = f"{mailbox_dir}/{sub}"
|
||||
for name in _listdir(subdir):
|
||||
entry = _get_file_entry(f"{subdir}/{name}")
|
||||
if entry is not None:
|
||||
messages.append(entry)
|
||||
return messages
|
||||
|
||||
|
||||
def _remove_stale_caches(mailbox_dir):
|
||||
for name in ("maildirsize", "dovecot.index.cache"):
|
||||
try:
|
||||
os.unlink(f"{mailbox_dir}/{name}")
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
|
||||
def expire_to_target(mailbox_dir, target_bytes, now=None):
    """Remove highest-scored files until total size <= *target_bytes*.

    Files are scored by ``size * age`` so large, old messages go first.
    Returns the list of removed file paths.
    """
    if now is None:
        now = time.time()

    entries = scan_mailbox_messages(mailbox_dir)
    remaining = sum(entry.size for entry in entries)
    if remaining <= target_bytes:
        # Already within quota target: touch nothing.
        return []

    removed_paths = []
    by_score = sorted(entries, key=lambda e: e.size * (now - e.mtime), reverse=True)
    for entry in by_score:
        if remaining <= target_bytes:
            break
        try:
            os.unlink(entry.path)
        except FileNotFoundError:
            # Deleted concurrently -- do not count it as removed by us.
            continue
        removed_paths.append(entry.path)
        remaining -= entry.size

    if removed_paths:
        # Force dovecot to recompute quota/index data after the purge.
        _remove_stale_caches(mailbox_dir)

    return removed_paths
|
||||
|
||||
|
||||
def main(args=None):
    """Remove mailbox messages to stay within a megabyte target."""
    parser = ArgumentParser(description=main.__doc__)
    parser.add_argument(
        "target_mb", type=int, help="target mailbox size in megabytes"
    )
    parser.add_argument(
        "mailbox_path",
        help="path to a user mailbox, or with --sweep the mailboxes directory",
    )
    parser.add_argument(
        "--sweep", action="store_true", help="sweep all mailboxes under mailbox_path"
    )
    opts = parser.parse_args(args)

    limit = opts.target_mb * 1024 * 1024
    if opts.sweep:
        # Sweep mode: mailbox_path is the parent directory of many mailboxes.
        return _sweep(opts.mailbox_path, limit)

    removed = expire_to_target(opts.mailbox_path, limit)
    if removed:
        print(
            f"removed {len(removed)} file(s) from {opts.mailbox_path}"
            f" to reach {opts.target_mb} MB target",
            file=sys.stderr,
        )
    return 0
|
||||
|
||||
|
||||
def _sweep(mailboxes_dir, target_bytes):
    """Run expire_to_target over every mailbox under *mailboxes_dir*.

    Returns 0 on success, 1 if the directory is missing.
    """
    try:
        candidates = os.listdir(mailboxes_dir)
    except FileNotFoundError:
        print(f"directory not found: {mailboxes_dir}", file=sys.stderr)
        return 1
    for name in sorted(candidates):
        # Mailbox directories are named after e-mail addresses.
        if "@" not in name:
            continue
        removed = expire_to_target(f"{mailboxes_dir}/{name}", target_bytes)
        if removed:
            print(f"removed {len(removed)} file(s) from {name}", file=sys.stderr)
    return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
@@ -34,8 +34,6 @@ def test_read_config_testrun(make_config):
|
||||
assert config.postfix_reinject_port == 10025
|
||||
assert config.max_user_send_per_minute == 60
|
||||
assert config.max_mailbox_size == "500M"
|
||||
assert config.delete_mails_after == "20"
|
||||
assert config.delete_large_after == "7"
|
||||
assert config.username_min_length == 9
|
||||
assert config.username_max_length == 9
|
||||
assert config.password_min_length == 9
|
||||
|
||||
@@ -120,6 +120,60 @@ def test_handle_dovecot_protocol_iterate(gencreds, example_config):
|
||||
assert not lines[2]
|
||||
|
||||
|
||||
def test_invalid_localpart_characters(make_config):
|
||||
"""Test that is_allowed_to_create rejects localparts with invalid characters."""
|
||||
config = make_config("chat.example.org", {"username_min_length": "3"})
|
||||
password = "zequ0Aimuchoodaechik"
|
||||
domain = config.mail_domain
|
||||
|
||||
# valid localparts
|
||||
assert is_allowed_to_create(config, f"abc123@{domain}", password)
|
||||
assert is_allowed_to_create(config, f"a.b-c_d@{domain}", password)
|
||||
|
||||
# uppercase rejected
|
||||
assert not is_allowed_to_create(config, f"Abc123@{domain}", password)
|
||||
assert not is_allowed_to_create(config, f"ABCDEFG@{domain}", password)
|
||||
|
||||
# spaces and special chars rejected
|
||||
assert not is_allowed_to_create(config, f"a b cde@{domain}", password)
|
||||
assert not is_allowed_to_create(config, f"abc+def@{domain}", password)
|
||||
assert not is_allowed_to_create(config, f"abc!def@{domain}", password)
|
||||
assert not is_allowed_to_create(config, f"ab@cdef@{domain}", password)
|
||||
assert not is_allowed_to_create(config, f"abc/def@{domain}", password)
|
||||
assert not is_allowed_to_create(config, f"abc\\def@{domain}", password)
|
||||
|
||||
|
||||
def test_concurrent_creation_same_account(dictproxy):
|
||||
"""Test that concurrent creation of the same account doesn't corrupt password."""
|
||||
addr = "racetest1@chat.example.org"
|
||||
password = "zequ0Aimuchoodaechik"
|
||||
num_threads = 10
|
||||
results = queue.Queue()
|
||||
|
||||
def create():
|
||||
try:
|
||||
res = dictproxy.lookup_passdb(addr, password)
|
||||
results.put(("ok", res))
|
||||
except Exception:
|
||||
results.put(("err", traceback.format_exc()))
|
||||
|
||||
threads = [threading.Thread(target=create, daemon=True) for _ in range(num_threads)]
|
||||
for t in threads:
|
||||
t.start()
|
||||
for t in threads:
|
||||
t.join(timeout=10)
|
||||
|
||||
passwords_seen = set()
|
||||
for _ in range(num_threads):
|
||||
status, res = results.get()
|
||||
if status == "err":
|
||||
pytest.fail(f"concurrent creation failed\n{res}")
|
||||
passwords_seen.add(res["password"])
|
||||
|
||||
# all threads must see the same password hash
|
||||
assert len(passwords_seen) == 1
|
||||
|
||||
|
||||
def test_50_concurrent_lookups_different_accounts(gencreds, dictproxy):
|
||||
num_threads = 50
|
||||
req_per_thread = 5
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import os
|
||||
import random
|
||||
from datetime import datetime
|
||||
from fnmatch import fnmatch
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
@@ -112,40 +111,48 @@ def test_report(mbox1, example_config):
|
||||
report_main(args)
|
||||
|
||||
|
||||
def test_report_mdir_filters_by_path(mbox1, example_config):
|
||||
"""Test that Report with mdir='cur' only counts messages in cur/ subdirectory."""
|
||||
from chatmaild.fsreport import Report
|
||||
|
||||
now = datetime.utcnow().timestamp()
|
||||
|
||||
# Set password mtime to old enough so min_login_age check passes
|
||||
password = Path(mbox1.basedir).joinpath("password")
|
||||
old_time = now - 86400 * 10 # 10 days ago
|
||||
os.utime(password, (old_time, old_time))
|
||||
|
||||
# Reload mailbox with updated mtime
|
||||
from chatmaild.expire import MailboxStat
|
||||
|
||||
mbox = MailboxStat(mbox1.basedir)
|
||||
|
||||
# Report without mdir — should count all messages
|
||||
rep_all = Report(now=now, min_login_age=1, mdir=None)
|
||||
rep_all.process_mailbox_stat(mbox)
|
||||
total_all = rep_all.message_buckets[0]
|
||||
|
||||
# Report with mdir='cur' — should only count cur/ messages
|
||||
rep_cur = Report(now=now, min_login_age=1, mdir="cur")
|
||||
rep_cur.process_mailbox_stat(mbox)
|
||||
total_cur = rep_cur.message_buckets[0]
|
||||
|
||||
# Report with mdir='new' — should only count new/ messages
|
||||
rep_new = Report(now=now, min_login_age=1, mdir="new")
|
||||
rep_new.process_mailbox_stat(mbox)
|
||||
total_new = rep_new.message_buckets[0]
|
||||
|
||||
# cur has 500-byte msg, new has 600-byte msg (from fill_mbox)
|
||||
assert total_cur == 500
|
||||
assert total_new == 600
|
||||
assert total_all == 500 + 600
|
||||
|
||||
|
||||
def test_expiry_cli_basic(example_config, mbox1):
|
||||
args = (str(example_config._inipath),)
|
||||
expiry_main(args)
|
||||
|
||||
|
||||
def test_expiry_cli_old_files(capsys, example_config, mbox1):
|
||||
relpaths_old = ["cur/msg_old1", "cur/msg_old1"]
|
||||
cutoff_days = int(example_config.delete_mails_after) + 1
|
||||
create_new_messages(mbox1.basedir, relpaths_old, size=1000, days=cutoff_days)
|
||||
|
||||
relpaths_large = ["cur/msg_old_large1", "new/msg_old_large2"]
|
||||
cutoff_days = int(example_config.delete_large_after) + 1
|
||||
create_new_messages(
|
||||
mbox1.basedir, relpaths_large, size=1000 * 300, days=cutoff_days
|
||||
)
|
||||
|
||||
create_new_messages(mbox1.basedir, ["cur/shouldstay"], size=1000 * 300, days=1)
|
||||
|
||||
args = str(example_config._inipath), "--remove", "-v"
|
||||
expiry_main(args)
|
||||
out, err = capsys.readouterr()
|
||||
|
||||
allpaths = relpaths_old + relpaths_large + ["maildirsize"]
|
||||
for path in allpaths:
|
||||
for line in err.split("\n"):
|
||||
if fnmatch(line, f"removing*{path}"):
|
||||
break
|
||||
else:
|
||||
if path != "new/msg_old_large2":
|
||||
pytest.fail(f"failed to remove {path}\n{err}")
|
||||
|
||||
assert "shouldstay" not in err
|
||||
|
||||
|
||||
def test_get_file_entry(tmp_path):
|
||||
assert get_file_entry(str(tmp_path.joinpath("123123"))) is None
|
||||
p = tmp_path.joinpath("x")
|
||||
|
||||
@@ -314,6 +314,51 @@ def test_persistent_queue_items(tmp_path, testaddr, token):
|
||||
assert not queue_item < item2 and not item2 < queue_item
|
||||
|
||||
|
||||
def test_turn_credentials_exception_returns_N(notifier, metadata, monkeypatch):
|
||||
"""Test that turn_credentials() failure returns N\\n instead of crashing."""
|
||||
import chatmaild.metadata
|
||||
|
||||
dictproxy = MetadataDictProxy(
|
||||
notifier=notifier,
|
||||
metadata=metadata,
|
||||
turn_hostname="turn.example.org",
|
||||
)
|
||||
|
||||
def mock_turn_credentials():
|
||||
raise ConnectionRefusedError("socket not available")
|
||||
|
||||
monkeypatch.setattr(chatmaild.metadata, "turn_credentials", mock_turn_credentials)
|
||||
|
||||
transactions = {}
|
||||
res = dictproxy.handle_dovecot_request(
|
||||
"Lshared/0123/vendor/vendor.dovecot/pvt/server/vendor/deltachat/turn"
|
||||
"\tuser@example.org",
|
||||
transactions,
|
||||
)
|
||||
assert res == "N\n"
|
||||
|
||||
|
||||
def test_turn_credentials_success(notifier, metadata, monkeypatch):
|
||||
"""Test that valid turn_credentials() returns TURN URI."""
|
||||
import chatmaild.metadata
|
||||
|
||||
dictproxy = MetadataDictProxy(
|
||||
notifier=notifier,
|
||||
metadata=metadata,
|
||||
turn_hostname="turn.example.org",
|
||||
)
|
||||
|
||||
monkeypatch.setattr(chatmaild.metadata, "turn_credentials", lambda: "user:pass")
|
||||
|
||||
transactions = {}
|
||||
res = dictproxy.handle_dovecot_request(
|
||||
"Lshared/0123/vendor/vendor.dovecot/pvt/server/vendor/deltachat/turn"
|
||||
"\tuser@example.org",
|
||||
transactions,
|
||||
)
|
||||
assert res == "Oturn.example.org:3478:user:pass\n"
|
||||
|
||||
|
||||
def test_iroh_relay(dictproxy):
|
||||
rfile = io.BytesIO(
|
||||
b"\n".join(
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
from chatmaild.metrics import main
|
||||
|
||||
|
||||
def test_main(tmp_path, capsys):
|
||||
paths = []
|
||||
for x in ("ci-asllkj", "ac_12l3kj", "qweqwe", "ci-l1k2j31l2k3"):
|
||||
p = tmp_path.joinpath(x)
|
||||
p.mkdir()
|
||||
p.joinpath("cur").mkdir()
|
||||
paths.append(p)
|
||||
|
||||
tmp_path.joinpath("nomailbox").mkdir()
|
||||
|
||||
main(tmp_path)
|
||||
out, _ = capsys.readouterr()
|
||||
d = {}
|
||||
for line in out.split("\n"):
|
||||
if line.strip() and not line.startswith("#"):
|
||||
name, num = line.split()
|
||||
d[name] = int(num)
|
||||
|
||||
assert d["accounts"] == 4
|
||||
assert d["ci_accounts"] == 3
|
||||
assert d["nonci_accounts"] == 1
|
||||
@@ -19,6 +19,12 @@ def test_create_newemail_dict(example_config):
|
||||
assert ac1["password"] != ac2["password"]
|
||||
|
||||
|
||||
def test_create_newemail_dict_ip(make_config):
|
||||
config = make_config("1.2.3.4")
|
||||
ac = create_newemail_dict(config)
|
||||
assert ac["email"].endswith("@[1.2.3.4]")
|
||||
|
||||
|
||||
def test_create_dclogin_url():
|
||||
url = create_dclogin_url("user@example.org", "p@ss w+rd")
|
||||
assert url.startswith("dclogin:")
|
||||
|
||||
91
chatmaild/src/chatmaild/tests/test_quota_expire.py
Normal file
91
chatmaild/src/chatmaild/tests/test_quota_expire.py
Normal file
@@ -0,0 +1,91 @@
|
||||
import os
|
||||
import time
|
||||
|
||||
from chatmaild.quota_expire import expire_to_target, scan_mailbox_messages
|
||||
|
||||
MB = 1024 * 1024
|
||||
|
||||
|
||||
def _create_message(basedir, relpath, size, days_old=0):
|
||||
path = basedir / relpath
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
path.write_bytes(b"x" * size)
|
||||
mtime = time.time() - days_old * 86400
|
||||
os.utime(path, (mtime, mtime))
|
||||
return path
|
||||
|
||||
|
||||
def test_scan_cur_new_tmp(tmp_path):
    """All three maildir subdirectories (cur/new/tmp) are scanned."""
    for relpath, nbytes in (("cur/msg1", 100), ("new/msg2", 200), ("tmp/msg3", 300)):
        _create_message(tmp_path, relpath, nbytes)
    found = scan_mailbox_messages(str(tmp_path))
    assert len(found) == 3
    assert sorted(entry.size for entry in found) == [100, 200, 300]
|
||||
|
||||
|
||||
def test_scan_ignores_subfolders(tmp_path):
    """Messages inside nested folders (e.g. .DeltaChat) are not picked up."""
    _create_message(tmp_path, "cur/a", 10)
    _create_message(tmp_path, ".DeltaChat/cur/b", 20)
    found = scan_mailbox_messages(str(tmp_path))
    assert len(found) == 1
|
||||
|
||||
|
||||
def test_scan_empty(tmp_path):
    """Empty and missing mailboxes both scan to an empty list."""
    for candidate in (tmp_path, tmp_path / "nope"):
        assert scan_mailbox_messages(str(candidate)) == []
|
||||
|
||||
|
||||
def test_noop_under_limit(tmp_path):
    """Nothing is removed while the mailbox is below the target size."""
    msg = _create_message(tmp_path, "cur/msg1", MB)
    assert expire_to_target(str(tmp_path), 2 * MB) == []
    assert msg.exists()
|
||||
|
||||
|
||||
def test_removes_to_target(tmp_path):
    """Exactly enough messages are deleted to reach the size target."""
    now = time.time()
    for i in range(15):
        _create_message(tmp_path, f"cur/msg{i:02d}", MB, days_old=i + 1)
    gone = expire_to_target(str(tmp_path), 10 * MB, now=now)
    # 15 MB total, 10 MB target -> 5 files removed, 10 remain.
    assert len(gone) == 5
    assert len(scan_mailbox_messages(str(tmp_path))) == 10
|
||||
|
||||
|
||||
def test_scoring_prefers_large_old(tmp_path):
    """A large old message outranks a small new one in the removal order."""
    now = time.time()
    _create_message(tmp_path, "cur/large_old", 2 * MB, days_old=30)
    _create_message(tmp_path, "cur/small_new", MB, days_old=1)
    gone = expire_to_target(str(tmp_path), 2 * MB, now=now)
    assert len(gone) == 1
    assert gone[0].endswith("large_old")
|
||||
|
||||
|
||||
def test_scoring_large_new_beats_small_old(tmp_path):
    """Score is size*age: 10 MB * 1 day beats 1 MB * 5 days."""
    now = time.time()
    _create_message(tmp_path, "cur/big_new", 10 * MB, days_old=1)
    _create_message(tmp_path, "cur/small_old", MB, days_old=5)
    gone = expire_to_target(str(tmp_path), 10 * MB, now=now)
    assert len(gone) == 1
    assert gone[0].endswith("big_new")
|
||||
|
||||
|
||||
def test_exact_limit(tmp_path):
    """A mailbox exactly at the target is left alone (<=, not <)."""
    _create_message(tmp_path, "cur/msg1", 5 * MB)
    assert expire_to_target(str(tmp_path), 5 * MB) == []
|
||||
|
||||
|
||||
def test_removes_stale_caches(tmp_path):
    """After removals, dovecot cache files are purged for recalculation."""
    _create_message(tmp_path, "cur/msg1", 2 * MB, days_old=5)
    for cache in ("maildirsize", "dovecot.index.cache"):
        (tmp_path / cache).write_text("x")
    expire_to_target(str(tmp_path), MB)
    for cache in ("maildirsize", "dovecot.index.cache"):
        assert not (tmp_path / cache).exists()
|
||||
|
||||
|
||||
def test_no_cache_removal_when_under_limit(tmp_path):
    """Cache files survive when nothing was removed."""
    _create_message(tmp_path, "cur/msg1", MB)
    (tmp_path / "maildirsize").write_text("x")
    expire_to_target(str(tmp_path), 2 * MB)
    assert (tmp_path / "maildirsize").exists()
|
||||
73
chatmaild/src/chatmaild/tests/test_turnserver.py
Normal file
73
chatmaild/src/chatmaild/tests/test_turnserver.py
Normal file
@@ -0,0 +1,73 @@
|
||||
import socket
|
||||
import threading
|
||||
import time
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from chatmaild.turnserver import turn_credentials
|
||||
|
||||
# The production socket path that turn_credentials() connects to.
SOCKET_PATH = "/run/chatmail-turn/turn.socket"


@pytest.fixture
def turn_socket(tmp_path):
    """Yield (path, server) for a listening Unix socket under tmp_path."""
    listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    bind_path = str(tmp_path / "turn.socket")
    listener.bind(bind_path)
    listener.listen(1)
    yield bind_path, listener
    listener.close()
|
||||
|
||||
|
||||
def _call_turn_credentials(sock_path):
    """Run turn_credentials() with connects to SOCKET_PATH redirected to *sock_path*."""
    real_connect = socket.socket.connect

    def redirecting_connect(self, address):
        # Only the hardcoded production path is rerouted; anything else
        # connects normally.
        target = sock_path if address == SOCKET_PATH else address
        return real_connect(self, target)

    with patch.object(socket.socket, "connect", redirecting_connect):
        return turn_credentials()
|
||||
|
||||
|
||||
def test_turn_credentials_timeout(turn_socket):
    """A server that accepts but never answers must raise socket.timeout."""
    sock_path, server = turn_socket

    def accept_and_hang():
        conn, _ = server.accept()
        # Sleep well past the client's 5s timeout; daemon thread dies with us.
        time.sleep(30)
        conn.close()

    threading.Thread(target=accept_and_hang, daemon=True).start()

    with pytest.raises(socket.timeout):
        _call_turn_credentials(sock_path)
|
||||
|
||||
|
||||
def test_turn_credentials_connection_refused(tmp_path):
    """A missing socket file raises ConnectionRefusedError or FileNotFoundError."""
    with pytest.raises((ConnectionRefusedError, FileNotFoundError)):
        _call_turn_credentials(str(tmp_path / "nonexistent.socket"))
|
||||
|
||||
|
||||
def test_turn_credentials_success(turn_socket):
    """Server sends one line of credentials; the stripped string is returned."""
    sock_path, server = turn_socket

    def respond():
        conn, _ = server.accept()
        conn.sendall(b"testuser:testpass\n")
        conn.close()

    threading.Thread(target=respond, daemon=True).start()

    assert _call_turn_credentials(sock_path) == "testuser:testpass"
|
||||
@@ -4,6 +4,7 @@ import socket
|
||||
|
||||
def turn_credentials() -> str:
    """Read one line of TURN credentials from the local chatmail-turn socket."""
    with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as client_socket:
        # Fail fast if the turn daemon hangs instead of answering.
        client_socket.settimeout(5)
        client_socket.connect("/run/chatmail-turn/turn.socket")
        reader = client_socket.makefile("rb")
        with reader:
            return reader.readline().decode("utf-8").strip()
|
||||
|
||||
@@ -10,7 +10,6 @@ dependencies = [
|
||||
"pillow",
|
||||
"qrcode",
|
||||
"markdown",
|
||||
"pytest",
|
||||
"setuptools>=68",
|
||||
"termcolor",
|
||||
"build",
|
||||
@@ -21,6 +20,7 @@ dependencies = [
|
||||
"execnet",
|
||||
"imap_tools",
|
||||
"deltachat-rpc-client",
|
||||
"deltachat-rpc-server",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -67,7 +67,7 @@ class AcmetoolDeployer(Deployer):
|
||||
)
|
||||
files.template(
|
||||
src=importlib.resources.files(__package__).joinpath("desired.yaml.j2"),
|
||||
dest=f"/var/lib/acme/desired/{self.domains[0]}", # 0 is mailhost TLD
|
||||
dest=f"/var/lib/acme/desired/{self.domains[0]}", # 0 is mailhost TLD
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
import importlib.resources
|
||||
import io
|
||||
import os
|
||||
from contextlib import contextmanager
|
||||
|
||||
from pyinfra import host
|
||||
from pyinfra.facts.server import Command
|
||||
from pyinfra.operations import files, server, systemd
|
||||
|
||||
|
||||
@@ -10,6 +13,39 @@ def has_systemd():
|
||||
return os.path.isdir("/run/systemd/system")
|
||||
|
||||
|
||||
def is_in_container() -> bool:
    """Return True if running inside a container (Docker, LXC, etc.)."""
    # The command prints "yes" only when systemd-detect-virt reports a
    # container; "|| true" keeps the fact command from failing when the
    # tool is absent, yielding empty output (-> False).
    answer = host.get_fact(
        Command,
        "systemd-detect-virt --container --quiet 2>/dev/null && echo yes || true",
    )
    return answer == "yes"
|
||||
|
||||
|
||||
@contextmanager
def blocked_service_startup():
    """Prevent services from auto-starting during package installation.

    Installs a ``/usr/sbin/policy-rc.d`` that exits 101, blocking any
    service from being started by the package manager. This avoids bind
    conflicts and CPU/RAM spikes during initial setup. The file is removed
    when the context exits.
    """
    # For documentation about policy-rc.d, see:
    # https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
    files.put(
        src=get_resource("policy-rc.d"),
        dest="/usr/sbin/policy-rc.d",
        user="root",
        group="root",
        mode="755",
    )
    # Caller performs its package installs while startup is blocked.
    yield
    # Re-enable normal service startup by deleting the policy file.
    files.file("/usr/sbin/policy-rc.d", present=False)
|
||||
|
||||
|
||||
def get_resource(arg, pkg=__package__):
    """Return a Traversable for resource *arg* inside package *pkg*."""
    package_root = importlib.resources.files(pkg)
    return package_root.joinpath(arg)
|
||||
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
;
|
||||
; Required DNS entries for chatmail servers
|
||||
;
|
||||
{% if A %}
|
||||
{{ mail_domain }}. A {{ A }}
|
||||
{% endif %}
|
||||
{% if AAAA %}
|
||||
{{ mail_domain }}. AAAA {{ AAAA }}
|
||||
{% endif %}
|
||||
{{ mail_domain }}. MX 10 {{ mail_domain }}.
|
||||
{% if strict_tls %}
|
||||
_mta-sts.{{ mail_domain }}. TXT "v=STSv1; id={{ sts_id }}"
|
||||
mta-sts.{{ mail_domain }}. CNAME {{ mail_domain }}.
|
||||
{% endif %}
|
||||
www.{{ mail_domain }}. CNAME {{ mail_domain }}.
|
||||
{{ dkim_entry }}
|
||||
|
||||
;
|
||||
; Recommended DNS entries for interoperability and security-hardening
|
||||
;
|
||||
{{ mail_domain }}. TXT "v=spf1 a ~all"
|
||||
_dmarc.{{ mail_domain }}. TXT "v=DMARC1;p=reject;adkim=s;aspf=s"
|
||||
|
||||
{% if acme_account_url %}
|
||||
{{ mail_domain }}. CAA 0 issue "letsencrypt.org;accounturi={{ acme_account_url }}"
|
||||
{% endif %}
|
||||
_adsp._domainkey.{{ mail_domain }}. TXT "dkim=discardable"
|
||||
|
||||
_submission._tcp.{{ mail_domain }}. SRV 0 1 587 {{ mail_domain }}.
|
||||
_submissions._tcp.{{ mail_domain }}. SRV 0 1 465 {{ mail_domain }}.
|
||||
_imap._tcp.{{ mail_domain }}. SRV 0 1 143 {{ mail_domain }}.
|
||||
_imaps._tcp.{{ mail_domain }}. SRV 0 1 993 {{ mail_domain }}.
|
||||
@@ -116,24 +116,18 @@ def run_cmd(args, out):
|
||||
return 1
|
||||
|
||||
try:
|
||||
retcode = out.check_call(cmd, env=env)
|
||||
out.check_call(cmd, env=env)
|
||||
if args.website_only:
|
||||
if retcode == 0:
|
||||
out.green("Website deployment completed.")
|
||||
else:
|
||||
out.red("Website deployment failed.")
|
||||
elif retcode == 0:
|
||||
out.green("Deploy completed, call `cmdeploy dns` next.")
|
||||
out.green("Website deployment completed.")
|
||||
elif not args.dns_check_disabled and strict_tls and not remote_data["acme_account_url"]:
|
||||
out.red("Deploy completed but letsencrypt not configured")
|
||||
out.red("Run 'cmdeploy run' again")
|
||||
retcode = 0
|
||||
else:
|
||||
out.red("Deploy failed")
|
||||
out.green("Deploy completed, call `cmdeploy dns` next.")
|
||||
return 0
|
||||
except subprocess.CalledProcessError:
|
||||
out.red("Deploy failed")
|
||||
retcode = 1
|
||||
return retcode
|
||||
return 1
|
||||
|
||||
|
||||
def dns_cmd_options(parser):
|
||||
@@ -213,6 +207,7 @@ def test_cmd(args, out):
|
||||
"""Run local and online tests for chatmail deployment."""
|
||||
|
||||
env = os.environ.copy()
|
||||
env["CHATMAIL_INI"] = str(args.inipath.absolute())
|
||||
if args.ssh_host:
|
||||
env["CHATMAIL_SSH"] = args.ssh_host
|
||||
|
||||
|
||||
@@ -2,11 +2,10 @@
|
||||
Chat Mail pyinfra deploy.
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
from io import StringIO
|
||||
from io import BytesIO, StringIO
|
||||
from pathlib import Path
|
||||
|
||||
from chatmaild.config import read_config
|
||||
@@ -24,9 +23,11 @@ from .basedeploy import (
|
||||
Deployer,
|
||||
Deployment,
|
||||
activate_remote_units,
|
||||
blocked_service_startup,
|
||||
configure_remote_units,
|
||||
get_resource,
|
||||
has_systemd,
|
||||
is_in_container,
|
||||
)
|
||||
from .dovecot.deployer import DovecotDeployer
|
||||
from .external.deployer import ExternalTlsDeployer
|
||||
@@ -123,7 +124,6 @@ def _install_remote_venv_with_chatmaild() -> None:
|
||||
|
||||
def _configure_remote_venv_with_chatmaild(config) -> None:
|
||||
remote_base_dir = "/usr/local/lib/chatmaild"
|
||||
remote_venv_dir = f"{remote_base_dir}/venv"
|
||||
remote_chatmail_inipath = f"{remote_base_dir}/chatmail.ini"
|
||||
root_owned = dict(user="root", group="root", mode="644")
|
||||
|
||||
@@ -134,16 +134,13 @@ def _configure_remote_venv_with_chatmaild(config) -> None:
|
||||
**root_owned,
|
||||
)
|
||||
|
||||
files.template(
|
||||
src=get_resource("metrics.cron.j2"),
|
||||
dest="/etc/cron.d/chatmail-metrics",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
config={
|
||||
"mailboxes_dir": config.mailboxes_dir,
|
||||
"execpath": f"{remote_venv_dir}/bin/chatmail-metrics",
|
||||
},
|
||||
files.file(
|
||||
path="/etc/cron.d/chatmail-metrics",
|
||||
present=False,
|
||||
)
|
||||
files.file(
|
||||
path="/var/www/html/metrics",
|
||||
present=False,
|
||||
)
|
||||
|
||||
|
||||
@@ -153,33 +150,16 @@ class UnboundDeployer(Deployer):
|
||||
self.need_restart = False
|
||||
|
||||
def install(self):
|
||||
# Run local DNS resolver `unbound`.
|
||||
# `resolvconf` takes care of setting up /etc/resolv.conf
|
||||
# to use 127.0.0.1 as the resolver.
|
||||
# Run local DNS resolver `unbound`. `resolvconf` takes care of
|
||||
# setting up /etc/resolv.conf to use 127.0.0.1 as the resolver.
|
||||
|
||||
#
|
||||
# On an IPv4-only system, if unbound is started but not
|
||||
# configured, it causes subsequent steps to fail to resolve hosts.
|
||||
# Here, we use policy-rc.d to prevent unbound from starting up
|
||||
# on initial install. Later, we will configure it and start it.
|
||||
#
|
||||
# For documentation about policy-rc.d, see:
|
||||
# https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
|
||||
#
|
||||
files.put(
|
||||
src=get_resource("policy-rc.d"),
|
||||
dest="/usr/sbin/policy-rc.d",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="755",
|
||||
)
|
||||
|
||||
apt.packages(
|
||||
name="Install unbound",
|
||||
packages=["unbound", "unbound-anchor", "dnsutils"],
|
||||
)
|
||||
|
||||
files.file("/usr/sbin/policy-rc.d", present=False)
|
||||
# On an IPv4-only system, if unbound is started but not configured,
|
||||
# it causes subsequent steps to fail to resolve hosts.
|
||||
with blocked_service_startup():
|
||||
apt.packages(
|
||||
name="Install unbound",
|
||||
packages=["unbound", "unbound-anchor", "dnsutils", "resolvconf"],
|
||||
)
|
||||
|
||||
def configure(self):
|
||||
server.shell(
|
||||
@@ -271,6 +251,9 @@ class WebsiteDeployer(Deployer):
|
||||
# if www_folder is a hugo page, build it
|
||||
if build_dir:
|
||||
www_path = build_webpages(src_dir, build_dir, self.config)
|
||||
if www_path is None:
|
||||
logger.warning("Web page build failed, skipping website deployment")
|
||||
return
|
||||
# if it is not a hugo page, upload it as is
|
||||
files.rsync(
|
||||
f"{www_path}/", "/var/www/html", flags=["-avz", "--chown=www-data"]
|
||||
@@ -337,12 +320,12 @@ class TurnDeployer(Deployer):
|
||||
def install(self):
|
||||
(url, sha256sum) = {
|
||||
"x86_64": (
|
||||
"https://github.com/chatmail/chatmail-turn/releases/download/v0.3/chatmail-turn-x86_64-linux",
|
||||
"841e527c15fdc2940b0469e206188ea8f0af48533be12ecb8098520f813d41e4",
|
||||
"https://github.com/chatmail/chatmail-turn/releases/download/v0.4/chatmail-turn-x86_64-linux",
|
||||
"1ec1f5c50122165e858a5a91bcba9037a28aa8cb8b64b8db570aa457c6141a8a",
|
||||
),
|
||||
"aarch64": (
|
||||
"https://github.com/chatmail/chatmail-turn/releases/download/v0.3/chatmail-turn-aarch64-linux",
|
||||
"a5fc2d06d937b56a34e098d2cd72a82d3e89967518d159bf246dc69b65e81b42",
|
||||
"https://github.com/chatmail/chatmail-turn/releases/download/v0.4/chatmail-turn-aarch64-linux",
|
||||
"0fb3e792419494e21ecad536464929dba706bb2c88884ed8f1788141d26fc756",
|
||||
),
|
||||
}[host.get_fact(facts.server.Arch)]
|
||||
|
||||
@@ -475,10 +458,19 @@ class ChatmailDeployer(Deployer):
|
||||
("iroh", None, None),
|
||||
]
|
||||
|
||||
def __init__(self, mail_domain):
|
||||
self.mail_domain = mail_domain
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.mail_domain = config.mail_domain
|
||||
|
||||
def install(self):
|
||||
files.put(
|
||||
name="Disable installing recommended packages globally",
|
||||
src=BytesIO(b'APT::Install-Recommends "false";\n'),
|
||||
dest="/etc/apt/apt.conf.d/00InstallRecommends",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
apt.update(name="apt update", cache_time=24 * 3600)
|
||||
apt.upgrade(name="upgrade apt packages", auto_remove=True)
|
||||
|
||||
@@ -491,12 +483,18 @@ class ChatmailDeployer(Deployer):
|
||||
name="Install rsync",
|
||||
packages=["rsync"],
|
||||
)
|
||||
apt.packages(
|
||||
name="Ensure cron is installed",
|
||||
packages=["cron"],
|
||||
)
|
||||
|
||||
def configure(self):
|
||||
# metadata crashes if the mailboxes dir does not exist
|
||||
files.directory(
|
||||
name="Ensure vmail mailbox directory exists",
|
||||
path=str(self.config.mailboxes_dir),
|
||||
user="vmail",
|
||||
group="vmail",
|
||||
mode="700",
|
||||
present=True,
|
||||
)
|
||||
|
||||
# This file is used by auth proxy.
|
||||
# https://wiki.debian.org/EtcMailName
|
||||
server.shell(
|
||||
@@ -586,7 +584,7 @@ def deploy_chatmail(config_path: Path, disable_mail: bool, website_only: bool) -
|
||||
Out().red(f"Deploy failed: mtail_address {config.mtail_address} is not available (VPN up?).\n")
|
||||
exit(1)
|
||||
|
||||
if not os.environ.get("CHATMAIL_NOPORTCHECK"):
|
||||
if not is_in_container():
|
||||
port_services = [
|
||||
(["master", "smtpd"], 25),
|
||||
("unbound", 53),
|
||||
@@ -626,7 +624,7 @@ def deploy_chatmail(config_path: Path, disable_mail: bool, website_only: bool) -
|
||||
tls_deployer = get_tls_deployer(config, mail_domain)
|
||||
|
||||
all_deployers = [
|
||||
ChatmailDeployer(mail_domain),
|
||||
ChatmailDeployer(config),
|
||||
LegacyRemoveDeployer(),
|
||||
FiltermailDeployer(),
|
||||
JournaldDeployer(),
|
||||
|
||||
@@ -1,11 +1,22 @@
|
||||
import datetime
|
||||
import importlib
|
||||
|
||||
from jinja2 import Template
|
||||
|
||||
from . import remote
|
||||
|
||||
|
||||
def parse_zone_records(text):
    """Yield ``(name, ttl, rtype, rdata)`` from standard BIND-format text.

    Blank lines and ``;`` comment lines are skipped; the trailing dot is
    stripped from names and record types are upper-cased. Raises
    ValueError for lines with fewer than five whitespace-separated fields.
    """
    for raw_line in text.splitlines():
        record = raw_line.strip()
        if not record or record[0] == ";":
            continue
        fields = record.split(None, 4)
        if len(fields) != 5:
            raise ValueError(f"Bad zone record line: {record!r}") from None
        name, ttl, _klass, rtype, rdata = fields
        yield name.rstrip("."), ttl, rtype.upper(), rdata
|
||||
|
||||
|
||||
def get_initial_remote_data(sshexec, mail_domain):
|
||||
return sshexec.logged(
|
||||
call=remote.rdns.perform_initial_checks, kwargs=dict(mail_domain=mail_domain)
|
||||
@@ -31,13 +42,39 @@ def get_filled_zone_file(remote_data):
|
||||
if not sts_id:
|
||||
remote_data["sts_id"] = datetime.datetime.now().strftime("%Y%m%d%H%M")
|
||||
|
||||
template = importlib.resources.files(__package__).joinpath("chatmail.zone.j2")
|
||||
content = template.read_text()
|
||||
zonefile = Template(content).render(**remote_data)
|
||||
lines = [x.strip() for x in zonefile.split("\n") if x.strip()]
|
||||
d = remote_data["mail_domain"]
|
||||
|
||||
def append_record(name, rtype, rdata, ttl=3600):
|
||||
lines.append(f"{name:<40} {ttl:<6} IN {rtype:<5} {rdata}")
|
||||
|
||||
lines = ["; Required DNS entries"]
|
||||
if remote_data.get("A"):
|
||||
append_record(f"{d}.", "A", remote_data["A"])
|
||||
if remote_data.get("AAAA"):
|
||||
append_record(f"{d}.", "AAAA", remote_data["AAAA"])
|
||||
append_record(f"{d}.", "MX", f"10 {d}.")
|
||||
if remote_data.get("strict_tls"):
|
||||
append_record(f"_mta-sts.{d}.", "TXT", f'"v=STSv1; id={remote_data["sts_id"]}"')
|
||||
append_record(f"mta-sts.{d}.", "CNAME", f"{d}.")
|
||||
append_record(f"www.{d}.", "CNAME", f"{d}.")
|
||||
lines.append(remote_data["dkim_entry"])
|
||||
lines.append("")
|
||||
zonefile = "\n".join(lines)
|
||||
return zonefile
|
||||
lines.append("; Recommended DNS entries")
|
||||
append_record(f"{d}.", "TXT", '"v=spf1 a ~all"')
|
||||
append_record(f"_dmarc.{d}.", "TXT", '"v=DMARC1;p=reject;adkim=s;aspf=s"')
|
||||
if remote_data.get("acme_account_url"):
|
||||
append_record(
|
||||
f"{d}.",
|
||||
"CAA",
|
||||
f'0 issue "letsencrypt.org;accounturi={remote_data["acme_account_url"]}"',
|
||||
)
|
||||
append_record(f"_adsp._domainkey.{d}.", "TXT", '"dkim=discardable"')
|
||||
append_record(f"_submission._tcp.{d}.", "SRV", f"0 1 587 {d}.")
|
||||
append_record(f"_submissions._tcp.{d}.", "SRV", f"0 1 465 {d}.")
|
||||
append_record(f"_imap._tcp.{d}.", "SRV", f"0 1 143 {d}.")
|
||||
append_record(f"_imaps._tcp.{d}.", "SRV", f"0 1 993 {d}.")
|
||||
lines.append("")
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def check_full_zone(sshexec, remote_data, out, zonefile) -> int:
|
||||
@@ -58,7 +95,8 @@ def check_full_zone(sshexec, remote_data, out, zonefile) -> int:
|
||||
returncode = 1
|
||||
if remote_data.get("dkim_entry") in required_diff:
|
||||
out(
|
||||
"If the DKIM entry above does not work with your DNS provider, you can try this one:\n"
|
||||
"If the DKIM entry above does not work with your DNS provider,"
|
||||
" you can try this one:\n"
|
||||
)
|
||||
out(remote_data.get("web_dkim_entry") + "\n")
|
||||
if recommended_diff:
|
||||
|
||||
@@ -1,20 +1,33 @@
|
||||
import os
|
||||
import io
|
||||
import urllib.request
|
||||
|
||||
from chatmaild.config import Config
|
||||
from pyinfra import host
|
||||
from pyinfra.facts.server import Arch, Sysctl
|
||||
from pyinfra.facts.systemd import SystemdEnabled
|
||||
from pyinfra.facts.deb import DebPackages
|
||||
from pyinfra.facts.server import Arch, Command, Sysctl
|
||||
from pyinfra.operations import apt, files, server, systemd
|
||||
|
||||
from cmdeploy.basedeploy import (
|
||||
Deployer,
|
||||
activate_remote_units,
|
||||
blocked_service_startup,
|
||||
configure_remote_units,
|
||||
get_resource,
|
||||
has_systemd,
|
||||
is_in_container,
|
||||
)
|
||||
|
||||
DOVECOT_ARCHIVE_VERSION = "2.3.21+dfsg1-3"
|
||||
DOVECOT_PACKAGE_VERSION = f"1:{DOVECOT_ARCHIVE_VERSION}"
|
||||
|
||||
DOVECOT_SHA256 = {
|
||||
("core", "amd64"): "dd060706f52a306fa863d874717210b9fe10536c824afe1790eec247ded5b27d",
|
||||
("core", "arm64"): "e7548e8a82929722e973629ecc40fcfa886894cef3db88f23535149e7f730dc9",
|
||||
("imapd", "amd64"): "8d8dc6fc00bbb6cdb25d345844f41ce2f1c53f764b79a838eb2a03103eebfa86",
|
||||
("imapd", "arm64"): "178fa877ddd5df9930e8308b518f4b07df10e759050725f8217a0c1fb3fd707f",
|
||||
("lmtpd", "amd64"): "2f69ba5e35363de50962d42cccbfe4ed8495265044e244007d7ccddad77513ab",
|
||||
("lmtpd", "arm64"): "89f52fb36524f5877a177dff4a713ba771fd3f91f22ed0af7238d495e143b38f",
|
||||
}
|
||||
|
||||
|
||||
class DovecotDeployer(Deployer):
|
||||
daemon_reload = False
|
||||
@@ -26,23 +39,64 @@ class DovecotDeployer(Deployer):
|
||||
|
||||
def install(self):
|
||||
arch = host.get_fact(Arch)
|
||||
if has_systemd() and "dovecot.service" in host.get_fact(SystemdEnabled):
|
||||
return # already installed and running
|
||||
_install_dovecot_package("core", arch)
|
||||
_install_dovecot_package("imapd", arch)
|
||||
_install_dovecot_package("lmtpd", arch)
|
||||
with blocked_service_startup():
|
||||
debs = []
|
||||
for pkg in ("core", "imapd", "lmtpd"):
|
||||
deb, changed = _download_dovecot_package(pkg, arch)
|
||||
self.need_restart |= changed
|
||||
if deb:
|
||||
debs.append(deb)
|
||||
if debs:
|
||||
deb_list = " ".join(debs)
|
||||
# First dpkg may fail on missing dependencies (stderr suppressed);
|
||||
# apt-get --fix-broken pulls them in, then dpkg retries cleanly.
|
||||
server.shell(
|
||||
name="Install dovecot packages",
|
||||
commands=[
|
||||
f"dpkg --force-confdef --force-confold -i {deb_list} 2> /dev/null || true",
|
||||
"DEBIAN_FRONTEND=noninteractive apt-get -y --fix-broken install",
|
||||
f"dpkg --force-confdef --force-confold -i {deb_list}",
|
||||
],
|
||||
)
|
||||
self.need_restart = True
|
||||
files.put(
|
||||
name="Pin dovecot packages to block Debian dist-upgrades",
|
||||
src=io.StringIO(
|
||||
"Package: dovecot-*\n"
|
||||
"Pin: version *\n"
|
||||
"Pin-Priority: -1\n"
|
||||
),
|
||||
dest="/etc/apt/preferences.d/pin-dovecot",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
|
||||
def configure(self):
|
||||
configure_remote_units(self.config.mail_domain, self.units)
|
||||
self.need_restart, self.daemon_reload = _configure_dovecot(self.config)
|
||||
config_restart, self.daemon_reload = _configure_dovecot(self.config)
|
||||
self.need_restart |= config_restart
|
||||
|
||||
def activate(self):
|
||||
activate_remote_units(self.units)
|
||||
|
||||
# Detect stale binary: package installed but service still runs old (deleted) binary.
|
||||
if not self.disable_mail and not self.need_restart:
|
||||
stale = host.get_fact(
|
||||
Command,
|
||||
'pid=$(systemctl show -p MainPID --value dovecot.service 2>/dev/null);'
|
||||
' [ "${pid:-0}" != "0" ] && readlink "/proc/$pid/exe" 2>/dev/null | grep -q "(deleted)"'
|
||||
" && echo STALE || true",
|
||||
)
|
||||
if stale == "STALE":
|
||||
self.need_restart = True
|
||||
|
||||
restart = False if self.disable_mail else self.need_restart
|
||||
|
||||
systemd.service(
|
||||
name="Disable dovecot for now" if self.disable_mail else "Start and enable Dovecot",
|
||||
name="Disable dovecot for now"
|
||||
if self.disable_mail
|
||||
else "Start and enable Dovecot",
|
||||
service="dovecot.service",
|
||||
running=False if self.disable_mail else True,
|
||||
enabled=False if self.disable_mail else True,
|
||||
@@ -61,43 +115,40 @@ def _pick_url(primary, fallback):
|
||||
return fallback
|
||||
|
||||
|
||||
def _install_dovecot_package(package: str, arch: str):
|
||||
def _download_dovecot_package(package: str, arch: str) -> tuple[str | None, bool]:
|
||||
"""Download a dovecot .deb if needed, return (path, changed)."""
|
||||
arch = "amd64" if arch == "x86_64" else arch
|
||||
arch = "arm64" if arch == "aarch64" else arch
|
||||
primary_url = f"https://download.delta.chat/dovecot/dovecot-{package}_2.3.21%2Bdfsg1-3_{arch}.deb"
|
||||
fallback_url = f"https://github.com/chatmail/dovecot/releases/download/upstream%2F2.3.21%2Bdfsg1/dovecot-{package}_2.3.21%2Bdfsg1-3_{arch}.deb"
|
||||
url = _pick_url(primary_url, fallback_url)
|
||||
deb_filename = "/root/" + url.split("/")[-1]
|
||||
|
||||
match (package, arch):
|
||||
case ("core", "amd64"):
|
||||
sha256 = "dd060706f52a306fa863d874717210b9fe10536c824afe1790eec247ded5b27d"
|
||||
case ("core", "arm64"):
|
||||
sha256 = "e7548e8a82929722e973629ecc40fcfa886894cef3db88f23535149e7f730dc9"
|
||||
case ("imapd", "amd64"):
|
||||
sha256 = "8d8dc6fc00bbb6cdb25d345844f41ce2f1c53f764b79a838eb2a03103eebfa86"
|
||||
case ("imapd", "arm64"):
|
||||
sha256 = "178fa877ddd5df9930e8308b518f4b07df10e759050725f8217a0c1fb3fd707f"
|
||||
case ("lmtpd", "amd64"):
|
||||
sha256 = "2f69ba5e35363de50962d42cccbfe4ed8495265044e244007d7ccddad77513ab"
|
||||
case ("lmtpd", "arm64"):
|
||||
sha256 = "89f52fb36524f5877a177dff4a713ba771fd3f91f22ed0af7238d495e143b38f"
|
||||
case _:
|
||||
apt.packages(packages=[f"dovecot-{package}"])
|
||||
return
|
||||
pkg_name = f"dovecot-{package}"
|
||||
sha256 = DOVECOT_SHA256.get((package, arch))
|
||||
if sha256 is None:
|
||||
op = apt.packages(packages=[pkg_name])
|
||||
return None, bool(getattr(op, "changed", False))
|
||||
|
||||
installed_versions = host.get_fact(DebPackages).get(pkg_name, [])
|
||||
if DOVECOT_PACKAGE_VERSION in installed_versions:
|
||||
return None, False
|
||||
|
||||
url_version = DOVECOT_ARCHIVE_VERSION.replace("+", "%2B")
|
||||
deb_base = f"{pkg_name}_{url_version}_{arch}.deb"
|
||||
primary_url = f"https://download.delta.chat/dovecot/{deb_base}"
|
||||
fallback_url = f"https://github.com/chatmail/dovecot/releases/download/upstream%2F{url_version}/{deb_base}"
|
||||
url = _pick_url(primary_url, fallback_url)
|
||||
deb_filename = f"/root/{deb_base}"
|
||||
|
||||
files.download(
|
||||
name=f"Download dovecot-{package}",
|
||||
name=f"Download {pkg_name}",
|
||||
src=url,
|
||||
dest=deb_filename,
|
||||
sha256sum=sha256,
|
||||
cache_time=60 * 60 * 24 * 365 * 10, # never redownload the package
|
||||
)
|
||||
|
||||
apt.deb(name=f"Install dovecot-{package}", src=deb_filename)
|
||||
return deb_filename, True
|
||||
|
||||
|
||||
def _configure_dovecot(config: Config, debug: bool = False) -> (bool, bool):
|
||||
def _configure_dovecot(config: Config, debug: bool = False) -> tuple[bool, bool]:
|
||||
"""Configures Dovecot IMAP server."""
|
||||
need_restart = False
|
||||
daemon_reload = False
|
||||
@@ -132,19 +183,25 @@ def _configure_dovecot(config: Config, debug: bool = False) -> (bool, bool):
|
||||
|
||||
# as per https://doc.dovecot.org/2.3/configuration_manual/os/
|
||||
# it is recommended to set the following inotify limits
|
||||
if not os.environ.get("CHATMAIL_NOSYSCTL"):
|
||||
for name in ("max_user_instances", "max_user_watches"):
|
||||
key = f"fs.inotify.{name}"
|
||||
if host.get_fact(Sysctl)[key] > 65535:
|
||||
# Skip updating limits if already sufficient
|
||||
# (enables running in incus containers where sysctl readonly)
|
||||
continue
|
||||
server.sysctl(
|
||||
name=f"Change {key}",
|
||||
key=key,
|
||||
value=65535,
|
||||
persist=True,
|
||||
can_modify = not is_in_container()
|
||||
for name in ("max_user_instances", "max_user_watches"):
|
||||
key = f"fs.inotify.{name}"
|
||||
value = host.get_fact(Sysctl)[key]
|
||||
if value > 65534:
|
||||
continue
|
||||
if not can_modify:
|
||||
print(
|
||||
"\n!!!! refusing to attempt sysctl setting in containers\n"
|
||||
f"!!!! dovecot: sysctl {key!r}={value}, should be >65534 for production setups\n"
|
||||
"!!!!"
|
||||
)
|
||||
continue
|
||||
server.sysctl(
|
||||
name=f"Change {key}",
|
||||
key=key,
|
||||
value=65535,
|
||||
persist=True,
|
||||
)
|
||||
|
||||
timezone_env = files.line(
|
||||
name="Set TZ environment variable",
|
||||
|
||||
@@ -133,6 +133,11 @@ protocol lmtp {
|
||||
# mail_lua and push_notification_lua are needed for Lua push notification handler.
|
||||
# <https://doc.dovecot.org/2.3/configuration_manual/push_notification/#configuration>
|
||||
mail_plugins = $mail_plugins mail_lua notify push_notification push_notification_lua
|
||||
|
||||
# Disable fsync for LMTP. May lose delivered message,
|
||||
# but unlikely to cause problems with multiple relays.
|
||||
# https://doc.dovecot.org/2.3/admin_manual/mailbox_formats/#fsyncing
|
||||
mail_fsync = never
|
||||
}
|
||||
|
||||
plugin {
|
||||
@@ -144,12 +149,22 @@ plugin {
|
||||
}
|
||||
|
||||
plugin {
|
||||
# for now we define static quota-rules for all users
|
||||
# for now we define static quota-rules for all users
|
||||
quota = maildir:User quota
|
||||
quota_rule = *:storage={{ config.max_mailbox_size }}
|
||||
quota_max_mail_size={{ config.max_message_size }}
|
||||
quota_grace = 0
|
||||
# quota_over_flag_value = TRUE
|
||||
|
||||
# When a user reaches 90% quota, run chatmail-quota-expire
|
||||
# to remove large/old messages until usage is below 80%.
|
||||
quota_warning = storage=90%% quota-warning {{ config.max_mailbox_size_mb * 80 // 100 }} {{ config.mailboxes_dir }}/%u
|
||||
}
|
||||
|
||||
service quota-warning {
|
||||
executable = script /usr/local/lib/chatmaild/venv/bin/chatmail-quota-expire
|
||||
user = vmail
|
||||
unix_listener quota-warning {
|
||||
}
|
||||
}
|
||||
|
||||
# push_notification configuration
|
||||
@@ -252,6 +267,9 @@ protocol imap {
|
||||
# sort -sn <(sed 's/ / C: /' *.in) <(sed 's/ / S: /' cat *.out)
|
||||
|
||||
rawlog_dir = %h
|
||||
|
||||
# Disable fsync for IMAP. May lose IMAP changes like setting flags.
|
||||
mail_fsync = never
|
||||
}
|
||||
{% endif %}
|
||||
|
||||
|
||||
@@ -14,10 +14,10 @@ class FiltermailDeployer(Deployer):
|
||||
|
||||
def install(self):
|
||||
arch = host.get_fact(facts.server.Arch)
|
||||
url = f"https://github.com/chatmail/filtermail/releases/download/v0.5.2/filtermail-{arch}"
|
||||
url = f"https://github.com/chatmail/filtermail/releases/download/v0.6.1/filtermail-{arch}"
|
||||
sha256sum = {
|
||||
"x86_64": "ce24ca0075aa445510291d775fb3aea8f4411818c7b885ae51a0fe18c5f789ce",
|
||||
"aarch64": "c5d783eefa5332db3d97a0e6a23917d72849e3eb45da3d16ce908a9b4e5a797d",
|
||||
"x86_64": "48b3fb80c092d00b9b0a0ef77a8673496da3b9aed5ec1851e1df936d5589d62f",
|
||||
"aarch64": "c65bd5f45df187d3d65d6965a285583a3be0f44a6916ff12909ff9a8d702c22e",
|
||||
}[arch]
|
||||
self.need_restart |= files.download(
|
||||
name="Download filtermail",
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
*/5 * * * * root {{ config.execpath }} {{ config.mailboxes_dir }} >/var/www/html/metrics
|
||||
@@ -54,7 +54,7 @@ http {
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
ssl_prefer_server_ciphers on;
|
||||
ssl_certificate {{ config.tls_cert_path }};
|
||||
ssl_certificate_key {{ config.tls_key_path }};
|
||||
@@ -73,16 +73,16 @@ http {
|
||||
|
||||
access_log syslog:server=unix:/dev/log,facility=local7;
|
||||
|
||||
location /mxdeliv/ {
|
||||
proxy_pass http://127.0.0.1:{{ config.filtermail_http_port_incoming }};
|
||||
}
|
||||
|
||||
location / {
|
||||
# First attempt to serve request as file, then
|
||||
# as directory, then fall back to displaying a 404.
|
||||
try_files $uri $uri/ =404;
|
||||
}
|
||||
|
||||
location /metrics {
|
||||
default_type text/plain;
|
||||
}
|
||||
|
||||
location /new {
|
||||
{% if config.tls_cert_mode != "self" %}
|
||||
if ($request_method = GET) {
|
||||
|
||||
@@ -97,7 +97,9 @@ class PostfixDeployer(Deployer):
|
||||
server.shell(
|
||||
name="Validate postfix configuration",
|
||||
# Extract stderr and quit with error if non-zero
|
||||
commands=["""bash -c 'w=$(postconf 2>&1 >/dev/null); [[ -z "$w" ]] || { echo "$w"; false; }'"""],
|
||||
commands=[
|
||||
"""bash -c 'w=$(postconf 2>&1 >/dev/null); [[ -z "$w" ]] || { echo "$w"; false; }'"""
|
||||
],
|
||||
)
|
||||
self.need_restart = need_restart
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ smtpd_tls_key_file={{ config.tls_key_path }}
|
||||
smtpd_tls_security_level=may
|
||||
|
||||
smtp_tls_CApath=/etc/ssl/certs
|
||||
smtp_tls_security_level={{ "verify" if config.tls_cert_mode == "acme" else "encrypt" }}
|
||||
smtp_tls_security_level=verify
|
||||
# Send SNI extension when connecting to other servers.
|
||||
# <https://www.postfix.org/postconf.5.html#smtp_tls_servername>
|
||||
smtp_tls_servername = hostname
|
||||
@@ -88,6 +88,22 @@ inet_protocols = ipv4
|
||||
inet_protocols = all
|
||||
{% endif %}
|
||||
|
||||
# Postfix does not try IPv4 and IPv6 connections
|
||||
# concurrently as of version 3.7.11.
|
||||
#
|
||||
# When relay has both A (IPv4) and AAAA (IPv6) records,
|
||||
# but broken IPv6 connectivity,
|
||||
# every second message is delayed by the connection timeout
|
||||
# <https://www.postfix.org/postconf.5.html#smtp_connect_timeout>
|
||||
# which defaults to 30 seconds. Reducing timeouts is not a solution
|
||||
# as this will result in a failure to connect to slow servers.
|
||||
#
|
||||
# As a workaround we always prefer IPv4 when it is available.
|
||||
#
|
||||
# The setting is documented at
|
||||
# <https://www.postfix.org/postconf.5.html#smtp_address_preference>
|
||||
smtp_address_preference=ipv4
|
||||
|
||||
virtual_transport = lmtp:unix:private/dovecot-lmtp
|
||||
virtual_mailbox_domains = {{ config.mail_domain }}
|
||||
lmtp_header_checks = regexp:/etc/postfix/lmtp_header_cleanup
|
||||
|
||||
@@ -53,13 +53,14 @@ def get_dkim_entry(mail_domain, pre_command, dkim_selector):
|
||||
print=log_progress,
|
||||
)
|
||||
except CalledProcessError:
|
||||
return
|
||||
return None, None
|
||||
dkim_value_raw = f"v=DKIM1;k=rsa;p={dkim_pubkey};s=email;t=s"
|
||||
dkim_value = '" "'.join(re.findall(".{1,255}", dkim_value_raw))
|
||||
web_dkim_value = "".join(re.findall(".{1,255}", dkim_value_raw))
|
||||
name = f"{dkim_selector}._domainkey.{mail_domain}."
|
||||
return (
|
||||
f'{dkim_selector}._domainkey.{mail_domain}. TXT "{dkim_value}"',
|
||||
f'{dkim_selector}._domainkey.{mail_domain}. TXT "{web_dkim_value}"',
|
||||
f'{name:<40} 3600 IN TXT "{dkim_value}"',
|
||||
f'{name:<40} 3600 IN TXT "{web_dkim_value}"',
|
||||
)
|
||||
|
||||
|
||||
@@ -94,7 +95,7 @@ def check_zonefile(zonefile, verbose=True):
|
||||
if not zf_line.strip() or zf_line.startswith(";"):
|
||||
continue
|
||||
print(f"dns-checking {zf_line!r}") if verbose else log_progress("")
|
||||
zf_domain, zf_typ, zf_value = zf_line.split(maxsplit=2)
|
||||
zf_domain, _ttl, _in, zf_typ, zf_value = zf_line.split(None, 4)
|
||||
zf_domain = zf_domain.rstrip(".")
|
||||
zf_value = zf_value.strip()
|
||||
query_value = query_dns(zf_typ, zf_domain)
|
||||
|
||||
@@ -40,5 +40,5 @@ def dovecot_recalc_quota(user):
|
||||
#
|
||||
for line in output.split("\n"):
|
||||
parts = line.split()
|
||||
if parts[2] == "STORAGE":
|
||||
if len(parts) >= 6 and parts[2] == "STORAGE":
|
||||
return dict(value=int(parts[3]), limit=int(parts[4]), percent=int(parts[5]))
|
||||
|
||||
@@ -18,6 +18,8 @@ def openssl_selfsigned_args(domain, cert_path, key_path, days=36500):
|
||||
"-keyout", str(key_path),
|
||||
"-out", str(cert_path),
|
||||
"-subj", f"/CN={domain}",
|
||||
# Mark as end-entity cert so it cannot be used as a CA to sign others.
|
||||
"-addext", "basicConstraints=critical,CA:FALSE",
|
||||
"-addext", "extendedKeyUsage=serverAuth,clientAuth",
|
||||
"-addext",
|
||||
f"subjectAltName=DNS:{domain},DNS:www.{domain},DNS:mta-sts.{domain}",
|
||||
|
||||
@@ -50,9 +50,6 @@ class SSHExec:
|
||||
FuncError = FuncError
|
||||
|
||||
def __init__(self, host, verbose=False, python="python3", timeout=60):
|
||||
docker_container = os.environ.get("CHATMAIL_DOCKER")
|
||||
if docker_container:
|
||||
python = f"docker exec -i {docker_container} python3"
|
||||
self.gateway = execnet.makegateway(f"ssh=root@{host}//python={python}")
|
||||
self._remote_cmdloop_channel = bootstrap_remote(self.gateway, remote)
|
||||
self.timeout = timeout
|
||||
|
||||
@@ -1,17 +1,18 @@
|
||||
; Required DNS entries for chatmail servers
|
||||
zftest.testrun.org. A 135.181.204.127
|
||||
zftest.testrun.org. AAAA 2a01:4f9:c012:52f4::1
|
||||
zftest.testrun.org. MX 10 zftest.testrun.org.
|
||||
_mta-sts.zftest.testrun.org. TXT "v=STSv1; id=202403211706"
|
||||
mta-sts.zftest.testrun.org. CNAME zftest.testrun.org.
|
||||
www.zftest.testrun.org. CNAME zftest.testrun.org.
|
||||
opendkim._domainkey.zftest.testrun.org. TXT "v=DKIM1;k=rsa;p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoYt82CVUyz2ouaqjX2kB+5J80knAyoOU3MGU5aWppmwUwwTvj/oSTSpkc5JMtVTRmKKr8NUDWAL1Yw7dfGqqPHdHfwwjS3BIvDzYx+hzgtz62RnfNgV+/2MAoNpfX7cAFIHdRzEHNtwugc3RDLquqPoupAE3Y2YRw2T5zG5fILh4vwIcJZL5Uq6B92j8wwJqOex" "33n+vm1NKQ9rxo/UsHAmZlJzpooXcG/4igTBxJyJlamVSRR6N7Nul1v//YJb7J6v2o0iPHW6uE0StzKaPPNC2IVosSRFbD9H2oqppltptFSNPlI0E+t0JBWHem6YK7xcugiO3ImMCaaU8g6Jt/wIDAQAB;s=email;t=s"
|
||||
; Required DNS entries
|
||||
zftest.testrun.org. 3600 IN A 135.181.204.127
|
||||
zftest.testrun.org. 3600 IN AAAA 2a01:4f9:c012:52f4::1
|
||||
zftest.testrun.org. 3600 IN MX 10 zftest.testrun.org.
|
||||
_mta-sts.zftest.testrun.org. 3600 IN TXT "v=STSv1; id=202403211706"
|
||||
mta-sts.zftest.testrun.org. 3600 IN CNAME zftest.testrun.org.
|
||||
www.zftest.testrun.org. 3600 IN CNAME zftest.testrun.org.
|
||||
opendkim._domainkey.zftest.testrun.org. 3600 IN TXT "v=DKIM1;k=rsa;p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoYt82CVUyz2ouaqjX2kB+5J80knAyoOU3MGU5aWppmwUwwTvj/oSTSpkc5JMtVTRmKKr8NUDWAL1Yw7dfGqqPHdHfwwjS3BIvDzYx+hzgtz62RnfNgV+/2MAoNpfX7cAFIHdRzEHNtwugc3RDLquqPoupAE3Y2YRw2T5zG5fILh4vwIcJZL5Uq6B92j8wwJqOex" "33n+vm1NKQ9rxo/UsHAmZlJzpooXcG/4igTBxJyJlamVSRR6N7Nul1v//YJb7J6v2o0iPHW6uE0StzKaPPNC2IVosSRFbD9H2oqppltptFSNPlI0E+t0JBWHem6YK7xcugiO3ImMCaaU8g6Jt/wIDAQAB;s=email;t=s"
|
||||
|
||||
; Recommended DNS entries
|
||||
_submission._tcp.zftest.testrun.org. SRV 0 1 587 zftest.testrun.org.
|
||||
_submissions._tcp.zftest.testrun.org. SRV 0 1 465 zftest.testrun.org.
|
||||
_imap._tcp.zftest.testrun.org. SRV 0 1 143 zftest.testrun.org.
|
||||
_imaps._tcp.zftest.testrun.org. SRV 0 1 993 zftest.testrun.org.
|
||||
zftest.testrun.org. CAA 0 issue "letsencrypt.org;accounturi=https://acme-v02.api.letsencrypt.org/acme/acct/1371472956"
|
||||
zftest.testrun.org. TXT "v=spf1 a:zftest.testrun.org ~all"
|
||||
_dmarc.zftest.testrun.org. TXT "v=DMARC1;p=reject;adkim=s;aspf=s"
|
||||
_adsp._domainkey.zftest.testrun.org. TXT "dkim=discardable"
|
||||
zftest.testrun.org. 3600 IN TXT "v=spf1 a ~all"
|
||||
_dmarc.zftest.testrun.org. 3600 IN TXT "v=DMARC1;p=reject;adkim=s;aspf=s"
|
||||
zftest.testrun.org. 3600 IN CAA 0 issue "letsencrypt.org;accounturi=https://acme-v02.api.letsencrypt.org/acme/acct/1371472956"
|
||||
_adsp._domainkey.zftest.testrun.org. 3600 IN TXT "dkim=discardable"
|
||||
_submission._tcp.zftest.testrun.org. 3600 IN SRV 0 1 587 zftest.testrun.org.
|
||||
_submissions._tcp.zftest.testrun.org. 3600 IN SRV 0 1 465 zftest.testrun.org.
|
||||
_imap._tcp.zftest.testrun.org. 3600 IN SRV 0 1 143 zftest.testrun.org.
|
||||
_imaps._tcp.zftest.testrun.org. 3600 IN SRV 0 1 993 zftest.testrun.org.
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import time
|
||||
def test_tls_imap(benchmark, imap):
|
||||
def imap_connect():
|
||||
imap.connect()
|
||||
|
||||
@@ -89,7 +89,9 @@ def test_concurrent_logins_same_account(
|
||||
assert login_results.get()
|
||||
|
||||
|
||||
def test_no_vrfy(chatmail_config):
|
||||
def test_no_vrfy(cmfactory, chatmail_config):
|
||||
ac = cmfactory.get_online_account()
|
||||
addr = ac.get_config("addr")
|
||||
domain = chatmail_config.mail_domain
|
||||
|
||||
s = smtplib.SMTP(domain)
|
||||
@@ -98,7 +100,7 @@ def test_no_vrfy(chatmail_config):
|
||||
s.putcmd("vrfy", f"wrongaddress@{chatmail_config.mail_domain}")
|
||||
result = s.getreply()
|
||||
print(result)
|
||||
s.putcmd("vrfy", f"echo@{chatmail_config.mail_domain}")
|
||||
s.putcmd("vrfy", addr)
|
||||
result2 = s.getreply()
|
||||
print(result2)
|
||||
assert result[0] == result2[0] == 252
|
||||
|
||||
@@ -71,6 +71,44 @@ class TestSSHExecutor:
|
||||
assert (now - since_date).total_seconds() < 60 * 60 * 51
|
||||
|
||||
|
||||
def test_dovecot_main_process_matches_installed_binary(sshdomain):
|
||||
sshexec = get_sshexec(sshdomain)
|
||||
main_pid = int(
|
||||
sshexec(
|
||||
call=remote.rshell.shell,
|
||||
kwargs=dict(
|
||||
command="timeout 10 systemctl show -p MainPID --value dovecot.service"
|
||||
),
|
||||
).strip()
|
||||
)
|
||||
assert main_pid != 0, "dovecot.service MainPID is 0 -- service not running?"
|
||||
|
||||
exe = sshexec(
|
||||
call=remote.rshell.shell,
|
||||
kwargs=dict(command=f"timeout 10 readlink /proc/{main_pid}/exe"),
|
||||
).strip()
|
||||
status_text = sshexec(
|
||||
call=remote.rshell.shell,
|
||||
kwargs=dict(
|
||||
command="timeout 10 systemctl show -p StatusText --value dovecot.service"
|
||||
),
|
||||
).strip()
|
||||
installed_version = sshexec(
|
||||
call=remote.rshell.shell, kwargs=dict(command="timeout 10 dovecot --version")
|
||||
).strip()
|
||||
|
||||
assert not exe.endswith("(deleted)"), (
|
||||
f"running dovecot binary was deleted (stale after upgrade): {exe}"
|
||||
)
|
||||
expected_status_text = f"v{installed_version}"
|
||||
assert status_text == expected_status_text or status_text.startswith(
|
||||
f"{expected_status_text} "
|
||||
), (
|
||||
f"dovecot status version mismatch: "
|
||||
f"StatusText={status_text!r}, installed={installed_version!r}"
|
||||
)
|
||||
|
||||
|
||||
def test_timezone_env(remote):
|
||||
for line in remote.iter_output("env"):
|
||||
print(line)
|
||||
@@ -206,24 +244,6 @@ def test_exceed_rate_limit(cmsetup, gencreds, maildata, chatmail_config):
|
||||
pytest.fail("Rate limit was not exceeded")
|
||||
|
||||
|
||||
@pytest.mark.slow
|
||||
def test_expunged(remote, chatmail_config):
|
||||
outdated_days = int(chatmail_config.delete_mails_after) + 1
|
||||
find_cmds = [
|
||||
f"find {chatmail_config.mailboxes_dir} -path '*/cur/*' -mtime +{outdated_days} -type f",
|
||||
f"find {chatmail_config.mailboxes_dir} -path '*/.*/cur/*' -mtime +{outdated_days} -type f",
|
||||
f"find {chatmail_config.mailboxes_dir} -path '*/new/*' -mtime +{outdated_days} -type f",
|
||||
f"find {chatmail_config.mailboxes_dir} -path '*/.*/new/*' -mtime +{outdated_days} -type f",
|
||||
f"find {chatmail_config.mailboxes_dir} -path '*/tmp/*' -mtime +{outdated_days} -type f",
|
||||
f"find {chatmail_config.mailboxes_dir} -path '*/.*/tmp/*' -mtime +{outdated_days} -type f",
|
||||
]
|
||||
outdated_days = int(chatmail_config.delete_large_after) + 1
|
||||
find_cmds.append(
|
||||
f"find {chatmail_config.mailboxes_dir} -path '*/cur/*' -mtime +{outdated_days} -size +200k -type f"
|
||||
)
|
||||
for cmd in find_cmds:
|
||||
for line in remote.iter_output(cmd):
|
||||
assert not line
|
||||
|
||||
|
||||
def test_deployed_state(remote):
|
||||
|
||||
@@ -6,8 +6,8 @@ import imap_tools
|
||||
import pytest
|
||||
import requests
|
||||
|
||||
from cmdeploy.remote import rshell
|
||||
from cmdeploy.cmdeploy import get_sshexec
|
||||
from cmdeploy.remote import rshell
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import imaplib
|
||||
import ipaddress
|
||||
import itertools
|
||||
import os
|
||||
import random
|
||||
@@ -14,6 +15,14 @@ from chatmaild.config import read_config
|
||||
conftestdir = Path(__file__).parent
|
||||
|
||||
|
||||
def _is_ip(domain):
|
||||
try:
|
||||
ipaddress.ip_address(domain)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
parser.addoption(
|
||||
"--slow", action="store_true", default=False, help="also run slow tests"
|
||||
@@ -35,6 +44,11 @@ def pytest_runtest_setup(item):
|
||||
|
||||
|
||||
def _get_chatmail_config():
|
||||
inipath = os.environ.get("CHATMAIL_INI")
|
||||
if inipath:
|
||||
path = Path(inipath).resolve()
|
||||
return read_config(path), path
|
||||
|
||||
current = Path().resolve()
|
||||
while 1:
|
||||
path = current.joinpath("chatmail.ini").resolve()
|
||||
@@ -277,6 +291,7 @@ def gencreds(chatmail_config):
|
||||
|
||||
def gen(domain=None):
|
||||
domain = domain if domain else chatmail_config.mail_domain
|
||||
addr_domain = f"[{domain}]" if _is_ip(domain) else domain
|
||||
while 1:
|
||||
num = next(count)
|
||||
alphanumeric = "abcdefghijklmnopqrstuvwxyz1234567890"
|
||||
@@ -290,7 +305,7 @@ def gencreds(chatmail_config):
|
||||
password = "".join(
|
||||
random.choices(alphanumeric, k=chatmail_config.password_min_length)
|
||||
)
|
||||
yield f"{user}@{domain}", f"{password}"
|
||||
yield f"{user}@{addr_domain}", f"{password}"
|
||||
|
||||
return lambda domain=None: next(gen(domain))
|
||||
|
||||
@@ -339,9 +354,22 @@ class ChatmailACFactory:
|
||||
accounts = []
|
||||
for _ in range(num):
|
||||
account = self.dc.add_account()
|
||||
future = account.add_or_update_transport.future(
|
||||
self._make_transport(domain)
|
||||
)
|
||||
addr, password = self.gencreds(domain)
|
||||
if _is_ip(domain):
|
||||
# Use DCLOGIN scheme with explicit server hosts,
|
||||
# matching how madmail presents its addresses to users.
|
||||
qr = (
|
||||
f"dclogin:{addr}"
|
||||
f"?p={password}&v=1"
|
||||
f"&ih={domain}&ip=993"
|
||||
f"&sh={domain}&sp=465"
|
||||
f"&ic=3&ss=default"
|
||||
)
|
||||
future = account.add_transport_from_qr.future(qr)
|
||||
else:
|
||||
future = account.add_or_update_transport.future(
|
||||
self._make_transport(domain)
|
||||
)
|
||||
futures.append(future)
|
||||
|
||||
# ensure messages stay in INBOX so that they can be
|
||||
@@ -388,12 +416,15 @@ def cmfactory(rpc, gencreds, maildomain, chatmail_config):
|
||||
|
||||
@pytest.fixture
|
||||
def remote(sshdomain):
|
||||
return Remote(sshdomain)
|
||||
r = Remote(sshdomain)
|
||||
yield r
|
||||
r.close()
|
||||
|
||||
|
||||
class Remote:
|
||||
def __init__(self, sshdomain):
|
||||
self.sshdomain = sshdomain
|
||||
self._procs = []
|
||||
|
||||
def iter_output(self, logcmd="", ready=None):
|
||||
getjournal = "journalctl -f" if not logcmd else logcmd
|
||||
@@ -402,23 +433,33 @@ class Remote:
|
||||
case "@local": command = []
|
||||
case "localhost": command = []
|
||||
case _: command = ["ssh", f"root@{self.sshdomain}"]
|
||||
docker_container = os.environ.get("CHATMAIL_DOCKER")
|
||||
if docker_container:
|
||||
command += ["docker", "exec", docker_container]
|
||||
[command.append(arg) for arg in getjournal.split()]
|
||||
self.popen = subprocess.Popen(
|
||||
popen = subprocess.Popen(
|
||||
command,
|
||||
stdin=subprocess.DEVNULL,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.DEVNULL,
|
||||
)
|
||||
while 1:
|
||||
line = self.popen.stdout.readline()
|
||||
res = line.decode().strip().lower()
|
||||
if not res:
|
||||
break
|
||||
if ready is not None:
|
||||
ready()
|
||||
ready = None
|
||||
yield res
|
||||
self._procs.append(popen)
|
||||
try:
|
||||
while 1:
|
||||
line = popen.stdout.readline()
|
||||
res = line.decode().strip().lower()
|
||||
if not res:
|
||||
break
|
||||
if ready is not None:
|
||||
ready()
|
||||
ready = None
|
||||
yield res
|
||||
finally:
|
||||
popen.terminate()
|
||||
popen.wait()
|
||||
|
||||
def close(self):
|
||||
while self._procs:
|
||||
proc = self._procs.pop()
|
||||
proc.kill()
|
||||
proc.wait()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
|
||||
@@ -23,15 +23,19 @@ class TestCmdline:
|
||||
run = parser.parse_args(["run"])
|
||||
assert init and run
|
||||
|
||||
def test_init_not_overwrite(self, capsys):
|
||||
assert main(["init", "chat.example.org"]) == 0
|
||||
def test_init_not_overwrite(self, capsys, tmp_path, monkeypatch):
|
||||
monkeypatch.delenv("CHATMAIL_INI", raising=False)
|
||||
inipath = tmp_path / "chatmail.ini"
|
||||
args = ["init", "--config", str(inipath), "chat.example.org"]
|
||||
assert main(args) == 0
|
||||
capsys.readouterr()
|
||||
|
||||
assert main(["init", "chat.example.org"]) == 1
|
||||
assert main(args) == 1
|
||||
out, err = capsys.readouterr()
|
||||
assert "path exists" in out.lower()
|
||||
|
||||
assert main(["init", "chat.example.org", "--force"]) == 0
|
||||
args.insert(1, "--force")
|
||||
assert main(args) == 0
|
||||
out, err = capsys.readouterr()
|
||||
assert "deleting config file" in out.lower()
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ from copy import deepcopy
|
||||
import pytest
|
||||
|
||||
from cmdeploy import remote
|
||||
from cmdeploy.dns import check_full_zone, check_initial_remote_data
|
||||
from cmdeploy.dns import check_full_zone, check_initial_remote_data, parse_zone_records
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
@@ -60,6 +60,29 @@ def mockdns(request, mockdns_base, mockdns_expected):
|
||||
return mockdns_base
|
||||
|
||||
|
||||
class TestGetDkimEntry:
|
||||
def test_dkim_entry_returns_tuple_on_success(self, mockdns):
|
||||
entry, web_entry = remote.rdns.get_dkim_entry(
|
||||
"some.domain", "", dkim_selector="opendkim"
|
||||
)
|
||||
# May return None,None if openssl not available, but should never crash
|
||||
if entry is not None:
|
||||
assert "opendkim._domainkey.some.domain" in entry
|
||||
assert "opendkim._domainkey.some.domain" in web_entry
|
||||
|
||||
def test_dkim_entry_returns_none_tuple_on_error(self, monkeypatch):
|
||||
"""CalledProcessError must return (None, None), not bare None."""
|
||||
from subprocess import CalledProcessError
|
||||
|
||||
def failing_shell(command, fail_ok=False, print=print):
|
||||
raise CalledProcessError(1, command)
|
||||
|
||||
monkeypatch.setattr(remote.rdns, "shell", failing_shell)
|
||||
result = remote.rdns.get_dkim_entry("some.domain", "", dkim_selector="opendkim")
|
||||
assert result == (None, None)
|
||||
assert result[0] is None and result[1] is None
|
||||
|
||||
|
||||
class TestPerformInitialChecks:
|
||||
def test_perform_initial_checks_ok1(self, mockdns, mockdns_expected):
|
||||
remote_data = remote.rdns.perform_initial_checks("some.domain")
|
||||
@@ -102,18 +125,49 @@ class TestPerformInitialChecks:
|
||||
assert not l
|
||||
|
||||
|
||||
def test_parse_zone_records():
|
||||
text = """
|
||||
; This is a comment
|
||||
some.domain. 3600 IN A 1.1.1.1
|
||||
|
||||
; Another comment
|
||||
www.some.domain. 3600 IN CNAME some.domain.
|
||||
|
||||
; Multi-word rdata
|
||||
some.domain. 3600 IN MX 10 mail.some.domain.
|
||||
|
||||
; DKIM record (single line, multi-word TXT rdata)
|
||||
dkim._domainkey.some.domain. 3600 IN TXT "v=DKIM1;k=rsa;p=MIIBIjANBgkqhkiG" "9w0BAQEFAAOCAQ8AMIIBCgKCAQEA"
|
||||
|
||||
; Another TXT record
|
||||
_dmarc.some.domain. 3600 IN TXT "v=DMARC1;p=reject"
|
||||
"""
|
||||
records = list(parse_zone_records(text))
|
||||
assert records == [
|
||||
("some.domain", "3600", "A", "1.1.1.1"),
|
||||
("www.some.domain", "3600", "CNAME", "some.domain."),
|
||||
("some.domain", "3600", "MX", "10 mail.some.domain."),
|
||||
(
|
||||
"dkim._domainkey.some.domain",
|
||||
"3600",
|
||||
"TXT",
|
||||
'"v=DKIM1;k=rsa;p=MIIBIjANBgkqhkiG" "9w0BAQEFAAOCAQ8AMIIBCgKCAQEA"',
|
||||
),
|
||||
("_dmarc.some.domain", "3600", "TXT", '"v=DMARC1;p=reject"'),
|
||||
]
|
||||
|
||||
|
||||
def test_parse_zone_records_invalid_line():
|
||||
text = "invalid line"
|
||||
with pytest.raises(ValueError, match="Bad zone record line"):
|
||||
list(parse_zone_records(text))
|
||||
|
||||
|
||||
def parse_zonefile_into_dict(zonefile, mockdns_base, only_required=False):
|
||||
for zf_line in zonefile.split("\n"):
|
||||
if zf_line.startswith("#"):
|
||||
if "Recommended" in zf_line and only_required:
|
||||
return
|
||||
continue
|
||||
if not zf_line.strip():
|
||||
continue
|
||||
zf_domain, zf_typ, zf_value = zf_line.split(maxsplit=2)
|
||||
zf_domain = zf_domain.rstrip(".")
|
||||
zf_value = zf_value.strip()
|
||||
mockdns_base.setdefault(zf_typ, {})[zf_domain] = zf_value
|
||||
if only_required:
|
||||
zonefile = zonefile.split("; Recommended")[0]
|
||||
for name, ttl, rtype, rdata in parse_zone_records(zonefile):
|
||||
mockdns_base.setdefault(rtype, {})[name] = rdata
|
||||
|
||||
|
||||
class MockSSHExec:
|
||||
|
||||
238
cmdeploy/src/cmdeploy/tests/test_dovecot_deployer.py
Normal file
238
cmdeploy/src/cmdeploy/tests/test_dovecot_deployer.py
Normal file
@@ -0,0 +1,238 @@
|
||||
from contextlib import nullcontext
|
||||
from types import SimpleNamespace
|
||||
|
||||
import pytest
|
||||
from pyinfra.facts.deb import DebPackages
|
||||
|
||||
from cmdeploy.dovecot import deployer as dovecot_deployer
|
||||
|
||||
|
||||
def make_host(*fact_pairs):
|
||||
"""Build a mock host; get_fact(cls) dispatches to the provided facts mapping.
|
||||
|
||||
Args:
|
||||
*fact_pairs: tuples of (fact_class, fact_value) to register
|
||||
|
||||
Returns:
|
||||
SimpleNamespace with get_fact that raises a clear error if an
|
||||
unexpected fact type is requested.
|
||||
"""
|
||||
facts = dict(fact_pairs)
|
||||
|
||||
def get_fact(cls):
|
||||
if cls not in facts:
|
||||
registered = ", ".join(c.__name__ for c in facts)
|
||||
raise LookupError(
|
||||
f"unexpected get_fact({cls.__name__}); "
|
||||
f"only registered: {registered}"
|
||||
)
|
||||
return facts[cls]
|
||||
|
||||
return SimpleNamespace(get_fact=get_fact)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def deployer():
|
||||
return dovecot_deployer.DovecotDeployer(
|
||||
SimpleNamespace(mail_domain="chat.example.org"),
|
||||
disable_mail=False,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def patch_blocked(monkeypatch):
|
||||
monkeypatch.setattr(dovecot_deployer, "blocked_service_startup", nullcontext)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_files_put(monkeypatch):
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer.files,
|
||||
"put",
|
||||
lambda **kwargs: SimpleNamespace(changed=False),
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def track_shell(monkeypatch):
|
||||
calls = []
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer.server,
|
||||
"shell",
|
||||
lambda **kwargs: calls.append(kwargs) or SimpleNamespace(changed=False),
|
||||
)
|
||||
return calls
|
||||
|
||||
|
||||
def test_download_dovecot_package_skips_epoch_matched_install(monkeypatch):
|
||||
epoch_version = dovecot_deployer.DOVECOT_PACKAGE_VERSION
|
||||
downloads = []
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"host",
|
||||
make_host((DebPackages, {"dovecot-core": [epoch_version]})),
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"_pick_url",
|
||||
lambda primary, fallback: primary,
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer.files,
|
||||
"download",
|
||||
lambda **kwargs: downloads.append(kwargs),
|
||||
)
|
||||
|
||||
deb, changed = dovecot_deployer._download_dovecot_package("core", "amd64")
|
||||
|
||||
assert deb is None, f"expected no deb path when version matches, got {deb!r}"
|
||||
assert changed is False, "should not flag changed when version already installed"
|
||||
assert downloads == [], "should not download when version already installed"
|
||||
|
||||
|
||||
def test_download_dovecot_package_uses_archive_version_for_url_and_filename(
|
||||
monkeypatch,
|
||||
):
|
||||
downloads = []
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"host",
|
||||
make_host((DebPackages, {})),
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"_pick_url",
|
||||
lambda primary, fallback: primary,
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer.files,
|
||||
"download",
|
||||
lambda **kwargs: downloads.append(kwargs),
|
||||
)
|
||||
|
||||
deb, changed = dovecot_deployer._download_dovecot_package("core", "amd64")
|
||||
|
||||
archive_version = dovecot_deployer.DOVECOT_ARCHIVE_VERSION.replace("+", "%2B")
|
||||
expected_deb = f"/root/dovecot-core_{archive_version}_amd64.deb"
|
||||
|
||||
# Verify the returned path uses archive version, not package version (with epoch)
|
||||
assert changed is True, "should flag changed when package not yet installed"
|
||||
assert deb == expected_deb, f"deb path mismatch: {deb!r} != {expected_deb!r}"
|
||||
assert dovecot_deployer.DOVECOT_PACKAGE_VERSION not in deb, (
|
||||
f"deb path should use archive version (no epoch), got {deb!r}"
|
||||
)
|
||||
assert len(downloads) == 1, "files.download should be called exactly once"
|
||||
|
||||
|
||||
def test_install_skips_dpkg_path_when_epoch_matched_packages_present(
|
||||
deployer, patch_blocked, mock_files_put, track_shell, monkeypatch
|
||||
):
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"host",
|
||||
make_host(
|
||||
(
|
||||
dovecot_deployer.DebPackages,
|
||||
{
|
||||
"dovecot-core": [dovecot_deployer.DOVECOT_PACKAGE_VERSION],
|
||||
"dovecot-imapd": [dovecot_deployer.DOVECOT_PACKAGE_VERSION],
|
||||
"dovecot-lmtpd": [dovecot_deployer.DOVECOT_PACKAGE_VERSION],
|
||||
},
|
||||
),
|
||||
(dovecot_deployer.Arch, "x86_64"),
|
||||
),
|
||||
)
|
||||
downloads = []
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer.files,
|
||||
"download",
|
||||
lambda **kwargs: downloads.append(kwargs),
|
||||
)
|
||||
|
||||
deployer.install()
|
||||
|
||||
assert downloads == [], "should not download when all packages epoch-matched"
|
||||
assert track_shell == [], "should not run dpkg when all packages epoch-matched"
|
||||
assert deployer.need_restart is False, (
|
||||
"need_restart should be False when nothing changed"
|
||||
)
|
||||
|
||||
|
||||
def test_install_unsupported_arch_falls_back_to_apt(
|
||||
deployer, patch_blocked, mock_files_put, track_shell, monkeypatch
|
||||
):
|
||||
# For unsupported architectures, all fact lookups return the arch string.
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"host",
|
||||
SimpleNamespace(get_fact=lambda cls: "riscv64"),
|
||||
)
|
||||
apt_calls = []
|
||||
|
||||
# Mirrors apt.packages() return value: OperationMeta with .changed property.
|
||||
# Only lmtpd triggers a change to verify |= accumulation of changed flags.
|
||||
def fake_apt(**kwargs):
|
||||
apt_calls.append(kwargs)
|
||||
changed = "lmtpd" in kwargs["packages"][0]
|
||||
return SimpleNamespace(changed=changed)
|
||||
|
||||
monkeypatch.setattr(dovecot_deployer.apt, "packages", fake_apt)
|
||||
|
||||
deployer.install()
|
||||
|
||||
actual_pkgs = [c["packages"] for c in apt_calls]
|
||||
assert actual_pkgs == [["dovecot-core"], ["dovecot-imapd"], ["dovecot-lmtpd"]], (
|
||||
f"expected apt install of core/imapd/lmtpd, got {actual_pkgs}"
|
||||
)
|
||||
assert track_shell == [], "should not run dpkg for unsupported arch"
|
||||
assert deployer.need_restart is True, (
|
||||
"need_restart should be True when apt installed a package"
|
||||
)
|
||||
|
||||
|
||||
def test_install_runs_dpkg_when_packages_need_download(
|
||||
deployer, patch_blocked, mock_files_put, track_shell, monkeypatch
|
||||
):
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"host",
|
||||
make_host(
|
||||
(dovecot_deployer.DebPackages, {}),
|
||||
(dovecot_deployer.Arch, "x86_64"),
|
||||
),
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"_pick_url",
|
||||
lambda primary, fallback: primary,
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer.files,
|
||||
"download",
|
||||
lambda **kwargs: SimpleNamespace(changed=True),
|
||||
)
|
||||
|
||||
deployer.install()
|
||||
|
||||
assert len(track_shell) == 1, (
|
||||
f"expected one server.shell() call for dpkg install, got {len(track_shell)}"
|
||||
)
|
||||
cmds = track_shell[0]["commands"]
|
||||
assert len(cmds) == 3, f"expected 3 dpkg/apt commands, got: {cmds}"
|
||||
assert cmds[0].startswith("dpkg --force-confdef --force-confold -i ")
|
||||
assert "apt-get -y --fix-broken install" in cmds[1]
|
||||
assert cmds[2].startswith("dpkg --force-confdef --force-confold -i ")
|
||||
assert deployer.need_restart is True, (
|
||||
"need_restart should be True after dpkg install"
|
||||
)
|
||||
|
||||
|
||||
def test_pick_url_falls_back_on_primary_error(monkeypatch):
|
||||
def raise_error(req, timeout):
|
||||
raise OSError("connection timeout")
|
||||
|
||||
monkeypatch.setattr(dovecot_deployer.urllib.request, "urlopen", raise_error)
|
||||
result = dovecot_deployer._pick_url("http://primary", "http://fallback")
|
||||
assert result == "http://fallback", (
|
||||
f"should fall back when primary fails, got {result!r}"
|
||||
)
|
||||
68
cmdeploy/src/cmdeploy/tests/test_rshell.py
Normal file
68
cmdeploy/src/cmdeploy/tests/test_rshell.py
Normal file
@@ -0,0 +1,68 @@
|
||||
from unittest.mock import patch
|
||||
|
||||
from cmdeploy.remote.rshell import dovecot_recalc_quota
|
||||
|
||||
|
||||
def test_dovecot_recalc_quota_normal_output():
|
||||
"""Normal doveadm output returns parsed dict."""
|
||||
normal_output = (
|
||||
"Quota name Type Value Limit %\n"
|
||||
"User quota STORAGE 5 102400 0\n"
|
||||
"User quota MESSAGE 2 - 0\n"
|
||||
)
|
||||
|
||||
with patch("cmdeploy.remote.rshell.shell", return_value=normal_output):
|
||||
result = dovecot_recalc_quota("user@example.org")
|
||||
|
||||
# shell is called twice (recalc + get), patch returns same for both
|
||||
assert result == {"value": 5, "limit": 102400, "percent": 0}
|
||||
|
||||
|
||||
def test_dovecot_recalc_quota_empty_output():
|
||||
"""Empty doveadm output (trailing newline) must not IndexError."""
|
||||
call_count = [0]
|
||||
|
||||
def mock_shell(cmd):
|
||||
call_count[0] += 1
|
||||
if "recalc" in cmd:
|
||||
return ""
|
||||
# quota get returns only empty lines
|
||||
return "\n\n"
|
||||
|
||||
with patch("cmdeploy.remote.rshell.shell", side_effect=mock_shell):
|
||||
result = dovecot_recalc_quota("user@example.org")
|
||||
|
||||
assert result is None
|
||||
|
||||
|
||||
def test_dovecot_recalc_quota_malformed_output():
|
||||
"""Malformed output with too few columns must not crash."""
|
||||
call_count = [0]
|
||||
|
||||
def mock_shell(cmd):
|
||||
call_count[0] += 1
|
||||
if "recalc" in cmd:
|
||||
return ""
|
||||
# partial line, fewer than 6 parts
|
||||
return "Quota name\nUser quota STORAGE\n"
|
||||
|
||||
with patch("cmdeploy.remote.rshell.shell", side_effect=mock_shell):
|
||||
result = dovecot_recalc_quota("user@example.org")
|
||||
|
||||
assert result is None
|
||||
|
||||
|
||||
def test_dovecot_recalc_quota_header_only():
|
||||
"""Only header line, no data rows."""
|
||||
call_count = [0]
|
||||
|
||||
def mock_shell(cmd):
|
||||
call_count[0] += 1
|
||||
if "recalc" in cmd:
|
||||
return ""
|
||||
return "Quota name Type Value Limit %\n"
|
||||
|
||||
with patch("cmdeploy.remote.rshell.shell", side_effect=mock_shell):
|
||||
result = dovecot_recalc_quota("user@example.org")
|
||||
|
||||
assert result is None
|
||||
@@ -1,266 +0,0 @@
|
||||
Docker installation
|
||||
===================
|
||||
|
||||
This section provides instructions for installing a chatmail relay
|
||||
using Docker Compose.
|
||||
|
||||
.. note::
|
||||
|
||||
- Docker support is experimental, CI builds and tests the image automatically, but please report bugs.
|
||||
- The image wraps the cmdeploy process detailed in the :doc:`getting_started` instructions in a Debian-systemd image with r/w access to `/sys/fs`
|
||||
- Currently amd64-only (arm64 should work but is untested).
|
||||
|
||||
|
||||
Setup Preparation
|
||||
-----------------
|
||||
|
||||
We use ``chat.example.org`` as the chatmail domain in the following
|
||||
steps. Please substitute it with your own domain.
|
||||
|
||||
1. Install docker and docker compose v2 (check with `docker compose version`), install, e.g., through
|
||||
- Debian 12 through the `official install instructions <https://docs.docker.com/engine/install/debian/#install-using-the-repository>`_
|
||||
- Debian 13+ with `apt install docker docker-compose`
|
||||
|
||||
If you must use v1 (EOL since 2023), use `docker-compose` in the following and modify the `docker-compose.yaml` to use `privileged: true` instead of `cgroup: host`, though that gives the container full privileges.
|
||||
|
||||
2. Setup the initial DNS records.
|
||||
The following is an example in the familiar BIND zone file format with
|
||||
a TTL of 1 hour (3600 seconds).
|
||||
Please substitute your domain and IP addresses.
|
||||
|
||||
::
|
||||
|
||||
chat.example.org. 3600 IN A 198.51.100.5
|
||||
chat.example.org. 3600 IN AAAA 2001:db8::5
|
||||
www.chat.example.org. 3600 IN CNAME chat.example.org.
|
||||
mta-sts.chat.example.org. 3600 IN CNAME chat.example.org.
|
||||
|
||||
3. Configure kernel parameters on the host, as these can not be set from the container::
|
||||
|
||||
echo "fs.inotify.max_user_instances=65536" | sudo tee -a /etc/sysctl.d/99-inotify.conf
|
||||
echo "fs.inotify.max_user_watches=65536" | sudo tee -a /etc/sysctl.d/99-inotify.conf
|
||||
sudo sysctl --system
|
||||
|
||||
|
||||
Docker Compose Setup
|
||||
--------------------
|
||||
|
||||
Pre-built images are available from GitHub Container Registry. The
|
||||
``main`` branch and tagged releases are pushed automatically by CI::
|
||||
|
||||
docker pull ghcr.io/chatmail/relay:main # latest main branch
|
||||
docker pull ghcr.io/chatmail/relay:1.2.3 # tagged release
|
||||
|
||||
|
||||
Create service directory
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Either:
|
||||
|
||||
- Create a service directory, e.g., `/srv/chatmail-relay`::
|
||||
|
||||
mkdir -p /srv/chatmail-relay && cd /srv/chatmail-relay
|
||||
wget https://raw.githubusercontent.com/chatmail/relay/refs/heads/main/docker-compose.yaml
|
||||
wget https://raw.githubusercontent.com/chatmail/relay/refs/heads/main/docker-compose.override.yaml.example -O docker-compose.override.yaml
|
||||
|
||||
- or clone the chatmail repo ::
|
||||
|
||||
git clone https://github.com/chatmail/relay
|
||||
cd relay
|
||||
|
||||
|
||||
Customize and start
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
1. Set the fully qualified domain name of the relay::
|
||||
|
||||
echo 'MAIL_DOMAIN=chat.example.org' > .env
|
||||
|
||||
The container generates a ``chatmail.ini`` with defaults from
|
||||
``MAIL_DOMAIN`` on first start. To customize chatmail settings, mount
|
||||
your own ``chatmail.ini`` instead (see `Custom chatmail.ini`_ below).
|
||||
|
||||
2. All local customizations (data paths, extra volumes, config mounts) go in
|
||||
``docker-compose.override.yaml``, which Compose merges automatically with
|
||||
the base file. By default, all data is stored in docker volumes, you will
|
||||
likely want to at least create and configure the mail storage location, but
|
||||
you might also want to configure external TLS certificates there.
|
||||
|
||||
3. Start the container::
|
||||
|
||||
docker compose up -d
|
||||
docker compose logs -f chatmail # view logs, Ctrl+C to exit
|
||||
|
||||
4. After installation is complete, open ``https://chat.example.org`` in
|
||||
your browser.
|
||||
|
||||
Finish install and test
|
||||
-----------------------
|
||||
|
||||
You can test the installation with::
|
||||
|
||||
pip install cmping chat.example.org # or
|
||||
uvx cmping chat.example.org # if you use https://docs.astral.sh/uv/
|
||||
|
||||
You should check and extend your DNS records for better interoperability::
|
||||
|
||||
# Show required DNS records
|
||||
docker exec chatmail cmdeploy dns --ssh-host @local
|
||||
|
||||
You can check server status with::
|
||||
|
||||
docker exec chatmail cmdeploy status --ssh-host @local
|
||||
|
||||
You can run some benchmarks (can also run from any machine with cmdeploy installed)::
|
||||
|
||||
docker exec chatmail cmdeploy bench
|
||||
|
||||
You can run the test suite with::
|
||||
|
||||
docker exec chatmail cmdeploy test --ssh-host localhost
|
||||
|
||||
You can look at logs::
|
||||
|
||||
docker exec chatmail journalctl -fu postfix@-
|
||||
|
||||
|
||||
Customization
|
||||
-------------
|
||||
|
||||
Website
|
||||
^^^^^^^^^^^^^^
|
||||
|
||||
You can customize the chatmail landing page by mounting a directory with
|
||||
your own website source files.
|
||||
|
||||
1. Create a directory with your custom website source::
|
||||
|
||||
mkdir -p ./custom/www/src
|
||||
nano ./custom/www/src/index.md
|
||||
|
||||
2. Add the volume mount in ``docker-compose.override.yaml``::
|
||||
|
||||
services:
|
||||
chatmail:
|
||||
volumes:
|
||||
- ./custom/www:/opt/chatmail-www
|
||||
|
||||
3. Restart the service::
|
||||
|
||||
docker compose down
|
||||
docker compose up -d
|
||||
|
||||
|
||||
Custom chatmail.ini
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If you want to go beyond simply setting the ``MAIL_DOMAIN`` in ``.env``, you
|
||||
can use a regular `chatmail.ini` to give you full control.
|
||||
|
||||
1. Extract the generated config from a running container::
|
||||
|
||||
docker cp chatmail:/etc/chatmail/chatmail.ini ./chatmail.ini
|
||||
|
||||
2. Edit ``chatmail.ini`` as needed.
|
||||
|
||||
3. Add the volume mount in ``docker-compose.override.yaml`` ::
|
||||
|
||||
services:
|
||||
chatmail:
|
||||
volumes:
|
||||
- ./chatmail.ini:/etc/chatmail/chatmail.ini
|
||||
|
||||
4. Restart the container, the container skips generating a new one: ::
|
||||
|
||||
docker compose down && docker compose up -d
|
||||
|
||||
|
||||
External TLS certificates
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If TLS certificates are managed outside the container (e.g. by certbot,
|
||||
acmetool, or Traefik on the host), mount them into the container and set
|
||||
``TLS_EXTERNAL_CERT_AND_KEY`` in ``docker-compose.override.yaml``.
|
||||
Changed certificates are picked up automatically via inotify.
|
||||
See the examples in the example override and :ref:`external-tls` in the getting started guide for details.
|
||||
|
||||
|
||||
Migrating from a bare-metal install
|
||||
------------------------------------
|
||||
|
||||
If you have an existing bare-metal chatmail installation and want to
|
||||
switch to Docker:
|
||||
|
||||
1. Stop all existing services::
|
||||
|
||||
systemctl stop postfix dovecot doveauth nginx opendkim unbound \
|
||||
acmetool-redirector filtermail filtermail-incoming chatmail-turn \
|
||||
iroh-relay chatmail-metadata lastlogin mtail
|
||||
systemctl disable postfix dovecot doveauth nginx opendkim unbound \
|
||||
acmetool-redirector filtermail filtermail-incoming chatmail-turn \
|
||||
iroh-relay chatmail-metadata lastlogin mtail
|
||||
|
||||
2. Copy your existing ``chatmail.ini`` and mount it into the container
|
||||
(see `Custom chatmail.ini`_ above)::
|
||||
|
||||
cp /usr/local/lib/chatmaild/chatmail.ini ./chatmail.ini
|
||||
|
||||
3. Copy persistent data into the ``./data/`` subdirectories (for example, as configured in `Customize and start`_) ::
|
||||
|
||||
mkdir -p data/dkim data/certs data/mail
|
||||
|
||||
# DKIM keys
|
||||
cp -a /etc/dkimkeys/* data/dkim/
|
||||
|
||||
# TLS certificates
|
||||
rsync -a /var/lib/acme/ data/certs/
|
||||
|
||||
Note that ownership of dkim and acme is adjusted on container start.
|
||||
|
||||
For the mail directory::
|
||||
|
||||
rsync -a /home/vmail/ data/mail/
|
||||
|
||||
Alternatively, mount ``/home/vmail`` directly by changing the volume
|
||||
in ``docker-compose-override.yaml``::
|
||||
|
||||
- /home/vmail:/home/vmail
|
||||
|
||||
The three ``./data/`` subdirectories cover all persistent state.
|
||||
Everything else is regenerated by the ``configure`` and ``activate``
|
||||
stages on container start.
|
||||
|
||||
Building the image
|
||||
------------------
|
||||
|
||||
Clone the repository and build the Docker image::
|
||||
|
||||
git clone https://github.com/chatmail/relay
|
||||
cd relay
|
||||
docker/build.sh
|
||||
|
||||
The build bakes all binaries, Python packages, and the install stage
|
||||
into the image. After building, only ``docker-compose.yaml`` and a ``.env``
|
||||
with ``MAIL_DOMAIN`` are needed to run the container. The `build.sh` passes the
|
||||
git hash onto the docker build so it can be determined if there has been a
|
||||
change that warrants a redeploy.
|
||||
|
||||
You can transfer a locally built image to your server directly (pigz is parallel `gzip` which can be used instead as well) ::
|
||||
|
||||
docker save chatmail-relay:latest | pigz | ssh chat.example.org 'pigz -d | docker load'
|
||||
|
||||
|
||||
Forcing a full reinstall
|
||||
------------------------
|
||||
|
||||
On container start, only the ``configure`` and ``activate`` stages run by default.
|
||||
|
||||
To force a full reinstall (e.g. after updating the source), either
|
||||
rebuild the image::
|
||||
|
||||
docker compose build chatmail
|
||||
docker compose up -d
|
||||
|
||||
Or override the stages at runtime without rebuilding::
|
||||
|
||||
CMDEPLOY_STAGES="install,configure,activate" docker compose up -d
|
||||
@@ -98,12 +98,6 @@ steps. Please substitute it with your own domain.
|
||||
configure at your DNS provider (it can take some time until they are
|
||||
public).
|
||||
|
||||
Docker installation
|
||||
-------------------
|
||||
|
||||
There is experimental support for running chatmail via Docker Compose.
|
||||
See :doc:`docker` for full setup instructions.
|
||||
|
||||
Other helpful commands
|
||||
----------------------
|
||||
|
||||
|
||||
@@ -13,7 +13,6 @@ Contributions and feedback welcome through the https://github.com/chatmail/relay
|
||||
:maxdepth: 5
|
||||
|
||||
getting_started
|
||||
docker
|
||||
proxy
|
||||
migrate
|
||||
overview
|
||||
|
||||
@@ -102,17 +102,19 @@ short overview of ``chatmaild`` services:
|
||||
Apple/Google/Huawei.
|
||||
|
||||
- `chatmail-expire <https://github.com/chatmail/relay/blob/main/chatmaild/src/chatmaild/expire.py>`_
|
||||
deletes users if they have not logged in for a longer while.
|
||||
The timeframe can be configured in ``chatmail.ini``.
|
||||
deletes entire mailboxes of users who have not logged in
|
||||
for longer than ``delete_inactive_users_after`` days.
|
||||
|
||||
- `chatmail-quota-expire <https://github.com/chatmail/relay/blob/main/chatmaild/src/chatmaild/quota_expire.py>`_
|
||||
is called by Dovecot's ``quota_warning`` mechanism when a
|
||||
user reaches 90% of their mailbox quota.
|
||||
It removes the largest and oldest messages
|
||||
until usage drops below 80% of the quota.
|
||||
|
||||
- `lastlogin <https://github.com/chatmail/relay/blob/main/chatmaild/src/chatmaild/lastlogin.py>`_
|
||||
is contacted by Dovecot when a user logs in and stores the date of
|
||||
the login.
|
||||
|
||||
- `metrics <https://github.com/chatmail/relay/blob/main/chatmaild/src/chatmaild/metrics.py>`_
|
||||
collects some metrics and displays them at
|
||||
``https://example.org/metrics``.
|
||||
|
||||
``www/``
|
||||
~~~~~~~~~
|
||||
|
||||
@@ -142,11 +144,9 @@ Chatmail relay dependency diagram
|
||||
nginx-internal --- autoconfig.xml;
|
||||
certs-nginx[("`TLS certs
|
||||
/var/lib/acme`")] --> nginx-internal;
|
||||
systemd-timer --- chatmail-metrics;
|
||||
systemd-timer --- acmetool;
|
||||
systemd-timer --- chatmail-expire-daily;
|
||||
systemd-timer --- chatmail-expire-inactive;
|
||||
systemd-timer --- chatmail-fsreport-daily;
|
||||
chatmail-metrics --- website;
|
||||
acmetool --> certs[("`TLS certs
|
||||
/var/lib/acme`")];
|
||||
nginx-external --- |993|dovecot;
|
||||
@@ -162,9 +162,11 @@ Chatmail relay dependency diagram
|
||||
/home/vmail/.../user"];
|
||||
dovecot --- |lastlogin.socket|lastlogin;
|
||||
dovecot --- chatmail-metadata;
|
||||
dovecot --- |quota-warning|chatmail-quota-expire;
|
||||
chatmail-quota-expire --- maildir;
|
||||
lastlogin --- maildir;
|
||||
doveauth --- maildir;
|
||||
chatmail-expire-daily --- maildir;
|
||||
chatmail-expire-inactive --- maildir;
|
||||
chatmail-fsreport-daily --- maildir;
|
||||
chatmail-metadata --- iroh-relay;
|
||||
chatmail-metadata --- |encrypted device token| notifications.delta.chat;
|
||||
|
||||
@@ -1,44 +0,0 @@
|
||||
# Local overrides: copy to docker-compose.override.yaml in the repo root.
|
||||
# Compose automatically merges this with docker-compose.yaml.
|
||||
#
|
||||
# cp docker-compose.override.yaml.example docker-compose.override.yaml
|
||||
#
|
||||
# Volumes are APPENDED to the base file's volumes list, environment and other scalar keys are MERGED by key.
|
||||
services:
|
||||
chatmail:
|
||||
volumes:
|
||||
## Data paths — bind-mount to host directories for easy access/backup.
|
||||
|
||||
# - ./data/dkim:/etc/dkimkeys
|
||||
# - ./data/certs:/var/lib/acme
|
||||
|
||||
# - ./data/mail:/home/vmail
|
||||
## Or mount from an existing bare-metal install.
|
||||
# - /home/vmail:/home/vmail
|
||||
|
||||
## Mount your own chatmail.ini (skips auto-generation):
|
||||
# - ./chatmail.ini:/etc/chatmail/chatmail.ini
|
||||
|
||||
## Custom website:
|
||||
# - ./custom/www:/opt/chatmail-www
|
||||
|
||||
## Debug — mount scripts from the repo for live editing:
|
||||
# - ./docker/chatmail-init.sh:/chatmail-init.sh
|
||||
# - ./docker/entrypoint.sh:/entrypoint.sh
|
||||
|
||||
# environment:
|
||||
## Mount certs (above) and set TLS_EXTERNAL_CERT_AND_KEY to in-container paths.
|
||||
## A tls-cert-reload.path watcher inside the container reloads services
|
||||
## when the cert file changes. However, inotify does not cross bind-mount
|
||||
## boundaries, so host-side renewals (certbot, acmetool, etc.) must
|
||||
## notify the container explicitly. Add this to your renewal hook:
|
||||
##
|
||||
## docker exec chatmail systemctl start tls-cert-reload.service
|
||||
##
|
||||
## Host acmetool (bare-metal migration): create mount above, and
|
||||
## rsync -a /var/lib/acme/live data/certs
|
||||
# TLS_EXTERNAL_CERT_AND_KEY: "/var/lib/acme/live/${MAIL_DOMAIN}/fullchain /var/lib/acme/live/${MAIL_DOMAIN}/privkey"
|
||||
##
|
||||
## (Untested) Traefik certs-dumper (see docker/docker-compose-traefik.yaml) - also add volume:
|
||||
## - traefik-certs:/certs:ro
|
||||
# TLS_EXTERNAL_CERT_AND_KEY: "/certs/${MAIL_DOMAIN}/certificate.crt /certs/${MAIL_DOMAIN}/privatekey.key"
|
||||
@@ -1,48 +0,0 @@
|
||||
# Base compose file — do not edit. Put customizations (data paths, extra
|
||||
# volumes, env overrides) in docker-compose.override.yaml instead.
|
||||
# See docker/docker-compose.override.yaml.example for a starting point.
|
||||
#
|
||||
# Security notes: this container uses
|
||||
# - network_mode:host chatmail needs many ports (25, 53, 80, 143, 443, 465,
|
||||
# 587, 993, 3340, 8443) and needs to operate from the real IP, which bridging
|
||||
# would make tricky
|
||||
# - cgroup:host (required for systemd).
|
||||
# Together these give the container near-host-level access. This is acceptable
|
||||
# for a dedicated mail server, but be aware that the container can bind any
|
||||
# port and see all host network traffic.
|
||||
|
||||
services:
|
||||
chatmail:
|
||||
build:
|
||||
context: ./
|
||||
dockerfile: docker/chatmail_relay.dockerfile
|
||||
args:
|
||||
GIT_HASH: ${GIT_HASH:-unknown}
|
||||
image: chatmail-relay:latest
|
||||
restart: unless-stopped
|
||||
container_name: chatmail
|
||||
# Required for systemd — use only one of the following:
|
||||
cgroup: host # compose v2
|
||||
# privileged: true # compose v1 (less restricted)
|
||||
tty: true # required for logs
|
||||
tmpfs: # required for systemd
|
||||
- /tmp
|
||||
- /run
|
||||
- /run/lock
|
||||
logging:
|
||||
driver: none
|
||||
environment:
|
||||
MAIL_DOMAIN: $MAIL_DOMAIN
|
||||
network_mode: "host"
|
||||
volumes:
|
||||
## system (required)
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:rw
|
||||
## data (defaults — override in docker-compose.override.yaml)
|
||||
- mail:/home/vmail
|
||||
- dkim:/etc/dkimkeys
|
||||
- certs:/var/lib/acme
|
||||
|
||||
volumes:
|
||||
mail:
|
||||
dkim:
|
||||
certs:
|
||||
@@ -1,9 +0,0 @@
|
||||
#!/bin/sh
|
||||
# Build the chatmail Docker image with the current git hash baked in.
|
||||
# Usage: ./docker/build.sh [extra docker-compose build args...]
|
||||
#
|
||||
# .git/ is excluded from the build context (.dockerignore) so the hash
|
||||
# must be passed as a build arg from the host.
|
||||
|
||||
export GIT_HASH=$(git rev-parse HEAD)
|
||||
exec docker compose build "$@"
|
||||
@@ -1,14 +0,0 @@
|
||||
[Unit]
|
||||
Description=Run container setup commands
|
||||
After=multi-user.target
|
||||
ConditionPathExists=/chatmail-init.sh
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=/bin/bash /chatmail-init.sh
|
||||
RemainAfterExit=true
|
||||
WorkingDirectory=/opt/chatmail
|
||||
PassEnvironment=<envs_list>
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,87 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
export CHATMAIL_INI="${CHATMAIL_INI:-/etc/chatmail/chatmail.ini}"
|
||||
export CHATMAIL_NOSYSCTL=True
|
||||
export CHATMAIL_NOPORTCHECK=True
|
||||
|
||||
CMDEPLOY=/opt/cmdeploy/bin/cmdeploy
|
||||
|
||||
if [ -z "$MAIL_DOMAIN" ]; then
|
||||
echo "ERROR: Environment variable 'MAIL_DOMAIN' must be set!" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Generate DKIM keys if not mounted
|
||||
if [ ! -f /etc/dkimkeys/opendkim.private ]; then
|
||||
/usr/sbin/opendkim-genkey -D /etc/dkimkeys -d "$MAIL_DOMAIN" -s opendkim
|
||||
fi
|
||||
# Fix ownership for bind-mounted keys (host opendkim UID may differ from container)
|
||||
chown -R opendkim:opendkim /etc/dkimkeys
|
||||
|
||||
# Create chatmail.ini, skip if mounted
|
||||
mkdir -p "$(dirname "$CHATMAIL_INI")"
|
||||
if [ ! -f "$CHATMAIL_INI" ]; then
|
||||
$CMDEPLOY init --config "$CHATMAIL_INI" "$MAIL_DOMAIN"
|
||||
fi
|
||||
|
||||
# Auto-detect IPv6: if the host has no IPv6 connectivity, set disable_ipv6
|
||||
# in the ini so dovecot/postfix/nginx bind to IPv4 only.
|
||||
# Uses network_mode:host so /proc/net/if_inet6 reflects the host's stack.
|
||||
if [ ! -e /proc/net/if_inet6 ]; then
|
||||
if grep -q '^disable_ipv6 = False' "$CHATMAIL_INI"; then
|
||||
sed -i 's/^disable_ipv6 = False/disable_ipv6 = True/' "$CHATMAIL_INI"
|
||||
echo "[INFO] IPv6 not available, set disable_ipv6 = True"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Inject external TLS paths from env var unless defined in chatmail.ini
|
||||
if [ -n "${TLS_EXTERNAL_CERT_AND_KEY:-}" ]; then
|
||||
if ! grep -q '^tls_external_cert_and_key' "$CHATMAIL_INI"; then
|
||||
echo "tls_external_cert_and_key = $TLS_EXTERNAL_CERT_AND_KEY" >> "$CHATMAIL_INI"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Ensure mailboxes directory exists (chatmail-metadata needs it at startup,
|
||||
# but Dovecot only creates it on first mail delivery)
|
||||
mkdir -p "/home/vmail/mail/${MAIL_DOMAIN}"
|
||||
chown vmail:vmail "/home/vmail/mail/${MAIL_DOMAIN}"
|
||||
|
||||
# --- Deploy fingerprint: skip cmdeploy run if nothing changed ---
|
||||
# On restart with identical image+config, systemd already brings up all
|
||||
# enabled services only configure+activate are needed here.
|
||||
IMAGE_VERSION_FILE="/etc/chatmail-image-version"
|
||||
FINGERPRINT_FILE="/etc/chatmail/.deploy-fingerprint"
|
||||
image_ver="none"
|
||||
[ -f "$IMAGE_VERSION_FILE" ] && image_ver=$(cat "$IMAGE_VERSION_FILE")
|
||||
config_hash=$(sha256sum "$CHATMAIL_INI" | cut -c1-16)
|
||||
current_fp="${image_ver}:${config_hash}"
|
||||
|
||||
# CMDEPLOY_STAGES non-empty in env = operator override -> always run.
|
||||
# Otherwise, if fingerprint matches the last successful deploy, skip.
|
||||
if [ -z "${CMDEPLOY_STAGES:-}" ] \
|
||||
&& [ -f "$FINGERPRINT_FILE" ] \
|
||||
&& [ "$(cat "$FINGERPRINT_FILE")" = "$current_fp" ]; then
|
||||
echo "[INFO] No changes detected ($current_fp), skipping deploy."
|
||||
else
|
||||
export CMDEPLOY_STAGES="${CMDEPLOY_STAGES:-configure,activate}"
|
||||
|
||||
# Skip DNS check when MAIL_DOMAIN is a bare IP address
|
||||
SKIP_DNS=""
|
||||
if [[ "$MAIL_DOMAIN" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]] || [[ "$MAIL_DOMAIN" =~ : ]]; then
|
||||
SKIP_DNS="--skip-dns-check"
|
||||
fi
|
||||
$CMDEPLOY run --config "$CHATMAIL_INI" --ssh-host @local $SKIP_DNS
|
||||
|
||||
# Restore the build-time hash
|
||||
cp /etc/chatmail-image-version /etc/chatmail-version
|
||||
echo "$current_fp" > "$FINGERPRINT_FILE"
|
||||
fi
|
||||
|
||||
# Signal success to Docker healthcheck
|
||||
touch /run/chatmail-init.done
|
||||
|
||||
# Forward journald to console so `docker compose logs` works
|
||||
grep -q '^ForwardToConsole=yes' /etc/systemd/journald.conf \
|
||||
|| echo "ForwardToConsole=yes" >> /etc/systemd/journald.conf
|
||||
systemctl restart systemd-journald
|
||||
@@ -1,101 +0,0 @@
|
||||
# syntax=docker/dockerfile:1
|
||||
FROM jrei/systemd-debian:12 AS base
|
||||
|
||||
ENV LANG=en_US.UTF-8
|
||||
|
||||
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt/lists,sharing=locked \
|
||||
echo 'APT::Install-Recommends "0";' > /etc/apt/apt.conf.d/01norecommend && \
|
||||
echo 'APT::Install-Suggests "0";' >> /etc/apt/apt.conf.d/01norecommend && \
|
||||
apt-get update && \
|
||||
DEBIAN_FRONTEND=noninteractive TZ=UTC \
|
||||
apt-get install -y \
|
||||
ca-certificates \
|
||||
gcc \
|
||||
git \
|
||||
python3 \
|
||||
python3-dev \
|
||||
python3-venv \
|
||||
tzdata \
|
||||
locales && \
|
||||
sed -i -e "s/# $LANG.*/$LANG UTF-8/" /etc/locale.gen && \
|
||||
dpkg-reconfigure --frontend=noninteractive locales && \
|
||||
update-locale LANG=$LANG
|
||||
|
||||
# --- Build-time: install cmdeploy venv and run install stage ---
|
||||
# Editable install so importlib.resources reads directly from the source tree.
|
||||
# On container start only "configure,activate" stages run.
|
||||
|
||||
# Copy dependency metadata first so pip install layer is cached
|
||||
COPY cmdeploy/pyproject.toml /opt/chatmail/cmdeploy/pyproject.toml
|
||||
COPY chatmaild/pyproject.toml /opt/chatmail/chatmaild/pyproject.toml
|
||||
|
||||
# Dummy scaffolding so editable install can discover packages
|
||||
RUN mkdir -p /opt/chatmail/cmdeploy/src/cmdeploy \
|
||||
/opt/chatmail/chatmaild/src/chatmaild && \
|
||||
touch /opt/chatmail/cmdeploy/src/cmdeploy/__init__.py \
|
||||
/opt/chatmail/chatmaild/src/chatmaild/__init__.py
|
||||
|
||||
# Dummy git repo: .git/ is excluded from the build context (.dockerignore)
|
||||
# but setuptools calls `git ls-files` when building the sdist.
|
||||
WORKDIR /opt/chatmail
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
git init -q && \
|
||||
python3 -m venv /opt/cmdeploy && \
|
||||
/opt/cmdeploy/bin/pip install -e chatmaild/ -e cmdeploy/
|
||||
|
||||
# Full source copy (editable install's .egg-link still points here)
|
||||
COPY . /opt/chatmail/
|
||||
|
||||
# Minimal chatmail.ini
|
||||
RUN printf '[params]\nmail_domain = build.local\n' > /tmp/chatmail.ini
|
||||
|
||||
RUN CMDEPLOY_STAGES=install \
|
||||
CHATMAIL_INI=/tmp/chatmail.ini \
|
||||
CHATMAIL_NOSYSCTL=True \
|
||||
CHATMAIL_NOPORTCHECK=True \
|
||||
/opt/cmdeploy/bin/pyinfra @local \
|
||||
/opt/chatmail/cmdeploy/src/cmdeploy/run.py -y
|
||||
|
||||
RUN cp -a www/ /opt/chatmail-www/
|
||||
|
||||
# Remove build-only packages and their deps — not needed at runtime
|
||||
RUN apt-get purge -y gcc git python3-dev && \
|
||||
apt-get autoremove -y && \
|
||||
rm -f /tmp/chatmail.ini
|
||||
|
||||
# Record image version (used in deploy fingerprint at runtime).
|
||||
# GIT_HASH is passed as a build arg (from docker-compose or CI) so that
|
||||
# .git/ can be excluded from the build context via .dockerignore.
|
||||
# Two files: chatmail-image-version is the immutable build hash (survives
|
||||
# deploys); chatmail-version is overwritten by cmdeploy run and restored
|
||||
# from the image version after each deploy in chatmail-init.sh.
|
||||
ARG GIT_HASH=unknown
|
||||
RUN echo "$GIT_HASH" > /etc/chatmail-image-version && \
|
||||
echo "$GIT_HASH" > /etc/chatmail-version
|
||||
# --- End build-time install ---
|
||||
|
||||
ENV TZ=:/etc/localtime
|
||||
ENV PATH="/opt/cmdeploy/bin:${PATH}"
|
||||
RUN ln -s /etc/chatmail/chatmail.ini /opt/chatmail/chatmail.ini
|
||||
|
||||
ARG CHATMAIL_INIT_SERVICE_PATH=/lib/systemd/system/chatmail-init.service
|
||||
COPY ./docker/chatmail-init.service "$CHATMAIL_INIT_SERVICE_PATH"
|
||||
RUN ln -sf "$CHATMAIL_INIT_SERVICE_PATH" "/etc/systemd/system/multi-user.target.wants/chatmail-init.service"
|
||||
|
||||
# Remove default nginx site config at build time (not in entrypoint)
|
||||
RUN rm -f /etc/nginx/sites-enabled/default
|
||||
|
||||
COPY --chmod=555 ./docker/chatmail-init.sh /chatmail-init.sh
|
||||
COPY --chmod=555 ./docker/entrypoint.sh /entrypoint.sh
|
||||
COPY --chmod=555 ./docker/healthcheck.sh /healthcheck.sh
|
||||
|
||||
HEALTHCHECK --interval=10s --start-period=180s --timeout=10s --retries=3 \
|
||||
CMD /healthcheck.sh
|
||||
|
||||
STOPSIGNAL SIGRTMIN+3
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
|
||||
CMD [ "--default-standard-output=journal+console", \
|
||||
"--default-standard-error=journal+console" ]
|
||||
@@ -1,11 +0,0 @@
|
||||
# Used by .github/workflows/docker-ci.yaml
|
||||
# The GHCR image is set via CHATMAIL_IMAGE env var at deploy time.
|
||||
services:
|
||||
chatmail:
|
||||
image: ${CHATMAIL_IMAGE:-chatmail-relay:latest}
|
||||
volumes:
|
||||
- /srv/chatmail/chatmail.ini:/etc/chatmail/chatmail.ini
|
||||
- /srv/chatmail/dkim:/etc/dkimkeys
|
||||
- /srv/chatmail/certs:/var/lib/acme
|
||||
environment:
|
||||
TLS_EXTERNAL_CERT_AND_KEY: /var/lib/acme/live/${MAIL_DOMAIN}/fullchain /var/lib/acme/live/${MAIL_DOMAIN}/privkey
|
||||
@@ -1,9 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -eo pipefail
|
||||
|
||||
CHATMAIL_INIT_SERVICE_PATH="${CHATMAIL_INIT_SERVICE_PATH:-/lib/systemd/system/chatmail-init.service}"
|
||||
|
||||
env_vars="MAIL_DOMAIN CMDEPLOY_STAGES CHATMAIL_INI TLS_EXTERNAL_CERT_AND_KEY PATH"
|
||||
sed -i "s|<envs_list>|$env_vars|g" "$CHATMAIL_INIT_SERVICE_PATH"
|
||||
|
||||
exec /lib/systemd/systemd "$@"
|
||||
@@ -1,16 +0,0 @@
|
||||
#!/bin/bash
|
||||
# returns 0 when chatmail-init succeeded and all expected services are running.
|
||||
|
||||
set -e
|
||||
|
||||
test -f /run/chatmail-init.done
|
||||
|
||||
# Core services
|
||||
services="chatmail-metadata doveauth dovecot filtermail filtermail-incoming nginx postfix unbound"
|
||||
|
||||
# Optional services
|
||||
for svc in iroh-relay turnserver; do
|
||||
systemctl is-enabled "$svc" 2>/dev/null && services="$services $svc"
|
||||
done
|
||||
|
||||
exec systemctl is-active $services
|
||||
@@ -1 +0,0 @@
|
||||
MAIL_DOMAIN=chat.example.com
|
||||
Reference in New Issue
Block a user