mirror of
https://github.com/chatmail/relay.git
synced 2026-05-10 16:04:37 +00:00
Compare commits
94 Commits
docker-reb
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
45fafa10a9 | ||
|
|
ee435a7ef7 | ||
|
|
8fafd4e79f | ||
|
|
129b8a20bc | ||
|
|
a1f64ebd96 | ||
|
|
fb64be97b5 | ||
|
|
b05e26819f | ||
|
|
1db586b3eb | ||
|
|
44fe2dc08f | ||
|
|
8721600d13 | ||
|
|
dfed2b4681 | ||
|
|
f5fd286663 | ||
|
|
16b00da373 | ||
|
|
75606f5eb8 | ||
|
|
d256538f81 | ||
|
|
fdf8e5e345 | ||
|
|
81a161d433 | ||
|
|
454ac6248a | ||
|
|
85915652b3 | ||
|
|
1e8c56e08a | ||
|
|
a65f082817 | ||
|
|
6c18d37772 | ||
|
|
df4ff92133 | ||
|
|
825831ee81 | ||
|
|
0aa08b7413 | ||
|
|
14dfabf2ff | ||
|
|
0a77b3339b | ||
|
|
001d8c80fc | ||
|
|
1e376f7945 | ||
|
|
1ae92e0639 | ||
|
|
56386c231b | ||
|
|
2bdfecff72 | ||
|
|
cef739e3b3 | ||
|
|
3d128d3c64 | ||
|
|
79f68342f4 | ||
|
|
54863453c2 | ||
|
|
74326a8c54 | ||
|
|
59e5dea597 | ||
|
|
d7d89d66c1 | ||
|
|
00d723bd6e | ||
|
|
c257bfca4b | ||
|
|
82c9831369 | ||
|
|
b835318ce9 | ||
|
|
b4a46d23e6 | ||
|
|
c6d9d27a84 | ||
|
|
4521f03c99 | ||
|
|
c78859aec6 | ||
|
|
98bd5944cc | ||
|
|
e8933c455f | ||
|
|
d3a483c403 | ||
|
|
e687120d96 | ||
|
|
7409bd3452 | ||
|
|
1a34172487 | ||
|
|
38246ca8ea | ||
|
|
2635ac7e6d | ||
|
|
4fabfb31f8 | ||
|
|
36478dbfcf | ||
|
|
ff541b81ea | ||
|
|
ed9b4092a8 | ||
|
|
1b8ad3ca12 | ||
|
|
f85d304e65 | ||
|
|
4d1856d8f1 | ||
|
|
ae2ab52aa9 | ||
|
|
d0c396538b | ||
|
|
78a4e28408 | ||
|
|
2432d4f498 | ||
|
|
31301abb42 | ||
|
|
6b4edd8502 | ||
|
|
9c467ab3e8 | ||
|
|
774350778b | ||
|
|
06d53503e5 | ||
|
|
b128935940 | ||
|
|
2e38c61ca2 | ||
|
|
9dd8ce8ce1 | ||
|
|
0ae3f94ecc | ||
|
|
4481a12369 | ||
|
|
a47016e9f2 | ||
|
|
4e6ba7378d | ||
|
|
e428c646d1 | ||
|
|
dbd5cd16f5 | ||
|
|
e21f2a0fa2 | ||
|
|
8ca0909fa5 | ||
|
|
2c99cc84aa | ||
|
|
73309778c2 | ||
|
|
50ecc2b315 | ||
|
|
7b5b180b4b | ||
|
|
193624e522 | ||
|
|
437287fadc | ||
|
|
0ad679997a | ||
|
|
38cc1c7cd6 | ||
|
|
7a6ed8340e | ||
|
|
2ce9e5fe78 | ||
|
|
cf96be2cbb | ||
|
|
36eb63faa1 |
@@ -1,7 +0,0 @@
|
||||
.git
|
||||
data/
|
||||
venv/
|
||||
__pycache__
|
||||
*.pyc
|
||||
*.orig
|
||||
.pytest_cache
|
||||
50
.github/workflows/ci.yaml
vendored
50
.github/workflows/ci.yaml
vendored
@@ -1,21 +1,35 @@
|
||||
name: CI
|
||||
name: Run unit-tests and container-based deploy+test verification
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
# Triggers when a PR is merged into main or a direct push occurs
|
||||
push:
|
||||
branches: [ "main" ]
|
||||
|
||||
# Triggers for any PR (and its subsequent commits) targeting the main branch
|
||||
pull_request:
|
||||
branches: [ "main" ]
|
||||
|
||||
permissions: {}
|
||||
|
||||
# Newest push wins: Prevents multiple runs from clashing and wasting runner efforts
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
|
||||
jobs:
|
||||
tox:
|
||||
name: isolated chatmaild tests
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v6
|
||||
# Checkout pull request HEAD commit instead of merge commit
|
||||
# Otherwise `test_deployed_state` will be unhappy.
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
persist-credentials: false
|
||||
- name: download filtermail
|
||||
run: curl -L https://github.com/chatmail/filtermail/releases/download/v0.3.0/filtermail-x86_64 -o /usr/local/bin/filtermail && chmod +x /usr/local/bin/filtermail
|
||||
run: curl -L https://github.com/chatmail/filtermail/releases/download/v0.6.4/filtermail-x86_64 -o /usr/local/bin/filtermail && chmod +x /usr/local/bin/filtermail
|
||||
- name: run chatmaild tests
|
||||
working-directory: chatmaild
|
||||
run: pipx run tox
|
||||
@@ -24,7 +38,10 @@ jobs:
|
||||
name: deploy-chatmail tests
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
persist-credentials: false
|
||||
|
||||
- name: initenv
|
||||
run: scripts/initenv.sh
|
||||
@@ -38,5 +55,24 @@ jobs:
|
||||
- name: run deploy-chatmail offline tests
|
||||
run: pytest --pyargs cmdeploy
|
||||
|
||||
# all other cmdeploy commands require a staging server
|
||||
# see https://github.com/deltachat/chatmail/issues/100
|
||||
lxc-test:
|
||||
name: LXC deploy and test
|
||||
uses: chatmail/cmlxc/.github/workflows/lxc-test.yml@v0.13.5
|
||||
with:
|
||||
cmlxc_version: v0.13.5
|
||||
cmlxc_commands: |
|
||||
cmlxc init
|
||||
# single cmdeploy relay test
|
||||
cmlxc -v deploy-cmdeploy --source ./repo cm0
|
||||
cmlxc -v test-mini cm0
|
||||
cmlxc -v test-cmdeploy cm0
|
||||
|
||||
# cross cmdeploy relay test
|
||||
cmlxc -v deploy-cmdeploy --source ./repo --ipv4-only cm1
|
||||
cmlxc -v test-cmdeploy cm0 cm1
|
||||
|
||||
# cross cmdeploy/madmail relay tests
|
||||
cmlxc -v deploy-madmail mad0
|
||||
cmlxc -v test-cmdeploy cm0 mad0
|
||||
cmlxc -v test-mini cm0 mad0
|
||||
cmlxc -v test-mini mad0 cm0
|
||||
|
||||
37
.github/workflows/docker-dispatch.yaml
vendored
Normal file
37
.github/workflows/docker-dispatch.yaml
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
# Notify the docker repo to build and test a new image after relay CI passes.
|
||||
#
|
||||
# Sends a repository_dispatch event to chatmail/docker with the relay ref
|
||||
# and short SHA, which triggers docker-ci.yaml to build, push to GHCR,
|
||||
# and run integration tests via cmlxc.
|
||||
|
||||
name: Trigger Docker build
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
workflow_dispatch:
|
||||
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
dispatch:
|
||||
name: Dispatch build to chatmail/docker
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository == 'chatmail/relay'
|
||||
steps:
|
||||
- name: Compute short SHA
|
||||
id: sha
|
||||
run: echo "short=$(echo '${{ github.sha }}' | cut -c1-7)" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Send repository_dispatch
|
||||
uses: peter-evans/repository-dispatch@ff45666b9427631e3450c54a1bcbee4d9ff4d7c0 # v3
|
||||
with:
|
||||
token: ${{ secrets.CHATMAIL_DOCKER_DISPATCH_TOKEN }}
|
||||
repository: chatmail/docker
|
||||
event-type: relay-updated
|
||||
client-payload: >-
|
||||
{
|
||||
"relay_ref": "${{ github.ref_name }}",
|
||||
"relay_sha": "${{ github.sha }}",
|
||||
"relay_sha_short": "${{ steps.sha.outputs.short }}"
|
||||
}
|
||||
14
.github/workflows/docs-preview.yaml
vendored
14
.github/workflows/docs-preview.yaml
vendored
@@ -7,6 +7,8 @@ on:
|
||||
- 'scripts/build-docs.sh'
|
||||
- '.github/workflows/docs-preview.yaml'
|
||||
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
scripts:
|
||||
name: build
|
||||
@@ -16,6 +18,8 @@ jobs:
|
||||
url: https://staging.chatmail.at/doc/relay/${{ steps.prepare.outputs.prid }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: initenv
|
||||
run: scripts/initenv.sh
|
||||
@@ -34,18 +38,22 @@ jobs:
|
||||
- name: Get Pullrequest ID
|
||||
id: prepare
|
||||
run: |
|
||||
export PULLREQUEST_ID=$(echo "${{ github.ref }}" | cut -d "/" -f3)
|
||||
export PULLREQUEST_ID=$(echo "${GITHUB_REF}" | cut -d "/" -f3)
|
||||
echo "prid=$PULLREQUEST_ID" >> $GITHUB_OUTPUT
|
||||
if [ $(expr length "${{ secrets.USERNAME }}") -gt "1" ]; then echo "uploadtoserver=true" >> $GITHUB_OUTPUT; fi
|
||||
- run: |
|
||||
echo "baseurl: /${{ steps.prepare.outputs.prid }}" >> _config.yml
|
||||
echo "baseurl: /${STEPS_PREPARE_OUTPUTS_PRID}" >> _config.yml
|
||||
env:
|
||||
STEPS_PREPARE_OUTPUTS_PRID: ${{ steps.prepare.outputs.prid }}
|
||||
|
||||
- name: Upload preview
|
||||
run: |
|
||||
mkdir -p "$HOME/.ssh"
|
||||
echo "${{ secrets.CHATMAIL_STAGING_SSHKEY }}" > "$HOME/.ssh/key"
|
||||
chmod 600 "$HOME/.ssh/key"
|
||||
rsync -rILvh -e "ssh -i $HOME/.ssh/key -o StrictHostKeyChecking=no" $GITHUB_WORKSPACE/doc/build/ "${{ secrets.USERNAME }}@chatmail.at:/var/www/html/staging.chatmail.at/doc/relay/${{ steps.prepare.outputs.prid }}/"
|
||||
rsync -rILvh -e "ssh -i $HOME/.ssh/key -o StrictHostKeyChecking=no" $GITHUB_WORKSPACE/doc/build/ "${{ secrets.USERNAME }}@chatmail.at:/var/www/html/staging.chatmail.at/doc/relay/${STEPS_PREPARE_OUTPUTS_PRID}/"
|
||||
env:
|
||||
STEPS_PREPARE_OUTPUTS_PRID: ${{ steps.prepare.outputs.prid }}
|
||||
|
||||
- name: check links
|
||||
working-directory: doc
|
||||
|
||||
4
.github/workflows/docs.yaml
vendored
4
.github/workflows/docs.yaml
vendored
@@ -10,6 +10,8 @@ on:
|
||||
- 'scripts/build-docs.sh'
|
||||
- '.github/workflows/docs.yaml'
|
||||
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
scripts:
|
||||
name: build
|
||||
@@ -19,6 +21,8 @@ jobs:
|
||||
url: https://chatmail.at/doc/relay/
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: initenv
|
||||
run: scripts/initenv.sh
|
||||
|
||||
96
.github/workflows/test-and-deploy-ipv4only.yaml
vendored
96
.github/workflows/test-and-deploy-ipv4only.yaml
vendored
@@ -1,96 +0,0 @@
|
||||
name: deploy on staging-ipv4.testrun.org, and run tests
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
paths-ignore:
|
||||
- 'scripts/**'
|
||||
- '**/README.md'
|
||||
- 'CHANGELOG.md'
|
||||
- 'LICENSE'
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
name: deploy on staging-ipv4.testrun.org, and run tests
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
environment:
|
||||
name: staging-ipv4.testrun.org
|
||||
url: https://staging-ipv4.testrun.org/
|
||||
concurrency: staging-ipv4.testrun.org
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: prepare SSH
|
||||
run: |
|
||||
mkdir ~/.ssh
|
||||
echo "${{ secrets.STAGING_SSH_KEY }}" >> ~/.ssh/id_ed25519
|
||||
chmod 600 ~/.ssh/id_ed25519
|
||||
ssh-keyscan staging-ipv4.testrun.org > ~/.ssh/known_hosts
|
||||
# save previous acme & dkim state
|
||||
rsync -avz root@staging-ipv4.testrun.org:/var/lib/acme acme-ipv4 || true
|
||||
rsync -avz root@staging-ipv4.testrun.org:/etc/dkimkeys dkimkeys-ipv4 || true
|
||||
# store previous acme & dkim state on ns.testrun.org, if it contains useful certs
|
||||
if [ -f dkimkeys-ipv4/dkimkeys/opendkim.private ]; then rsync -avz -e "ssh -o StrictHostKeyChecking=accept-new" dkimkeys-ipv4 root@ns.testrun.org:/tmp/ || true; fi
|
||||
if [ "$(ls -A acme-ipv4/acme/certs)" ]; then rsync -avz -e "ssh -o StrictHostKeyChecking=accept-new" acme-ipv4 root@ns.testrun.org:/tmp/ || true; fi
|
||||
# make sure CAA record isn't set
|
||||
scp -o StrictHostKeyChecking=accept-new .github/workflows/staging-ipv4.testrun.org-default.zone root@ns.testrun.org:/etc/nsd/staging-ipv4.testrun.org.zone
|
||||
ssh root@ns.testrun.org sed -i '/CAA/d' /etc/nsd/staging-ipv4.testrun.org.zone
|
||||
ssh root@ns.testrun.org nsd-checkzone staging-ipv4.testrun.org /etc/nsd/staging-ipv4.testrun.org.zone
|
||||
ssh root@ns.testrun.org systemctl reload nsd
|
||||
|
||||
- name: rebuild staging-ipv4.testrun.org to have a clean VPS
|
||||
run: |
|
||||
curl -X POST \
|
||||
-H "Authorization: Bearer ${{ secrets.HETZNER_API_TOKEN }}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"image":"debian-12"}' \
|
||||
"https://api.hetzner.cloud/v1/servers/${{ secrets.STAGING_IPV4_SERVER_ID }}/actions/rebuild"
|
||||
|
||||
- run: scripts/initenv.sh
|
||||
|
||||
- name: append venv/bin to PATH
|
||||
run: echo venv/bin >>$GITHUB_PATH
|
||||
|
||||
- name: upload TLS cert after rebuilding
|
||||
run: |
|
||||
echo " --- wait until staging-ipv4.testrun.org VPS is rebuilt --- "
|
||||
rm ~/.ssh/known_hosts
|
||||
while ! ssh -o ConnectTimeout=180 -o StrictHostKeyChecking=accept-new -v root@staging-ipv4.testrun.org id -u ; do sleep 1 ; done
|
||||
ssh -o StrictHostKeyChecking=accept-new -v root@staging-ipv4.testrun.org id -u
|
||||
# download acme & dkim state from ns.testrun.org
|
||||
rsync -e "ssh -o StrictHostKeyChecking=accept-new" -avz root@ns.testrun.org:/tmp/acme-ipv4/acme acme-restore || true
|
||||
rsync -avz root@ns.testrun.org:/tmp/dkimkeys-ipv4/dkimkeys dkimkeys-restore || true
|
||||
# restore acme & dkim state to staging2.testrun.org
|
||||
rsync -avz acme-restore/acme root@staging-ipv4.testrun.org:/var/lib/ || true
|
||||
rsync -avz dkimkeys-restore/dkimkeys root@staging-ipv4.testrun.org:/etc/ || true
|
||||
ssh -o StrictHostKeyChecking=accept-new -v root@staging-ipv4.testrun.org chown root:root -R /var/lib/acme || true
|
||||
|
||||
- name: run deploy-chatmail offline tests
|
||||
run: pytest --pyargs cmdeploy
|
||||
|
||||
- run: |
|
||||
cmdeploy init staging-ipv4.testrun.org
|
||||
sed -i 's#disable_ipv6 = False#disable_ipv6 = True#' chatmail.ini
|
||||
sed -i 's/#\s*mtail_address/mtail_address/' chatmail.ini
|
||||
|
||||
- run: cmdeploy run --verbose --skip-dns-check
|
||||
|
||||
- name: set DNS entries
|
||||
run: |
|
||||
ssh -o StrictHostKeyChecking=accept-new -v root@staging-ipv4.testrun.org chown opendkim:opendkim -R /etc/dkimkeys
|
||||
cmdeploy dns --zonefile staging-generated.zone
|
||||
cat staging-generated.zone >> .github/workflows/staging-ipv4.testrun.org-default.zone
|
||||
cat .github/workflows/staging-ipv4.testrun.org-default.zone
|
||||
scp .github/workflows/staging-ipv4.testrun.org-default.zone root@ns.testrun.org:/etc/nsd/staging-ipv4.testrun.org.zone
|
||||
ssh root@ns.testrun.org nsd-checkzone staging-ipv4.testrun.org /etc/nsd/staging-ipv4.testrun.org.zone
|
||||
ssh root@ns.testrun.org systemctl reload nsd
|
||||
|
||||
- name: cmdeploy test
|
||||
run: CHATMAIL_DOMAIN2=ci-chatmail.testrun.org cmdeploy test --slow
|
||||
|
||||
- name: cmdeploy dns
|
||||
run: cmdeploy dns -v
|
||||
|
||||
98
.github/workflows/test-and-deploy.yaml
vendored
98
.github/workflows/test-and-deploy.yaml
vendored
@@ -1,98 +0,0 @@
|
||||
name: deploy on staging2.testrun.org, and run tests
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
paths-ignore:
|
||||
- 'scripts/**'
|
||||
- '**/README.md'
|
||||
- 'CHANGELOG.md'
|
||||
- 'LICENSE'
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
name: deploy on staging2.testrun.org, and run tests
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
environment:
|
||||
name: staging2.testrun.org
|
||||
url: https://staging2.testrun.org/
|
||||
concurrency: staging2.testrun.org
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: prepare SSH
|
||||
run: |
|
||||
mkdir ~/.ssh
|
||||
echo "${{ secrets.STAGING_SSH_KEY }}" >> ~/.ssh/id_ed25519
|
||||
chmod 600 ~/.ssh/id_ed25519
|
||||
ssh-keyscan staging2.testrun.org > ~/.ssh/known_hosts
|
||||
# save previous acme & dkim state
|
||||
rsync -avz root@staging2.testrun.org:/var/lib/acme . || true
|
||||
rsync -avz root@staging2.testrun.org:/etc/dkimkeys . || true
|
||||
# store previous acme & dkim state on ns.testrun.org, if it contains useful certs
|
||||
if [ -f dkimkeys/opendkim.private ]; then rsync -avz -e "ssh -o StrictHostKeyChecking=accept-new" dkimkeys root@ns.testrun.org:/tmp/ || true; fi
|
||||
if [ "$(ls -A acme/certs)" ]; then rsync -avz -e "ssh -o StrictHostKeyChecking=accept-new" acme root@ns.testrun.org:/tmp/ || true; fi
|
||||
# make sure CAA record isn't set
|
||||
scp -o StrictHostKeyChecking=accept-new .github/workflows/staging.testrun.org-default.zone root@ns.testrun.org:/etc/nsd/staging2.testrun.org.zone
|
||||
ssh root@ns.testrun.org sed -i '/CAA/d' /etc/nsd/staging2.testrun.org.zone
|
||||
ssh root@ns.testrun.org nsd-checkzone staging2.testrun.org /etc/nsd/staging2.testrun.org.zone
|
||||
ssh root@ns.testrun.org systemctl reload nsd
|
||||
|
||||
- name: rebuild staging2.testrun.org to have a clean VPS
|
||||
run: |
|
||||
curl -X POST \
|
||||
-H "Authorization: Bearer ${{ secrets.HETZNER_API_TOKEN }}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"image":"debian-12"}' \
|
||||
"https://api.hetzner.cloud/v1/servers/${{ secrets.STAGING_SERVER_ID }}/actions/rebuild"
|
||||
|
||||
- run: scripts/initenv.sh
|
||||
|
||||
- name: append venv/bin to PATH
|
||||
run: echo venv/bin >>$GITHUB_PATH
|
||||
|
||||
- name: upload TLS cert after rebuilding
|
||||
run: |
|
||||
echo " --- wait until staging2.testrun.org VPS is rebuilt --- "
|
||||
rm ~/.ssh/known_hosts
|
||||
while ! ssh -o ConnectTimeout=180 -o StrictHostKeyChecking=accept-new -v root@staging2.testrun.org id -u ; do sleep 1 ; done
|
||||
ssh -o StrictHostKeyChecking=accept-new -v root@staging2.testrun.org id -u
|
||||
# download acme & dkim state from ns.testrun.org
|
||||
rsync -e "ssh -o StrictHostKeyChecking=accept-new" -avz root@ns.testrun.org:/tmp/acme acme-restore || true
|
||||
rsync -avz root@ns.testrun.org:/tmp/dkimkeys dkimkeys-restore || true
|
||||
# restore acme & dkim state to staging2.testrun.org
|
||||
rsync -avz acme-restore/acme root@staging2.testrun.org:/var/lib/ || true
|
||||
rsync -avz dkimkeys-restore/dkimkeys root@staging2.testrun.org:/etc/ || true
|
||||
ssh -o StrictHostKeyChecking=accept-new -v root@staging2.testrun.org chown root:root -R /var/lib/acme || true
|
||||
|
||||
- name: add hpk42 key to staging server
|
||||
run: ssh root@staging2.testrun.org 'curl -s https://github.com/hpk42.keys >> .ssh/authorized_keys'
|
||||
|
||||
- name: run deploy-chatmail offline tests
|
||||
run: pytest --pyargs cmdeploy
|
||||
|
||||
- run: |
|
||||
cmdeploy init staging2.testrun.org
|
||||
sed -i 's/#\s*mtail_address/mtail_address/' chatmail.ini
|
||||
|
||||
- run: cmdeploy run --verbose --skip-dns-check
|
||||
|
||||
- name: set DNS entries
|
||||
run: |
|
||||
ssh -o StrictHostKeyChecking=accept-new root@staging2.testrun.org chown opendkim:opendkim -R /etc/dkimkeys
|
||||
cmdeploy dns --zonefile staging-generated.zone --verbose
|
||||
cat staging-generated.zone >> .github/workflows/staging.testrun.org-default.zone
|
||||
cat .github/workflows/staging.testrun.org-default.zone
|
||||
scp .github/workflows/staging.testrun.org-default.zone root@ns.testrun.org:/etc/nsd/staging2.testrun.org.zone
|
||||
ssh root@ns.testrun.org nsd-checkzone staging2.testrun.org /etc/nsd/staging2.testrun.org.zone
|
||||
ssh root@ns.testrun.org systemctl reload nsd
|
||||
|
||||
- name: cmdeploy test
|
||||
run: CHATMAIL_DOMAIN2=ci-chatmail.testrun.org cmdeploy test --slow
|
||||
|
||||
- name: cmdeploy dns
|
||||
run: cmdeploy dns -v
|
||||
|
||||
26
.github/workflows/zizmor-scan.yml
vendored
Normal file
26
.github/workflows/zizmor-scan.yml
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
name: GitHub Actions Security Analysis with zizmor
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: ["main"]
|
||||
pull_request:
|
||||
branches: ["**"]
|
||||
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
zizmor:
|
||||
name: Run zizmor
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
security-events: write # Required for upload-sarif (used by zizmor-action) to upload SARIF files.
|
||||
contents: read
|
||||
actions: read
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Run zizmor
|
||||
uses: zizmorcore/zizmor-action@b1d7e1fb5de872772f31590499237e7cce841e8e # v0.5.3
|
||||
7
.github/zizmor.yml
vendored
Normal file
7
.github/zizmor.yml
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
rules:
|
||||
unpinned-uses:
|
||||
config:
|
||||
policies:
|
||||
actions/*: ref-pin
|
||||
dependabot/*: ref-pin
|
||||
chatmail/*: ref-pin
|
||||
7
.gitignore
vendored
7
.gitignore
vendored
@@ -4,7 +4,7 @@ __pycache__/
|
||||
*$py.class
|
||||
*.swp
|
||||
*qr-*.png
|
||||
chatmail.ini
|
||||
chatmail*.ini
|
||||
|
||||
|
||||
# C extensions
|
||||
@@ -164,8 +164,3 @@ cython_debug/
|
||||
#.idea/
|
||||
|
||||
chatmail.zone
|
||||
|
||||
# docker
|
||||
/data/
|
||||
/custom/
|
||||
.env
|
||||
|
||||
91
CHANGELOG.md
91
CHANGELOG.md
@@ -1,5 +1,89 @@
|
||||
# Changelog for chatmail deployment
|
||||
|
||||
## 1.10.0 2026-04-30
|
||||
|
||||
* start mtail after networking is fully up <https://github.com/chatmail/relay/pull/942>
|
||||
* support specifying custom filtermail binary through environment variable <https://github.com/chatmail/relay/pull/941>
|
||||
* add automated zizmor scanning of github workflows <https://github.com/chatmail/relay/pull/938>
|
||||
* added dispatch for *automated builds of chatmail relay docker images* <https://github.com/chatmail/relay/pull/934>
|
||||
* do not bind SMTP client sockets to public addresses <https://github.com/chatmail/relay/pull/932>
|
||||
* underline in docs that scripts/initenv.sh should be used for building the docs <https://github.com/chatmail/relay/pull/933>
|
||||
* automatic oldest-first message removal from mailboxes to always stay under max_mailbox_size <https://github.com/chatmail/relay/pull/929>
|
||||
* remove --slow from cmdeploy test <https://github.com/chatmail/relay/pull/931>
|
||||
* handle missing inotify sysctl keys in containers <https://github.com/chatmail/relay/pull/930>
|
||||
* replace resolvconf with static resolv.conf <https://github.com/chatmail/relay/pull/928>
|
||||
* disable fsync for LMTP and IMAP services <https://github.com/chatmail/relay/pull/925>
|
||||
* re-use cmlxc workflow, replacing CI with hetzner staging servers with local lxc containers <https://github.com/chatmail/relay/pull/917>
|
||||
* explicitly install resolvconf <https://github.com/chatmail/relay/pull/924>
|
||||
* detect stale dovecot binary and force restart in activate() <https://github.com/chatmail/relay/pull/922>
|
||||
* Rename filtermail_http_port to filtermail_http_port_incoming <https://github.com/chatmail/relay/pull/921>
|
||||
* consolidated is_in_container() check https://github.com/chatmail/relay/pull/920>
|
||||
* restart dovecot after package replacement (rebase, test condense) <https://github.com/chatmail/relay/pull/913>
|
||||
* Set permissions on dovecot pin prefs <https://github.com/chatmail/relay/pull/915>
|
||||
* Route `/mxdeliv/` to configurable port <https://github.com/chatmail/relay/pull/901>
|
||||
* fix VM detection, automated testing fixes, use newer chatmail-turn and move to standard BIND DNS zone format <https://github.com/chatmail/relay/pull/912>
|
||||
* Upgrade to filtermail 0.6.1 <https://github.com/chatmail/relay/pull/910>
|
||||
* pin dovecot packages to prevent apt upgrades <https://github.com/chatmail/relay/pull/908>
|
||||
* add rpc server to cmdeploy along with client <https://github.com/chatmail/relay/pull/906>
|
||||
* remove unused deps from chatmaild <https://github.com/chatmail/relay/pull/905>
|
||||
* set default smtp_tls_security_level to "verify" unconditionally <https://github.com/chatmail/relay/pull/902>
|
||||
* featprefer IPv4 in SMTP client <https://github.com/chatmail/relay/pull/900>
|
||||
* Install dovecot .deb packages atomically <https://github.com/chatmail/relay/pull/899>
|
||||
* stop installing cron package <https://github.com/chatmail/relay/pull/898>
|
||||
* Rewrite dovecot install logic, update <https://github.com/chatmail/relay/pull/862>
|
||||
* fix a test and some linting fixes <https://github.com/chatmail/relay/pull/897>
|
||||
* Disable IP verification on domain-literal addresses <https://github.com/chatmail/relay/pull/895>
|
||||
* disable installing recommended packages globally on the relay <https://github.com/chatmail/relay/pull/887>
|
||||
* multiple bug fixes across chatmaild and cmdeploy <https://github.com/chatmail/relay/pull/883>
|
||||
* remove /metrics from the website <https://github.com/chatmail/relay/pull/703>
|
||||
* add Prometheus textfile output to fsreport <https://github.com/chatmail/relay/pull/881>
|
||||
* chown opendkim: private key <https://github.com/chatmail/relay/pull/879>
|
||||
* make sure chatmail-metadata was started <https://github.com/chatmail/relay/pull/882>
|
||||
* dovecot update url <https://github.com/chatmail/relay/pull/880>
|
||||
* upgrade to filtermail v0.5.2 <https://github.com/chatmail/relay/pull/876>
|
||||
* download dovecot packages from github release <https://github.com/chatmail/relay/pull/875>
|
||||
* replace DKIM verification with filtermail v0.5 <https://github.com/chatmail/relay/pull/831>
|
||||
* remove CFFI deltachat bindings usage, and consolidate test support with rpc-bindings <https://github.com/chatmail/relay/pull/872>
|
||||
* prepare chatmaild/cmdeploy changes for Docker support <https://github.com/chatmail/relay/pull/857>
|
||||
* stabilize online benchmark timing adding rate-limit-aware cooldown between iterations <https://github.com/chatmail/relay/pull/867>
|
||||
* move rate-limit cooldown to benchmark fixture <https://github.com/chatmail/relay/pull/868>
|
||||
* reconfigure acmetool from redirector to proxy mode <https://github.com/chatmail/relay/pull/861>
|
||||
* make tests work with `--ssh-host localhost` <https://github.com/chatmail/relay/pull/856>
|
||||
* mark f-string with f prefix in test_expunged <https://github.com/chatmail/relay/pull/863>
|
||||
* install also if dovecot.service=False in SystemdEnabled Fact <https://github.com/chatmail/relay/pull/841>
|
||||
* Introduce support for self-signed chatmail relays <https://github.com/chatmail/relay/pull/855>
|
||||
* Strip Received headers before delivery <https://github.com/chatmail/relay/pull/849>
|
||||
* upgrade to filtermail v0.3 <https://github.com/chatmail/relay/pull/850>
|
||||
* fix link to Maddy and update madmail URL <https://github.com/chatmail/relay/pull/847>
|
||||
* accept self-signed certificates for IP-only relays <https://github.com/chatmail/relay/pull/846>
|
||||
* enforce sending from public IP addresses <https://github.com/chatmail/relay/pull/845>
|
||||
* port check: check addresses, fix single services <https://github.com/chatmail/relay/pull/844>
|
||||
* remediates issue with improper concat on resolver injection <https://github.com/chatmail/relay/pull/834>
|
||||
* ipv6 boolean not being respected during operations <https://github.com/chatmail/relay/pull/832>
|
||||
* upgrade to filtermail v0.2 by <https://github.com/chatmail/relay/pull/825>
|
||||
* fix link to filtermail <https://github.com/chatmail/relay/pull/824>
|
||||
* print timestamps when sending messages <https://github.com/chatmail/relay/pull/823>
|
||||
* fix flaky test_exceed_rate_limit <https://github.com/chatmail/relay/pull/822>
|
||||
* Replace filtermail with rust reimplementation <https://github.com/chatmail/relay/pull/808>
|
||||
* Set default internal SMTP ports in Config <https://github.com/chatmail/relay/pull/819>
|
||||
* separate metrics for incoming and outgoing messages <https://github.com/chatmail/relay/pull/820>
|
||||
* disable appending the Received header <https://github.com/chatmail/relay/pull/815>
|
||||
* fail on errors in postfix/dovecot config <https://github.com/chatmail/relay/pull/813>
|
||||
* tweak idle/hibernate metrics some more <https://github.com/chatmail/relay/pull/811>
|
||||
* add config flag to export statistics <https://github.com/chatmail/relay/pull/806>
|
||||
* add --website-only option to run subcommand <https://github.com/chatmail/relay/pull/768>
|
||||
* Strip DKIM-Signature header before LMTP <https://github.com/chatmail/relay/pull/803>
|
||||
* properly make sure that postfix gets restarted on failure <https://github.com/chatmail/relay/pull/802>
|
||||
* expire.py: use absolute path to maildirsize <https://github.com/chatmail/relay/pull/807>
|
||||
* pin Dovecot documentation URLs to version 2.3 <https://github.com/chatmail/relay/pull/800>
|
||||
* try to use "build machine" and "deployment server" consistently <https://github.com/chatmail/relay/pull/797>
|
||||
* adds instructions for migrating control machines <https://github.com/chatmail/relay/pull/795>
|
||||
* use consistent naming schema in getting started <https://github.com/chatmail/relay/pull/793>
|
||||
* remove jsok/serialize-workflow-action dependency <https://github.com/chatmail/relay/pull/790>
|
||||
* streamline migration guide wording, provide titled steps <https://github.com/chatmail/relay/pull/789>
|
||||
* increases default max mailbox size <https://github.com/chatmail/relay/pull/792>
|
||||
* use daemon_name for OpenDKIM sign-verify decision instead of IP <https://github.com/chatmail/relay/pull/784>
|
||||
|
||||
## 1.9.0 2025-12-18
|
||||
|
||||
### Documentation
|
||||
@@ -121,13 +205,6 @@
|
||||
Provide an "fsreport" CLI for more fine grained analysis of message files.
|
||||
([#637](https://github.com/chatmail/relay/pull/637))
|
||||
|
||||
- Add installation via docker compose (MVP 1). The instructions, known issues and limitations are located in `/docs`
|
||||
([#614](https://github.com/chatmail/relay/pull/614))
|
||||
|
||||
- Add configuration parameters
|
||||
([#614](https://github.com/chatmail/relay/pull/614)):
|
||||
- `change_kernel_settings` - Whether to change kernel parameters during installation (default: `True`)
|
||||
- `fs_inotify_max_user_instances_and_watchers` - Value for kernel parameters `fs.inotify.max_user_instances` and `fs.inotify.max_user_watches` (default: `65535`)
|
||||
|
||||
## 1.7.0 2025-09-11
|
||||
|
||||
|
||||
@@ -6,10 +6,7 @@ build-backend = "setuptools.build_meta"
|
||||
name = "chatmaild"
|
||||
version = "0.3"
|
||||
dependencies = [
|
||||
"aiosmtpd",
|
||||
"iniconfig",
|
||||
"deltachat-rpc-server",
|
||||
"deltachat-rpc-client",
|
||||
"filelock",
|
||||
"requests",
|
||||
"crypt-r >= 3.13.1 ; python_version >= '3.11'",
|
||||
@@ -24,8 +21,8 @@ where = ['src']
|
||||
[project.scripts]
|
||||
doveauth = "chatmaild.doveauth:main"
|
||||
chatmail-metadata = "chatmaild.metadata:main"
|
||||
chatmail-metrics = "chatmaild.metrics:main"
|
||||
chatmail-expire = "chatmaild.expire:main"
|
||||
chatmail-expire = "chatmaild.expire:daily_expire_main"
|
||||
chatmail-quota-expire = "chatmaild.expire:quota_expire_main"
|
||||
chatmail-fsreport = "chatmaild.fsreport:main"
|
||||
lastlogin = "chatmaild.lastlogin:main"
|
||||
turnserver = "chatmaild.turnserver:main"
|
||||
@@ -71,6 +68,7 @@ commands =
|
||||
deps = pytest
|
||||
pdbpp
|
||||
pytest-localserver
|
||||
aiosmtpd
|
||||
execnet
|
||||
commands = pytest -v -rsXx {posargs}
|
||||
"""
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import iniconfig
|
||||
@@ -38,21 +37,19 @@ class Config:
|
||||
self.filtermail_smtp_port_incoming = int(
|
||||
params.get("filtermail_smtp_port_incoming", "10081")
|
||||
)
|
||||
self.filtermail_http_port_incoming = int(
|
||||
params.get("filtermail_http_port_incoming", "10082")
|
||||
)
|
||||
self.filtermail_lmtp_port_transport = int(
|
||||
params.get("filtermail_lmtp_port_transport", "10083")
|
||||
)
|
||||
self.postfix_reinject_port = int(params.get("postfix_reinject_port", "10025"))
|
||||
self.postfix_reinject_port_incoming = int(
|
||||
params.get("postfix_reinject_port_incoming", "10026")
|
||||
)
|
||||
self.mtail_address = params.get("mtail_address")
|
||||
self.disable_ipv6 = params.get("disable_ipv6", "false").lower() == "true"
|
||||
self.addr_v4 = os.environ.get("CHATMAIL_ADDR_V4", "")
|
||||
self.addr_v6 = os.environ.get("CHATMAIL_ADDR_V6", "")
|
||||
self.acme_email = params.get("acme_email", "")
|
||||
self.change_kernel_settings = (
|
||||
params.get("change_kernel_settings", "true").lower() == "true"
|
||||
)
|
||||
self.fs_inotify_max_user_instances_and_watchers = int(
|
||||
params["fs_inotify_max_user_instances_and_watchers"]
|
||||
)
|
||||
self.imap_rawlog = params.get("imap_rawlog", "false").lower() == "true"
|
||||
self.imap_compress = params.get("imap_compress", "false").lower() == "true"
|
||||
if "iroh_relay" not in params:
|
||||
@@ -66,6 +63,31 @@ class Config:
|
||||
self.privacy_pdo = params.get("privacy_pdo")
|
||||
self.privacy_supervisor = params.get("privacy_supervisor")
|
||||
|
||||
# TLS certificate management.
|
||||
# If tls_external_cert_and_key is set, use externally managed certs.
|
||||
# Otherwise derived from the domain name:
|
||||
# - Domains starting with "_" use self-signed certificates
|
||||
# - All other domains use ACME.
|
||||
external = params.get("tls_external_cert_and_key", "").strip()
|
||||
|
||||
if external:
|
||||
parts = external.split()
|
||||
if len(parts) != 2:
|
||||
raise ValueError(
|
||||
"tls_external_cert_and_key must have two space-separated"
|
||||
" paths: CERT_PATH KEY_PATH"
|
||||
)
|
||||
self.tls_cert_mode = "external"
|
||||
self.tls_cert_path, self.tls_key_path = parts
|
||||
elif self.mail_domain.startswith("_"):
|
||||
self.tls_cert_mode = "self"
|
||||
self.tls_cert_path = "/etc/ssl/certs/mailserver.pem"
|
||||
self.tls_key_path = "/etc/ssl/private/mailserver.key"
|
||||
else:
|
||||
self.tls_cert_mode = "acme"
|
||||
self.tls_cert_path = f"/var/lib/acme/live/{self.mail_domain}/fullchain"
|
||||
self.tls_key_path = f"/var/lib/acme/live/{self.mail_domain}/privkey"
|
||||
|
||||
# deprecated option
|
||||
mbdir = params.get("mailboxes_dir", f"/home/vmail/mail/{self.mail_domain}")
|
||||
self.mailboxes_dir = Path(mbdir.strip())
|
||||
@@ -73,6 +95,11 @@ class Config:
|
||||
# old unused option (except for first migration from sqlite to maildir store)
|
||||
self.passdb_path = Path(params.get("passdb_path", "/home/vmail/passdb.sqlite"))
|
||||
|
||||
@property
|
||||
def max_mailbox_size_mb(self):
|
||||
"""Return max_mailbox_size as an integer in megabytes."""
|
||||
return parse_size_mb(self.max_mailbox_size)
|
||||
|
||||
def _getbytefile(self):
|
||||
return open(self._inipath, "rb")
|
||||
|
||||
@@ -86,6 +113,16 @@ class Config:
|
||||
return User(maildir, addr, password_path, uid="vmail", gid="vmail")
|
||||
|
||||
|
||||
def parse_size_mb(limit):
|
||||
"""Parse a size string like ``500M`` or ``2G`` and return megabytes."""
|
||||
value = limit.strip().upper().removesuffix("B")
|
||||
if value.endswith("G"):
|
||||
return int(value[:-1]) * 1024
|
||||
if value.endswith("M"):
|
||||
return int(value[:-1])
|
||||
return int(value)
|
||||
|
||||
|
||||
def write_initial_config(inipath, mail_domain, overrides):
|
||||
"""Write out default config file, using the specified config value overrides."""
|
||||
content = get_default_config_content(mail_domain, **overrides)
|
||||
|
||||
@@ -1,8 +1,11 @@
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
import filelock
|
||||
|
||||
try:
|
||||
import crypt_r
|
||||
except ImportError:
|
||||
@@ -13,6 +16,7 @@ from .dictproxy import DictProxy
|
||||
from .migrate_db import migrate_from_db_to_maildir
|
||||
|
||||
NOCREATE_FILE = "/etc/chatmail-nocreate"
|
||||
VALID_LOCALPART_RE = re.compile(r"^[a-z0-9._-]+$")
|
||||
|
||||
|
||||
def encrypt_password(password: str):
|
||||
@@ -52,6 +56,10 @@ def is_allowed_to_create(config: Config, user, cleartext_password) -> bool:
|
||||
)
|
||||
return False
|
||||
|
||||
if not VALID_LOCALPART_RE.match(localpart):
|
||||
logging.warning("localpart %r contains invalid characters", localpart)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
@@ -140,8 +148,13 @@ class AuthDictProxy(DictProxy):
|
||||
if not is_allowed_to_create(self.config, addr, cleartext_password):
|
||||
return
|
||||
|
||||
user.set_password(encrypt_password(cleartext_password))
|
||||
print(f"Created address: {addr}", file=sys.stderr)
|
||||
lock = filelock.FileLock(str(user.password_path) + ".lock", timeout=5)
|
||||
with lock:
|
||||
userdata = user.get_userdb_dict()
|
||||
if userdata:
|
||||
return userdata
|
||||
user.set_password(encrypt_password(cleartext_password))
|
||||
print(f"Created address: {addr}", file=sys.stderr)
|
||||
return user.get_userdb_dict()
|
||||
|
||||
|
||||
|
||||
@@ -4,17 +4,26 @@ Expire old messages and addresses.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
from argparse import ArgumentParser
|
||||
from collections import namedtuple
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from stat import S_ISREG
|
||||
|
||||
from chatmaild.config import read_config
|
||||
|
||||
FileEntry = namedtuple("FileEntry", ("path", "mtime", "size"))
|
||||
QuotaFileEntry = namedtuple("QuotaFileEntry", ("mtime", "quota_size", "path"))
|
||||
|
||||
# Quota cleanup factor of max_mailbox_size. The mailbox is reset to this size.
|
||||
QUOTA_CLEANUP_FACTOR = 0.7
|
||||
|
||||
# e.g. "cur/1775324677.M448978P3029757.exam,S=3235,W=3305:2,S"
|
||||
_dovecot_fn_rex = re.compile(r".+/(\d+)\..+,S=(\d+)")
|
||||
|
||||
|
||||
def iter_mailboxes(basedir, maxnum):
|
||||
@@ -74,6 +83,42 @@ class MailboxStat:
|
||||
self.extrafiles.sort(key=lambda x: -x.size)
|
||||
|
||||
|
||||
def parse_dovecot_filename(relpath):
|
||||
m = _dovecot_fn_rex.match(relpath)
|
||||
if not m:
|
||||
return None
|
||||
return QuotaFileEntry(int(m.group(1)), int(m.group(2)), relpath)
|
||||
|
||||
|
||||
def scan_mailbox_messages(mbox):
|
||||
messages = []
|
||||
for sub in ("cur", "new"):
|
||||
for name in os_listdir_if_exists(mbox / sub):
|
||||
if entry := parse_dovecot_filename(f"{sub}/{name}"):
|
||||
messages.append(entry)
|
||||
return messages
|
||||
|
||||
|
||||
def expire_to_target(mbox, target_bytes):
|
||||
messages = scan_mailbox_messages(mbox)
|
||||
total_size = sum(m.quota_size for m in messages)
|
||||
# Keep recent 24 hours of messages protected from expiry because
|
||||
# likely something is wrong with interactions on that address
|
||||
# and quota-full signal can help the address owner's device to notice it
|
||||
undeletable_messages_cutoff = time.time() - (3600 * 24)
|
||||
removed = 0
|
||||
for entry in sorted(messages):
|
||||
if total_size <= target_bytes:
|
||||
break
|
||||
if entry.mtime > undeletable_messages_cutoff:
|
||||
break
|
||||
(mbox / entry.path).unlink(missing_ok=True)
|
||||
total_size -= entry.quota_size
|
||||
removed += 1
|
||||
|
||||
return removed
|
||||
|
||||
|
||||
def print_info(msg):
|
||||
print(msg, file=sys.stderr)
|
||||
|
||||
@@ -143,6 +188,19 @@ class Expiry:
|
||||
else:
|
||||
continue
|
||||
changed = True
|
||||
|
||||
target_bytes = (
|
||||
self.config.max_mailbox_size_mb * 1024 * 1024 * QUOTA_CLEANUP_FACTOR
|
||||
)
|
||||
removed = expire_to_target(Path(mbox.basedir), target_bytes)
|
||||
if removed:
|
||||
changed = True
|
||||
self.del_files += removed
|
||||
if self.verbose:
|
||||
print_info(
|
||||
f"quota-expire: removed {removed} message(s) from {mboxname}"
|
||||
)
|
||||
|
||||
if changed:
|
||||
self.remove_file(f"{mbox.basedir}/maildirsize")
|
||||
|
||||
@@ -154,9 +212,9 @@ class Expiry:
|
||||
)
|
||||
|
||||
|
||||
def main(args=None):
|
||||
def daily_expire_main(args=None):
|
||||
"""Expire mailboxes and messages according to chatmail config"""
|
||||
parser = ArgumentParser(description=main.__doc__)
|
||||
parser = ArgumentParser(description=daily_expire_main.__doc__)
|
||||
ini = "/usr/local/lib/chatmaild/chatmail.ini"
|
||||
parser.add_argument(
|
||||
"chatmail_ini",
|
||||
@@ -202,5 +260,33 @@ def main(args=None):
|
||||
print(exp.get_summary())
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(sys.argv[1:])
|
||||
def quota_expire_main(args=None):
|
||||
"""Remove mailbox messages to stay within a megabyte target.
|
||||
|
||||
This entry point is called by dovecot when a quota threshold is passed.
|
||||
"""
|
||||
|
||||
parser = ArgumentParser(description=quota_expire_main.__doc__)
|
||||
parser.add_argument(
|
||||
"target_mb",
|
||||
type=int,
|
||||
help="target mailbox size in megabytes",
|
||||
)
|
||||
parser.add_argument(
|
||||
"mailbox_path",
|
||||
type=Path,
|
||||
help="path to a user mailbox",
|
||||
)
|
||||
args = parser.parse_args(args)
|
||||
|
||||
target_bytes = args.target_mb * 1024 * 1024
|
||||
|
||||
removed_count = expire_to_target(args.mailbox_path, target_bytes)
|
||||
if removed_count:
|
||||
(args.mailbox_path / "maildirsize").unlink(missing_ok=True)
|
||||
print(
|
||||
f"quota-expire: removed {removed_count} message(s)"
|
||||
f" from {args.mailbox_path.name}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
return 0
|
||||
|
||||
@@ -13,9 +13,20 @@ to show storage summaries only for first 1000 mailboxes
|
||||
|
||||
python -m chatmaild.fsreport /path/to/chatmail.ini --maxnum 1000
|
||||
|
||||
to write Prometheus textfile for node_exporter
|
||||
|
||||
python -m chatmaild.fsreport --textfile /var/lib/prometheus/node-exporter/
|
||||
|
||||
writes to /var/lib/prometheus/node-exporter/fsreport.prom
|
||||
|
||||
to also write legacy metrics.py style output (default: /var/www/html/metrics):
|
||||
|
||||
python -m chatmaild.fsreport --textfile /var/lib/prometheus/node-exporter/ --legacy-metrics
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import tempfile
|
||||
from argparse import ArgumentParser
|
||||
from datetime import datetime
|
||||
|
||||
@@ -48,7 +59,19 @@ class Report:
|
||||
self.num_ci_logins = self.num_all_logins = 0
|
||||
self.login_buckets = {x: 0 for x in (1, 10, 30, 40, 80, 100, 150)}
|
||||
|
||||
self.message_buckets = {x: 0 for x in (0, 160000, 500000, 2000000)}
|
||||
KiB = 1024
|
||||
MiB = 1024 * KiB
|
||||
self.message_size_thresholds = (
|
||||
0,
|
||||
100 * KiB,
|
||||
MiB // 2,
|
||||
1 * MiB,
|
||||
2 * MiB,
|
||||
5 * MiB,
|
||||
10 * MiB,
|
||||
)
|
||||
self.message_buckets = {x: 0 for x in self.message_size_thresholds}
|
||||
self.message_count_buckets = {x: 0 for x in self.message_size_thresholds}
|
||||
|
||||
def process_mailbox_stat(self, mailbox):
|
||||
# categorize login times
|
||||
@@ -68,9 +91,10 @@ class Report:
|
||||
for size in self.message_buckets:
|
||||
for msg in mailbox.messages:
|
||||
if msg.size >= size:
|
||||
if self.mdir and not msg.relpath.startswith(self.mdir):
|
||||
if self.mdir and f"/{self.mdir}/" not in msg.path:
|
||||
continue
|
||||
self.message_buckets[size] += msg.size
|
||||
self.message_count_buckets[size] += 1
|
||||
|
||||
self.size_messages += sum(entry.size for entry in mailbox.messages)
|
||||
self.size_extra += sum(entry.size for entry in mailbox.extrafiles)
|
||||
@@ -93,9 +117,10 @@ class Report:
|
||||
|
||||
pref = f"[{self.mdir}] " if self.mdir else ""
|
||||
for minsize, sumsize in self.message_buckets.items():
|
||||
count = self.message_count_buckets[minsize]
|
||||
percent = (sumsize / all_messages * 100) if all_messages else 0
|
||||
print(
|
||||
f"{pref}larger than {HSize(minsize)}: {HSize(sumsize)} ({percent:.2f}%)"
|
||||
f"{pref}larger than {HSize(minsize)}: {HSize(sumsize)} ({percent:.2f}%), {count} msgs"
|
||||
)
|
||||
|
||||
user_logins = self.num_all_logins - self.num_ci_logins
|
||||
@@ -111,6 +136,75 @@ class Report:
|
||||
for days, active in self.login_buckets.items():
|
||||
print(f"last {days:3} days: {HSize(active)} {p(active)}")
|
||||
|
||||
def _write_atomic(self, filepath, content):
|
||||
"""Atomically write content to filepath via tmp+rename."""
|
||||
dirpath = os.path.dirname(os.path.abspath(filepath))
|
||||
fd, tmppath = tempfile.mkstemp(dir=dirpath, suffix=".tmp")
|
||||
try:
|
||||
with os.fdopen(fd, "w") as f:
|
||||
f.write(content)
|
||||
os.chmod(tmppath, 0o644)
|
||||
os.rename(tmppath, filepath)
|
||||
except BaseException:
|
||||
try:
|
||||
os.unlink(tmppath)
|
||||
except OSError:
|
||||
pass
|
||||
raise
|
||||
|
||||
def dump_textfile(self, filepath):
|
||||
"""Dump metrics in Prometheus exposition format."""
|
||||
lines = []
|
||||
|
||||
lines.append("# HELP chatmail_storage_bytes Mailbox storage in bytes.")
|
||||
lines.append("# TYPE chatmail_storage_bytes gauge")
|
||||
lines.append(f'chatmail_storage_bytes{{kind="messages"}} {self.size_messages}')
|
||||
lines.append(f'chatmail_storage_bytes{{kind="extra"}} {self.size_extra}')
|
||||
total = self.size_extra + self.size_messages
|
||||
lines.append(f'chatmail_storage_bytes{{kind="total"}} {total}')
|
||||
|
||||
lines.append("# HELP chatmail_messages_bytes Sum of msg bytes >= threshold.")
|
||||
lines.append("# TYPE chatmail_messages_bytes gauge")
|
||||
for minsize, sumsize in self.message_buckets.items():
|
||||
lines.append(f'chatmail_messages_bytes{{min_size="{minsize}"}} {sumsize}')
|
||||
|
||||
lines.append("# HELP chatmail_messages_count Number of msgs >= size threshold.")
|
||||
lines.append("# TYPE chatmail_messages_count gauge")
|
||||
for minsize, count in self.message_count_buckets.items():
|
||||
lines.append(f'chatmail_messages_count{{min_size="{minsize}"}} {count}')
|
||||
|
||||
lines.append("# HELP chatmail_accounts Number of accounts.")
|
||||
lines.append("# TYPE chatmail_accounts gauge")
|
||||
user_logins = self.num_all_logins - self.num_ci_logins
|
||||
lines.append(f'chatmail_accounts{{kind="all"}} {self.num_all_logins}')
|
||||
lines.append(f'chatmail_accounts{{kind="ci"}} {self.num_ci_logins}')
|
||||
lines.append(f'chatmail_accounts{{kind="user"}} {user_logins}')
|
||||
|
||||
lines.append(
|
||||
"# HELP chatmail_accounts_active Non-CI accounts active within N days."
|
||||
)
|
||||
lines.append("# TYPE chatmail_accounts_active gauge")
|
||||
for days, active in self.login_buckets.items():
|
||||
lines.append(f'chatmail_accounts_active{{days="{days}"}} {active}')
|
||||
|
||||
self._write_atomic(filepath, "\n".join(lines) + "\n")
|
||||
|
||||
def dump_compat_textfile(self, filepath):
|
||||
"""Dump legacy metrics.py style metrics."""
|
||||
user_logins = self.num_all_logins - self.num_ci_logins
|
||||
lines = [
|
||||
"# HELP total number of accounts",
|
||||
"# TYPE accounts gauge",
|
||||
f"accounts {self.num_all_logins}",
|
||||
"# HELP number of CI accounts",
|
||||
"# TYPE ci_accounts gauge",
|
||||
f"ci_accounts {self.num_ci_logins}",
|
||||
"# HELP number of non-CI accounts",
|
||||
"# TYPE nonci_accounts gauge",
|
||||
f"nonci_accounts {user_logins}",
|
||||
]
|
||||
self._write_atomic(filepath, "\n".join(lines) + "\n")
|
||||
|
||||
|
||||
def main(args=None):
|
||||
"""Report about filesystem storage usage of all mailboxes and messages"""
|
||||
@@ -127,19 +221,21 @@ def main(args=None):
|
||||
"--days",
|
||||
default=0,
|
||||
action="store",
|
||||
help="assume date to be days older than now",
|
||||
help="assume date to be DAYS older than now",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--min-login-age",
|
||||
default=0,
|
||||
metavar="DAYS",
|
||||
dest="min_login_age",
|
||||
action="store",
|
||||
help="only sum up message size if last login is at least min-login-age days old",
|
||||
help="only sum up message size if last login is at least DAYS days old",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--mdir",
|
||||
metavar="{cur,new,tmp}",
|
||||
action="store",
|
||||
help="only consider 'cur' or 'new' or 'tmp' messages for summary",
|
||||
help="only consider messages in specified Maildir subdirectory for summary",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
@@ -148,6 +244,21 @@ def main(args=None):
|
||||
action="store",
|
||||
help="maximum number of mailboxes to iterate on",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--textfile",
|
||||
metavar="PATH",
|
||||
default=None,
|
||||
help="write Prometheus textfile to PATH (directory or file); "
|
||||
"if PATH is a directory, writes 'fsreport.prom' inside it",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--legacy-metrics",
|
||||
metavar="FILENAME",
|
||||
nargs="?",
|
||||
const="/var/www/html/metrics",
|
||||
default=None,
|
||||
help="write legacy metrics.py textfile (default: /var/www/html/metrics)",
|
||||
)
|
||||
|
||||
args = parser.parse_args(args)
|
||||
|
||||
@@ -161,7 +272,15 @@ def main(args=None):
|
||||
rep = Report(now=now, min_login_age=int(args.min_login_age), mdir=args.mdir)
|
||||
for mbox in iter_mailboxes(str(config.mailboxes_dir), maxnum=maxnum):
|
||||
rep.process_mailbox_stat(mbox)
|
||||
rep.dump_summary()
|
||||
if args.textfile:
|
||||
path = args.textfile
|
||||
if os.path.isdir(path):
|
||||
path = os.path.join(path, "fsreport.prom")
|
||||
rep.dump_textfile(path)
|
||||
if args.legacy_metrics:
|
||||
rep.dump_compat_textfile(args.legacy_metrics)
|
||||
if not args.textfile and not args.legacy_metrics:
|
||||
rep.dump_summary()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -18,6 +18,7 @@ max_user_send_per_minute = 60
|
||||
max_user_send_burst_size = 10
|
||||
|
||||
# maximum mailbox size of a chatmail address
|
||||
# Oldest messages will be removed automatically, so mailboxes never run full.
|
||||
max_mailbox_size = 500M
|
||||
|
||||
# maximum message size for an e-mail in bytes
|
||||
@@ -48,6 +49,13 @@ passthrough_senders =
|
||||
# (space-separated, item may start with "@" to whitelist whole recipient domains)
|
||||
passthrough_recipients =
|
||||
|
||||
# Use externally managed TLS certificates instead of built-in acmetool.
|
||||
# Paths refer to files on the deployment server (not the build machine).
|
||||
# Both files must already exist before running cmdeploy.
|
||||
# Certificate renewal is your responsibility; changed files are
|
||||
# picked up automatically by all relay services.
|
||||
# tls_external_cert_and_key = /path/to/fullchain.pem /path/to/privkey.pem
|
||||
|
||||
# path to www directory - documented here: https://chatmail.at/doc/relay/getting_started.html#custom-web-pages
|
||||
#www_folder = www
|
||||
|
||||
@@ -69,16 +77,6 @@ disable_ipv6 = False
|
||||
# Your email adress, which will be used in acmetool to manage Let's Encrypt SSL certificates
|
||||
acme_email =
|
||||
|
||||
#
|
||||
# Kernel settings
|
||||
#
|
||||
|
||||
# if you set "True", the kernel settings will be configured according to the values below
|
||||
change_kernel_settings = True
|
||||
|
||||
# change fs.inotify.max_user_instances and fs.inotify.max_user_watches kernel settings
|
||||
fs_inotify_max_user_instances_and_watchers = 65535
|
||||
|
||||
# Defaults to https://iroh.{{mail_domain}} and running `iroh-relay` on the chatmail
|
||||
# service.
|
||||
# If you set it to anything else, the service will be disabled
|
||||
|
||||
@@ -70,6 +70,9 @@ class Metadata:
|
||||
# Some tokens have expired, remove them.
|
||||
with self._modify_tokens(addr) as _tokens:
|
||||
pass
|
||||
elif isinstance(tokens, list):
|
||||
with self._modify_tokens(addr) as tokens:
|
||||
token_list = list(tokens.keys())
|
||||
else:
|
||||
token_list = []
|
||||
return token_list
|
||||
@@ -101,7 +104,11 @@ class MetadataDictProxy(DictProxy):
|
||||
# Handle `GETMETADATA "" /shared/vendor/deltachat/irohrelay`
|
||||
return f"O{self.iroh_relay}\n"
|
||||
elif keyname == "vendor/vendor.dovecot/pvt/server/vendor/deltachat/turn":
|
||||
res = turn_credentials()
|
||||
try:
|
||||
res = turn_credentials()
|
||||
except Exception:
|
||||
logging.exception("failed to get TURN credentials")
|
||||
return "N\n"
|
||||
port = 3478
|
||||
return f"O{self.turn_hostname}:{port}:{res}\n"
|
||||
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def main(vmail_dir=None):
|
||||
if vmail_dir is None:
|
||||
vmail_dir = sys.argv[1]
|
||||
|
||||
accounts = 0
|
||||
ci_accounts = 0
|
||||
|
||||
for path in Path(vmail_dir).iterdir():
|
||||
if not path.joinpath("cur").is_dir():
|
||||
continue
|
||||
accounts += 1
|
||||
if path.name[:3] in ("ci-", "ac_"):
|
||||
ci_accounts += 1
|
||||
|
||||
print("# HELP total number of accounts")
|
||||
print("# TYPE accounts gauge")
|
||||
print(f"accounts {accounts}")
|
||||
print("# HELP number of CI accounts")
|
||||
print("# TYPE ci_accounts gauge")
|
||||
print(f"ci_accounts {ci_accounts}")
|
||||
print("# HELP number of non-CI accounts")
|
||||
print("# TYPE nonci_accounts gauge")
|
||||
print(f"nonci_accounts {accounts - ci_accounts}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -2,10 +2,11 @@
|
||||
|
||||
"""CGI script for creating new accounts."""
|
||||
|
||||
import ipaddress
|
||||
import json
|
||||
import random
|
||||
import secrets
|
||||
import string
|
||||
from urllib.parse import quote
|
||||
|
||||
from chatmaild.config import Config, read_config
|
||||
|
||||
@@ -14,22 +15,47 @@ ALPHANUMERIC = string.ascii_lowercase + string.digits
|
||||
ALPHANUMERIC_PUNCT = string.ascii_letters + string.digits + string.punctuation
|
||||
|
||||
|
||||
def wrap_ip(host):
|
||||
if host.startswith("[") and host.endswith("]"):
|
||||
return host
|
||||
try:
|
||||
ipaddress.ip_address(host)
|
||||
return f"[{host}]"
|
||||
except ValueError:
|
||||
return host
|
||||
|
||||
|
||||
def create_newemail_dict(config: Config):
|
||||
user = "".join(random.choices(ALPHANUMERIC, k=config.username_max_length))
|
||||
user = "".join(
|
||||
secrets.choice(ALPHANUMERIC) for _ in range(config.username_max_length)
|
||||
)
|
||||
password = "".join(
|
||||
secrets.choice(ALPHANUMERIC_PUNCT)
|
||||
for _ in range(config.password_min_length + 3)
|
||||
)
|
||||
return dict(email=f"{user}@{config.mail_domain}", password=f"{password}")
|
||||
return dict(email=f"{user}@{wrap_ip(config.mail_domain)}", password=f"{password}")
|
||||
|
||||
|
||||
def create_dclogin_url(email, password):
|
||||
"""Build a dclogin: URL with credentials and self-signed cert acceptance.
|
||||
|
||||
Uses ic=3 (AcceptInvalidCertificates) so chatmail clients
|
||||
can connect to servers with self-signed TLS certificates.
|
||||
"""
|
||||
return f"dclogin:{quote(email, safe='@')}?p={quote(password, safe='')}&v=1&ic=3"
|
||||
|
||||
|
||||
def print_new_account():
|
||||
config = read_config(CONFIG_PATH)
|
||||
creds = create_newemail_dict(config)
|
||||
|
||||
result = dict(email=creds["email"], password=creds["password"])
|
||||
if config.tls_cert_mode == "self":
|
||||
result["dclogin_url"] = create_dclogin_url(creds["email"], creds["password"])
|
||||
|
||||
print("Content-Type: application/json")
|
||||
print("")
|
||||
print(json.dumps(creds))
|
||||
print(json.dumps(result))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import pytest
|
||||
|
||||
from chatmaild.config import read_config
|
||||
from chatmaild.config import parse_size_mb, read_config
|
||||
|
||||
|
||||
def test_read_config_basic(example_config):
|
||||
@@ -73,3 +73,65 @@ def test_config_userstate_paths(make_config, tmp_path):
|
||||
def test_config_max_message_size(make_config, tmp_path):
|
||||
config = make_config("something.testrun.org", dict(max_message_size="10000"))
|
||||
assert config.max_message_size == 10000
|
||||
|
||||
|
||||
def test_config_tls_default_acme(make_config):
|
||||
config = make_config("chat.example.org")
|
||||
assert config.tls_cert_mode == "acme"
|
||||
assert config.tls_cert_path == "/var/lib/acme/live/chat.example.org/fullchain"
|
||||
assert config.tls_key_path == "/var/lib/acme/live/chat.example.org/privkey"
|
||||
|
||||
|
||||
def test_config_tls_self(make_config):
|
||||
config = make_config("_test.example.org")
|
||||
assert config.tls_cert_mode == "self"
|
||||
assert config.tls_cert_path == "/etc/ssl/certs/mailserver.pem"
|
||||
assert config.tls_key_path == "/etc/ssl/private/mailserver.key"
|
||||
|
||||
|
||||
def test_config_tls_external(make_config):
|
||||
config = make_config(
|
||||
"chat.example.org",
|
||||
{
|
||||
"tls_external_cert_and_key": "/custom/fullchain.pem /custom/privkey.pem",
|
||||
},
|
||||
)
|
||||
assert config.tls_cert_mode == "external"
|
||||
assert config.tls_cert_path == "/custom/fullchain.pem"
|
||||
assert config.tls_key_path == "/custom/privkey.pem"
|
||||
|
||||
|
||||
def test_config_tls_external_overrides_underscore(make_config):
|
||||
config = make_config(
|
||||
"_test.example.org",
|
||||
{
|
||||
"tls_external_cert_and_key": "/certs/fullchain.pem /certs/privkey.pem",
|
||||
},
|
||||
)
|
||||
assert config.tls_cert_mode == "external"
|
||||
assert config.tls_cert_path == "/certs/fullchain.pem"
|
||||
assert config.tls_key_path == "/certs/privkey.pem"
|
||||
|
||||
|
||||
def test_config_tls_external_bad_format(make_config):
|
||||
with pytest.raises(ValueError, match="two space-separated"):
|
||||
make_config(
|
||||
"chat.example.org",
|
||||
{
|
||||
"tls_external_cert_and_key": "/only/one/path.pem",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def test_parse_size_mb():
|
||||
assert parse_size_mb("500M") == 500
|
||||
assert parse_size_mb("2G") == 2048
|
||||
assert parse_size_mb(" 1g ") == 1024
|
||||
assert parse_size_mb("100MB") == 100
|
||||
assert parse_size_mb("256") == 256
|
||||
|
||||
|
||||
def test_max_mailbox_size_mb(make_config):
|
||||
config = make_config("chat.example.org")
|
||||
assert config.max_mailbox_size == "500M"
|
||||
assert config.max_mailbox_size_mb == 500
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import time
|
||||
|
||||
from chatmaild.doveauth import AuthDictProxy
|
||||
from chatmaild.expire import main as main_expire
|
||||
from chatmaild.expire import daily_expire_main as main_expire
|
||||
|
||||
|
||||
def test_login_timestamps(example_config):
|
||||
|
||||
@@ -120,6 +120,60 @@ def test_handle_dovecot_protocol_iterate(gencreds, example_config):
|
||||
assert not lines[2]
|
||||
|
||||
|
||||
def test_invalid_localpart_characters(make_config):
|
||||
"""Test that is_allowed_to_create rejects localparts with invalid characters."""
|
||||
config = make_config("chat.example.org", {"username_min_length": "3"})
|
||||
password = "zequ0Aimuchoodaechik"
|
||||
domain = config.mail_domain
|
||||
|
||||
# valid localparts
|
||||
assert is_allowed_to_create(config, f"abc123@{domain}", password)
|
||||
assert is_allowed_to_create(config, f"a.b-c_d@{domain}", password)
|
||||
|
||||
# uppercase rejected
|
||||
assert not is_allowed_to_create(config, f"Abc123@{domain}", password)
|
||||
assert not is_allowed_to_create(config, f"ABCDEFG@{domain}", password)
|
||||
|
||||
# spaces and special chars rejected
|
||||
assert not is_allowed_to_create(config, f"a b cde@{domain}", password)
|
||||
assert not is_allowed_to_create(config, f"abc+def@{domain}", password)
|
||||
assert not is_allowed_to_create(config, f"abc!def@{domain}", password)
|
||||
assert not is_allowed_to_create(config, f"ab@cdef@{domain}", password)
|
||||
assert not is_allowed_to_create(config, f"abc/def@{domain}", password)
|
||||
assert not is_allowed_to_create(config, f"abc\\def@{domain}", password)
|
||||
|
||||
|
||||
def test_concurrent_creation_same_account(dictproxy):
|
||||
"""Test that concurrent creation of the same account doesn't corrupt password."""
|
||||
addr = "racetest1@chat.example.org"
|
||||
password = "zequ0Aimuchoodaechik"
|
||||
num_threads = 10
|
||||
results = queue.Queue()
|
||||
|
||||
def create():
|
||||
try:
|
||||
res = dictproxy.lookup_passdb(addr, password)
|
||||
results.put(("ok", res))
|
||||
except Exception:
|
||||
results.put(("err", traceback.format_exc()))
|
||||
|
||||
threads = [threading.Thread(target=create, daemon=True) for _ in range(num_threads)]
|
||||
for t in threads:
|
||||
t.start()
|
||||
for t in threads:
|
||||
t.join(timeout=10)
|
||||
|
||||
passwords_seen = set()
|
||||
for _ in range(num_threads):
|
||||
status, res = results.get()
|
||||
if status == "err":
|
||||
pytest.fail(f"concurrent creation failed\n{res}")
|
||||
passwords_seen.add(res["password"])
|
||||
|
||||
# all threads must see the same password hash
|
||||
assert len(passwords_seen) == 1
|
||||
|
||||
|
||||
def test_50_concurrent_lookups_different_accounts(gencreds, dictproxy):
|
||||
num_threads = 50
|
||||
req_per_thread = 5
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
import itertools
|
||||
import os
|
||||
import random
|
||||
import time
|
||||
from datetime import datetime
|
||||
from fnmatch import fnmatch
|
||||
from pathlib import Path
|
||||
@@ -9,13 +11,19 @@ import pytest
|
||||
from chatmaild.expire import (
|
||||
FileEntry,
|
||||
MailboxStat,
|
||||
expire_to_target,
|
||||
get_file_entry,
|
||||
iter_mailboxes,
|
||||
os_listdir_if_exists,
|
||||
parse_dovecot_filename,
|
||||
quota_expire_main,
|
||||
scan_mailbox_messages,
|
||||
)
|
||||
from chatmaild.expire import main as expiry_main
|
||||
from chatmaild.expire import daily_expire_main as expiry_main
|
||||
from chatmaild.fsreport import main as report_main
|
||||
|
||||
MB = 1024 * 1024
|
||||
|
||||
|
||||
def fill_mbox(folderdir):
|
||||
password = folderdir.joinpath("password")
|
||||
@@ -112,6 +120,43 @@ def test_report(mbox1, example_config):
|
||||
report_main(args)
|
||||
|
||||
|
||||
def test_report_mdir_filters_by_path(mbox1, example_config):
|
||||
"""Test that Report with mdir='cur' only counts messages in cur/ subdirectory."""
|
||||
from chatmaild.fsreport import Report
|
||||
|
||||
now = datetime.utcnow().timestamp()
|
||||
|
||||
# Set password mtime to old enough so min_login_age check passes
|
||||
password = Path(mbox1.basedir).joinpath("password")
|
||||
old_time = now - 86400 * 10 # 10 days ago
|
||||
os.utime(password, (old_time, old_time))
|
||||
|
||||
# Reload mailbox with updated mtime
|
||||
from chatmaild.expire import MailboxStat
|
||||
|
||||
mbox = MailboxStat(mbox1.basedir)
|
||||
|
||||
# Report without mdir — should count all messages
|
||||
rep_all = Report(now=now, min_login_age=1, mdir=None)
|
||||
rep_all.process_mailbox_stat(mbox)
|
||||
total_all = rep_all.message_buckets[0]
|
||||
|
||||
# Report with mdir='cur' — should only count cur/ messages
|
||||
rep_cur = Report(now=now, min_login_age=1, mdir="cur")
|
||||
rep_cur.process_mailbox_stat(mbox)
|
||||
total_cur = rep_cur.message_buckets[0]
|
||||
|
||||
# Report with mdir='new' — should only count new/ messages
|
||||
rep_new = Report(now=now, min_login_age=1, mdir="new")
|
||||
rep_new.process_mailbox_stat(mbox)
|
||||
total_new = rep_new.message_buckets[0]
|
||||
|
||||
# cur has 500-byte msg, new has 600-byte msg (from fill_mbox)
|
||||
assert total_cur == 500
|
||||
assert total_new == 600
|
||||
assert total_all == 500 + 600
|
||||
|
||||
|
||||
def test_expiry_cli_basic(example_config, mbox1):
|
||||
args = (str(example_config._inipath),)
|
||||
expiry_main(args)
|
||||
@@ -159,3 +204,51 @@ def test_os_listdir_if_exists(tmp_path):
|
||||
tmp_path.joinpath("x").write_text("hello")
|
||||
assert len(os_listdir_if_exists(str(tmp_path))) == 1
|
||||
assert len(os_listdir_if_exists(str(tmp_path.joinpath("123123")))) == 0
|
||||
|
||||
|
||||
# --- quota expire tests ---
|
||||
|
||||
_msg_counter = itertools.count(1)
|
||||
|
||||
|
||||
def _create_message(basedir, sub, size, days_old=0, disk_size=None):
|
||||
seq = next(_msg_counter)
|
||||
mtime = int(time.time() - days_old * 86400)
|
||||
name = f"{mtime}.M1P1Q{seq}.hostname,S={size},W={size}:2,S"
|
||||
path = basedir / sub / name
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
path.write_bytes(b"x" * (disk_size if disk_size is not None else size))
|
||||
os.utime(path, (mtime, mtime))
|
||||
return path
|
||||
|
||||
|
||||
def test_parse_dovecot_filename():
|
||||
e = parse_dovecot_filename("cur/1775324677.M448978P3029757.exam,S=3235,W=3305:2,S")
|
||||
assert e.path == "cur/1775324677.M448978P3029757.exam,S=3235,W=3305:2,S"
|
||||
assert e.mtime == 1775324677
|
||||
assert e.quota_size == 3235
|
||||
assert parse_dovecot_filename("cur/msg_without_structure") is None
|
||||
|
||||
|
||||
def test_expire_to_target(tmp_path):
|
||||
_create_message(tmp_path, "cur", MB, days_old=10, disk_size=100)
|
||||
_create_message(tmp_path, "new", MB, days_old=5)
|
||||
_create_message(tmp_path, "cur", MB, days_old=0) # undeletable (<1 hour)
|
||||
assert len(scan_mailbox_messages(tmp_path)) == 3
|
||||
# removes oldest first, uses S= size not disk size
|
||||
removed = expire_to_target(tmp_path, MB)
|
||||
assert removed == 2
|
||||
msgs = scan_mailbox_messages(tmp_path)
|
||||
assert len(msgs) == 1
|
||||
# the surviving message is the fresh undeletable one
|
||||
assert msgs[0].mtime > time.time() - 3600
|
||||
|
||||
|
||||
def test_quota_expire_main(tmp_path, capsys):
|
||||
mbox = tmp_path / "user@example.org"
|
||||
_create_message(mbox, "cur", 2 * MB, days_old=5)
|
||||
(mbox / "maildirsize").write_text("x")
|
||||
quota_expire_main([str(1), str(mbox)])
|
||||
_, err = capsys.readouterr()
|
||||
assert "quota-expire: removed 1 message(s) from user@example.org" in err
|
||||
assert not (mbox / "maildirsize").exists()
|
||||
|
||||
@@ -1,9 +1,15 @@
|
||||
import shutil
|
||||
import smtplib
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
import pytest
|
||||
|
||||
pytestmark = pytest.mark.skipif(
|
||||
shutil.which("filtermail") is None,
|
||||
reason="filtermail binary not found",
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def smtpserver():
|
||||
@@ -41,6 +47,8 @@ def test_one_mail(
|
||||
make_config, make_popen, smtpserver, maildata, filtermail_mode, monkeypatch
|
||||
):
|
||||
monkeypatch.setenv("PYTHONUNBUFFERED", "1")
|
||||
# DKIM is tested by cmdeploy tests.
|
||||
monkeypatch.setenv("FILTERMAIL_SKIP_DKIM", "1")
|
||||
smtp_inject_port = 20025
|
||||
if filtermail_mode == "outgoing":
|
||||
settings = dict(
|
||||
@@ -58,6 +66,10 @@ def test_one_mail(
|
||||
|
||||
popen = make_popen(["filtermail", path, filtermail_mode])
|
||||
line = popen.stderr.readline().strip()
|
||||
|
||||
# skip a warning that FILTERMAIL_SKIP_DKIM shouldn't be used in prod
|
||||
if b"DKIM verification DISABLED!" in line:
|
||||
line = popen.stderr.readline().strip()
|
||||
if b"loop" not in line:
|
||||
print(line.decode("ascii"), file=sys.stderr)
|
||||
pytest.fail("starting filtermail failed")
|
||||
|
||||
@@ -314,6 +314,51 @@ def test_persistent_queue_items(tmp_path, testaddr, token):
|
||||
assert not queue_item < item2 and not item2 < queue_item
|
||||
|
||||
|
||||
def test_turn_credentials_exception_returns_N(notifier, metadata, monkeypatch):
|
||||
"""Test that turn_credentials() failure returns N\\n instead of crashing."""
|
||||
import chatmaild.metadata
|
||||
|
||||
dictproxy = MetadataDictProxy(
|
||||
notifier=notifier,
|
||||
metadata=metadata,
|
||||
turn_hostname="turn.example.org",
|
||||
)
|
||||
|
||||
def mock_turn_credentials():
|
||||
raise ConnectionRefusedError("socket not available")
|
||||
|
||||
monkeypatch.setattr(chatmaild.metadata, "turn_credentials", mock_turn_credentials)
|
||||
|
||||
transactions = {}
|
||||
res = dictproxy.handle_dovecot_request(
|
||||
"Lshared/0123/vendor/vendor.dovecot/pvt/server/vendor/deltachat/turn"
|
||||
"\tuser@example.org",
|
||||
transactions,
|
||||
)
|
||||
assert res == "N\n"
|
||||
|
||||
|
||||
def test_turn_credentials_success(notifier, metadata, monkeypatch):
|
||||
"""Test that valid turn_credentials() returns TURN URI."""
|
||||
import chatmaild.metadata
|
||||
|
||||
dictproxy = MetadataDictProxy(
|
||||
notifier=notifier,
|
||||
metadata=metadata,
|
||||
turn_hostname="turn.example.org",
|
||||
)
|
||||
|
||||
monkeypatch.setattr(chatmaild.metadata, "turn_credentials", lambda: "user:pass")
|
||||
|
||||
transactions = {}
|
||||
res = dictproxy.handle_dovecot_request(
|
||||
"Lshared/0123/vendor/vendor.dovecot/pvt/server/vendor/deltachat/turn"
|
||||
"\tuser@example.org",
|
||||
transactions,
|
||||
)
|
||||
assert res == "Oturn.example.org:3478:user:pass\n"
|
||||
|
||||
|
||||
def test_iroh_relay(dictproxy):
|
||||
rfile = io.BytesIO(
|
||||
b"\n".join(
|
||||
@@ -327,3 +372,14 @@ def test_iroh_relay(dictproxy):
|
||||
dictproxy.iroh_relay = "https://example.org/"
|
||||
dictproxy.loop_forever(rfile, wfile)
|
||||
assert wfile.getvalue() == b"Ohttps://example.org/\n"
|
||||
|
||||
|
||||
def test_legacy_token_migration(metadata, testaddr):
|
||||
with metadata.get_metadata_dict(testaddr).modify() as data:
|
||||
data[metadata.DEVICETOKEN_KEY] = ["oldtoken1", "oldtoken2"]
|
||||
|
||||
assert metadata.get_tokens_for_addr(testaddr) == ["oldtoken1", "oldtoken2"]
|
||||
mdict = metadata.get_metadata_dict(testaddr).read()
|
||||
tokens = mdict[metadata.DEVICETOKEN_KEY]
|
||||
assert isinstance(tokens, dict)
|
||||
assert "oldtoken1" in tokens and "oldtoken2" in tokens
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
from chatmaild.metrics import main
|
||||
|
||||
|
||||
def test_main(tmp_path, capsys):
|
||||
paths = []
|
||||
for x in ("ci-asllkj", "ac_12l3kj", "qweqwe", "ci-l1k2j31l2k3"):
|
||||
p = tmp_path.joinpath(x)
|
||||
p.mkdir()
|
||||
p.joinpath("cur").mkdir()
|
||||
paths.append(p)
|
||||
|
||||
tmp_path.joinpath("nomailbox").mkdir()
|
||||
|
||||
main(tmp_path)
|
||||
out, _ = capsys.readouterr()
|
||||
d = {}
|
||||
for line in out.split("\n"):
|
||||
if line.strip() and not line.startswith("#"):
|
||||
name, num = line.split()
|
||||
d[name] = int(num)
|
||||
|
||||
assert d["accounts"] == 4
|
||||
assert d["ci_accounts"] == 3
|
||||
assert d["nonci_accounts"] == 1
|
||||
@@ -48,6 +48,8 @@ def test_migration(tmp_path, example_config, caplog):
|
||||
assert passdb_path.stat().st_size > 10000
|
||||
|
||||
example_config.passdb_path = passdb_path
|
||||
# ensure logging.info records are captured regardless of global configuration
|
||||
caplog.set_level("INFO")
|
||||
|
||||
assert not caplog.records
|
||||
|
||||
|
||||
@@ -1,7 +1,11 @@
|
||||
import json
|
||||
|
||||
import chatmaild
|
||||
from chatmaild.newemail import create_newemail_dict, print_new_account
|
||||
from chatmaild.newemail import (
|
||||
create_dclogin_url,
|
||||
create_newemail_dict,
|
||||
print_new_account,
|
||||
)
|
||||
|
||||
|
||||
def test_create_newemail_dict(example_config):
|
||||
@@ -15,6 +19,24 @@ def test_create_newemail_dict(example_config):
|
||||
assert ac1["password"] != ac2["password"]
|
||||
|
||||
|
||||
def test_create_newemail_dict_ip(make_config):
|
||||
config = make_config("1.2.3.4")
|
||||
ac = create_newemail_dict(config)
|
||||
assert ac["email"].endswith("@[1.2.3.4]")
|
||||
|
||||
|
||||
def test_create_dclogin_url():
|
||||
url = create_dclogin_url("user@example.org", "p@ss w+rd")
|
||||
assert url.startswith("dclogin:")
|
||||
assert "v=1" in url
|
||||
assert "ic=3" in url
|
||||
|
||||
assert "user@example.org" in url
|
||||
# password special chars must be encoded
|
||||
assert "p%40ss" in url
|
||||
assert "w%2Brd" in url
|
||||
|
||||
|
||||
def test_print_new_account(capsys, monkeypatch, maildomain, tmpdir, example_config):
|
||||
monkeypatch.setattr(chatmaild.newemail, "CONFIG_PATH", str(example_config._inipath))
|
||||
print_new_account()
|
||||
@@ -25,3 +47,20 @@ def test_print_new_account(capsys, monkeypatch, maildomain, tmpdir, example_conf
|
||||
dic = json.loads(lines[2])
|
||||
assert dic["email"].endswith(f"@{example_config.mail_domain}")
|
||||
assert len(dic["password"]) >= 10
|
||||
# default tls_cert=acme should not include dclogin_url
|
||||
assert "dclogin_url" not in dic
|
||||
|
||||
|
||||
def test_print_new_account_self_signed(capsys, monkeypatch, make_config):
|
||||
config = make_config("_test.example.org")
|
||||
monkeypatch.setattr(chatmaild.newemail, "CONFIG_PATH", str(config._inipath))
|
||||
print_new_account()
|
||||
out, err = capsys.readouterr()
|
||||
lines = out.split("\n")
|
||||
dic = json.loads(lines[2])
|
||||
assert "dclogin_url" in dic
|
||||
url = dic["dclogin_url"]
|
||||
assert url.startswith("dclogin:")
|
||||
assert "ic=3" in url
|
||||
|
||||
assert dic["email"].split("@")[0] in url
|
||||
|
||||
73
chatmaild/src/chatmaild/tests/test_turnserver.py
Normal file
73
chatmaild/src/chatmaild/tests/test_turnserver.py
Normal file
@@ -0,0 +1,73 @@
|
||||
import socket
|
||||
import threading
|
||||
import time
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from chatmaild.turnserver import turn_credentials
|
||||
|
||||
SOCKET_PATH = "/run/chatmail-turn/turn.socket"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def turn_socket(tmp_path):
|
||||
"""Create a real Unix socket server at a temp path."""
|
||||
sock_path = str(tmp_path / "turn.socket")
|
||||
server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
|
||||
server.bind(sock_path)
|
||||
server.listen(1)
|
||||
yield sock_path, server
|
||||
server.close()
|
||||
|
||||
|
||||
def _call_turn_credentials(sock_path):
|
||||
"""Call turn_credentials but connect to sock_path instead of hardcoded path."""
|
||||
original_connect = socket.socket.connect
|
||||
|
||||
def patched_connect(self, address):
|
||||
if address == SOCKET_PATH:
|
||||
address = sock_path
|
||||
return original_connect(self, address)
|
||||
|
||||
with patch.object(socket.socket, "connect", patched_connect):
|
||||
return turn_credentials()
|
||||
|
||||
|
||||
def test_turn_credentials_timeout(turn_socket):
|
||||
"""Server accepts but never responds — must raise socket.timeout."""
|
||||
sock_path, server = turn_socket
|
||||
|
||||
def accept_and_hang():
|
||||
conn, _ = server.accept()
|
||||
time.sleep(30)
|
||||
conn.close()
|
||||
|
||||
t = threading.Thread(target=accept_and_hang, daemon=True)
|
||||
t.start()
|
||||
|
||||
with pytest.raises(socket.timeout):
|
||||
_call_turn_credentials(sock_path)
|
||||
|
||||
|
||||
def test_turn_credentials_connection_refused(tmp_path):
|
||||
"""Socket file doesn't exist — must raise ConnectionRefusedError or FileNotFoundError."""
|
||||
missing = str(tmp_path / "nonexistent.socket")
|
||||
with pytest.raises((ConnectionRefusedError, FileNotFoundError)):
|
||||
_call_turn_credentials(missing)
|
||||
|
||||
|
||||
def test_turn_credentials_success(turn_socket):
|
||||
"""Server responds with credentials — must return stripped string."""
|
||||
sock_path, server = turn_socket
|
||||
|
||||
def respond():
|
||||
conn, _ = server.accept()
|
||||
conn.sendall(b"testuser:testpass\n")
|
||||
conn.close()
|
||||
|
||||
t = threading.Thread(target=respond, daemon=True)
|
||||
t.start()
|
||||
|
||||
result = _call_turn_credentials(sock_path)
|
||||
assert result == "testuser:testpass"
|
||||
@@ -4,6 +4,7 @@ import socket
|
||||
|
||||
def turn_credentials() -> str:
|
||||
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as client_socket:
|
||||
client_socket.settimeout(5)
|
||||
client_socket.connect("/run/chatmail-turn/turn.socket")
|
||||
with client_socket.makefile("rb") as file:
|
||||
return file.readline().decode("utf-8").strip()
|
||||
|
||||
@@ -10,7 +10,6 @@ dependencies = [
|
||||
"pillow",
|
||||
"qrcode",
|
||||
"markdown",
|
||||
"pytest",
|
||||
"setuptools>=68",
|
||||
"termcolor",
|
||||
"build",
|
||||
@@ -20,6 +19,8 @@ dependencies = [
|
||||
"pytest-xdist",
|
||||
"execnet",
|
||||
"imap_tools",
|
||||
"deltachat-rpc-client",
|
||||
"deltachat-rpc-server",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
import importlib.resources
|
||||
|
||||
from pyinfra.operations import apt, files, server, systemd
|
||||
from pyinfra.operations import apt, server
|
||||
|
||||
from ..basedeploy import Deployer
|
||||
|
||||
@@ -9,9 +7,6 @@ class AcmetoolDeployer(Deployer):
|
||||
def __init__(self, email, domains):
|
||||
self.domains = domains
|
||||
self.email = email
|
||||
self.need_restart_redirector = False
|
||||
self.need_restart_reconcile_service = False
|
||||
self.need_restart_reconcile_timer = False
|
||||
|
||||
def install(self):
|
||||
apt.packages(
|
||||
@@ -19,121 +14,41 @@ class AcmetoolDeployer(Deployer):
|
||||
packages=["acmetool"],
|
||||
)
|
||||
|
||||
files.file(
|
||||
name="Remove old acmetool cronjob, it is replaced with systemd timer.",
|
||||
path="/etc/cron.d/acmetool",
|
||||
present=False,
|
||||
)
|
||||
self.remove_file("/etc/cron.d/acmetool")
|
||||
|
||||
files.put(
|
||||
name="Install acmetool hook.",
|
||||
src=importlib.resources.files(__package__)
|
||||
.joinpath("acmetool.hook")
|
||||
.open("rb"),
|
||||
dest="/etc/acme/hooks/nginx",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="755",
|
||||
)
|
||||
files.file(
|
||||
name="Remove acmetool hook from the wrong location where it was previously installed.",
|
||||
path="/usr/lib/acme/hooks/nginx",
|
||||
present=False,
|
||||
)
|
||||
self.put_executable("acmetool/acmetool.hook", "/etc/acme/hooks/nginx")
|
||||
self.remove_file("/usr/lib/acme/hooks/nginx")
|
||||
|
||||
def configure(self):
|
||||
files.template(
|
||||
src=importlib.resources.files(__package__).joinpath(
|
||||
"response-file.yaml.j2"
|
||||
),
|
||||
dest="/var/lib/acme/conf/responses",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
self.put_template(
|
||||
"acmetool/response-file.yaml.j2",
|
||||
"/var/lib/acme/conf/responses",
|
||||
email=self.email,
|
||||
)
|
||||
|
||||
files.template(
|
||||
src=importlib.resources.files(__package__).joinpath("target.yaml.j2"),
|
||||
dest="/var/lib/acme/conf/target",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
self.put_template(
|
||||
"acmetool/target.yaml.j2",
|
||||
"/var/lib/acme/conf/target",
|
||||
)
|
||||
|
||||
server.shell(
|
||||
name=f"Remove old acmetool desired files for {self.domains[0]}",
|
||||
commands=[f"rm -f /var/lib/acme/desired/{self.domains[0]}-*"],
|
||||
)
|
||||
files.template(
|
||||
src=importlib.resources.files(__package__).joinpath("desired.yaml.j2"),
|
||||
dest=f"/var/lib/acme/desired/{self.domains[0]}", # 0 is mailhost TLD
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
self.put_template(
|
||||
"acmetool/desired.yaml.j2",
|
||||
f"/var/lib/acme/desired/{self.domains[0]}",
|
||||
domains=self.domains,
|
||||
)
|
||||
|
||||
service_file = files.put(
|
||||
src=importlib.resources.files(__package__).joinpath(
|
||||
"acmetool-redirector.service"
|
||||
),
|
||||
dest="/etc/systemd/system/acmetool-redirector.service",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
self.need_restart_redirector = service_file.changed
|
||||
|
||||
reconcile_service_file = files.put(
|
||||
src=importlib.resources.files(__package__).joinpath(
|
||||
"acmetool-reconcile.service"
|
||||
),
|
||||
dest="/etc/systemd/system/acmetool-reconcile.service",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
self.need_restart_reconcile_service = reconcile_service_file.changed
|
||||
|
||||
reconcile_timer_file = files.put(
|
||||
src=importlib.resources.files(__package__).joinpath(
|
||||
"acmetool-reconcile.timer"
|
||||
),
|
||||
dest="/etc/systemd/system/acmetool-reconcile.timer",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
self.need_restart_reconcile_timer = reconcile_timer_file.changed
|
||||
self.ensure_systemd_unit("acmetool/acmetool-redirector.service")
|
||||
self.ensure_systemd_unit("acmetool/acmetool-reconcile.service")
|
||||
self.ensure_systemd_unit("acmetool/acmetool-reconcile.timer")
|
||||
|
||||
def activate(self):
|
||||
systemd.service(
|
||||
name="Setup acmetool-redirector service",
|
||||
service="acmetool-redirector.service",
|
||||
running=True,
|
||||
enabled=True,
|
||||
restarted=self.need_restart_redirector,
|
||||
)
|
||||
self.need_restart_redirector = False
|
||||
|
||||
systemd.service(
|
||||
name="Setup acmetool-reconcile service",
|
||||
service="acmetool-reconcile.service",
|
||||
running=False,
|
||||
enabled=False,
|
||||
daemon_reload=self.need_restart_reconcile_service,
|
||||
)
|
||||
self.need_restart_reconcile_service = False
|
||||
|
||||
systemd.service(
|
||||
name="Setup acmetool-reconcile timer",
|
||||
service="acmetool-reconcile.timer",
|
||||
running=True,
|
||||
enabled=True,
|
||||
daemon_reload=self.need_restart_reconcile_timer,
|
||||
)
|
||||
self.need_restart_reconcile_timer = False
|
||||
self.ensure_service("acmetool-redirector.service")
|
||||
self.ensure_service("acmetool-reconcile.service", running=False, enabled=False)
|
||||
self.ensure_service("acmetool-reconcile.timer")
|
||||
|
||||
server.shell(
|
||||
name=f"Reconcile certificates for: {', '.join(self.domains)}",
|
||||
|
||||
@@ -3,7 +3,7 @@ Description=acmetool HTTP redirector
|
||||
|
||||
[Service]
|
||||
Type=notify
|
||||
ExecStart=/usr/bin/acmetool redirector --service.uid=daemon
|
||||
ExecStart=/usr/bin/acmetool redirector --service.uid=daemon --bind=127.0.0.1:402
|
||||
Restart=always
|
||||
RestartSec=30
|
||||
|
||||
|
||||
@@ -1,7 +1,11 @@
|
||||
import importlib.resources
|
||||
import io
|
||||
import os
|
||||
from contextlib import contextmanager
|
||||
|
||||
from pyinfra import host
|
||||
from pyinfra.facts.files import Sha256File
|
||||
from pyinfra.facts.server import Command
|
||||
from pyinfra.operations import files, server, systemd
|
||||
|
||||
|
||||
@@ -10,15 +14,47 @@ def has_systemd():
|
||||
return os.path.isdir("/run/systemd/system")
|
||||
|
||||
|
||||
def is_in_container() -> bool:
|
||||
"""Return True if running inside a container (Docker, LXC, etc.)."""
|
||||
return (
|
||||
host.get_fact(
|
||||
Command,
|
||||
"systemd-detect-virt --container --quiet 2>/dev/null && echo yes || true",
|
||||
)
|
||||
== "yes"
|
||||
)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def blocked_service_startup():
|
||||
"""Prevent services from auto-starting during package installation.
|
||||
|
||||
Installs a ``/usr/sbin/policy-rc.d`` that exits 101, blocking any
|
||||
service from being started by the package manager. This avoids bind
|
||||
conflicts and CPU/RAM spikes during initial setup. The file is removed
|
||||
when the context exits.
|
||||
"""
|
||||
# For documentation about policy-rc.d, see:
|
||||
# https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
|
||||
files.put(
|
||||
src=get_resource("policy-rc.d"),
|
||||
dest="/usr/sbin/policy-rc.d",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="755",
|
||||
)
|
||||
yield
|
||||
files.file("/usr/sbin/policy-rc.d", present=False)
|
||||
|
||||
|
||||
def get_resource(arg, pkg=__package__):
|
||||
return importlib.resources.files(pkg).joinpath(arg)
|
||||
|
||||
|
||||
def configure_remote_units(mail_domain, units) -> None:
|
||||
def configure_remote_units(deployer, mail_domain, units) -> None:
|
||||
remote_base_dir = "/usr/local/lib/chatmaild"
|
||||
remote_venv_dir = f"{remote_base_dir}/venv"
|
||||
remote_chatmail_inipath = f"{remote_base_dir}/chatmail.ini"
|
||||
root_owned = dict(user="root", group="root", mode="644")
|
||||
|
||||
# install systemd units
|
||||
for fn in units:
|
||||
@@ -34,15 +70,13 @@ def configure_remote_units(mail_domain, units) -> None:
|
||||
source_path = get_resource(f"service/{basename}.f")
|
||||
content = source_path.read_text().format(**params).encode()
|
||||
|
||||
files.put(
|
||||
name=f"Upload {basename}",
|
||||
deployer.put_file(
|
||||
src=io.BytesIO(content),
|
||||
dest=f"/etc/systemd/system/{basename}",
|
||||
**root_owned,
|
||||
)
|
||||
|
||||
|
||||
def activate_remote_units(units) -> None:
|
||||
def activate_remote_units(deployer, units) -> None:
|
||||
# activate systemd units
|
||||
for fn in units:
|
||||
basename = fn if "." in fn else f"{fn}.service"
|
||||
@@ -52,14 +86,8 @@ def activate_remote_units(units) -> None:
|
||||
enabled = False
|
||||
else:
|
||||
enabled = True
|
||||
systemd.service(
|
||||
name=f"Setup {basename}",
|
||||
service=basename,
|
||||
running=enabled,
|
||||
enabled=enabled,
|
||||
restarted=enabled,
|
||||
daemon_reload=True,
|
||||
)
|
||||
|
||||
deployer.ensure_service(basename, running=enabled, enabled=enabled)
|
||||
|
||||
|
||||
class Deployment:
|
||||
@@ -105,6 +133,7 @@ class Deployment:
|
||||
|
||||
class Deployer:
|
||||
need_restart = False
|
||||
daemon_reload = False
|
||||
|
||||
def install(self):
|
||||
pass
|
||||
@@ -114,3 +143,113 @@ class Deployer:
|
||||
|
||||
def activate(self):
|
||||
pass
|
||||
|
||||
def ensure_service(self, service, running=True, enabled=True):
|
||||
if running:
|
||||
verb = "Start and enable"
|
||||
else:
|
||||
verb = "Stop"
|
||||
systemd.service(
|
||||
name=f"{verb} {service}",
|
||||
service=service,
|
||||
running=running,
|
||||
enabled=enabled,
|
||||
restarted=self.need_restart if running else False,
|
||||
daemon_reload=self.daemon_reload,
|
||||
)
|
||||
self.daemon_reload = False
|
||||
|
||||
def ensure_systemd_unit(self, src, **kwargs):
|
||||
dest_name = src.split("/")[-1].replace(".j2", "")
|
||||
dest = f"/etc/systemd/system/{dest_name}"
|
||||
if src.endswith(".j2"):
|
||||
return self.put_template(src, dest, **kwargs)
|
||||
return self.put_file(src, dest)
|
||||
|
||||
def put_file(self, src, dest, mode="644"):
|
||||
if isinstance(src, str):
|
||||
src = get_resource(src)
|
||||
res = files.put(
|
||||
name=f"Upload {dest}",
|
||||
src=src,
|
||||
dest=dest,
|
||||
user="root",
|
||||
group="root",
|
||||
mode=mode,
|
||||
)
|
||||
|
||||
return self._update_restart_signals(dest, res)
|
||||
|
||||
def put_executable(self, src, dest):
|
||||
return self.put_file(src, dest, mode="755")
|
||||
|
||||
def put_template(self, src, dest, owner="root", **kwargs):
|
||||
if isinstance(src, str):
|
||||
src = get_resource(src)
|
||||
res = files.template(
|
||||
name=f"Upload {dest}",
|
||||
src=src,
|
||||
dest=dest,
|
||||
user=owner,
|
||||
group=owner,
|
||||
mode="644",
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
return self._update_restart_signals(dest, res)
|
||||
|
||||
def remove_file(self, dest):
|
||||
res = files.file(name=f"Remove {dest}", path=dest, present=False)
|
||||
return self._update_restart_signals(dest, res)
|
||||
|
||||
def ensure_line(self, path, line, **kwargs):
|
||||
name = kwargs.pop("name", f"Ensure line in {path}")
|
||||
res = files.line(name=name, path=path, line=line, **kwargs)
|
||||
return self._update_restart_signals(path, res)
|
||||
|
||||
def ensure_directory(self, path, owner="root", mode="755", **kwargs):
|
||||
name = kwargs.pop("name", f"Ensure directory {path}")
|
||||
res = files.directory(
|
||||
name=name,
|
||||
path=path,
|
||||
user=owner,
|
||||
group=owner,
|
||||
mode=mode,
|
||||
present=True,
|
||||
**kwargs,
|
||||
)
|
||||
return self._update_restart_signals(path, res)
|
||||
|
||||
def remove_directory(self, path, **kwargs):
|
||||
name = kwargs.pop("name", f"Remove directory {path}")
|
||||
res = files.directory(name=name, path=path, present=False, **kwargs)
|
||||
return self._update_restart_signals(path, res)
|
||||
|
||||
def download_executable(self, url, dest, sha256sum, extract=None):
|
||||
existing = host.get_fact(Sha256File, dest)
|
||||
if existing == sha256sum:
|
||||
return
|
||||
|
||||
tmp = f"{dest}.new"
|
||||
if extract:
|
||||
dl_cmd = f"curl -fSL {url} | {extract} >{tmp}"
|
||||
else:
|
||||
dl_cmd = f"curl -fSL {url} -o {tmp}"
|
||||
|
||||
server.shell(
|
||||
name=f"Download {dest}",
|
||||
commands=[
|
||||
f"({dl_cmd}"
|
||||
f" && echo '{sha256sum} {tmp}' | sha256sum -c"
|
||||
f" && mv {tmp} {dest})",
|
||||
f"chmod 755 {dest}",
|
||||
],
|
||||
)
|
||||
self.need_restart = True
|
||||
|
||||
def _update_restart_signals(self, path, res):
|
||||
if res.changed:
|
||||
self.need_restart = True
|
||||
if str(path).startswith("/etc/systemd/system/"):
|
||||
self.daemon_reload = True
|
||||
return res
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
;
|
||||
; Required DNS entries for chatmail servers
|
||||
;
|
||||
{% if A %}
|
||||
{{ mail_domain }}. A {{ A }}
|
||||
{% endif %}
|
||||
{% if AAAA %}
|
||||
{{ mail_domain }}. AAAA {{ AAAA }}
|
||||
{% endif %}
|
||||
{{ mail_domain }}. MX 10 {{ mail_domain }}.
|
||||
_mta-sts.{{ mail_domain }}. TXT "v=STSv1; id={{ sts_id }}"
|
||||
mta-sts.{{ mail_domain }}. CNAME {{ mail_domain }}.
|
||||
www.{{ mail_domain }}. CNAME {{ mail_domain }}.
|
||||
{{ dkim_entry }}
|
||||
|
||||
;
|
||||
; Recommended DNS entries for interoperability and security-hardening
|
||||
;
|
||||
{{ mail_domain }}. TXT "v=spf1 a ~all"
|
||||
_dmarc.{{ mail_domain }}. TXT "v=DMARC1;p=reject;adkim=s;aspf=s"
|
||||
|
||||
{% if acme_account_url %}
|
||||
{{ mail_domain }}. CAA 0 issue "letsencrypt.org;accounturi={{ acme_account_url }}"
|
||||
{% endif %}
|
||||
_adsp._domainkey.{{ mail_domain }}. TXT "dkim=discardable"
|
||||
|
||||
_submission._tcp.{{ mail_domain }}. SRV 0 1 587 {{ mail_domain }}.
|
||||
_submissions._tcp.{{ mail_domain }}. SRV 0 1 465 {{ mail_domain }}.
|
||||
_imap._tcp.{{ mail_domain }}. SRV 0 1 143 {{ mail_domain }}.
|
||||
_imaps._tcp.{{ mail_domain }}. SRV 0 1 993 {{ mail_domain }}.
|
||||
@@ -5,7 +5,6 @@ along with command line option and subcommand parsing.
|
||||
|
||||
import argparse
|
||||
import importlib.resources
|
||||
import importlib.util
|
||||
import os
|
||||
import pathlib
|
||||
import shutil
|
||||
@@ -91,9 +90,10 @@ def run_cmd(args, out):
|
||||
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain
|
||||
sshexec = get_sshexec(ssh_host)
|
||||
require_iroh = args.config.enable_iroh_relay
|
||||
strict_tls = args.config.tls_cert_mode == "acme"
|
||||
if not args.dns_check_disabled:
|
||||
remote_data = dns.get_initial_remote_data(sshexec, args.config.mail_domain)
|
||||
if not dns.check_initial_remote_data(remote_data, print=out.red):
|
||||
if not dns.check_initial_remote_data(remote_data, strict_tls=strict_tls, print=out.red):
|
||||
return 1
|
||||
|
||||
env = os.environ.copy()
|
||||
@@ -101,16 +101,11 @@ def run_cmd(args, out):
|
||||
env["CHATMAIL_WEBSITE_ONLY"] = "True" if args.website_only else ""
|
||||
env["CHATMAIL_DISABLE_MAIL"] = "True" if args.disable_mail else ""
|
||||
env["CHATMAIL_REQUIRE_IROH"] = "True" if require_iroh else ""
|
||||
if not args.dns_check_disabled:
|
||||
env["CHATMAIL_ADDR_V4"] = remote_data.get("A") or ""
|
||||
env["CHATMAIL_ADDR_V6"] = remote_data.get("AAAA") or ""
|
||||
deploy_path = importlib.resources.files(__package__).joinpath("run.py").resolve()
|
||||
pyinf = "pyinfra --dry" if args.dry_run else "pyinfra"
|
||||
|
||||
cmd = f"{pyinf} --ssh-user root {ssh_host} {deploy_path} -y"
|
||||
if ssh_host in ["localhost", "@docker"]:
|
||||
if ssh_host == "@docker":
|
||||
env["CHATMAIL_DOCKER"] = "True"
|
||||
if ssh_host == "localhost":
|
||||
cmd = f"{pyinf} @local {deploy_path} -y"
|
||||
|
||||
if version.parse(pyinfra.__version__) < version.parse("3"):
|
||||
@@ -118,24 +113,18 @@ def run_cmd(args, out):
|
||||
return 1
|
||||
|
||||
try:
|
||||
retcode = out.check_call(cmd, env=env)
|
||||
out.check_call(cmd, env=env)
|
||||
if args.website_only:
|
||||
if retcode == 0:
|
||||
out.green("Website deployment completed.")
|
||||
else:
|
||||
out.red("Website deployment failed.")
|
||||
elif retcode == 0:
|
||||
out.green("Deploy completed, call `cmdeploy dns` next.")
|
||||
elif not args.dns_check_disabled and not remote_data["acme_account_url"]:
|
||||
out.green("Website deployment completed.")
|
||||
elif not args.dns_check_disabled and strict_tls and not remote_data["acme_account_url"]:
|
||||
out.red("Deploy completed but letsencrypt not configured")
|
||||
out.red("Run 'cmdeploy run' again")
|
||||
retcode = 0
|
||||
else:
|
||||
out.red("Deploy failed")
|
||||
out.green("Deploy completed, call `cmdeploy dns` next.")
|
||||
return 0
|
||||
except subprocess.CalledProcessError:
|
||||
out.red("Deploy failed")
|
||||
retcode = 1
|
||||
return retcode
|
||||
return 1
|
||||
|
||||
|
||||
def dns_cmd_options(parser):
|
||||
@@ -153,11 +142,13 @@ def dns_cmd(args, out):
|
||||
"""Check DNS entries and optionally generate dns zone file."""
|
||||
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain
|
||||
sshexec = get_sshexec(ssh_host, verbose=args.verbose)
|
||||
tls_cert_mode = args.config.tls_cert_mode
|
||||
strict_tls = tls_cert_mode == "acme"
|
||||
remote_data = dns.get_initial_remote_data(sshexec, args.config.mail_domain)
|
||||
if not remote_data:
|
||||
if not dns.check_initial_remote_data(remote_data, strict_tls=strict_tls):
|
||||
return 1
|
||||
|
||||
if not remote_data["acme_account_url"]:
|
||||
if strict_tls and not remote_data["acme_account_url"]:
|
||||
out.red("could not get letsencrypt account url, please run 'cmdeploy run'")
|
||||
return 1
|
||||
|
||||
@@ -165,6 +156,7 @@ def dns_cmd(args, out):
|
||||
out.red("could not determine dkim_entry, please run 'cmdeploy run'")
|
||||
return 1
|
||||
|
||||
remote_data["strict_tls"] = strict_tls
|
||||
zonefile = dns.get_filled_zone_file(remote_data)
|
||||
|
||||
if args.zonefile:
|
||||
@@ -199,23 +191,16 @@ def status_cmd(args, out):
|
||||
|
||||
|
||||
def test_cmd_options(parser):
|
||||
parser.add_argument(
|
||||
"--slow",
|
||||
dest="slow",
|
||||
action="store_true",
|
||||
help="also run slow tests",
|
||||
)
|
||||
add_ssh_host_option(parser)
|
||||
|
||||
|
||||
def test_cmd(args, out):
|
||||
"""Run local and online tests for chatmail deployment.
|
||||
"""Run local and online tests for chatmail deployment."""
|
||||
|
||||
This will automatically pip-install 'deltachat' if it's not available.
|
||||
"""
|
||||
|
||||
x = importlib.util.find_spec("deltachat")
|
||||
if x is None:
|
||||
out.check_call(f"{sys.executable} -m pip install deltachat")
|
||||
env = os.environ.copy()
|
||||
env["CHATMAIL_INI"] = str(args.inipath.absolute())
|
||||
if args.ssh_host:
|
||||
env["CHATMAIL_SSH"] = args.ssh_host
|
||||
|
||||
pytest_path = shutil.which("pytest")
|
||||
pytest_args = [
|
||||
@@ -227,9 +212,7 @@ def test_cmd(args, out):
|
||||
"-v",
|
||||
"--durations=5",
|
||||
]
|
||||
if args.slow:
|
||||
pytest_args.append("--slow")
|
||||
ret = out.run_ret(pytest_args)
|
||||
ret = out.run_ret(pytest_args, env=env)
|
||||
return ret
|
||||
|
||||
|
||||
@@ -320,7 +303,7 @@ def add_ssh_host_option(parser):
|
||||
parser.add_argument(
|
||||
"--ssh-host",
|
||||
dest="ssh_host",
|
||||
help="Run commands on 'localhost', via '@docker', or on a specific SSH host "
|
||||
help="Run commands on 'localhost' or on a specific SSH host "
|
||||
"instead of chatmail.ini's mail_domain.",
|
||||
)
|
||||
|
||||
@@ -330,7 +313,7 @@ def add_config_option(parser):
|
||||
"--config",
|
||||
dest="inipath",
|
||||
action="store",
|
||||
default=Path("chatmail.ini"),
|
||||
default=Path(os.environ.get("CHATMAIL_INI", "chatmail.ini")),
|
||||
type=Path,
|
||||
help="path to the chatmail.ini file",
|
||||
)
|
||||
@@ -382,9 +365,7 @@ def get_parser():
|
||||
|
||||
def get_sshexec(ssh_host: str, verbose=True):
|
||||
if ssh_host in ["localhost", "@local"]:
|
||||
return LocalExec(verbose, docker=False)
|
||||
elif ssh_host == "@docker":
|
||||
return LocalExec(verbose, docker=True)
|
||||
return LocalExec(verbose)
|
||||
if verbose:
|
||||
print(f"[ssh] login to {ssh_host}")
|
||||
return SSHExec(ssh_host, verbose=verbose)
|
||||
|
||||
@@ -5,14 +5,13 @@ Chat Mail pyinfra deploy.
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
from io import StringIO
|
||||
from io import BytesIO, StringIO
|
||||
from pathlib import Path
|
||||
|
||||
from chatmaild.config import read_config
|
||||
from pyinfra import facts, host, logger
|
||||
from pyinfra.facts import hardware
|
||||
from pyinfra.api import FactBase
|
||||
from pyinfra.facts.files import Sha256File
|
||||
from pyinfra.facts import hardware
|
||||
from pyinfra.facts.systemd import SystemdEnabled
|
||||
from pyinfra.operations import apt, files, pip, server, systemd
|
||||
|
||||
@@ -23,16 +22,19 @@ from .basedeploy import (
|
||||
Deployer,
|
||||
Deployment,
|
||||
activate_remote_units,
|
||||
blocked_service_startup,
|
||||
configure_remote_units,
|
||||
get_resource,
|
||||
has_systemd,
|
||||
is_in_container,
|
||||
)
|
||||
from .dovecot.deployer import DovecotDeployer
|
||||
from .external.deployer import ExternalTlsDeployer
|
||||
from .filtermail.deployer import FiltermailDeployer
|
||||
from .mtail.deployer import MtailDeployer
|
||||
from .nginx.deployer import NginxDeployer
|
||||
from .opendkim.deployer import OpendkimDeployer
|
||||
from .postfix.deployer import PostfixDeployer
|
||||
from .selfsigned.deployer import SelfSignedTlsDeployer
|
||||
from .www import build_webpages, find_merge_conflict, get_paths
|
||||
|
||||
|
||||
@@ -78,25 +80,22 @@ def remove_legacy_artifacts():
|
||||
)
|
||||
|
||||
|
||||
def _install_remote_venv_with_chatmaild() -> None:
|
||||
def _install_remote_venv_with_chatmaild(deployer) -> None:
|
||||
remove_legacy_artifacts()
|
||||
dist_file = _build_chatmaild(dist_dir=Path("chatmaild/dist"))
|
||||
remote_base_dir = "/usr/local/lib/chatmaild"
|
||||
remote_dist_file = f"{remote_base_dir}/dist/{dist_file.name}"
|
||||
remote_venv_dir = f"{remote_base_dir}/venv"
|
||||
root_owned = dict(user="root", group="root", mode="644")
|
||||
|
||||
apt.packages(
|
||||
name="apt install python3-virtualenv",
|
||||
packages=["python3-virtualenv"],
|
||||
)
|
||||
|
||||
files.put(
|
||||
name="Upload chatmaild source package",
|
||||
deployer.ensure_directory(f"{remote_base_dir}/dist")
|
||||
deployer.put_file(
|
||||
src=dist_file.open("rb"),
|
||||
dest=remote_dist_file,
|
||||
create_remote_dir=True,
|
||||
**root_owned,
|
||||
)
|
||||
|
||||
pip.virtualenv(
|
||||
@@ -118,67 +117,54 @@ def _install_remote_venv_with_chatmaild() -> None:
|
||||
)
|
||||
|
||||
|
||||
def _configure_remote_venv_with_chatmaild(config) -> None:
|
||||
def _configure_remote_venv_with_chatmaild(deployer, config) -> None:
|
||||
remote_base_dir = "/usr/local/lib/chatmaild"
|
||||
remote_venv_dir = f"{remote_base_dir}/venv"
|
||||
remote_chatmail_inipath = f"{remote_base_dir}/chatmail.ini"
|
||||
root_owned = dict(user="root", group="root", mode="644")
|
||||
|
||||
files.put(
|
||||
name=f"Upload {remote_chatmail_inipath}",
|
||||
deployer.put_file(
|
||||
src=config._getbytefile(),
|
||||
dest=remote_chatmail_inipath,
|
||||
**root_owned,
|
||||
)
|
||||
|
||||
files.template(
|
||||
src=get_resource("metrics.cron.j2"),
|
||||
dest="/etc/cron.d/chatmail-metrics",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
config={
|
||||
"mailboxes_dir": config.mailboxes_dir,
|
||||
"execpath": f"{remote_venv_dir}/bin/chatmail-metrics",
|
||||
},
|
||||
)
|
||||
deployer.remove_file("/etc/cron.d/chatmail-metrics")
|
||||
deployer.remove_file("/var/www/html/metrics")
|
||||
|
||||
|
||||
class UnboundDeployer(Deployer):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.need_restart = False
|
||||
|
||||
def install(self):
|
||||
# Run local DNS resolver `unbound`.
|
||||
# `resolvconf` takes care of setting up /etc/resolv.conf
|
||||
# to use 127.0.0.1 as the resolver.
|
||||
|
||||
#
|
||||
# On an IPv4-only system, if unbound is started but not
|
||||
# configured, it causes subsequent steps to fail to resolve hosts.
|
||||
# Here, we use policy-rc.d to prevent unbound from starting up
|
||||
# on initial install. Later, we will configure it and start it.
|
||||
#
|
||||
# For documentation about policy-rc.d, see:
|
||||
# https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
|
||||
#
|
||||
files.put(
|
||||
src=get_resource("policy-rc.d"),
|
||||
dest="/usr/sbin/policy-rc.d",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="755",
|
||||
)
|
||||
|
||||
apt.packages(
|
||||
name="Install unbound",
|
||||
packages=["unbound", "unbound-anchor", "dnsutils"],
|
||||
)
|
||||
|
||||
files.file("/usr/sbin/policy-rc.d", present=False)
|
||||
# On an IPv4-only system, if unbound is started but not configured,
|
||||
# it causes subsequent steps to fail to resolve hosts.
|
||||
with blocked_service_startup():
|
||||
apt.packages(
|
||||
name="Install unbound",
|
||||
packages=["unbound", "unbound-anchor", "dnsutils"],
|
||||
)
|
||||
|
||||
def configure(self):
|
||||
# Remove dynamic resolver managers that compete for /etc/resolv.conf.
|
||||
apt.packages(
|
||||
name="Purge resolvconf",
|
||||
packages=["resolvconf"],
|
||||
present=False,
|
||||
extra_uninstall_args="--purge",
|
||||
)
|
||||
# systemd-resolved can't be purged due to dependencies; stop and mask.
|
||||
server.shell(
|
||||
name="Stop and mask systemd-resolved",
|
||||
commands=[
|
||||
"systemctl stop systemd-resolved.service || true",
|
||||
"systemctl mask systemd-resolved.service",
|
||||
],
|
||||
)
|
||||
# Configure unbound resolver with Quad9 fallback and a trailing newline
|
||||
# (SolusVM bug).
|
||||
self.put_file(
|
||||
src=BytesIO(b"nameserver 127.0.0.1\nnameserver 9.9.9.9\n"),
|
||||
dest="/etc/resolv.conf",
|
||||
)
|
||||
server.shell(
|
||||
name="Generate root keys for validating DNSSEC",
|
||||
commands=[
|
||||
@@ -186,26 +172,15 @@ class UnboundDeployer(Deployer):
|
||||
],
|
||||
)
|
||||
if self.config.disable_ipv6:
|
||||
files.directory(
|
||||
self.ensure_directory(
|
||||
path="/etc/unbound/unbound.conf.d",
|
||||
present=True,
|
||||
user="root",
|
||||
group="root",
|
||||
mode="755",
|
||||
)
|
||||
conf = files.put(
|
||||
src=get_resource("unbound/unbound.conf.j2"),
|
||||
dest="/etc/unbound/unbound.conf.d/chatmail.conf",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
self.put_template(
|
||||
"unbound/unbound.conf.j2",
|
||||
"/etc/unbound/unbound.conf.d/chatmail.conf",
|
||||
)
|
||||
else:
|
||||
conf = files.file(
|
||||
path="/etc/unbound/unbound.conf.d/chatmail.conf",
|
||||
present=False,
|
||||
)
|
||||
self.need_restart |= conf.changed
|
||||
self.remove_file("/etc/unbound/unbound.conf.d/chatmail.conf")
|
||||
|
||||
def activate(self):
|
||||
server.shell(
|
||||
@@ -215,27 +190,25 @@ class UnboundDeployer(Deployer):
|
||||
],
|
||||
)
|
||||
|
||||
systemd.service(
|
||||
name="Start and enable unbound",
|
||||
service="unbound.service",
|
||||
running=True,
|
||||
enabled=True,
|
||||
restarted=self.need_restart,
|
||||
self.ensure_service("unbound.service")
|
||||
|
||||
self.ensure_service(
|
||||
"unbound-resolvconf.service",
|
||||
running=False,
|
||||
enabled=False,
|
||||
)
|
||||
|
||||
|
||||
class MtastsDeployer(Deployer):
|
||||
def configure(self):
|
||||
# Remove configuration.
|
||||
files.file("/etc/mta-sts-daemon.yml", present=False)
|
||||
files.directory("/usr/local/lib/postfix-mta-sts-resolver", present=False)
|
||||
files.file("/etc/systemd/system/mta-sts-daemon.service", present=False)
|
||||
self.remove_file("/etc/mta-sts-daemon.yml")
|
||||
self.remove_directory("/usr/local/lib/postfix-mta-sts-resolver")
|
||||
self.remove_file("/etc/systemd/system/mta-sts-daemon.service")
|
||||
|
||||
def activate(self):
|
||||
systemd.service(
|
||||
name="Stop MTA-STS daemon",
|
||||
service="mta-sts-daemon.service",
|
||||
daemon_reload=True,
|
||||
self.ensure_service(
|
||||
"mta-sts-daemon.service",
|
||||
running=False,
|
||||
enabled=False,
|
||||
)
|
||||
@@ -246,14 +219,7 @@ class WebsiteDeployer(Deployer):
|
||||
self.config = config
|
||||
|
||||
def install(self):
|
||||
files.directory(
|
||||
name="Ensure /var/www exists",
|
||||
path="/var/www",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="755",
|
||||
present=True,
|
||||
)
|
||||
self.ensure_directory("/var/www")
|
||||
|
||||
def configure(self):
|
||||
www_path, src_dir, build_dir = get_paths(self.config)
|
||||
@@ -268,6 +234,9 @@ class WebsiteDeployer(Deployer):
|
||||
# if www_folder is a hugo page, build it
|
||||
if build_dir:
|
||||
www_path = build_webpages(src_dir, build_dir, self.config)
|
||||
if www_path is None:
|
||||
logger.warning("Web page build failed, skipping website deployment")
|
||||
return
|
||||
# if it is not a hugo page, upload it as is
|
||||
files.rsync(
|
||||
f"{www_path}/", "/var/www/html", flags=["-avz", "--chown=www-data"]
|
||||
@@ -280,15 +249,11 @@ class LegacyRemoveDeployer(Deployer):
|
||||
|
||||
# remove historic expunge script
|
||||
# which is now implemented through a systemd timer (chatmail-expire)
|
||||
files.file(
|
||||
path="/etc/cron.d/expunge",
|
||||
present=False,
|
||||
)
|
||||
self.remove_file("/etc/cron.d/expunge")
|
||||
|
||||
# Remove OBS repository key that is no longer used.
|
||||
files.file("/etc/apt/keyrings/obs-home-deltachat.gpg", present=False)
|
||||
files.line(
|
||||
name="Remove DeltaChat OBS home repository from sources.list",
|
||||
self.remove_file("/etc/apt/keyrings/obs-home-deltachat.gpg")
|
||||
self.ensure_line(
|
||||
path="/etc/apt/sources.list",
|
||||
line="deb [signed-by=/etc/apt/keyrings/obs-home-deltachat.gpg] https://download.opensuse.org/repositories/home:/deltachat/Debian_12/ ./",
|
||||
escape_regex_characters=True,
|
||||
@@ -296,11 +261,7 @@ class LegacyRemoveDeployer(Deployer):
|
||||
)
|
||||
|
||||
# prior relay versions used filelogging
|
||||
files.directory(
|
||||
name="Ensure old logs on disk are deleted",
|
||||
path="/var/log/journal/",
|
||||
present=False,
|
||||
)
|
||||
self.remove_directory("/var/log/journal/")
|
||||
# remove echobot if it is still running
|
||||
if has_systemd() and host.get_fact(SystemdEnabled).get("echobot.service"):
|
||||
systemd.service(
|
||||
@@ -334,30 +295,21 @@ class TurnDeployer(Deployer):
|
||||
def install(self):
|
||||
(url, sha256sum) = {
|
||||
"x86_64": (
|
||||
"https://github.com/chatmail/chatmail-turn/releases/download/v0.3/chatmail-turn-x86_64-linux",
|
||||
"841e527c15fdc2940b0469e206188ea8f0af48533be12ecb8098520f813d41e4",
|
||||
"https://github.com/chatmail/chatmail-turn/releases/download/v0.4/chatmail-turn-x86_64-linux",
|
||||
"1ec1f5c50122165e858a5a91bcba9037a28aa8cb8b64b8db570aa457c6141a8a",
|
||||
),
|
||||
"aarch64": (
|
||||
"https://github.com/chatmail/chatmail-turn/releases/download/v0.3/chatmail-turn-aarch64-linux",
|
||||
"a5fc2d06d937b56a34e098d2cd72a82d3e89967518d159bf246dc69b65e81b42",
|
||||
"https://github.com/chatmail/chatmail-turn/releases/download/v0.4/chatmail-turn-aarch64-linux",
|
||||
"0fb3e792419494e21ecad536464929dba706bb2c88884ed8f1788141d26fc756",
|
||||
),
|
||||
}[host.get_fact(facts.server.Arch)]
|
||||
|
||||
existing_sha256sum = host.get_fact(Sha256File, "/usr/local/bin/chatmail-turn")
|
||||
if existing_sha256sum != sha256sum:
|
||||
server.shell(
|
||||
name="Download chatmail-turn",
|
||||
commands=[
|
||||
f"(curl -L {url} >/usr/local/bin/chatmail-turn.new && (echo '{sha256sum} /usr/local/bin/chatmail-turn.new' | sha256sum -c) && mv /usr/local/bin/chatmail-turn.new /usr/local/bin/chatmail-turn)",
|
||||
"chmod 755 /usr/local/bin/chatmail-turn",
|
||||
],
|
||||
)
|
||||
self.download_executable(url, "/usr/local/bin/chatmail-turn", sha256sum)
|
||||
|
||||
def configure(self):
|
||||
configure_remote_units(self.mail_domain, self.units)
|
||||
configure_remote_units(self, self.mail_domain, self.units)
|
||||
|
||||
def activate(self):
|
||||
activate_remote_units(self.units)
|
||||
activate_remote_units(self, self.units)
|
||||
|
||||
|
||||
class IrohDeployer(Deployer):
|
||||
@@ -375,72 +327,30 @@ class IrohDeployer(Deployer):
|
||||
"f8ef27631fac213b3ef668d02acd5b3e215292746a3fc71d90c63115446008b1",
|
||||
),
|
||||
}[host.get_fact(facts.server.Arch)]
|
||||
|
||||
existing_sha256sum = host.get_fact(Sha256File, "/usr/local/bin/iroh-relay")
|
||||
if existing_sha256sum != sha256sum:
|
||||
server.shell(
|
||||
name="Download iroh-relay",
|
||||
commands=[
|
||||
f"(curl -L {url} | gunzip | tar -x -f - ./iroh-relay -O >/usr/local/bin/iroh-relay.new && (echo '{sha256sum} /usr/local/bin/iroh-relay.new' | sha256sum -c) && mv /usr/local/bin/iroh-relay.new /usr/local/bin/iroh-relay)",
|
||||
"chmod 755 /usr/local/bin/iroh-relay",
|
||||
],
|
||||
)
|
||||
|
||||
self.need_restart = True
|
||||
self.download_executable(
|
||||
url,
|
||||
"/usr/local/bin/iroh-relay",
|
||||
sha256sum,
|
||||
extract="gunzip | tar -xf - ./iroh-relay -O",
|
||||
)
|
||||
|
||||
def configure(self):
|
||||
systemd_unit = files.put(
|
||||
name="Upload iroh-relay systemd unit",
|
||||
src=get_resource("iroh-relay.service"),
|
||||
dest="/etc/systemd/system/iroh-relay.service",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
self.need_restart |= systemd_unit.changed
|
||||
|
||||
iroh_config = files.put(
|
||||
name="Upload iroh-relay config",
|
||||
src=get_resource("iroh-relay.toml"),
|
||||
dest="/etc/iroh-relay.toml",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
self.need_restart |= iroh_config.changed
|
||||
self.ensure_systemd_unit("iroh-relay.service")
|
||||
self.put_file("iroh-relay.toml", "/etc/iroh-relay.toml")
|
||||
|
||||
def activate(self):
|
||||
systemd.service(
|
||||
name="Start and enable iroh-relay",
|
||||
service="iroh-relay.service",
|
||||
running=True,
|
||||
self.ensure_service(
|
||||
"iroh-relay.service",
|
||||
enabled=self.enable_iroh_relay,
|
||||
restarted=self.need_restart,
|
||||
)
|
||||
self.need_restart = False
|
||||
|
||||
|
||||
class JournaldDeployer(Deployer):
|
||||
def configure(self):
|
||||
journald_conf = files.put(
|
||||
name="Configure journald",
|
||||
src=get_resource("journald.conf"),
|
||||
dest="/etc/systemd/journald.conf",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
self.need_restart = journald_conf.changed
|
||||
self.put_file("journald.conf", "/etc/systemd/journald.conf")
|
||||
|
||||
def activate(self):
|
||||
systemd.service(
|
||||
name="Start and enable journald",
|
||||
service="systemd-journald.service",
|
||||
running=True,
|
||||
enabled=True,
|
||||
restarted=self.need_restart,
|
||||
)
|
||||
self.need_restart = False
|
||||
self.ensure_service("systemd-journald.service")
|
||||
|
||||
|
||||
class ChatmailVenvDeployer(Deployer):
|
||||
@@ -456,14 +366,14 @@ class ChatmailVenvDeployer(Deployer):
|
||||
)
|
||||
|
||||
def install(self):
|
||||
_install_remote_venv_with_chatmaild()
|
||||
_install_remote_venv_with_chatmaild(self)
|
||||
|
||||
def configure(self):
|
||||
_configure_remote_venv_with_chatmaild(self.config)
|
||||
configure_remote_units(self.config.mail_domain, self.units)
|
||||
_configure_remote_venv_with_chatmaild(self, self.config)
|
||||
configure_remote_units(self, self.config.mail_domain, self.units)
|
||||
|
||||
def activate(self):
|
||||
activate_remote_units(self.units)
|
||||
activate_remote_units(self, self.units)
|
||||
|
||||
|
||||
class ChatmailDeployer(Deployer):
|
||||
@@ -472,10 +382,15 @@ class ChatmailDeployer(Deployer):
|
||||
("iroh", None, None),
|
||||
]
|
||||
|
||||
def __init__(self, mail_domain):
|
||||
self.mail_domain = mail_domain
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.mail_domain = config.mail_domain
|
||||
|
||||
def install(self):
|
||||
self.put_file(
|
||||
src=BytesIO(b'APT::Install-Recommends "false";\n'),
|
||||
dest="/etc/apt/apt.conf.d/00InstallRecommends",
|
||||
)
|
||||
apt.update(name="apt update", cache_time=24 * 3600)
|
||||
apt.upgrade(name="upgrade apt packages", auto_remove=True)
|
||||
|
||||
@@ -488,12 +403,15 @@ class ChatmailDeployer(Deployer):
|
||||
name="Install rsync",
|
||||
packages=["rsync"],
|
||||
)
|
||||
apt.packages(
|
||||
name="Ensure cron is installed",
|
||||
packages=["cron"],
|
||||
)
|
||||
|
||||
def configure(self):
|
||||
# metadata crashes if the mailboxes dir does not exist
|
||||
self.ensure_directory(
|
||||
str(self.config.mailboxes_dir),
|
||||
owner="vmail",
|
||||
mode="700",
|
||||
)
|
||||
|
||||
# This file is used by auth proxy.
|
||||
# https://wiki.debian.org/EtcMailName
|
||||
server.shell(
|
||||
@@ -512,12 +430,7 @@ class FcgiwrapDeployer(Deployer):
|
||||
)
|
||||
|
||||
def activate(self):
|
||||
systemd.service(
|
||||
name="Start and enable fcgiwrap",
|
||||
service="fcgiwrap.service",
|
||||
running=True,
|
||||
enabled=True,
|
||||
)
|
||||
self.ensure_service("fcgiwrap.service")
|
||||
|
||||
|
||||
class GithashDeployer(Deployer):
|
||||
@@ -530,21 +443,29 @@ class GithashDeployer(Deployer):
|
||||
git_diff = subprocess.check_output(["git", "diff"]).decode()
|
||||
except Exception:
|
||||
git_diff = ""
|
||||
files.put(
|
||||
name="Upload chatmail relay git commit hash",
|
||||
src=StringIO(git_hash + git_diff),
|
||||
dest="/etc/chatmail-version",
|
||||
mode="700",
|
||||
)
|
||||
self.put_file(src=StringIO(git_hash + git_diff), dest="/etc/chatmail-version")
|
||||
|
||||
|
||||
def deploy_chatmail(config_path: Path, disable_mail: bool, website_only: bool, docker: bool) -> None:
|
||||
def get_tls_deployer(config, mail_domain):
|
||||
"""Select the appropriate TLS deployer based on config."""
|
||||
tls_domains = [mail_domain, f"mta-sts.{mail_domain}", f"www.{mail_domain}"]
|
||||
|
||||
if config.tls_cert_mode == "acme":
|
||||
return AcmetoolDeployer(config.acme_email, tls_domains)
|
||||
elif config.tls_cert_mode == "self":
|
||||
return SelfSignedTlsDeployer(mail_domain)
|
||||
elif config.tls_cert_mode == "external":
|
||||
return ExternalTlsDeployer(config.tls_cert_path, config.tls_key_path)
|
||||
else:
|
||||
raise ValueError(f"Unknown tls_cert_mode: {config.tls_cert_mode}")
|
||||
|
||||
|
||||
def deploy_chatmail(config_path: Path, disable_mail: bool, website_only: bool) -> None:
|
||||
"""Deploy a chat-mail instance.
|
||||
|
||||
:param config_path: path to chatmail.ini
|
||||
:param disable_mail: whether to disable postfix & dovecot
|
||||
:param website_only: if True, only deploy the website
|
||||
:param docker: whether it is running in a docker container
|
||||
"""
|
||||
config = read_config(config_path)
|
||||
check_config(config)
|
||||
@@ -554,28 +475,33 @@ def deploy_chatmail(config_path: Path, disable_mail: bool, website_only: bool, d
|
||||
Deployment().perform_stages([WebsiteDeployer(config)])
|
||||
return
|
||||
|
||||
if host.get_fact(Port, port=53) != "unbound":
|
||||
files.line(
|
||||
name="Add 9.9.9.9 to resolv.conf",
|
||||
path="/etc/resolv.conf",
|
||||
# Guard against resolv.conf missing a trailing newline (SolusVM bug).
|
||||
line="\nnameserver 9.9.9.9",
|
||||
)
|
||||
|
||||
# Check if mtail_address interface is available (if configured)
|
||||
if config.mtail_address and config.mtail_address not in ('127.0.0.1', '::1', 'localhost'):
|
||||
if config.mtail_address and config.mtail_address not in (
|
||||
"127.0.0.1",
|
||||
"::1",
|
||||
"localhost",
|
||||
):
|
||||
ipv4_addrs = host.get_fact(hardware.Ipv4Addrs)
|
||||
all_addresses = [addr for addrs in ipv4_addrs.values() for addr in addrs]
|
||||
if config.mtail_address not in all_addresses:
|
||||
Out().red(f"Deploy failed: mtail_address {config.mtail_address} is not available (VPN up?).\n")
|
||||
Out().red(
|
||||
f"Deploy failed: mtail_address {config.mtail_address} is not available (VPN up?).\n"
|
||||
)
|
||||
exit(1)
|
||||
|
||||
if not docker:
|
||||
if not is_in_container():
|
||||
port_services = [
|
||||
(["master", "smtpd"], 25),
|
||||
("unbound", 53),
|
||||
("acmetool", 80),
|
||||
]
|
||||
if config.tls_cert_mode == "acme":
|
||||
port_services.append(("acmetool", 402))
|
||||
port_services += [
|
||||
(["imap-login", "dovecot"], 143),
|
||||
# acmetool previously listened on port 80,
|
||||
# so don't complain during upgrade that moved it to port 402
|
||||
# and gave the port to nginx.
|
||||
(["acmetool", "nginx"], 80),
|
||||
("nginx", 443),
|
||||
(["master", "smtpd"], 465),
|
||||
(["master", "smtpd"], 587),
|
||||
@@ -600,17 +526,17 @@ def deploy_chatmail(config_path: Path, disable_mail: bool, website_only: bool, d
|
||||
)
|
||||
exit(1)
|
||||
|
||||
tls_domains = [mail_domain, f"mta-sts.{mail_domain}", f"www.{mail_domain}"]
|
||||
tls_deployer = get_tls_deployer(config, mail_domain)
|
||||
|
||||
all_deployers = [
|
||||
ChatmailDeployer(mail_domain),
|
||||
ChatmailDeployer(config),
|
||||
LegacyRemoveDeployer(),
|
||||
FiltermailDeployer(),
|
||||
JournaldDeployer(),
|
||||
UnboundDeployer(config),
|
||||
TurnDeployer(mail_domain),
|
||||
IrohDeployer(config.enable_iroh_relay),
|
||||
AcmetoolDeployer(config.acme_email, tls_domains),
|
||||
tls_deployer,
|
||||
WebsiteDeployer(config),
|
||||
ChatmailVenvDeployer(config),
|
||||
MtastsDeployer(),
|
||||
|
||||
@@ -1,25 +1,36 @@
|
||||
import datetime
|
||||
import importlib
|
||||
|
||||
from jinja2 import Template
|
||||
|
||||
from . import remote
|
||||
|
||||
|
||||
def parse_zone_records(text):
|
||||
"""Yield ``(name, ttl, rtype, rdata)`` from standard BIND-format text."""
|
||||
for raw_line in text.splitlines():
|
||||
line = raw_line.strip()
|
||||
if not line or line.startswith(";"):
|
||||
continue
|
||||
try:
|
||||
name, ttl, _in, rtype, rdata = line.split(None, 4)
|
||||
except ValueError:
|
||||
raise ValueError(f"Bad zone record line: {line!r}") from None
|
||||
name = name.rstrip(".")
|
||||
yield name, ttl, rtype.upper(), rdata
|
||||
|
||||
|
||||
def get_initial_remote_data(sshexec, mail_domain):
|
||||
return sshexec.logged(
|
||||
call=remote.rdns.perform_initial_checks, kwargs=dict(mail_domain=mail_domain)
|
||||
)
|
||||
|
||||
|
||||
def check_initial_remote_data(remote_data, *, print=print):
|
||||
def check_initial_remote_data(remote_data, *, strict_tls=True, print=print):
|
||||
mail_domain = remote_data["mail_domain"]
|
||||
if not remote_data["A"] and not remote_data["AAAA"]:
|
||||
print(f"Missing A and/or AAAA DNS records for {mail_domain}!")
|
||||
elif remote_data["MTA_STS"] != f"{mail_domain}.":
|
||||
elif strict_tls and remote_data["MTA_STS"] != f"{mail_domain}.":
|
||||
print("Missing MTA-STS CNAME record:")
|
||||
print(f"mta-sts.{mail_domain}. CNAME {mail_domain}.")
|
||||
elif remote_data["WWW"] != f"{mail_domain}.":
|
||||
elif strict_tls and remote_data["WWW"] != f"{mail_domain}.":
|
||||
print("Missing www CNAME record:")
|
||||
print(f"www.{mail_domain}. CNAME {mail_domain}.")
|
||||
else:
|
||||
@@ -31,13 +42,39 @@ def get_filled_zone_file(remote_data):
|
||||
if not sts_id:
|
||||
remote_data["sts_id"] = datetime.datetime.now().strftime("%Y%m%d%H%M")
|
||||
|
||||
template = importlib.resources.files(__package__).joinpath("chatmail.zone.j2")
|
||||
content = template.read_text()
|
||||
zonefile = Template(content).render(**remote_data)
|
||||
lines = [x.strip() for x in zonefile.split("\n") if x.strip()]
|
||||
d = remote_data["mail_domain"]
|
||||
|
||||
def append_record(name, rtype, rdata, ttl=3600):
|
||||
lines.append(f"{name:<40} {ttl:<6} IN {rtype:<5} {rdata}")
|
||||
|
||||
lines = ["; Required DNS entries"]
|
||||
if remote_data.get("A"):
|
||||
append_record(f"{d}.", "A", remote_data["A"])
|
||||
if remote_data.get("AAAA"):
|
||||
append_record(f"{d}.", "AAAA", remote_data["AAAA"])
|
||||
append_record(f"{d}.", "MX", f"10 {d}.")
|
||||
if remote_data.get("strict_tls"):
|
||||
append_record(f"_mta-sts.{d}.", "TXT", f'"v=STSv1; id={remote_data["sts_id"]}"')
|
||||
append_record(f"mta-sts.{d}.", "CNAME", f"{d}.")
|
||||
append_record(f"www.{d}.", "CNAME", f"{d}.")
|
||||
lines.append(remote_data["dkim_entry"])
|
||||
lines.append("")
|
||||
zonefile = "\n".join(lines)
|
||||
return zonefile
|
||||
lines.append("; Recommended DNS entries")
|
||||
append_record(f"{d}.", "TXT", '"v=spf1 a ~all"')
|
||||
append_record(f"_dmarc.{d}.", "TXT", '"v=DMARC1;p=reject;adkim=s;aspf=s"')
|
||||
if remote_data.get("acme_account_url"):
|
||||
append_record(
|
||||
f"{d}.",
|
||||
"CAA",
|
||||
f'0 issue "letsencrypt.org;accounturi={remote_data["acme_account_url"]}"',
|
||||
)
|
||||
append_record(f"_adsp._domainkey.{d}.", "TXT", '"dkim=discardable"')
|
||||
append_record(f"_submission._tcp.{d}.", "SRV", f"0 1 587 {d}.")
|
||||
append_record(f"_submissions._tcp.{d}.", "SRV", f"0 1 465 {d}.")
|
||||
append_record(f"_imap._tcp.{d}.", "SRV", f"0 1 143 {d}.")
|
||||
append_record(f"_imaps._tcp.{d}.", "SRV", f"0 1 993 {d}.")
|
||||
lines.append("")
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def check_full_zone(sshexec, remote_data, out, zonefile) -> int:
|
||||
@@ -58,7 +95,8 @@ def check_full_zone(sshexec, remote_data, out, zonefile) -> int:
|
||||
returncode = 1
|
||||
if remote_data.get("dkim_entry") in required_diff:
|
||||
out(
|
||||
"If the DKIM entry above does not work with your DNS provider, you can try this one:\n"
|
||||
"If the DKIM entry above does not work with your DNS provider,"
|
||||
" you can try this one:\n"
|
||||
)
|
||||
out(remote_data.get("web_dkim_entry") + "\n")
|
||||
if recommended_diff:
|
||||
|
||||
@@ -1,17 +1,32 @@
|
||||
import io
|
||||
import urllib.request
|
||||
|
||||
from chatmaild.config import Config
|
||||
from pyinfra import host
|
||||
from pyinfra.facts.server import Arch, Sysctl
|
||||
from pyinfra.facts.systemd import SystemdEnabled
|
||||
from pyinfra.operations import apt, files, server, systemd
|
||||
from pyinfra.facts.deb import DebPackages
|
||||
from pyinfra.facts.server import Arch, Command, Sysctl
|
||||
from pyinfra.operations import apt, files, server
|
||||
|
||||
from cmdeploy.basedeploy import (
|
||||
Deployer,
|
||||
activate_remote_units,
|
||||
blocked_service_startup,
|
||||
configure_remote_units,
|
||||
get_resource,
|
||||
has_systemd,
|
||||
is_in_container,
|
||||
)
|
||||
|
||||
DOVECOT_ARCHIVE_VERSION = "2.3.21+dfsg1-3"
|
||||
DOVECOT_PACKAGE_VERSION = f"1:{DOVECOT_ARCHIVE_VERSION}"
|
||||
|
||||
DOVECOT_SHA256 = {
|
||||
("core", "amd64"): "dd060706f52a306fa863d874717210b9fe10536c824afe1790eec247ded5b27d",
|
||||
("core", "arm64"): "e7548e8a82929722e973629ecc40fcfa886894cef3db88f23535149e7f730dc9",
|
||||
("imapd", "amd64"): "8d8dc6fc00bbb6cdb25d345844f41ce2f1c53f764b79a838eb2a03103eebfa86",
|
||||
("imapd", "arm64"): "178fa877ddd5df9930e8308b518f4b07df10e759050725f8217a0c1fb3fd707f",
|
||||
("lmtpd", "amd64"): "2f69ba5e35363de50962d42cccbfe4ed8495265044e244007d7ccddad77513ab",
|
||||
("lmtpd", "arm64"): "89f52fb36524f5877a177dff4a713ba771fd3f91f22ed0af7238d495e143b38f",
|
||||
}
|
||||
|
||||
|
||||
class DovecotDeployer(Deployer):
|
||||
daemon_reload = False
|
||||
@@ -23,134 +38,152 @@ class DovecotDeployer(Deployer):
|
||||
|
||||
def install(self):
|
||||
arch = host.get_fact(Arch)
|
||||
if has_systemd() and "dovecot.service" in host.get_fact(SystemdEnabled):
|
||||
return # already installed and running
|
||||
_install_dovecot_package("core", arch)
|
||||
_install_dovecot_package("imapd", arch)
|
||||
_install_dovecot_package("lmtpd", arch)
|
||||
with blocked_service_startup():
|
||||
debs = []
|
||||
for pkg in ("core", "imapd", "lmtpd"):
|
||||
deb, changed = _download_dovecot_package(pkg, arch)
|
||||
self.need_restart |= changed
|
||||
if deb:
|
||||
debs.append(deb)
|
||||
if debs:
|
||||
deb_list = " ".join(debs)
|
||||
# First dpkg may fail on missing dependencies (stderr suppressed);
|
||||
# apt-get --fix-broken pulls them in, then dpkg retries cleanly.
|
||||
server.shell(
|
||||
name="Install dovecot packages",
|
||||
commands=[
|
||||
f"dpkg --force-confdef --force-confold -i {deb_list} 2> /dev/null || true",
|
||||
"DEBIAN_FRONTEND=noninteractive apt-get -y --fix-broken install",
|
||||
f"dpkg --force-confdef --force-confold -i {deb_list}",
|
||||
],
|
||||
)
|
||||
self.need_restart = True
|
||||
self.put_file(
|
||||
src=io.StringIO(
|
||||
"Package: dovecot-*\n"
|
||||
"Pin: version *\n"
|
||||
"Pin-Priority: -1\n"
|
||||
),
|
||||
dest="/etc/apt/preferences.d/pin-dovecot",
|
||||
)
|
||||
|
||||
def configure(self):
|
||||
configure_remote_units(self.config.mail_domain, self.units)
|
||||
self.need_restart, self.daemon_reload = _configure_dovecot(self.config)
|
||||
configure_remote_units(self, self.config.mail_domain, self.units)
|
||||
_configure_dovecot(self, self.config)
|
||||
|
||||
def activate(self):
|
||||
activate_remote_units(self.units)
|
||||
activate_remote_units(self, self.units)
|
||||
|
||||
restart = False if self.disable_mail else self.need_restart
|
||||
# Detect stale binary: package installed but service still runs old (deleted) binary.
|
||||
if not self.disable_mail and not self.need_restart:
|
||||
stale = host.get_fact(
|
||||
Command,
|
||||
'pid=$(systemctl show -p MainPID --value dovecot.service 2>/dev/null);'
|
||||
' [ "${pid:-0}" != "0" ] && readlink "/proc/$pid/exe" 2>/dev/null | grep -q "(deleted)"'
|
||||
" && echo STALE || true",
|
||||
)
|
||||
if stale == "STALE":
|
||||
self.need_restart = True
|
||||
|
||||
systemd.service(
|
||||
name="Disable dovecot for now" if self.disable_mail else "Start and enable Dovecot",
|
||||
service="dovecot.service",
|
||||
running=False if self.disable_mail else True,
|
||||
enabled=False if self.disable_mail else True,
|
||||
restarted=restart,
|
||||
daemon_reload=self.daemon_reload,
|
||||
active = not self.disable_mail
|
||||
self.ensure_service(
|
||||
"dovecot.service",
|
||||
running=active,
|
||||
enabled=active,
|
||||
)
|
||||
self.need_restart = False
|
||||
|
||||
|
||||
def _install_dovecot_package(package: str, arch: str):
|
||||
def _pick_url(primary, fallback):
|
||||
try:
|
||||
req = urllib.request.Request(primary, method="HEAD")
|
||||
urllib.request.urlopen(req, timeout=10)
|
||||
return primary
|
||||
except Exception:
|
||||
return fallback
|
||||
|
||||
|
||||
def _download_dovecot_package(package: str, arch: str) -> tuple[str | None, bool]:
|
||||
"""Download a dovecot .deb if needed, return (path, changed)."""
|
||||
arch = "amd64" if arch == "x86_64" else arch
|
||||
arch = "arm64" if arch == "aarch64" else arch
|
||||
url = f"https://download.delta.chat/dovecot/dovecot-{package}_2.3.21%2Bdfsg1-3_{arch}.deb"
|
||||
deb_filename = "/root/" + url.split("/")[-1]
|
||||
|
||||
match (package, arch):
|
||||
case ("core", "amd64"):
|
||||
sha256 = "dd060706f52a306fa863d874717210b9fe10536c824afe1790eec247ded5b27d"
|
||||
case ("core", "arm64"):
|
||||
sha256 = "e7548e8a82929722e973629ecc40fcfa886894cef3db88f23535149e7f730dc9"
|
||||
case ("imapd", "amd64"):
|
||||
sha256 = "8d8dc6fc00bbb6cdb25d345844f41ce2f1c53f764b79a838eb2a03103eebfa86"
|
||||
case ("imapd", "arm64"):
|
||||
sha256 = "178fa877ddd5df9930e8308b518f4b07df10e759050725f8217a0c1fb3fd707f"
|
||||
case ("lmtpd", "amd64"):
|
||||
sha256 = "2f69ba5e35363de50962d42cccbfe4ed8495265044e244007d7ccddad77513ab"
|
||||
case ("lmtpd", "arm64"):
|
||||
sha256 = "89f52fb36524f5877a177dff4a713ba771fd3f91f22ed0af7238d495e143b38f"
|
||||
case _:
|
||||
apt.packages(packages=[f"dovecot-{package}"])
|
||||
return
|
||||
pkg_name = f"dovecot-{package}"
|
||||
sha256 = DOVECOT_SHA256.get((package, arch))
|
||||
if sha256 is None:
|
||||
op = apt.packages(packages=[pkg_name])
|
||||
return None, bool(getattr(op, "changed", False))
|
||||
|
||||
installed_versions = host.get_fact(DebPackages).get(pkg_name, [])
|
||||
if DOVECOT_PACKAGE_VERSION in installed_versions:
|
||||
return None, False
|
||||
|
||||
url_version = DOVECOT_ARCHIVE_VERSION.replace("+", "%2B")
|
||||
deb_base = f"{pkg_name}_{url_version}_{arch}.deb"
|
||||
primary_url = f"https://download.delta.chat/dovecot/{deb_base}"
|
||||
fallback_url = f"https://github.com/chatmail/dovecot/releases/download/upstream%2F{url_version}/{deb_base}"
|
||||
url = _pick_url(primary_url, fallback_url)
|
||||
deb_filename = f"/root/{deb_base}"
|
||||
|
||||
files.download(
|
||||
name=f"Download dovecot-{package}",
|
||||
name=f"Download {pkg_name}",
|
||||
src=url,
|
||||
dest=deb_filename,
|
||||
sha256sum=sha256,
|
||||
cache_time=60 * 60 * 24 * 365 * 10, # never redownload the package
|
||||
)
|
||||
|
||||
apt.deb(name=f"Install dovecot-{package}", src=deb_filename)
|
||||
return deb_filename, True
|
||||
|
||||
|
||||
def _configure_dovecot(config: Config, debug: bool = False) -> (bool, bool):
|
||||
def _configure_dovecot(deployer, config: Config, debug: bool = False):
|
||||
"""Configures Dovecot IMAP server."""
|
||||
need_restart = False
|
||||
daemon_reload = False
|
||||
|
||||
main_config = files.template(
|
||||
src=get_resource("dovecot/dovecot.conf.j2"),
|
||||
dest="/etc/dovecot/dovecot.conf",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
deployer.put_template(
|
||||
"dovecot/dovecot.conf.j2",
|
||||
"/etc/dovecot/dovecot.conf",
|
||||
config=config,
|
||||
debug=debug,
|
||||
disable_ipv6=config.disable_ipv6,
|
||||
)
|
||||
need_restart |= main_config.changed
|
||||
auth_config = files.put(
|
||||
src=get_resource("dovecot/auth.conf"),
|
||||
dest="/etc/dovecot/auth.conf",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
deployer.put_file("dovecot/auth.conf", "/etc/dovecot/auth.conf")
|
||||
deployer.put_file(
|
||||
"dovecot/push_notification.lua", "/etc/dovecot/push_notification.lua"
|
||||
)
|
||||
need_restart |= auth_config.changed
|
||||
lua_push_notification_script = files.put(
|
||||
src=get_resource("dovecot/push_notification.lua"),
|
||||
dest="/etc/dovecot/push_notification.lua",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
need_restart |= lua_push_notification_script.changed
|
||||
|
||||
# as per https://doc.dovecot.org/2.3/configuration_manual/os/
|
||||
# it is recommended to set the following inotify limits
|
||||
if config.change_kernel_settings:
|
||||
for name in ("max_user_instances", "max_user_watches"):
|
||||
key = f"fs.inotify.{name}"
|
||||
if host.get_fact(Sysctl)[key] > 65535:
|
||||
# Skip updating limits if already sufficient
|
||||
# (enables running in incus containers where sysctl readonly)
|
||||
continue
|
||||
server.sysctl(
|
||||
name=f"Change {key}",
|
||||
key=key,
|
||||
value=65535,
|
||||
persist=True,
|
||||
can_modify = not is_in_container()
|
||||
for name in ("max_user_instances", "max_user_watches"):
|
||||
key = f"fs.inotify.{name}"
|
||||
value = host.get_fact(Sysctl).get(key, 0)
|
||||
if value > 65534:
|
||||
continue
|
||||
if not can_modify:
|
||||
print(
|
||||
"\n!!!! refusing to attempt sysctl setting in containers\n"
|
||||
f"!!!! dovecot: sysctl {key!r}={value}, should be >65534 for production setups\n"
|
||||
"!!!!"
|
||||
)
|
||||
continue
|
||||
server.sysctl(
|
||||
name=f"Change {key}",
|
||||
key=key,
|
||||
value=65535,
|
||||
persist=True,
|
||||
)
|
||||
|
||||
timezone_env = files.line(
|
||||
deployer.ensure_line(
|
||||
name="Set TZ environment variable",
|
||||
path="/etc/environment",
|
||||
line="TZ=:/etc/localtime",
|
||||
)
|
||||
need_restart |= timezone_env.changed
|
||||
|
||||
restart_conf = files.put(
|
||||
name="dovecot: restart automatically on failure",
|
||||
src=get_resource("service/10_restart.conf"),
|
||||
dest="/etc/systemd/system/dovecot.service.d/10_restart.conf",
|
||||
deployer.put_file(
|
||||
"service/10_restart_on_failure.conf",
|
||||
"/etc/systemd/system/dovecot.service.d/10_restart.conf",
|
||||
)
|
||||
daemon_reload |= restart_conf.changed
|
||||
|
||||
# Validate dovecot configuration before restart
|
||||
if need_restart:
|
||||
if deployer.need_restart:
|
||||
server.shell(
|
||||
name="Validate dovecot configuration",
|
||||
commands=["doveconf -n >/dev/null"],
|
||||
)
|
||||
|
||||
return need_restart, daemon_reload
|
||||
|
||||
@@ -133,6 +133,11 @@ protocol lmtp {
|
||||
# mail_lua and push_notification_lua are needed for Lua push notification handler.
|
||||
# <https://doc.dovecot.org/2.3/configuration_manual/push_notification/#configuration>
|
||||
mail_plugins = $mail_plugins mail_lua notify push_notification push_notification_lua
|
||||
|
||||
# Disable fsync for LMTP. May lose delivered message,
|
||||
# but unlikely to cause problems with multiple relays.
|
||||
# https://doc.dovecot.org/2.3/admin_manual/mailbox_formats/#fsyncing
|
||||
mail_fsync = never
|
||||
}
|
||||
|
||||
plugin {
|
||||
@@ -144,12 +149,26 @@ plugin {
|
||||
}
|
||||
|
||||
plugin {
|
||||
# for now we define static quota-rules for all users
|
||||
quota = maildir:User quota
|
||||
quota_rule = *:storage={{ config.max_mailbox_size }}
|
||||
quota_max_mail_size={{ config.max_message_size }}
|
||||
quota_grace = 0
|
||||
# quota_over_flag_value = TRUE
|
||||
|
||||
quota_rule = *:storage={{ config.max_mailbox_size_mb }}M
|
||||
|
||||
# Trigger at 75%% of quota, expire oldest messages down to 70%%.
|
||||
# The percentages are chosen to prevent current Delta Chat users
|
||||
# from seeing "quota warnings" which trigger at 80% and 95%.
|
||||
|
||||
quota_warning = storage=75%% quota-warning {{ config.max_mailbox_size_mb * 70 // 100 }} {{ config.mailboxes_dir }}/%u
|
||||
}
|
||||
|
||||
service quota-warning {
|
||||
executable = script /usr/local/lib/chatmaild/venv/bin/chatmail-quota-expire
|
||||
user = vmail
|
||||
unix_listener quota-warning {
|
||||
user = vmail
|
||||
mode = 0600
|
||||
}
|
||||
}
|
||||
|
||||
# push_notification configuration
|
||||
@@ -228,8 +247,8 @@ service anvil {
|
||||
}
|
||||
|
||||
ssl = required
|
||||
ssl_cert = </var/lib/acme/live/{{ config.mail_domain }}/fullchain
|
||||
ssl_key = </var/lib/acme/live/{{ config.mail_domain }}/privkey
|
||||
ssl_cert = <{{ config.tls_cert_path }}
|
||||
ssl_key = <{{ config.tls_key_path }}
|
||||
ssl_dh = </usr/share/dovecot/dh.pem
|
||||
ssl_min_protocol = TLSv1.3
|
||||
ssl_prefer_server_ciphers = yes
|
||||
@@ -252,6 +271,9 @@ protocol imap {
|
||||
# sort -sn <(sed 's/ / C: /' *.in) <(sed 's/ / S: /' cat *.out)
|
||||
|
||||
rawlog_dir = %h
|
||||
|
||||
# Disable fsync for IMAP. May lose IMAP changes like setting flags.
|
||||
mail_fsync = never
|
||||
}
|
||||
{% endif %}
|
||||
|
||||
|
||||
44
cmdeploy/src/cmdeploy/external/deployer.py
vendored
Normal file
44
cmdeploy/src/cmdeploy/external/deployer.py
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
|
||||
from pyinfra import host
|
||||
from pyinfra.facts.files import File
|
||||
|
||||
from ..basedeploy import Deployer
|
||||
|
||||
|
||||
class ExternalTlsDeployer(Deployer):
|
||||
"""Expects TLS certificates to be managed on the server.
|
||||
|
||||
Validates that the configured certificate and key files
|
||||
exist on the remote host. Installs a systemd path unit
|
||||
that watches the certificate file and automatically
|
||||
restarts/reloads affected services when it changes.
|
||||
"""
|
||||
|
||||
def __init__(self, cert_path, key_path):
|
||||
self.cert_path = cert_path
|
||||
self.key_path = key_path
|
||||
|
||||
def configure(self):
|
||||
# Verify cert and key exist on the remote host using pyinfra facts.
|
||||
for path in (self.cert_path, self.key_path):
|
||||
if host.get_fact(File, path=path) is None:
|
||||
raise Exception(f"External TLS file not found on server: {path}")
|
||||
|
||||
self.ensure_systemd_unit(
|
||||
"external/tls-cert-reload.path.j2",
|
||||
cert_path=self.cert_path,
|
||||
)
|
||||
self.ensure_systemd_unit(
|
||||
"external/tls-cert-reload.service",
|
||||
)
|
||||
|
||||
def activate(self):
|
||||
# No explicit reload needed here: dovecot/nginx read the cert
|
||||
# on startup, and the .path watcher handles live changes.
|
||||
self.ensure_service(
|
||||
"tls-cert-reload.path",
|
||||
running=True,
|
||||
enabled=True,
|
||||
)
|
||||
|
||||
|
||||
15
cmdeploy/src/cmdeploy/external/tls-cert-reload.path.j2
vendored
Normal file
15
cmdeploy/src/cmdeploy/external/tls-cert-reload.path.j2
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
# Watch the TLS certificate file for changes.
|
||||
# When the cert is updated (e.g. renewed by an external process),
|
||||
# this triggers tls-cert-reload.service to reload the affected services.
|
||||
#
|
||||
# NOTE: changes to the certificates are not detected if they cross bind-mount boundaries.
|
||||
# After cert renewal, you must then trigger the reload explicitly:
|
||||
# systemctl start tls-cert-reload.service
|
||||
[Unit]
|
||||
Description=Watch TLS certificate for changes
|
||||
|
||||
[Path]
|
||||
PathChanged={{ cert_path }}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
15
cmdeploy/src/cmdeploy/external/tls-cert-reload.service
vendored
Normal file
15
cmdeploy/src/cmdeploy/external/tls-cert-reload.service
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
# Reload services that cache the TLS certificate.
|
||||
#
|
||||
# dovecot: caches the cert at startup; reload re-reads SSL certs
|
||||
# without dropping existing connections.
|
||||
# nginx: caches the cert at startup; reload gracefully picks up
|
||||
# the new cert for new connections.
|
||||
# postfix: reads the cert fresh on each TLS handshake,
|
||||
# does NOT need a reload/restart.
|
||||
[Unit]
|
||||
Description=Reload TLS services after certificate change
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=/bin/systemctl try-reload-or-restart dovecot
|
||||
ExecStart=/bin/systemctl try-reload-or-restart nginx
|
||||
@@ -1,52 +1,40 @@
|
||||
from pyinfra import facts, host
|
||||
from pyinfra.operations import files, systemd
|
||||
import os
|
||||
|
||||
from cmdeploy.basedeploy import Deployer, get_resource
|
||||
from pyinfra import facts, host
|
||||
|
||||
from cmdeploy.basedeploy import Deployer
|
||||
|
||||
|
||||
class FiltermailDeployer(Deployer):
|
||||
services = ["filtermail", "filtermail-incoming"]
|
||||
services = ["filtermail", "filtermail-incoming", "filtermail-transport"]
|
||||
bin_path = "/usr/local/bin/filtermail"
|
||||
config_path = "/usr/local/lib/chatmaild/chatmail.ini"
|
||||
|
||||
def __init__(self):
|
||||
self.need_restart = False
|
||||
|
||||
def install(self):
|
||||
local_bin = os.environ.get("CHATMAIL_FILTERMAIL_BINARY")
|
||||
if local_bin:
|
||||
self.put_executable(
|
||||
src=local_bin,
|
||||
dest=self.bin_path,
|
||||
)
|
||||
return
|
||||
|
||||
arch = host.get_fact(facts.server.Arch)
|
||||
url = f"https://github.com/chatmail/filtermail/releases/download/v0.3.0/filtermail-{arch}"
|
||||
url = f"https://github.com/chatmail/filtermail/releases/download/v0.6.4/filtermail-{arch}"
|
||||
sha256sum = {
|
||||
"x86_64": "f14a31323ae2dad3b59d3fdafcde507521da2f951a9478cd1f2fe2b4463df71d",
|
||||
"aarch64": "933770d75046c4fd7084ce8d43f905f8748333426ad839154f0fc654755ef09f",
|
||||
"x86_64": "5295115952c72e4c4ec3c85546e094b4155a4c702c82bd71fcdcb744dc73adf6",
|
||||
"aarch64": "6892244f17b8f26ccb465766e96028e7222b3c8adefca9fc6bfe9ff332ca8dff",
|
||||
}[arch]
|
||||
self.need_restart |= files.download(
|
||||
name="Download filtermail",
|
||||
src=url,
|
||||
sha256sum=sha256sum,
|
||||
dest=self.bin_path,
|
||||
mode="755",
|
||||
).changed
|
||||
self.download_executable(url, self.bin_path, sha256sum)
|
||||
|
||||
def configure(self):
|
||||
for service in self.services:
|
||||
self.need_restart |= files.template(
|
||||
src=get_resource(f"filtermail/{service}.service.j2"),
|
||||
dest=f"/etc/systemd/system/{service}.service",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
self.ensure_systemd_unit(
|
||||
f"filtermail/{service}.service.j2",
|
||||
bin_path=self.bin_path,
|
||||
config_path=self.config_path,
|
||||
).changed
|
||||
)
|
||||
|
||||
def activate(self):
|
||||
for service in self.services:
|
||||
systemd.service(
|
||||
name=f"Start and enable {service}",
|
||||
service=f"{service}.service",
|
||||
running=True,
|
||||
enabled=True,
|
||||
restarted=self.need_restart,
|
||||
daemon_reload=True,
|
||||
)
|
||||
self.need_restart = False
|
||||
self.ensure_service(f"{service}.service")
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
[Unit]
|
||||
Description=Chatmail transport service
|
||||
|
||||
[Service]
|
||||
ExecStart={{ bin_path }} {{ config_path }} transport
|
||||
Restart=always
|
||||
RestartSec=30
|
||||
User=vmail
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1 +0,0 @@
|
||||
*/5 * * * * root {{ config.execpath }} {{ config.mailboxes_dir }} >/var/www/html/metrics
|
||||
@@ -78,3 +78,11 @@ counter rejected_unencrypted_mail_count
|
||||
/Rejected unencrypted mail/ {
|
||||
rejected_unencrypted_mail_count++
|
||||
}
|
||||
|
||||
counter quota_expire_runs
|
||||
counter quota_expire_removed_files
|
||||
|
||||
/quota-expire: removed (?P<count>\d+) message\(s\)/ {
|
||||
quota_expire_runs++
|
||||
quota_expire_removed_files += $count
|
||||
}
|
||||
|
||||
@@ -1,10 +1,7 @@
|
||||
from pyinfra import facts, host
|
||||
from pyinfra.operations import apt, files, server, systemd
|
||||
from pyinfra.operations import apt
|
||||
|
||||
from cmdeploy.basedeploy import (
|
||||
Deployer,
|
||||
get_resource,
|
||||
)
|
||||
from cmdeploy.basedeploy import Deployer
|
||||
|
||||
|
||||
class MtailDeployer(Deployer):
|
||||
@@ -18,51 +15,30 @@ class MtailDeployer(Deployer):
|
||||
(url, sha256sum) = {
|
||||
"x86_64": (
|
||||
"https://github.com/google/mtail/releases/download/v3.0.8/mtail_3.0.8_linux_amd64.tar.gz",
|
||||
"123c2ee5f48c3eff12ebccee38befd2233d715da736000ccde49e3d5607724e4",
|
||||
"d55cb601049c5e61eabab29998dbbcea95d480e5448544f9470337ba2eea882e",
|
||||
),
|
||||
"aarch64": (
|
||||
"https://github.com/google/mtail/releases/download/v3.0.8/mtail_3.0.8_linux_arm64.tar.gz",
|
||||
"aa04811c0929b6754408676de520e050c45dddeb3401881888a092c9aea89cae",
|
||||
"f748db8ad2a1e0b63684d4c8868cf6a373a20f7e6922e5ece601fff0ee00eb1a",
|
||||
),
|
||||
}[host.get_fact(facts.server.Arch)]
|
||||
|
||||
server.shell(
|
||||
name="Download mtail",
|
||||
commands=[
|
||||
f"(echo '{sha256sum} /usr/local/bin/mtail' | sha256sum -c) || (curl -L {url} | gunzip | tar -x -f - mtail -O >/usr/local/bin/mtail.new && mv /usr/local/bin/mtail.new /usr/local/bin/mtail)",
|
||||
"chmod 755 /usr/local/bin/mtail",
|
||||
],
|
||||
self.download_executable(
|
||||
url,
|
||||
"/usr/local/bin/mtail",
|
||||
sha256sum,
|
||||
extract="gunzip | tar -xf - mtail -O",
|
||||
)
|
||||
|
||||
def configure(self):
|
||||
# Using our own systemd unit instead of `/usr/lib/systemd/system/mtail.service`.
|
||||
# This allows to read from journalctl instead of log files.
|
||||
files.template(
|
||||
src=get_resource("mtail/mtail.service.j2"),
|
||||
dest="/etc/systemd/system/mtail.service",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
self.ensure_systemd_unit(
|
||||
"mtail/mtail.service.j2",
|
||||
address=self.mtail_address or "127.0.0.1",
|
||||
port=3903,
|
||||
)
|
||||
|
||||
mtail_conf = files.put(
|
||||
name="Mtail configuration",
|
||||
src=get_resource("mtail/delivered_mail.mtail"),
|
||||
dest="/etc/mtail/delivered_mail.mtail",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
self.need_restart = mtail_conf.changed
|
||||
self.put_file("mtail/delivered_mail.mtail", "/etc/mtail/delivered_mail.mtail")
|
||||
|
||||
def activate(self):
|
||||
systemd.service(
|
||||
name="Start and enable mtail",
|
||||
service="mtail.service",
|
||||
running=bool(self.mtail_address),
|
||||
enabled=bool(self.mtail_address),
|
||||
restarted=self.need_restart,
|
||||
)
|
||||
self.need_restart = False
|
||||
active = bool(self.mtail_address)
|
||||
self.ensure_service("mtail.service", running=active, enabled=active)
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
[Unit]
|
||||
Description=mtail
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart=/bin/sh -c "journalctl -f -o short-iso -n 0 | /usr/local/bin/mtail --address={{ address }} --port={{ port }} --progs /etc/mtail --logtostderr --logs -"
|
||||
Restart=on-failure
|
||||
RestartSec=2s
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
@@ -1,47 +1,47 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<clientConfig version="1.1">
|
||||
<emailProvider id="{{ config.domain_name }}">
|
||||
<domain>{{ config.domain_name }}</domain>
|
||||
<displayName>{{ config.domain_name }} chatmail</displayName>
|
||||
<displayShortName>{{ config.domain_name }}</displayShortName>
|
||||
<emailProvider id="{{ config.mail_domain }}">
|
||||
<domain>{{ config.mail_domain }}</domain>
|
||||
<displayName>{{ config.mail_domain }} chatmail</displayName>
|
||||
<displayShortName>{{ config.mail_domain }}</displayShortName>
|
||||
<incomingServer type="imap">
|
||||
<hostname>{{ config.domain_name }}</hostname>
|
||||
<hostname>{{ config.mail_domain }}</hostname>
|
||||
<port>993</port>
|
||||
<socketType>SSL</socketType>
|
||||
<authentication>password-cleartext</authentication>
|
||||
<username>%EMAILADDRESS%</username>
|
||||
</incomingServer>
|
||||
<incomingServer type="imap">
|
||||
<hostname>{{ config.domain_name }}</hostname>
|
||||
<hostname>{{ config.mail_domain }}</hostname>
|
||||
<port>143</port>
|
||||
<socketType>STARTTLS</socketType>
|
||||
<authentication>password-cleartext</authentication>
|
||||
<username>%EMAILADDRESS%</username>
|
||||
</incomingServer>
|
||||
<incomingServer type="imap">
|
||||
<hostname>{{ config.domain_name }}</hostname>
|
||||
<hostname>{{ config.mail_domain }}</hostname>
|
||||
<port>443</port>
|
||||
<socketType>SSL</socketType>
|
||||
<authentication>password-cleartext</authentication>
|
||||
<username>%EMAILADDRESS%</username>
|
||||
</incomingServer>
|
||||
<outgoingServer type="smtp">
|
||||
<hostname>{{ config.domain_name }}</hostname>
|
||||
<hostname>{{ config.mail_domain }}</hostname>
|
||||
<port>465</port>
|
||||
<socketType>SSL</socketType>
|
||||
<authentication>password-cleartext</authentication>
|
||||
<username>%EMAILADDRESS%</username>
|
||||
</outgoingServer>
|
||||
<outgoingServer type="smtp">
|
||||
<hostname>{{ config.domain_name }}</hostname>
|
||||
<hostname>{{ config.mail_domain }}</hostname>
|
||||
<port>587</port>
|
||||
<socketType>STARTTLS</socketType>
|
||||
<authentication>password-cleartext</authentication>
|
||||
<username>%EMAILADDRESS%</username>
|
||||
</outgoingServer>
|
||||
<outgoingServer type="smtp">
|
||||
<hostname>{{ config.domain_name }}</hostname>
|
||||
<hostname>{{ config.mail_domain }}</hostname>
|
||||
<port>443</port>
|
||||
<socketType>SSL</socketType>
|
||||
<authentication>password-cleartext</authentication>
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
from chatmaild.config import Config
|
||||
from pyinfra.operations import apt, files, systemd
|
||||
from pyinfra.operations import apt
|
||||
|
||||
from cmdeploy.basedeploy import (
|
||||
Deployer,
|
||||
@@ -31,87 +31,50 @@ class NginxDeployer(Deployer):
|
||||
# For documentation about policy-rc.d, see:
|
||||
# https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
|
||||
#
|
||||
files.put(
|
||||
src=get_resource("policy-rc.d"),
|
||||
dest="/usr/sbin/policy-rc.d",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="755",
|
||||
)
|
||||
self.put_executable(src="policy-rc.d", dest="/usr/sbin/policy-rc.d")
|
||||
|
||||
apt.packages(
|
||||
name="Install nginx",
|
||||
packages=["nginx", "libnginx-mod-stream"],
|
||||
)
|
||||
|
||||
files.file("/usr/sbin/policy-rc.d", present=False)
|
||||
self.remove_file("/usr/sbin/policy-rc.d")
|
||||
|
||||
def configure(self):
|
||||
self.need_restart = _configure_nginx(self.config)
|
||||
_configure_nginx(self, self.config)
|
||||
|
||||
def activate(self):
|
||||
systemd.service(
|
||||
name="Start and enable nginx",
|
||||
service="nginx.service",
|
||||
running=True,
|
||||
enabled=True,
|
||||
restarted=self.need_restart,
|
||||
)
|
||||
self.need_restart = False
|
||||
self.ensure_service("nginx.service")
|
||||
|
||||
|
||||
def _configure_nginx(config: Config, debug: bool = False) -> bool:
|
||||
def _configure_nginx(deployer, config: Config, debug: bool = False):
|
||||
"""Configures nginx HTTP server."""
|
||||
need_restart = False
|
||||
|
||||
main_config = files.template(
|
||||
src=get_resource("nginx/nginx.conf.j2"),
|
||||
dest="/etc/nginx/nginx.conf",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
config={"domain_name": config.mail_domain},
|
||||
deployer.put_template(
|
||||
"nginx/nginx.conf.j2",
|
||||
"/etc/nginx/nginx.conf",
|
||||
config=config,
|
||||
disable_ipv6=config.disable_ipv6,
|
||||
)
|
||||
need_restart |= main_config.changed
|
||||
|
||||
autoconfig = files.template(
|
||||
src=get_resource("nginx/autoconfig.xml.j2"),
|
||||
dest="/var/www/html/.well-known/autoconfig/mail/config-v1.1.xml",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
config={"domain_name": config.mail_domain},
|
||||
deployer.put_template(
|
||||
"nginx/autoconfig.xml.j2",
|
||||
"/var/www/html/.well-known/autoconfig/mail/config-v1.1.xml",
|
||||
config=config,
|
||||
)
|
||||
need_restart |= autoconfig.changed
|
||||
|
||||
mta_sts_config = files.template(
|
||||
src=get_resource("nginx/mta-sts.txt.j2"),
|
||||
dest="/var/www/html/.well-known/mta-sts.txt",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
config={"domain_name": config.mail_domain},
|
||||
deployer.put_template(
|
||||
"nginx/mta-sts.txt.j2",
|
||||
"/var/www/html/.well-known/mta-sts.txt",
|
||||
config=config,
|
||||
)
|
||||
need_restart |= mta_sts_config.changed
|
||||
|
||||
# install CGI newemail script
|
||||
#
|
||||
cgi_dir = "/usr/lib/cgi-bin"
|
||||
files.directory(
|
||||
name=f"Ensure {cgi_dir} exists",
|
||||
path=cgi_dir,
|
||||
user="root",
|
||||
group="root",
|
||||
)
|
||||
deployer.ensure_directory(cgi_dir)
|
||||
|
||||
files.put(
|
||||
name="Upload cgi newemail.py script",
|
||||
deployer.put_executable(
|
||||
src=get_resource("newemail.py", pkg="chatmaild").open("rb"),
|
||||
dest=f"{cgi_dir}/newemail.py",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="755",
|
||||
)
|
||||
|
||||
return need_restart
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
version: STSv1
|
||||
mode: enforce
|
||||
mx: {{ config.domain_name }}
|
||||
mx: {{ config.mail_domain }}
|
||||
max_age: 2419200
|
||||
|
||||
@@ -42,6 +42,9 @@ stream {
|
||||
}
|
||||
|
||||
http {
|
||||
{% if config.tls_cert_mode == "self" %}
|
||||
limit_req_zone $binary_remote_addr zone=newaccount:10m rate=2r/s;
|
||||
{% endif %}
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
|
||||
@@ -51,10 +54,10 @@ http {
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
ssl_prefer_server_ciphers on;
|
||||
ssl_certificate /var/lib/acme/live/{{ config.domain_name }}/fullchain;
|
||||
ssl_certificate_key /var/lib/acme/live/{{ config.domain_name }}/privkey;
|
||||
ssl_certificate {{ config.tls_cert_path }};
|
||||
ssl_certificate_key {{ config.tls_key_path }};
|
||||
|
||||
gzip on;
|
||||
|
||||
@@ -66,26 +69,30 @@ http {
|
||||
|
||||
index index.html index.htm;
|
||||
|
||||
server_name {{ config.domain_name }} www.{{ config.domain_name }} mta-sts.{{ config.domain_name }};
|
||||
server_name {{ config.mail_domain }} mta-sts.{{ config.mail_domain }};
|
||||
|
||||
access_log syslog:server=unix:/dev/log,facility=local7;
|
||||
|
||||
location /mxdeliv {
|
||||
proxy_pass http://127.0.0.1:{{ config.filtermail_http_port_incoming }};
|
||||
}
|
||||
|
||||
location / {
|
||||
# First attempt to serve request as file, then
|
||||
# as directory, then fall back to displaying a 404.
|
||||
try_files $uri $uri/ =404;
|
||||
}
|
||||
|
||||
location /metrics {
|
||||
default_type text/plain;
|
||||
}
|
||||
|
||||
location /new {
|
||||
{% if config.tls_cert_mode != "self" %}
|
||||
if ($request_method = GET) {
|
||||
# Redirect to Delta Chat,
|
||||
# which will in turn do a POST request.
|
||||
return 301 dcaccount:https://{{ config.domain_name }}/new;
|
||||
return 301 dcaccount:https://{{ config.mail_domain }}/new;
|
||||
}
|
||||
{% else %}
|
||||
limit_req zone=newaccount burst=5 nodelay;
|
||||
{% endif %}
|
||||
|
||||
fastcgi_pass unix:/run/fcgiwrap.socket;
|
||||
include /etc/nginx/fastcgi_params;
|
||||
@@ -99,9 +106,11 @@ http {
|
||||
#
|
||||
# Redirects are only for browsers.
|
||||
location /cgi-bin/newemail.py {
|
||||
{% if config.tls_cert_mode != "self" %}
|
||||
if ($request_method = GET) {
|
||||
return 301 dcaccount:https://{{ config.domain_name }}/new;
|
||||
return 301 dcaccount:https://{{ config.mail_domain }}/new;
|
||||
}
|
||||
{% endif %}
|
||||
|
||||
fastcgi_pass unix:/run/fcgiwrap.socket;
|
||||
include /etc/nginx/fastcgi_params;
|
||||
@@ -132,8 +141,29 @@ http {
|
||||
# Redirect www. to non-www
|
||||
server {
|
||||
listen 127.0.0.1:8443 ssl;
|
||||
server_name www.{{ config.domain_name }};
|
||||
return 301 $scheme://{{ config.domain_name }}$request_uri;
|
||||
server_name www.{{ config.mail_domain }};
|
||||
return 301 $scheme://{{ config.mail_domain }}$request_uri;
|
||||
access_log syslog:server=unix:/dev/log,facility=local7;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
{% if not disable_ipv6 %}
|
||||
listen [::]:80;
|
||||
{% endif %}
|
||||
|
||||
{% if config.tls_cert_mode == "acme" %}
|
||||
location /.well-known/acme-challenge/ {
|
||||
proxy_pass http://acmetool;
|
||||
}
|
||||
{% endif %}
|
||||
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
{% if config.tls_cert_mode == "acme" %}
|
||||
upstream acmetool {
|
||||
server 127.0.0.1:402;
|
||||
}
|
||||
{% endif %}
|
||||
}
|
||||
|
||||
@@ -4,9 +4,9 @@ Installs OpenDKIM
|
||||
|
||||
from pyinfra import host
|
||||
from pyinfra.facts.files import File
|
||||
from pyinfra.operations import apt, files, server, systemd
|
||||
from pyinfra.operations import apt, files, server
|
||||
|
||||
from cmdeploy.basedeploy import Deployer, get_resource
|
||||
from cmdeploy.basedeploy import Deployer
|
||||
|
||||
|
||||
class OpendkimDeployer(Deployer):
|
||||
@@ -25,71 +25,39 @@ class OpendkimDeployer(Deployer):
|
||||
domain = self.mail_domain
|
||||
dkim_selector = "opendkim"
|
||||
"""Configures OpenDKIM"""
|
||||
need_restart = False
|
||||
|
||||
main_config = files.template(
|
||||
src=get_resource("opendkim/opendkim.conf"),
|
||||
dest="/etc/opendkim.conf",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
self.put_template(
|
||||
"opendkim/opendkim.conf",
|
||||
"/etc/opendkim.conf",
|
||||
config={"domain_name": domain, "opendkim_selector": dkim_selector},
|
||||
)
|
||||
need_restart |= main_config.changed
|
||||
|
||||
screen_script = files.put(
|
||||
src=get_resource("opendkim/screen.lua"),
|
||||
dest="/etc/opendkim/screen.lua",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
need_restart |= screen_script.changed
|
||||
self.remove_file("/etc/opendkim/screen.lua")
|
||||
self.remove_file("/etc/opendkim/final.lua")
|
||||
|
||||
final_script = files.put(
|
||||
src=get_resource("opendkim/final.lua"),
|
||||
dest="/etc/opendkim/final.lua",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
need_restart |= final_script.changed
|
||||
|
||||
files.directory(
|
||||
name="Add opendkim directory to /etc",
|
||||
path="/etc/opendkim",
|
||||
user="opendkim",
|
||||
group="opendkim",
|
||||
self.ensure_directory(
|
||||
"/etc/opendkim",
|
||||
owner="opendkim",
|
||||
mode="750",
|
||||
present=True,
|
||||
)
|
||||
|
||||
keytable = files.template(
|
||||
src=get_resource("opendkim/KeyTable"),
|
||||
dest="/etc/dkimkeys/KeyTable",
|
||||
user="opendkim",
|
||||
group="opendkim",
|
||||
mode="644",
|
||||
self.put_template(
|
||||
"opendkim/KeyTable",
|
||||
"/etc/dkimkeys/KeyTable",
|
||||
owner="opendkim",
|
||||
config={"domain_name": domain, "opendkim_selector": dkim_selector},
|
||||
)
|
||||
need_restart |= keytable.changed
|
||||
|
||||
signing_table = files.template(
|
||||
src=get_resource("opendkim/SigningTable"),
|
||||
dest="/etc/dkimkeys/SigningTable",
|
||||
user="opendkim",
|
||||
group="opendkim",
|
||||
mode="644",
|
||||
self.put_template(
|
||||
"opendkim/SigningTable",
|
||||
"/etc/dkimkeys/SigningTable",
|
||||
owner="opendkim",
|
||||
config={"domain_name": domain, "opendkim_selector": dkim_selector},
|
||||
)
|
||||
need_restart |= signing_table.changed
|
||||
files.directory(
|
||||
name="Add opendkim socket directory to /var/spool/postfix",
|
||||
path="/var/spool/postfix/opendkim",
|
||||
user="opendkim",
|
||||
group="opendkim",
|
||||
self.ensure_directory(
|
||||
"/var/spool/postfix/opendkim",
|
||||
owner="opendkim",
|
||||
mode="750",
|
||||
present=True,
|
||||
)
|
||||
|
||||
if not host.get_fact(File, f"/etc/dkimkeys/{dkim_selector}.private"):
|
||||
@@ -102,22 +70,17 @@ class OpendkimDeployer(Deployer):
|
||||
_su_user="opendkim",
|
||||
)
|
||||
|
||||
service_file = files.put(
|
||||
name="Configure opendkim to restart once a day",
|
||||
src=get_resource("opendkim/systemd.conf"),
|
||||
dest="/etc/systemd/system/opendkim.service.d/10-prevent-memory-leak.conf",
|
||||
self.put_file(
|
||||
"opendkim/systemd.conf",
|
||||
"/etc/systemd/system/opendkim.service.d/10-prevent-memory-leak.conf",
|
||||
)
|
||||
need_restart |= service_file.changed
|
||||
|
||||
self.need_restart = need_restart
|
||||
files.file(
|
||||
name="chown opendkim: /etc/dkimkeys/opendkim.private",
|
||||
path="/etc/dkimkeys/opendkim.private",
|
||||
user="opendkim",
|
||||
group="opendkim",
|
||||
)
|
||||
|
||||
def activate(self):
|
||||
systemd.service(
|
||||
name="Start and enable OpenDKIM",
|
||||
service="opendkim.service",
|
||||
running=True,
|
||||
enabled=True,
|
||||
daemon_reload=self.need_restart,
|
||||
restarted=self.need_restart,
|
||||
)
|
||||
self.need_restart = False
|
||||
self.ensure_service("opendkim.service")
|
||||
|
||||
@@ -1,42 +0,0 @@
|
||||
mtaname = odkim.get_mtasymbol(ctx, "{daemon_name}")
|
||||
if mtaname == "ORIGINATING" then
|
||||
-- Outgoing message will be signed,
|
||||
-- no need to look for signatures.
|
||||
return nil
|
||||
end
|
||||
|
||||
nsigs = odkim.get_sigcount(ctx)
|
||||
if nsigs == nil then
|
||||
return nil
|
||||
end
|
||||
|
||||
local valid = false
|
||||
local error_msg = "No valid DKIM signature found."
|
||||
for i = 1, nsigs do
|
||||
sig = odkim.get_sighandle(ctx, i - 1)
|
||||
sigres = odkim.sig_result(sig)
|
||||
|
||||
-- All signatures that do not correspond to From:
|
||||
-- were ignored in screen.lua and return sigres -1.
|
||||
--
|
||||
-- Any valid signature that was not ignored like this
|
||||
-- means the message is acceptable.
|
||||
if sigres == 0 then
|
||||
valid = true
|
||||
else
|
||||
error_msg = "DKIM signature is invalid, error code " .. tostring(sigres) .. ", search https://github.com/trusteddomainproject/OpenDKIM/blob/master/libopendkim/dkim.h#L108"
|
||||
end
|
||||
end
|
||||
|
||||
if valid then
|
||||
-- Strip all DKIM-Signature headers after successful validation
|
||||
-- Delete in reverse order to avoid index shifting.
|
||||
for i = nsigs, 1, -1 do
|
||||
odkim.del_header(ctx, "DKIM-Signature", i)
|
||||
end
|
||||
else
|
||||
odkim.set_reply(ctx, "554", "5.7.1", error_msg)
|
||||
odkim.set_result(ctx, SMFIS_REJECT)
|
||||
end
|
||||
|
||||
return nil
|
||||
@@ -45,12 +45,6 @@ SignHeaders *,+autocrypt,+content-type
|
||||
# Default is empty.
|
||||
OversignHeaders from,reply-to,subject,date,to,cc,resent-date,resent-from,resent-sender,resent-to,resent-cc,in-reply-to,references,list-id,list-help,list-unsubscribe,list-subscribe,list-post,list-owner,list-archive,autocrypt
|
||||
|
||||
# Script to ignore signatures that do not correspond to the From: domain.
|
||||
ScreenPolicyScript /etc/opendkim/screen.lua
|
||||
|
||||
# Script to reject mails without a valid DKIM signature.
|
||||
FinalPolicyScript /etc/opendkim/final.lua
|
||||
|
||||
# In Debian, opendkim runs as user "opendkim". A umask of 007 is required when
|
||||
# using a local socket with MTAs that access the socket as a non-privileged
|
||||
# user (for example, Postfix). You may need to add user "postfix" to group
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
-- Ignore signatures that do not correspond to the From: domain.
|
||||
|
||||
from_domain = odkim.get_fromdomain(ctx)
|
||||
if from_domain == nil then
|
||||
return nil
|
||||
end
|
||||
|
||||
n = odkim.get_sigcount(ctx)
|
||||
if n == nil then
|
||||
return nil
|
||||
end
|
||||
|
||||
for i = 1, n do
|
||||
sig = odkim.get_sighandle(ctx, i - 1)
|
||||
sig_domain = odkim.sig_getdomain(sig)
|
||||
if from_domain ~= sig_domain then
|
||||
odkim.sig_ignore(sig)
|
||||
end
|
||||
end
|
||||
|
||||
return nil
|
||||
@@ -1,11 +1,10 @@
|
||||
from pyinfra.operations import apt, files, server, systemd
|
||||
from pyinfra.operations import apt, server
|
||||
|
||||
from cmdeploy.basedeploy import Deployer, get_resource
|
||||
from cmdeploy.basedeploy import Deployer
|
||||
|
||||
|
||||
class PostfixDeployer(Deployer):
|
||||
required_users = [("postfix", None, ["opendkim"])]
|
||||
daemon_reload = False
|
||||
|
||||
def __init__(self, config, disable_mail):
|
||||
self.config = config
|
||||
@@ -19,99 +18,58 @@ class PostfixDeployer(Deployer):
|
||||
|
||||
def configure(self):
|
||||
config = self.config
|
||||
need_restart = False
|
||||
|
||||
main_config = files.template(
|
||||
src=get_resource("postfix/main.cf.j2"),
|
||||
dest="/etc/postfix/main.cf",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
self.put_template(
|
||||
"postfix/main.cf.j2",
|
||||
"/etc/postfix/main.cf",
|
||||
config=config,
|
||||
disable_ipv6=config.disable_ipv6,
|
||||
)
|
||||
need_restart |= main_config.changed
|
||||
|
||||
master_config = files.template(
|
||||
src=get_resource("postfix/master.cf.j2"),
|
||||
dest="/etc/postfix/master.cf",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
self.put_template(
|
||||
"postfix/master.cf.j2",
|
||||
"/etc/postfix/master.cf",
|
||||
debug=False,
|
||||
config=config,
|
||||
)
|
||||
need_restart |= master_config.changed
|
||||
|
||||
header_cleanup = files.put(
|
||||
src=get_resource("postfix/submission_header_cleanup"),
|
||||
dest="/etc/postfix/submission_header_cleanup",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
self.put_file(
|
||||
"postfix/submission_header_cleanup",
|
||||
"/etc/postfix/submission_header_cleanup",
|
||||
)
|
||||
need_restart |= header_cleanup.changed
|
||||
self.put_file("postfix/lmtp_header_cleanup", "/etc/postfix/lmtp_header_cleanup")
|
||||
|
||||
lmtp_header_cleanup = files.put(
|
||||
src=get_resource("postfix/lmtp_header_cleanup"),
|
||||
dest="/etc/postfix/lmtp_header_cleanup",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
res = self.put_file(
|
||||
"postfix/smtp_tls_policy_map", "/etc/postfix/smtp_tls_policy_map"
|
||||
)
|
||||
need_restart |= lmtp_header_cleanup.changed
|
||||
|
||||
tls_policy_map = files.put(
|
||||
name="Upload SMTP TLS Policy that accepts self-signed certificates for IP-only hosts",
|
||||
src=get_resource("postfix/smtp_tls_policy_map"),
|
||||
dest="/etc/postfix/smtp_tls_policy_map",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
need_restart |= tls_policy_map.changed
|
||||
if tls_policy_map.changed:
|
||||
tls_policy_changed = res.changed
|
||||
if tls_policy_changed:
|
||||
server.shell(
|
||||
commands=["postmap /etc/postfix/smtp_tls_policy_map"],
|
||||
)
|
||||
|
||||
# Login map that 1:1 maps email address to login.
|
||||
login_map = files.put(
|
||||
src=get_resource("postfix/login_map"),
|
||||
dest="/etc/postfix/login_map",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
need_restart |= login_map.changed
|
||||
self.put_file("postfix/login_map", "/etc/postfix/login_map")
|
||||
|
||||
restart_conf = files.put(
|
||||
name="postfix: restart automatically on failure",
|
||||
src=get_resource("service/10_restart.conf"),
|
||||
dest="/etc/systemd/system/postfix@.service.d/10_restart.conf",
|
||||
self.put_file(
|
||||
"service/10_restart_on_failure.conf",
|
||||
"/etc/systemd/system/postfix@.service.d/10_restart.conf",
|
||||
)
|
||||
self.daemon_reload = restart_conf.changed
|
||||
|
||||
# Validate postfix configuration before restart
|
||||
if need_restart:
|
||||
if self.need_restart:
|
||||
server.shell(
|
||||
name="Validate postfix configuration",
|
||||
# Extract stderr and quit with error if non-zero
|
||||
commands=["""bash -c 'w=$(postconf 2>&1 >/dev/null); [[ -z "$w" ]] || { echo "$w"; false; }'"""],
|
||||
commands=[
|
||||
"""bash -c 'w=$(postconf 2>&1 >/dev/null); [[ -z "$w" ]] || { echo "$w"; false; }'"""
|
||||
],
|
||||
)
|
||||
self.need_restart = need_restart
|
||||
|
||||
def activate(self):
|
||||
restart = False if self.disable_mail else self.need_restart
|
||||
|
||||
systemd.service(
|
||||
name="disable postfix for now"
|
||||
if self.disable_mail
|
||||
else "Start and enable Postfix",
|
||||
service="postfix.service",
|
||||
running=False if self.disable_mail else True,
|
||||
enabled=False if self.disable_mail else True,
|
||||
restarted=restart,
|
||||
daemon_reload=self.daemon_reload,
|
||||
active = not self.disable_mail
|
||||
self.ensure_service(
|
||||
"postfix.service",
|
||||
running=active,
|
||||
enabled=active,
|
||||
)
|
||||
self.need_restart = False
|
||||
|
||||
@@ -1,2 +1,3 @@
|
||||
/^DKIM-Signature:/ IGNORE
|
||||
/^Authentication-Results:/ IGNORE
|
||||
/^Received:/ IGNORE
|
||||
|
||||
@@ -15,8 +15,8 @@ readme_directory = no
|
||||
compatibility_level = 3.6
|
||||
|
||||
# TLS parameters
|
||||
smtpd_tls_cert_file=/var/lib/acme/live/{{ config.mail_domain }}/fullchain
|
||||
smtpd_tls_key_file=/var/lib/acme/live/{{ config.mail_domain }}/privkey
|
||||
smtpd_tls_cert_file={{ config.tls_cert_path }}
|
||||
smtpd_tls_key_file={{ config.tls_key_path }}
|
||||
smtpd_tls_security_level=may
|
||||
|
||||
smtp_tls_CApath=/etc/ssl/certs
|
||||
@@ -69,15 +69,6 @@ mynetworks = 127.0.0.0/8
|
||||
{% else %}
|
||||
mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
|
||||
{% endif %}
|
||||
{% if config.addr_v4 %}
|
||||
smtp_bind_address = {{ config.addr_v4 }}
|
||||
{% endif %}
|
||||
{% if config.addr_v6 %}
|
||||
smtp_bind_address6 = {{ config.addr_v6 }}
|
||||
{% endif %}
|
||||
{% if config.addr_v4 or config.addr_v6 %}
|
||||
smtp_bind_address_enforce = yes
|
||||
{% endif %}
|
||||
mailbox_size_limit = 0
|
||||
message_size_limit = {{config.max_message_size}}
|
||||
recipient_delimiter = +
|
||||
@@ -102,3 +93,12 @@ smtpd_sender_login_maps = regexp:/etc/postfix/login_map
|
||||
# Do not lookup SMTP client hostnames to reduce delays
|
||||
# and avoid unnecessary DNS requests.
|
||||
smtpd_peername_lookup = no
|
||||
|
||||
# Use filtermail-transport to relay messages.
|
||||
# We can't force postfix to split messages per destination,
|
||||
# when specifying a custom next-hop,
|
||||
# so instead this is handled in filtermail.
|
||||
# We use LMTP instead SMTP so we can communicate per-recipient errors back to postfix.
|
||||
default_transport = lmtp-filtermail:inet:[127.0.0.1]:{{ config.filtermail_lmtp_port_transport }}
|
||||
lmtp-filtermail_initial_destination_concurrency=10000
|
||||
lmtp-filtermail_destination_concurrency_limit=10000
|
||||
|
||||
@@ -86,7 +86,6 @@ filter unix - n n - - lmtp
|
||||
# Local SMTP server for reinjecting incoming filtered mail
|
||||
127.0.0.1:{{ config.postfix_reinject_port_incoming }} inet n - n - 100 smtpd
|
||||
-o syslog_name=postfix/reinject_incoming
|
||||
-o smtpd_milters=unix:opendkim/opendkim.sock
|
||||
|
||||
# Cleanup `Received` headers for authenticated mail
|
||||
# to avoid leaking client IP.
|
||||
@@ -101,3 +100,8 @@ filter unix - n n - - lmtp
|
||||
# cannot send unprotected Subject.
|
||||
authclean unix n - - - 0 cleanup
|
||||
-o header_checks=regexp:/etc/postfix/submission_header_cleanup
|
||||
|
||||
lmtp-filtermail unix - - y - 10000 lmtp
|
||||
-o syslog_name=postfix/lmtp-filtermail
|
||||
-o lmtp_header_checks=
|
||||
-o lmtp_tls_security_level=none
|
||||
|
||||
@@ -1,2 +1,3 @@
|
||||
/^\[[^]]+\]$/ encrypt
|
||||
/^_/ encrypt
|
||||
/^nauta\.cu$/ may
|
||||
|
||||
@@ -53,31 +53,36 @@ def get_dkim_entry(mail_domain, pre_command, dkim_selector):
|
||||
print=log_progress,
|
||||
)
|
||||
except CalledProcessError:
|
||||
return
|
||||
return None, None
|
||||
dkim_value_raw = f"v=DKIM1;k=rsa;p={dkim_pubkey};s=email;t=s"
|
||||
dkim_value = '" "'.join(re.findall(".{1,255}", dkim_value_raw))
|
||||
web_dkim_value = "".join(re.findall(".{1,255}", dkim_value_raw))
|
||||
name = f"{dkim_selector}._domainkey.{mail_domain}."
|
||||
return (
|
||||
f'{dkim_selector}._domainkey.{mail_domain}. TXT "{dkim_value}"',
|
||||
f'{dkim_selector}._domainkey.{mail_domain}. TXT "{web_dkim_value}"',
|
||||
f'{name:<40} 3600 IN TXT "{dkim_value}"',
|
||||
f'{name:<40} 3600 IN TXT "{web_dkim_value}"',
|
||||
)
|
||||
|
||||
|
||||
def query_dns(typ, domain):
|
||||
# Get autoritative nameserver from the SOA record.
|
||||
soa_answers = [
|
||||
def get_authoritative_ns(domain):
|
||||
ns_replies = [
|
||||
x.split()
|
||||
for x in shell(
|
||||
f"dig -r -q {domain} -t SOA +noall +authority +answer", print=log_progress
|
||||
f"dig -r -q {domain} -t NS +noall +authority +answer", print=log_progress
|
||||
).split("\n")
|
||||
]
|
||||
soa = [a for a in soa_answers if len(a) >= 3 and a[3] == "SOA"]
|
||||
if not soa:
|
||||
filtered_replies = [a for a in ns_replies if len(a) >= 5 and a[3] == "NS"]
|
||||
if not filtered_replies:
|
||||
return
|
||||
ns = soa[0][4]
|
||||
return filtered_replies[0][4]
|
||||
|
||||
|
||||
def query_dns(typ, domain):
|
||||
ns = get_authoritative_ns(domain)
|
||||
|
||||
# Query authoritative nameserver directly to bypass DNS cache.
|
||||
res = shell(f"dig @{ns} -r -q {domain} -t {typ} +short", print=log_progress)
|
||||
direct_ns = f"@{ns}" if ns else ""
|
||||
res = shell(f"dig {direct_ns} -r -q {domain} -t {typ} +short", print=log_progress)
|
||||
return next((line for line in res.split("\n") if not line.startswith(";")), "")
|
||||
|
||||
|
||||
@@ -94,7 +99,7 @@ def check_zonefile(zonefile, verbose=True):
|
||||
if not zf_line.strip() or zf_line.startswith(";"):
|
||||
continue
|
||||
print(f"dns-checking {zf_line!r}") if verbose else log_progress("")
|
||||
zf_domain, zf_typ, zf_value = zf_line.split(maxsplit=2)
|
||||
zf_domain, _ttl, _in, zf_typ, zf_value = zf_line.split(None, 4)
|
||||
zf_domain = zf_domain.rstrip(".")
|
||||
zf_value = zf_value.strip()
|
||||
query_value = query_dns(zf_typ, zf_domain)
|
||||
|
||||
@@ -40,5 +40,5 @@ def dovecot_recalc_quota(user):
|
||||
#
|
||||
for line in output.split("\n"):
|
||||
parts = line.split()
|
||||
if parts[2] == "STORAGE":
|
||||
if len(parts) >= 6 and parts[2] == "STORAGE":
|
||||
return dict(value=int(parts[3]), limit=int(parts[4]), percent=int(parts[5]))
|
||||
|
||||
@@ -15,9 +15,8 @@ def main():
|
||||
)
|
||||
disable_mail = bool(os.environ.get("CHATMAIL_DISABLE_MAIL"))
|
||||
website_only = bool(os.environ.get("CHATMAIL_WEBSITE_ONLY"))
|
||||
docker = bool(os.environ.get("CHATMAIL_DOCKER"))
|
||||
|
||||
deploy_chatmail(config_path, disable_mail, website_only, docker)
|
||||
deploy_chatmail(config_path, disable_mail, website_only)
|
||||
|
||||
|
||||
if pyinfra.is_cli:
|
||||
|
||||
52
cmdeploy/src/cmdeploy/selfsigned/deployer.py
Normal file
52
cmdeploy/src/cmdeploy/selfsigned/deployer.py
Normal file
@@ -0,0 +1,52 @@
|
||||
import shlex
|
||||
|
||||
from pyinfra.operations import server
|
||||
|
||||
from ..basedeploy import Deployer
|
||||
|
||||
|
||||
def openssl_selfsigned_args(domain, cert_path, key_path, days=36500):
|
||||
"""Return the openssl argument list for a self-signed certificate.
|
||||
|
||||
The certificate uses an EC P-256 key with SAN entries for *domain*,
|
||||
``www.<domain>`` and ``mta-sts.<domain>``.
|
||||
"""
|
||||
return [
|
||||
"openssl", "req", "-x509",
|
||||
"-newkey", "ec", "-pkeyopt", "ec_paramgen_curve:P-256",
|
||||
"-noenc", "-days", str(days),
|
||||
"-keyout", str(key_path),
|
||||
"-out", str(cert_path),
|
||||
"-subj", f"/CN={domain}",
|
||||
# Mark as end-entity cert so it cannot be used as a CA to sign others.
|
||||
"-addext", "basicConstraints=critical,CA:FALSE",
|
||||
"-addext", "extendedKeyUsage=serverAuth,clientAuth",
|
||||
"-addext",
|
||||
f"subjectAltName=DNS:{domain},DNS:www.{domain},DNS:mta-sts.{domain}",
|
||||
]
|
||||
|
||||
|
||||
class SelfSignedTlsDeployer(Deployer):
|
||||
"""Generates a self-signed TLS certificate for all chatmail endpoints."""
|
||||
|
||||
def __init__(self, mail_domain):
|
||||
self.mail_domain = mail_domain
|
||||
self.cert_path = "/etc/ssl/certs/mailserver.pem"
|
||||
self.key_path = "/etc/ssl/private/mailserver.key"
|
||||
|
||||
|
||||
|
||||
def configure(self):
|
||||
args = openssl_selfsigned_args(
|
||||
self.mail_domain, self.cert_path, self.key_path,
|
||||
)
|
||||
cmd = shlex.join(args)
|
||||
server.shell(
|
||||
name="Generate self-signed TLS certificate if not present",
|
||||
commands=[f"[ -f {self.cert_path} ] || {cmd}"],
|
||||
)
|
||||
|
||||
def activate(self):
|
||||
pass
|
||||
|
||||
|
||||
@@ -5,5 +5,5 @@ After=network.target
|
||||
[Service]
|
||||
Type=oneshot
|
||||
User=vmail
|
||||
ExecStart=/usr/local/lib/chatmaild/venv/bin/chatmail-fsreport /usr/local/lib/chatmaild/chatmail.ini
|
||||
ExecStart=/usr/local/lib/chatmaild/venv/bin/chatmail-fsreport /usr/local/lib/chatmaild/chatmail.ini
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ Description=Chatmail dict proxy for IMAP METADATA
|
||||
[Service]
|
||||
ExecStart={execpath} /run/chatmail-metadata/metadata.socket {config_path}
|
||||
Restart=always
|
||||
RestartSec=30
|
||||
RestartSec=5
|
||||
User=vmail
|
||||
RuntimeDirectory=chatmail-metadata
|
||||
UMask=0077
|
||||
|
||||
@@ -85,16 +85,26 @@ class SSHExec:
|
||||
|
||||
|
||||
class LocalExec:
|
||||
def __init__(self, verbose=False, docker=False):
|
||||
FuncError = FuncError
|
||||
|
||||
def __init__(self, verbose=False):
|
||||
self.verbose = verbose
|
||||
self.docker = docker
|
||||
|
||||
def __call__(self, call, kwargs=None, log_callback=None):
|
||||
if kwargs is None:
|
||||
kwargs = {}
|
||||
return call(**kwargs)
|
||||
|
||||
def logged(self, call, kwargs: dict):
|
||||
title = call.__doc__
|
||||
if not title:
|
||||
title = call.__name__
|
||||
where = "locally"
|
||||
if self.docker:
|
||||
if call == remote.rdns.perform_initial_checks:
|
||||
kwargs["pre_command"] = "docker exec chatmail "
|
||||
where = "in docker"
|
||||
if self.verbose:
|
||||
print(f"Running {where}: {call.__name__}(**{kwargs})")
|
||||
return call(**kwargs)
|
||||
print_stderr(f"Running {where}: {title}(**{kwargs})")
|
||||
return self(call, kwargs, log_callback=print_stderr)
|
||||
else:
|
||||
print_stderr(title, end="")
|
||||
res = self(call, kwargs, log_callback=remote.rshell.log_progress)
|
||||
print_stderr()
|
||||
return res
|
||||
|
||||
@@ -1,17 +1,18 @@
|
||||
; Required DNS entries for chatmail servers
|
||||
zftest.testrun.org. A 135.181.204.127
|
||||
zftest.testrun.org. AAAA 2a01:4f9:c012:52f4::1
|
||||
zftest.testrun.org. MX 10 zftest.testrun.org.
|
||||
_mta-sts.zftest.testrun.org. TXT "v=STSv1; id=202403211706"
|
||||
mta-sts.zftest.testrun.org. CNAME zftest.testrun.org.
|
||||
www.zftest.testrun.org. CNAME zftest.testrun.org.
|
||||
opendkim._domainkey.zftest.testrun.org. TXT "v=DKIM1;k=rsa;p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoYt82CVUyz2ouaqjX2kB+5J80knAyoOU3MGU5aWppmwUwwTvj/oSTSpkc5JMtVTRmKKr8NUDWAL1Yw7dfGqqPHdHfwwjS3BIvDzYx+hzgtz62RnfNgV+/2MAoNpfX7cAFIHdRzEHNtwugc3RDLquqPoupAE3Y2YRw2T5zG5fILh4vwIcJZL5Uq6B92j8wwJqOex" "33n+vm1NKQ9rxo/UsHAmZlJzpooXcG/4igTBxJyJlamVSRR6N7Nul1v//YJb7J6v2o0iPHW6uE0StzKaPPNC2IVosSRFbD9H2oqppltptFSNPlI0E+t0JBWHem6YK7xcugiO3ImMCaaU8g6Jt/wIDAQAB;s=email;t=s"
|
||||
; Required DNS entries
|
||||
zftest.testrun.org. 3600 IN A 135.181.204.127
|
||||
zftest.testrun.org. 3600 IN AAAA 2a01:4f9:c012:52f4::1
|
||||
zftest.testrun.org. 3600 IN MX 10 zftest.testrun.org.
|
||||
_mta-sts.zftest.testrun.org. 3600 IN TXT "v=STSv1; id=202403211706"
|
||||
mta-sts.zftest.testrun.org. 3600 IN CNAME zftest.testrun.org.
|
||||
www.zftest.testrun.org. 3600 IN CNAME zftest.testrun.org.
|
||||
opendkim._domainkey.zftest.testrun.org. 3600 IN TXT "v=DKIM1;k=rsa;p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoYt82CVUyz2ouaqjX2kB+5J80knAyoOU3MGU5aWppmwUwwTvj/oSTSpkc5JMtVTRmKKr8NUDWAL1Yw7dfGqqPHdHfwwjS3BIvDzYx+hzgtz62RnfNgV+/2MAoNpfX7cAFIHdRzEHNtwugc3RDLquqPoupAE3Y2YRw2T5zG5fILh4vwIcJZL5Uq6B92j8wwJqOex" "33n+vm1NKQ9rxo/UsHAmZlJzpooXcG/4igTBxJyJlamVSRR6N7Nul1v//YJb7J6v2o0iPHW6uE0StzKaPPNC2IVosSRFbD9H2oqppltptFSNPlI0E+t0JBWHem6YK7xcugiO3ImMCaaU8g6Jt/wIDAQAB;s=email;t=s"
|
||||
|
||||
; Recommended DNS entries
|
||||
_submission._tcp.zftest.testrun.org. SRV 0 1 587 zftest.testrun.org.
|
||||
_submissions._tcp.zftest.testrun.org. SRV 0 1 465 zftest.testrun.org.
|
||||
_imap._tcp.zftest.testrun.org. SRV 0 1 143 zftest.testrun.org.
|
||||
_imaps._tcp.zftest.testrun.org. SRV 0 1 993 zftest.testrun.org.
|
||||
zftest.testrun.org. CAA 0 issue "letsencrypt.org;accounturi=https://acme-v02.api.letsencrypt.org/acme/acct/1371472956"
|
||||
zftest.testrun.org. TXT "v=spf1 a:zftest.testrun.org ~all"
|
||||
_dmarc.zftest.testrun.org. TXT "v=DMARC1;p=reject;adkim=s;aspf=s"
|
||||
_adsp._domainkey.zftest.testrun.org. TXT "dkim=discardable"
|
||||
zftest.testrun.org. 3600 IN TXT "v=spf1 a ~all"
|
||||
_dmarc.zftest.testrun.org. 3600 IN TXT "v=DMARC1;p=reject;adkim=s;aspf=s"
|
||||
zftest.testrun.org. 3600 IN CAA 0 issue "letsencrypt.org;accounturi=https://acme-v02.api.letsencrypt.org/acme/acct/1371472956"
|
||||
_adsp._domainkey.zftest.testrun.org. 3600 IN TXT "dkim=discardable"
|
||||
_submission._tcp.zftest.testrun.org. 3600 IN SRV 0 1 587 zftest.testrun.org.
|
||||
_submissions._tcp.zftest.testrun.org. 3600 IN SRV 0 1 465 zftest.testrun.org.
|
||||
_imap._tcp.zftest.testrun.org. 3600 IN SRV 0 1 143 zftest.testrun.org.
|
||||
_imaps._tcp.zftest.testrun.org. 3600 IN SRV 0 1 993 zftest.testrun.org.
|
||||
|
||||
@@ -41,9 +41,9 @@ class TestDC:
|
||||
|
||||
def dc_ping_pong():
|
||||
chat.send_text("ping")
|
||||
msg = ac2._evtracker.wait_next_incoming_message()
|
||||
msg.chat.send_text("pong")
|
||||
ac1._evtracker.wait_next_incoming_message()
|
||||
msg = ac2.wait_for_incoming_msg()
|
||||
msg.get_snapshot().chat.send_text("pong")
|
||||
ac1.wait_for_incoming_msg()
|
||||
|
||||
benchmark(dc_ping_pong, 5)
|
||||
|
||||
@@ -55,6 +55,6 @@ class TestDC:
|
||||
for i in range(10):
|
||||
chat.send_text(f"hello {i}")
|
||||
for i in range(10):
|
||||
ac2._evtracker.wait_next_incoming_message()
|
||||
ac2.wait_for_incoming_msg()
|
||||
|
||||
benchmark(dc_send_10_receive_10, 5)
|
||||
benchmark(dc_send_10_receive_10, 5, cooldown="auto")
|
||||
|
||||
@@ -89,7 +89,9 @@ def test_concurrent_logins_same_account(
|
||||
assert login_results.get()
|
||||
|
||||
|
||||
def test_no_vrfy(chatmail_config):
|
||||
def test_no_vrfy(cmfactory, chatmail_config):
|
||||
ac = cmfactory.get_online_account()
|
||||
addr = ac.get_config("addr")
|
||||
domain = chatmail_config.mail_domain
|
||||
|
||||
s = smtplib.SMTP(domain)
|
||||
@@ -98,7 +100,7 @@ def test_no_vrfy(chatmail_config):
|
||||
s.putcmd("vrfy", f"wrongaddress@{chatmail_config.mail_domain}")
|
||||
result = s.getreply()
|
||||
print(result)
|
||||
s.putcmd("vrfy", f"echo@{chatmail_config.mail_domain}")
|
||||
s.putcmd("vrfy", addr)
|
||||
result2 = s.getreply()
|
||||
print(result2)
|
||||
assert result[0] == result2[0] == 252
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import pytest
|
||||
import requests
|
||||
|
||||
from cmdeploy.genqr import gen_qr_png_data
|
||||
@@ -8,18 +9,33 @@ def test_gen_qr_png_data(maildomain):
|
||||
assert data
|
||||
|
||||
|
||||
@pytest.mark.filterwarnings("ignore::urllib3.exceptions.InsecureRequestWarning")
|
||||
def test_fastcgi_working(maildomain, chatmail_config):
|
||||
url = f"https://{maildomain}/new"
|
||||
print(url)
|
||||
res = requests.post(url)
|
||||
verify = chatmail_config.tls_cert_mode == "acme"
|
||||
res = requests.post(url, verify=verify)
|
||||
assert maildomain in res.json().get("email")
|
||||
assert len(res.json().get("password")) > chatmail_config.password_min_length
|
||||
|
||||
|
||||
def test_newemail_configure(maildomain, rpc):
|
||||
@pytest.mark.filterwarnings("ignore::urllib3.exceptions.InsecureRequestWarning")
|
||||
def test_newemail_configure(maildomain, rpc, chatmail_config):
|
||||
"""Test configuring accounts by scanning a QR code works."""
|
||||
url = f"DCACCOUNT:https://{maildomain}/new"
|
||||
for i in range(3):
|
||||
account_id = rpc.add_account()
|
||||
rpc.set_config_from_qr(account_id, url)
|
||||
rpc.configure(account_id)
|
||||
if chatmail_config.tls_cert_mode == "self":
|
||||
# deltachat core's rustls rejects self-signed HTTPS certs during
|
||||
# set_config_from_qr, so fetch credentials via requests instead
|
||||
res = requests.post(f"https://{maildomain}/new", verify=False)
|
||||
data = res.json()
|
||||
rpc.add_or_update_transport(account_id, {
|
||||
"addr": data["email"],
|
||||
"password": data["password"],
|
||||
"imapServer": maildomain,
|
||||
"smtpServer": maildomain,
|
||||
"certificateChecks": "acceptInvalidCertificates",
|
||||
})
|
||||
else:
|
||||
rpc.add_transport_from_qr(account_id, url)
|
||||
|
||||
@@ -7,13 +7,13 @@ import time
|
||||
import pytest
|
||||
|
||||
from cmdeploy import remote
|
||||
from cmdeploy.sshexec import SSHExec
|
||||
from cmdeploy.cmdeploy import get_sshexec
|
||||
|
||||
|
||||
class TestSSHExecutor:
|
||||
@pytest.fixture(scope="class")
|
||||
def sshexec(self, sshdomain):
|
||||
return SSHExec(sshdomain)
|
||||
return get_sshexec(sshdomain)
|
||||
|
||||
def test_ls(self, sshexec):
|
||||
out = sshexec(call=remote.rdns.shell, kwargs=dict(command="ls"))
|
||||
@@ -27,6 +27,7 @@ class TestSSHExecutor:
|
||||
assert res["A"] or res["AAAA"]
|
||||
|
||||
def test_logged(self, sshexec, maildomain, capsys):
|
||||
sshexec.verbose = False
|
||||
sshexec.logged(
|
||||
remote.rdns.perform_initial_checks, kwargs=dict(mail_domain=maildomain)
|
||||
)
|
||||
@@ -52,6 +53,8 @@ class TestSSHExecutor:
|
||||
remote.rdns.perform_initial_checks,
|
||||
kwargs=dict(mail_domain=None),
|
||||
)
|
||||
except AssertionError:
|
||||
pass
|
||||
except sshexec.FuncError as e:
|
||||
assert "rdns.py" in str(e)
|
||||
assert "AssertionError" in str(e)
|
||||
@@ -68,6 +71,44 @@ class TestSSHExecutor:
|
||||
assert (now - since_date).total_seconds() < 60 * 60 * 51
|
||||
|
||||
|
||||
def test_dovecot_main_process_matches_installed_binary(sshdomain):
|
||||
sshexec = get_sshexec(sshdomain)
|
||||
main_pid = int(
|
||||
sshexec(
|
||||
call=remote.rshell.shell,
|
||||
kwargs=dict(
|
||||
command="timeout 10 systemctl show -p MainPID --value dovecot.service"
|
||||
),
|
||||
).strip()
|
||||
)
|
||||
assert main_pid != 0, "dovecot.service MainPID is 0 -- service not running?"
|
||||
|
||||
exe = sshexec(
|
||||
call=remote.rshell.shell,
|
||||
kwargs=dict(command=f"timeout 10 readlink /proc/{main_pid}/exe"),
|
||||
).strip()
|
||||
status_text = sshexec(
|
||||
call=remote.rshell.shell,
|
||||
kwargs=dict(
|
||||
command="timeout 10 systemctl show -p StatusText --value dovecot.service"
|
||||
),
|
||||
).strip()
|
||||
installed_version = sshexec(
|
||||
call=remote.rshell.shell, kwargs=dict(command="timeout 10 dovecot --version")
|
||||
).strip()
|
||||
|
||||
assert not exe.endswith("(deleted)"), (
|
||||
f"running dovecot binary was deleted (stale after upgrade): {exe}"
|
||||
)
|
||||
expected_status_text = f"v{installed_version}"
|
||||
assert status_text == expected_status_text or status_text.startswith(
|
||||
f"{expected_status_text} "
|
||||
), (
|
||||
f"dovecot status version mismatch: "
|
||||
f"StatusText={status_text!r}, installed={installed_version!r}"
|
||||
)
|
||||
|
||||
|
||||
def test_timezone_env(remote):
|
||||
for line in remote.iter_output("env"):
|
||||
print(line)
|
||||
@@ -83,10 +124,8 @@ def test_remote(remote, imap_or_smtp):
|
||||
|
||||
|
||||
def test_use_two_chatmailservers(cmfactory, maildomain2):
|
||||
ac1 = cmfactory.new_online_configuring_account(cache=False)
|
||||
cmfactory.switch_maildomain(maildomain2)
|
||||
ac2 = cmfactory.new_online_configuring_account(cache=False)
|
||||
cmfactory.bring_accounts_online()
|
||||
ac1 = cmfactory.get_online_account()
|
||||
ac2 = cmfactory.get_online_account(domain=maildomain2)
|
||||
cmfactory.get_accepted_chat(ac1, ac2)
|
||||
domain1 = ac1.get_config("addr").split("@")[1]
|
||||
domain2 = ac2.get_config("addr").split("@")[1]
|
||||
@@ -146,7 +185,7 @@ def test_reject_missing_dkim(cmsetup, maildata, from_addr):
|
||||
conn.starttls()
|
||||
|
||||
with conn as s:
|
||||
with pytest.raises(smtplib.SMTPDataError, match="No valid DKIM signature"):
|
||||
with pytest.raises(smtplib.SMTPDataError, match="No DKIM signature found"):
|
||||
s.sendmail(from_addr=from_addr, to_addrs=recipient.addr, msg=msg)
|
||||
|
||||
|
||||
@@ -182,7 +221,6 @@ def test_rewrite_subject(cmsetup, maildata):
|
||||
assert "Subject: Unencrypted subject" not in rcvd_msg
|
||||
|
||||
|
||||
@pytest.mark.slow
|
||||
def test_exceed_rate_limit(cmsetup, gencreds, maildata, chatmail_config):
|
||||
"""Test that the per-account send-mail limit is exceeded."""
|
||||
user1, user2 = cmsetup.gen_users(2)
|
||||
@@ -205,7 +243,6 @@ def test_exceed_rate_limit(cmsetup, gencreds, maildata, chatmail_config):
|
||||
pytest.fail("Rate limit was not exceeded")
|
||||
|
||||
|
||||
@pytest.mark.slow
|
||||
def test_expunged(remote, chatmail_config):
|
||||
outdated_days = int(chatmail_config.delete_mails_after) + 1
|
||||
find_cmds = [
|
||||
@@ -218,7 +255,7 @@ def test_expunged(remote, chatmail_config):
|
||||
]
|
||||
outdated_days = int(chatmail_config.delete_large_after) + 1
|
||||
find_cmds.append(
|
||||
"find {chatmail_config.mailboxes_dir} -path '*/cur/*' -mtime +{outdated_days} -size +200k -type f"
|
||||
f"find {chatmail_config.mailboxes_dir} -path '*/cur/*' -mtime +{outdated_days} -size +200k -type f"
|
||||
)
|
||||
for cmd in find_cmds:
|
||||
for line in remote.iter_output(cmd):
|
||||
|
||||
@@ -6,16 +6,17 @@ import imap_tools
|
||||
import pytest
|
||||
import requests
|
||||
|
||||
from cmdeploy.cmdeploy import get_sshexec
|
||||
from cmdeploy.remote import rshell
|
||||
from cmdeploy.sshexec import SSHExec
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def imap_mailbox(cmfactory):
|
||||
def imap_mailbox(cmfactory, ssl_context):
|
||||
(ac1,) = cmfactory.get_online_accounts(1)
|
||||
user = ac1.get_config("addr")
|
||||
password = ac1.get_config("mail_pw")
|
||||
mailbox = imap_tools.MailBox(user.split("@")[1])
|
||||
host = user.split("@")[1]
|
||||
mailbox = imap_tools.MailBox(host, ssl_context=ssl_context)
|
||||
mailbox.login(user, password)
|
||||
mailbox.dc_ac = ac1
|
||||
return mailbox
|
||||
@@ -26,6 +27,7 @@ class TestMetadataTokens:
|
||||
|
||||
def test_set_get_metadata(self, imap_mailbox):
|
||||
"set and get metadata token for an account"
|
||||
time.sleep(5) # make sure Metadata service had a chance to restart
|
||||
client = imap_mailbox.client
|
||||
client.send(b'a01 SETMETADATA INBOX (/private/devicetoken "1111" )\n')
|
||||
res = client.readline()
|
||||
@@ -61,8 +63,8 @@ class TestEndToEndDeltaChat:
|
||||
chat.send_text("message0")
|
||||
|
||||
lp.sec("wait for ac2 to receive message")
|
||||
msg2 = ac2._evtracker.wait_next_incoming_message()
|
||||
assert msg2.text == "message0"
|
||||
msg2 = ac2.wait_for_incoming_msg()
|
||||
assert msg2.get_snapshot().text == "message0"
|
||||
|
||||
def test_exceed_quota(
|
||||
self, cmfactory, lp, tmpdir, remote, chatmail_config, sshdomain
|
||||
@@ -90,45 +92,41 @@ class TestEndToEndDeltaChat:
|
||||
lp.sec(f"filling remote inbox for {user}")
|
||||
fn = f"7743102289.M843172P2484002.c20,S={quota},W=2398:2,"
|
||||
path = chatmail_config.mailboxes_dir.joinpath(user, "cur", fn)
|
||||
sshexec = SSHExec(sshdomain)
|
||||
sshexec = get_sshexec(sshdomain)
|
||||
sshexec(call=rshell.write_numbytes, kwargs=dict(path=str(path), num=120))
|
||||
res = sshexec(call=rshell.dovecot_recalc_quota, kwargs=dict(user=user))
|
||||
assert res["percent"] >= 100
|
||||
|
||||
lp.sec("ac2: check quota is triggered")
|
||||
|
||||
starting = True
|
||||
for line in remote.iter_output("journalctl -n0 -f -u dovecot"):
|
||||
if starting:
|
||||
chat.send_text("hello")
|
||||
starting = False
|
||||
def send_hello():
|
||||
chat.send_text("hello")
|
||||
|
||||
for line in remote.iter_output(
|
||||
"journalctl -n1 -f -u dovecot", ready=send_hello
|
||||
):
|
||||
if user not in line:
|
||||
# print(line)
|
||||
continue
|
||||
if "quota exceeded" in line:
|
||||
return
|
||||
|
||||
def test_securejoin(self, cmfactory, lp, maildomain2):
|
||||
ac1 = cmfactory.new_online_configuring_account(cache=False)
|
||||
cmfactory.switch_maildomain(maildomain2)
|
||||
ac2 = cmfactory.new_online_configuring_account(cache=False)
|
||||
cmfactory.bring_accounts_online()
|
||||
ac1 = cmfactory.get_online_account()
|
||||
ac2 = cmfactory.get_online_account(domain=maildomain2)
|
||||
|
||||
lp.sec("ac1: create QR code and let ac2 scan it, starting the securejoin")
|
||||
qr = ac1.get_setup_contact_qr()
|
||||
qr = ac1.get_qr_code()
|
||||
|
||||
lp.sec("ac2: start QR-code based setup contact protocol")
|
||||
ch = ac2.qr_setup_contact(qr)
|
||||
ch = ac2.secure_join(qr)
|
||||
assert ch.id >= 10
|
||||
ac1._evtracker.wait_securejoin_inviter_progress(1000)
|
||||
ac1.wait_for_securejoin_inviter_success()
|
||||
|
||||
def test_dkim_header_stripped(self, cmfactory, maildomain2, lp, imap_mailbox):
|
||||
"""Test that if a DC address receives a message, it has no
|
||||
DKIM-Signature and Authentication-Results headers."""
|
||||
ac1 = cmfactory.new_online_configuring_account(cache=False)
|
||||
cmfactory.switch_maildomain(maildomain2)
|
||||
ac2 = cmfactory.new_online_configuring_account(cache=False)
|
||||
cmfactory.bring_accounts_online()
|
||||
ac1 = cmfactory.get_online_account()
|
||||
ac2 = cmfactory.get_online_account(domain=maildomain2)
|
||||
chat = cmfactory.get_accepted_chat(ac1, imap_mailbox.dc_ac)
|
||||
chat.send_text("message0")
|
||||
chat2 = cmfactory.get_accepted_chat(ac2, imap_mailbox.dc_ac)
|
||||
@@ -145,33 +143,32 @@ class TestEndToEndDeltaChat:
|
||||
assert "dkim-signature" not in msg.headers
|
||||
|
||||
def test_read_receipts_between_instances(self, cmfactory, lp, maildomain2):
|
||||
ac1 = cmfactory.new_online_configuring_account(cache=False)
|
||||
cmfactory.switch_maildomain(maildomain2)
|
||||
ac2 = cmfactory.new_online_configuring_account(cache=False)
|
||||
cmfactory.bring_accounts_online()
|
||||
ac1 = cmfactory.get_online_account()
|
||||
ac2 = cmfactory.get_online_account(domain=maildomain2)
|
||||
|
||||
lp.sec("setup encrypted comms between ac1 and ac2 on different instances")
|
||||
qr = ac1.get_setup_contact_qr()
|
||||
ch = ac2.qr_setup_contact(qr)
|
||||
qr = ac1.get_qr_code()
|
||||
ch = ac2.secure_join(qr)
|
||||
assert ch.id >= 10
|
||||
ac1._evtracker.wait_securejoin_inviter_progress(1000)
|
||||
ac1.wait_for_securejoin_inviter_success()
|
||||
|
||||
lp.sec("ac1 sends a message and ac2 marks it as seen")
|
||||
chat = ac1.create_chat(ac2)
|
||||
msg = chat.send_text("hi")
|
||||
m = ac2._evtracker.wait_next_incoming_message()
|
||||
m = ac2.wait_for_incoming_msg()
|
||||
m.mark_seen()
|
||||
# we can only indirectly wait for mark-seen to cause an smtp-error
|
||||
lp.sec("try to wait for markseen to complete and check error states")
|
||||
deadline = time.time() + 3.1
|
||||
while time.time() < deadline:
|
||||
msgs = m.chat.get_messages()
|
||||
m_snap = m.get_snapshot()
|
||||
msgs = m_snap.chat.get_messages()
|
||||
for msg in msgs:
|
||||
assert "error" not in m.get_message_info()
|
||||
assert "error" not in m.get_info()
|
||||
time.sleep(1)
|
||||
|
||||
|
||||
def test_hide_senders_ip_address(cmfactory):
|
||||
def test_hide_senders_ip_address(cmfactory, ssl_context):
|
||||
public_ip = requests.get("http://icanhazip.com").content.decode().strip()
|
||||
assert ipaddress.ip_address(public_ip)
|
||||
|
||||
@@ -179,7 +176,12 @@ def test_hide_senders_ip_address(cmfactory):
|
||||
chat = cmfactory.get_accepted_chat(user1, user2)
|
||||
|
||||
chat.send_text("testing submission header cleanup")
|
||||
user2._evtracker.wait_next_incoming_message()
|
||||
user2.direct_imap.select_folder("Inbox")
|
||||
msg = user2.direct_imap.get_all_messages()[0]
|
||||
assert public_ip not in msg.obj.as_string()
|
||||
user2.wait_for_incoming_msg()
|
||||
addr = user2.get_config("addr")
|
||||
host = addr.split("@")[1]
|
||||
pw = user2.get_config("mail_pw")
|
||||
mailbox = imap_tools.MailBox(host, ssl_context=ssl_context)
|
||||
mailbox.login(addr, pw)
|
||||
msgs = list(mailbox.fetch(mark_seen=False))
|
||||
assert msgs, "expected at least one message"
|
||||
assert public_ip not in msgs[0].obj.as_string()
|
||||
|
||||
@@ -5,7 +5,11 @@ from cmdeploy.cmdeploy import main
|
||||
|
||||
def test_status_cmd(chatmail_config, capsys, request):
|
||||
os.chdir(request.config.invocation_params.dir)
|
||||
assert main(["status"]) == 0
|
||||
command = ["status"]
|
||||
if os.getenv("CHATMAIL_SSH"):
|
||||
command.append("--ssh-host")
|
||||
command.append(os.getenv("CHATMAIL_SSH"))
|
||||
assert main(command) == 0
|
||||
status_out = capsys.readouterr()
|
||||
print(status_out.out)
|
||||
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
import imaplib
|
||||
import io
|
||||
import ipaddress
|
||||
import itertools
|
||||
import os
|
||||
import random
|
||||
import smtplib
|
||||
import ssl
|
||||
import subprocess
|
||||
import time
|
||||
from pathlib import Path
|
||||
@@ -14,10 +15,12 @@ from chatmaild.config import read_config
|
||||
conftestdir = Path(__file__).parent
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
parser.addoption(
|
||||
"--slow", action="store_true", default=False, help="also run slow tests"
|
||||
)
|
||||
def _is_ip(domain):
|
||||
try:
|
||||
ipaddress.ip_address(domain)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
@@ -27,24 +30,29 @@ def pytest_configure(config):
|
||||
)
|
||||
|
||||
|
||||
def pytest_runtest_setup(item):
|
||||
markers = list(item.iter_markers(name="slow"))
|
||||
if markers:
|
||||
if not item.config.getoption("--slow"):
|
||||
pytest.skip("skipping slow test, use --slow to run")
|
||||
def _get_chatmail_config():
|
||||
inipath = os.environ.get("CHATMAIL_INI")
|
||||
if inipath:
|
||||
path = Path(inipath).resolve()
|
||||
return read_config(path), path
|
||||
|
||||
current = Path().resolve()
|
||||
while 1:
|
||||
path = current.joinpath("chatmail.ini").resolve()
|
||||
if path.exists():
|
||||
return read_config(path), path
|
||||
if current == current.parent:
|
||||
break
|
||||
current = current.parent
|
||||
return None, None
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def chatmail_config(pytestconfig):
|
||||
current = basedir = Path().resolve()
|
||||
while 1:
|
||||
path = current.joinpath("chatmail.ini").resolve()
|
||||
if path.exists():
|
||||
return read_config(path)
|
||||
if current == current.parent:
|
||||
break
|
||||
current = current.parent
|
||||
|
||||
config, path = _get_chatmail_config()
|
||||
if config:
|
||||
return config
|
||||
basedir = Path().resolve()
|
||||
pytest.skip(f"no chatmail.ini file found in {basedir} or parent dirs")
|
||||
|
||||
|
||||
@@ -72,10 +80,17 @@ def sshdomain2(maildomain2):
|
||||
|
||||
|
||||
def pytest_report_header():
|
||||
domain = os.environ.get("CHATMAIL_DOMAIN")
|
||||
if domain:
|
||||
text = f"chatmail test instance: {domain}"
|
||||
return ["-" * len(text), text, "-" * len(text)]
|
||||
config, path = _get_chatmail_config()
|
||||
domain2 = os.environ.get("CHATMAIL_DOMAIN2", "NOT SET")
|
||||
domain = config.mail_domain if config else "NOT SET"
|
||||
path = path if path else "NOT SET"
|
||||
|
||||
lines = [
|
||||
f"chatmail.ini {domain} location: {path}",
|
||||
f"chatmail2: {domain2}",
|
||||
]
|
||||
sep = "-" * max(map(len, lines))
|
||||
return [sep, *lines, sep]
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
@@ -90,15 +105,22 @@ def cm_data(request):
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def benchmark(request):
|
||||
def bench(func, num, name=None, reportfunc=None):
|
||||
def benchmark(request, chatmail_config):
|
||||
def bench(func, num, name=None, reportfunc=None, cooldown=0.0):
|
||||
if name is None:
|
||||
name = func.__name__
|
||||
if cooldown == "auto":
|
||||
per_minute = max(chatmail_config.max_user_send_per_minute, 1)
|
||||
cooldown = chatmail_config.max_user_send_burst_size * 60 / per_minute
|
||||
|
||||
durations = []
|
||||
for i in range(num):
|
||||
now = time.time()
|
||||
func()
|
||||
durations.append(time.time() - now)
|
||||
if cooldown > 0 and i + 1 < num:
|
||||
# Keep post-run cooldown out of measured benchmark duration.
|
||||
time.sleep(cooldown)
|
||||
durations.sort()
|
||||
request.config._benchresults[name] = (reportfunc, durations)
|
||||
|
||||
@@ -144,15 +166,25 @@ def pytest_terminal_summary(terminalreporter):
|
||||
tr.write_line(line)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def imap(maildomain):
|
||||
return ImapConn(maildomain)
|
||||
@pytest.fixture(scope="session")
|
||||
def ssl_context(chatmail_config):
|
||||
if chatmail_config.tls_cert_mode == "self":
|
||||
ctx = ssl.create_default_context()
|
||||
ctx.check_hostname = False
|
||||
ctx.verify_mode = ssl.CERT_NONE
|
||||
return ctx
|
||||
return None
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def make_imap_connection(maildomain):
|
||||
def imap(maildomain, ssl_context):
|
||||
return ImapConn(maildomain, ssl_context=ssl_context)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def make_imap_connection(maildomain, ssl_context):
|
||||
def make_imap_connection():
|
||||
conn = ImapConn(maildomain)
|
||||
conn = ImapConn(maildomain, ssl_context=ssl_context)
|
||||
conn.connect()
|
||||
return conn
|
||||
|
||||
@@ -164,12 +196,13 @@ class ImapConn:
|
||||
logcmd = "journalctl -f -u dovecot"
|
||||
name = "dovecot"
|
||||
|
||||
def __init__(self, host):
|
||||
def __init__(self, host, ssl_context=None):
|
||||
self.host = host
|
||||
self.ssl_context = ssl_context
|
||||
|
||||
def connect(self):
|
||||
print(f"imap-connect {self.host}")
|
||||
self.conn = imaplib.IMAP4_SSL(self.host)
|
||||
self.conn = imaplib.IMAP4_SSL(self.host, ssl_context=self.ssl_context)
|
||||
|
||||
def login(self, user, password):
|
||||
print(f"imap-login {user!r} {password!r}")
|
||||
@@ -195,14 +228,14 @@ class ImapConn:
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def smtp(maildomain):
|
||||
return SmtpConn(maildomain)
|
||||
def smtp(maildomain, ssl_context):
|
||||
return SmtpConn(maildomain, ssl_context=ssl_context)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def make_smtp_connection(maildomain):
|
||||
def make_smtp_connection(maildomain, ssl_context):
|
||||
def make_smtp_connection():
|
||||
conn = SmtpConn(maildomain)
|
||||
conn = SmtpConn(maildomain, ssl_context=ssl_context)
|
||||
conn.connect()
|
||||
return conn
|
||||
|
||||
@@ -214,12 +247,14 @@ class SmtpConn:
|
||||
logcmd = "journalctl -f -t postfix/smtpd -t postfix/smtp -t postfix/lmtp"
|
||||
name = "postfix"
|
||||
|
||||
def __init__(self, host):
|
||||
def __init__(self, host, ssl_context=None):
|
||||
self.host = host
|
||||
self.ssl_context = ssl_context
|
||||
|
||||
def connect(self):
|
||||
print(f"smtp-connect {self.host}")
|
||||
self.conn = smtplib.SMTP_SSL(self.host)
|
||||
context = self.ssl_context or ssl.create_default_context()
|
||||
self.conn = smtplib.SMTP_SSL(self.host, context=context)
|
||||
|
||||
def login(self, user, password):
|
||||
print(f"smtp-login {user!r} {password!r}")
|
||||
@@ -243,6 +278,7 @@ def gencreds(chatmail_config):
|
||||
|
||||
def gen(domain=None):
|
||||
domain = domain if domain else chatmail_config.mail_domain
|
||||
addr_domain = f"[{domain}]" if _is_ip(domain) else domain
|
||||
while 1:
|
||||
num = next(count)
|
||||
alphanumeric = "abcdefghijklmnopqrstuvwxyz1234567890"
|
||||
@@ -256,98 +292,161 @@ def gencreds(chatmail_config):
|
||||
password = "".join(
|
||||
random.choices(alphanumeric, k=chatmail_config.password_min_length)
|
||||
)
|
||||
yield f"{user}@{domain}", f"{password}"
|
||||
yield f"{user}@{addr_domain}", f"{password}"
|
||||
|
||||
return lambda domain=None: next(gen(domain))
|
||||
|
||||
|
||||
#
|
||||
# Delta Chat testplugin re-use
|
||||
# Delta Chat RPC-based test support
|
||||
# use the cmfactory fixture to get chatmail instance accounts
|
||||
#
|
||||
|
||||
from deltachat_rpc_client import DeltaChat, Rpc
|
||||
|
||||
class ChatmailTestProcess:
|
||||
"""Provider for chatmail instance accounts as used by deltachat.testplugin.acfactory"""
|
||||
|
||||
def __init__(self, pytestconfig, maildomain, gencreds):
|
||||
self.pytestconfig = pytestconfig
|
||||
self.maildomain = maildomain
|
||||
assert "." in self.maildomain, maildomain
|
||||
class ChatmailACFactory:
|
||||
"""RPC-based account factory for chatmail testing."""
|
||||
|
||||
def __init__(self, rpc, maildomain, gencreds, chatmail_config):
|
||||
self.dc = DeltaChat(rpc)
|
||||
self.rpc = rpc
|
||||
self._maildomain = maildomain
|
||||
self.gencreds = gencreds
|
||||
self._addr2files = {}
|
||||
self.chatmail_config = chatmail_config
|
||||
|
||||
def get_liveconfig_producer(self):
|
||||
while 1:
|
||||
user, password = self.gencreds(self.maildomain)
|
||||
config = {
|
||||
"addr": user,
|
||||
"mail_pw": password,
|
||||
}
|
||||
# speed up account configuration
|
||||
config["mail_server"] = self.maildomain
|
||||
config["send_server"] = self.maildomain
|
||||
yield config
|
||||
def _make_transport(self, domain):
|
||||
"""Build a transport config dict for the given domain."""
|
||||
addr, password = self.gencreds(domain)
|
||||
transport = {
|
||||
"addr": addr,
|
||||
"password": password,
|
||||
# Setting server explicitly skips requesting autoconfig XML,
|
||||
# see https://datatracker.ietf.org/doc/draft-ietf-mailmaint-autoconfig/
|
||||
"imapServer": domain,
|
||||
"smtpServer": domain,
|
||||
}
|
||||
if self.chatmail_config.tls_cert_mode == "self":
|
||||
transport["certificateChecks"] = "acceptInvalidCertificates"
|
||||
return transport
|
||||
|
||||
def cache_maybe_retrieve_configured_db_files(self, cache_addr, db_target_path):
|
||||
pass
|
||||
def get_online_account(self, domain=None):
|
||||
"""Create, configure and bring online a single account."""
|
||||
return self.get_online_accounts(1, domain)[0]
|
||||
|
||||
def cache_maybe_store_configured_db_files(self, acc):
|
||||
pass
|
||||
def get_online_accounts(self, num, domain=None):
|
||||
"""Create multiple online accounts in parallel."""
|
||||
domain = domain or self._maildomain
|
||||
futures = []
|
||||
accounts = []
|
||||
for _ in range(num):
|
||||
account = self.dc.add_account()
|
||||
addr, password = self.gencreds(domain)
|
||||
if _is_ip(domain):
|
||||
# Use DCLOGIN scheme with explicit server hosts,
|
||||
# matching how madmail presents its addresses to users.
|
||||
qr = (
|
||||
f"dclogin:{addr}"
|
||||
f"?p={password}&v=1"
|
||||
f"&ih={domain}&ip=993"
|
||||
f"&sh={domain}&sp=465"
|
||||
f"&ic=3&ss=default"
|
||||
)
|
||||
future = account.add_transport_from_qr.future(qr)
|
||||
else:
|
||||
future = account.add_or_update_transport.future(
|
||||
self._make_transport(domain)
|
||||
)
|
||||
futures.append(future)
|
||||
|
||||
# ensure messages stay in INBOX so that they can be
|
||||
# concurrently fetched via extra IMAP connections during tests
|
||||
account.set_config("delete_server_after", "10")
|
||||
accounts.append(account)
|
||||
|
||||
for future in futures:
|
||||
future()
|
||||
|
||||
for account in accounts:
|
||||
account.bring_online()
|
||||
return accounts
|
||||
|
||||
def get_accepted_chat(self, ac1, ac2):
|
||||
"""Create a 1:1 chat between ac1 and ac2 accepted on both sides."""
|
||||
ac2.create_chat(ac1)
|
||||
return ac1.create_chat(ac2)
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def rpc(tmp_path_factory):
|
||||
"""Start a deltachat-rpc-server process for the test session."""
|
||||
|
||||
# NB: accounts_dir must NOT already exist as directory --
|
||||
# core-rust only creates accounts.toml if the dir doesn't exist yet.
|
||||
accounts_dir = str(tmp_path_factory.mktemp("dc") / "accounts")
|
||||
rpc = Rpc(accounts_dir=accounts_dir)
|
||||
rpc.start()
|
||||
yield rpc
|
||||
rpc.close()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def cmfactory(request, gencreds, tmpdir, maildomain):
|
||||
# cloned from deltachat.testplugin.amfactory
|
||||
pytest.importorskip("deltachat")
|
||||
from deltachat.testplugin import ACFactory
|
||||
|
||||
testproc = ChatmailTestProcess(request.config, maildomain, gencreds)
|
||||
|
||||
class Data:
|
||||
def read_path(self, path):
|
||||
return
|
||||
|
||||
am = ACFactory(request=request, tmpdir=tmpdir, testprocess=testproc, data=Data())
|
||||
|
||||
# nb. a bit hacky
|
||||
# would probably be better if deltachat's test machinery grows native support
|
||||
def switch_maildomain(maildomain2):
|
||||
am.testprocess.maildomain = maildomain2
|
||||
|
||||
am.switch_maildomain = switch_maildomain
|
||||
|
||||
yield am
|
||||
if hasattr(request.node, "rep_call") and request.node.rep_call.failed:
|
||||
if testproc.pytestconfig.getoption("--extra-info"):
|
||||
logfile = io.StringIO()
|
||||
am.dump_imap_summary(logfile=logfile)
|
||||
print(logfile.getvalue())
|
||||
# request.node.add_report_section("call", "imap-server-state", s)
|
||||
def cmfactory(rpc, gencreds, maildomain, chatmail_config):
|
||||
"""Return a ChatmailACFactory for creating online Delta Chat accounts."""
|
||||
return ChatmailACFactory(
|
||||
rpc=rpc,
|
||||
maildomain=maildomain,
|
||||
gencreds=gencreds,
|
||||
chatmail_config=chatmail_config,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def remote(sshdomain):
|
||||
return Remote(sshdomain)
|
||||
r = Remote(sshdomain)
|
||||
yield r
|
||||
r.close()
|
||||
|
||||
|
||||
class Remote:
|
||||
def __init__(self, sshdomain):
|
||||
self.sshdomain = sshdomain
|
||||
self._procs = []
|
||||
|
||||
def iter_output(self, logcmd=""):
|
||||
def iter_output(self, logcmd="", ready=None):
|
||||
getjournal = "journalctl -f" if not logcmd else logcmd
|
||||
self.popen = subprocess.Popen(
|
||||
["ssh", f"root@{self.sshdomain}", getjournal],
|
||||
print(self.sshdomain)
|
||||
match self.sshdomain:
|
||||
case "@local": command = []
|
||||
case "localhost": command = []
|
||||
case _: command = ["ssh", f"root@{self.sshdomain}"]
|
||||
[command.append(arg) for arg in getjournal.split()]
|
||||
popen = subprocess.Popen(
|
||||
command,
|
||||
stdin=subprocess.DEVNULL,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.DEVNULL,
|
||||
)
|
||||
while 1:
|
||||
line = self.popen.stdout.readline()
|
||||
res = line.decode().strip().lower()
|
||||
if res:
|
||||
self._procs.append(popen)
|
||||
try:
|
||||
while 1:
|
||||
line = popen.stdout.readline()
|
||||
res = line.decode().strip().lower()
|
||||
if not res:
|
||||
break
|
||||
if ready is not None:
|
||||
ready()
|
||||
ready = None
|
||||
yield res
|
||||
else:
|
||||
break
|
||||
finally:
|
||||
popen.terminate()
|
||||
popen.wait()
|
||||
|
||||
def close(self):
|
||||
while self._procs:
|
||||
proc = self._procs.pop()
|
||||
proc.kill()
|
||||
proc.wait()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
@@ -363,38 +462,40 @@ def lp(request):
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def cmsetup(maildomain, gencreds):
|
||||
return CMSetup(maildomain, gencreds)
|
||||
def cmsetup(maildomain, gencreds, ssl_context):
|
||||
return CMSetup(maildomain, gencreds, ssl_context)
|
||||
|
||||
|
||||
class CMSetup:
|
||||
def __init__(self, maildomain, gencreds):
|
||||
def __init__(self, maildomain, gencreds, ssl_context):
|
||||
self.maildomain = maildomain
|
||||
self.gencreds = gencreds
|
||||
self.ssl_context = ssl_context
|
||||
|
||||
def gen_users(self, num):
|
||||
print(f"Creating {num} online users")
|
||||
users = []
|
||||
for i in range(num):
|
||||
addr, password = self.gencreds()
|
||||
user = CMUser(self.maildomain, addr, password)
|
||||
user = CMUser(self.maildomain, addr, password, self.ssl_context)
|
||||
assert user.smtp
|
||||
users.append(user)
|
||||
return users
|
||||
|
||||
|
||||
class CMUser:
|
||||
def __init__(self, maildomain, addr, password):
|
||||
def __init__(self, maildomain, addr, password, ssl_context=None):
|
||||
self.maildomain = maildomain
|
||||
self.addr = addr
|
||||
self.password = password
|
||||
self.ssl_context = ssl_context
|
||||
self._smtp = None
|
||||
self._imap = None
|
||||
|
||||
@property
|
||||
def smtp(self):
|
||||
if not self._smtp:
|
||||
handle = SmtpConn(self.maildomain)
|
||||
handle = SmtpConn(self.maildomain, ssl_context=self.ssl_context)
|
||||
handle.connect()
|
||||
handle.login(self.addr, self.password)
|
||||
self._smtp = handle
|
||||
@@ -403,7 +504,7 @@ class CMUser:
|
||||
@property
|
||||
def imap(self):
|
||||
if not self._imap:
|
||||
imap = ImapConn(self.maildomain)
|
||||
imap = ImapConn(self.maildomain, ssl_context=self.ssl_context)
|
||||
imap.connect()
|
||||
imap.login(self.addr, self.password)
|
||||
self._imap = imap
|
||||
|
||||
118
cmdeploy/src/cmdeploy/tests/test_basedeploy.py
Normal file
118
cmdeploy/src/cmdeploy/tests/test_basedeploy.py
Normal file
@@ -0,0 +1,118 @@
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from cmdeploy.basedeploy import Deployer
|
||||
|
||||
|
||||
def test_put_file_restart_and_reload():
|
||||
deployer = Deployer()
|
||||
mock_res = MagicMock()
|
||||
mock_res.changed = True
|
||||
|
||||
with patch("cmdeploy.basedeploy.files.put", return_value=mock_res):
|
||||
deployer.put_file("foo.conf", "/etc/foo.conf")
|
||||
assert deployer.need_restart is True
|
||||
assert deployer.daemon_reload is False
|
||||
|
||||
deployer = Deployer()
|
||||
|
||||
deployer.put_file("test.service", "/etc/systemd/system/test.service")
|
||||
assert deployer.need_restart is True
|
||||
assert deployer.daemon_reload is True
|
||||
|
||||
|
||||
def test_remove_file():
|
||||
deployer = Deployer()
|
||||
mock_res = MagicMock()
|
||||
mock_res.changed = True
|
||||
|
||||
with patch("cmdeploy.basedeploy.files.file", return_value=mock_res) as mock_file:
|
||||
deployer.remove_file("/etc/foo.conf")
|
||||
mock_file.assert_called_once_with(
|
||||
name="Remove /etc/foo.conf", path="/etc/foo.conf", present=False
|
||||
)
|
||||
assert deployer.need_restart is True
|
||||
|
||||
|
||||
def test_ensure_systemd_unit():
|
||||
deployer = Deployer()
|
||||
mock_res = MagicMock()
|
||||
mock_res.changed = True
|
||||
|
||||
# Plain service file
|
||||
with patch("cmdeploy.basedeploy.files.put", return_value=mock_res) as mock_put:
|
||||
deployer.ensure_systemd_unit("iroh-relay.service")
|
||||
assert (
|
||||
mock_put.call_args.kwargs["dest"]
|
||||
== "/etc/systemd/system/iroh-relay.service"
|
||||
)
|
||||
assert deployer.need_restart is True
|
||||
assert deployer.daemon_reload is True
|
||||
|
||||
deployer = Deployer()
|
||||
|
||||
# Template (.j2) dispatches to put_template and strips .j2 suffix
|
||||
with patch("cmdeploy.basedeploy.files.template", return_value=mock_res) as mock_tpl:
|
||||
deployer.ensure_systemd_unit(
|
||||
"filtermail/chatmaild.service.j2",
|
||||
bin_path="/usr/local/bin/filtermail",
|
||||
)
|
||||
assert (
|
||||
mock_tpl.call_args.kwargs["dest"] == "/etc/systemd/system/chatmaild.service"
|
||||
)
|
||||
|
||||
deployer = Deployer()
|
||||
|
||||
# Explicit dest_name override
|
||||
with patch("cmdeploy.basedeploy.files.put", return_value=mock_res) as mock_put:
|
||||
deployer.ensure_systemd_unit(
|
||||
"acmetool/acmetool-reconcile.timer",
|
||||
dest_name="acmetool-reconcile.timer",
|
||||
)
|
||||
assert (
|
||||
mock_put.call_args.kwargs["dest"]
|
||||
== "/etc/systemd/system/acmetool-reconcile.timer"
|
||||
)
|
||||
|
||||
|
||||
def test_ensure_service():
|
||||
with patch("cmdeploy.basedeploy.systemd.service") as mock_svc:
|
||||
deployer = Deployer()
|
||||
deployer.need_restart = True
|
||||
deployer.daemon_reload = True
|
||||
deployer.ensure_service("nginx.service")
|
||||
mock_svc.assert_called_once_with(
|
||||
name="Start and enable nginx.service",
|
||||
service="nginx.service",
|
||||
running=True,
|
||||
enabled=True,
|
||||
restarted=True,
|
||||
daemon_reload=True,
|
||||
)
|
||||
# daemon_reload is cleared to avoid multiple systemctl daemon-reload calls
|
||||
# need_restart is kept to ensure all subsequent services also restart
|
||||
assert deployer.need_restart is True
|
||||
assert deployer.daemon_reload is False
|
||||
|
||||
with patch("cmdeploy.basedeploy.systemd.service") as mock_svc:
|
||||
# Stopping suppresses restarted even when need_restart is True
|
||||
deployer = Deployer()
|
||||
deployer.need_restart = True
|
||||
deployer.daemon_reload = True
|
||||
deployer.ensure_service(
|
||||
"mta-sts-daemon.service",
|
||||
running=False,
|
||||
enabled=False,
|
||||
)
|
||||
assert mock_svc.call_args.kwargs["restarted"] is False
|
||||
assert deployer.need_restart is True
|
||||
|
||||
with patch("cmdeploy.basedeploy.systemd.service") as mock_svc:
|
||||
# Multiple calls: daemon_reload resets after first, need_restart persists
|
||||
deployer = Deployer()
|
||||
deployer.need_restart = True
|
||||
deployer.daemon_reload = True
|
||||
deployer.ensure_service("chatmaild.service")
|
||||
deployer.ensure_service("chatmaild-metadata.service")
|
||||
second_call = mock_svc.call_args_list[1]
|
||||
assert second_call.kwargs["restarted"] is True
|
||||
assert second_call.kwargs["daemon_reload"] is False
|
||||
@@ -23,15 +23,19 @@ class TestCmdline:
|
||||
run = parser.parse_args(["run"])
|
||||
assert init and run
|
||||
|
||||
def test_init_not_overwrite(self, capsys):
|
||||
assert main(["init", "chat.example.org"]) == 0
|
||||
def test_init_not_overwrite(self, capsys, tmp_path, monkeypatch):
|
||||
monkeypatch.delenv("CHATMAIL_INI", raising=False)
|
||||
inipath = tmp_path / "chatmail.ini"
|
||||
args = ["init", "--config", str(inipath), "chat.example.org"]
|
||||
assert main(args) == 0
|
||||
capsys.readouterr()
|
||||
|
||||
assert main(["init", "chat.example.org"]) == 1
|
||||
assert main(args) == 1
|
||||
out, err = capsys.readouterr()
|
||||
assert "path exists" in out.lower()
|
||||
|
||||
assert main(["init", "chat.example.org", "--force"]) == 0
|
||||
args.insert(1, "--force")
|
||||
assert main(args) == 0
|
||||
out, err = capsys.readouterr()
|
||||
assert "deleting config file" in out.lower()
|
||||
|
||||
|
||||
@@ -3,7 +3,8 @@ from copy import deepcopy
|
||||
import pytest
|
||||
|
||||
from cmdeploy import remote
|
||||
from cmdeploy.dns import check_full_zone, check_initial_remote_data
|
||||
from cmdeploy.dns import check_full_zone, check_initial_remote_data, parse_zone_records
|
||||
from cmdeploy.remote.rdns import get_authoritative_ns
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
@@ -14,11 +15,15 @@ def mockdns_base(monkeypatch):
|
||||
if command.startswith("dig"):
|
||||
if command == "dig":
|
||||
return "."
|
||||
if "SOA" in command:
|
||||
if "with.public.soa" in command and "NS" in command:
|
||||
return "domain.with.public.soa. 2419 IN NS ns1.first-ns.de."
|
||||
if "with.hidden.soa" in command and "NS" in command:
|
||||
return (
|
||||
"delta.chat. 21600 IN SOA ns1.first-ns.de. dns.hetzner.com."
|
||||
" 2025102800 14400 1800 604800 3600"
|
||||
"domain.with.hidden.soa. 2137 IN NS ns1.desec.io.\n"
|
||||
"domain.with.hidden.soa. 2137 IN NS ns2.desec.org."
|
||||
)
|
||||
if "NS" in command:
|
||||
return "delta.chat. 21600 IN NS ns1.first-ns.de."
|
||||
command_chunks = command.split()
|
||||
domain, typ = command_chunks[4], command_chunks[6]
|
||||
try:
|
||||
@@ -60,6 +65,29 @@ def mockdns(request, mockdns_base, mockdns_expected):
|
||||
return mockdns_base
|
||||
|
||||
|
||||
class TestGetDkimEntry:
|
||||
def test_dkim_entry_returns_tuple_on_success(self, mockdns):
|
||||
entry, web_entry = remote.rdns.get_dkim_entry(
|
||||
"some.domain", "", dkim_selector="opendkim"
|
||||
)
|
||||
# May return None,None if openssl not available, but should never crash
|
||||
if entry is not None:
|
||||
assert "opendkim._domainkey.some.domain" in entry
|
||||
assert "opendkim._domainkey.some.domain" in web_entry
|
||||
|
||||
def test_dkim_entry_returns_none_tuple_on_error(self, monkeypatch):
|
||||
"""CalledProcessError must return (None, None), not bare None."""
|
||||
from subprocess import CalledProcessError
|
||||
|
||||
def failing_shell(command, fail_ok=False, print=print):
|
||||
raise CalledProcessError(1, command)
|
||||
|
||||
monkeypatch.setattr(remote.rdns, "shell", failing_shell)
|
||||
result = remote.rdns.get_dkim_entry("some.domain", "", dkim_selector="opendkim")
|
||||
assert result == (None, None)
|
||||
assert result[0] is None and result[1] is None
|
||||
|
||||
|
||||
class TestPerformInitialChecks:
|
||||
def test_perform_initial_checks_ok1(self, mockdns, mockdns_expected):
|
||||
remote_data = remote.rdns.perform_initial_checks("some.domain")
|
||||
@@ -91,19 +119,71 @@ class TestPerformInitialChecks:
|
||||
assert not res
|
||||
assert len(l) == 2
|
||||
|
||||
def test_perform_initial_checks_no_mta_sts_self_signed(self, mockdns):
|
||||
del mockdns["CNAME"]["mta-sts.some.domain"]
|
||||
remote_data = remote.rdns.perform_initial_checks("some.domain")
|
||||
assert not remote_data["MTA_STS"]
|
||||
|
||||
l = []
|
||||
res = check_initial_remote_data(remote_data, strict_tls=False, print=l.append)
|
||||
assert res
|
||||
assert not l
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
("domain", "ns"),
|
||||
[
|
||||
("domain.with.public.soa", "ns1.first-ns.de."),
|
||||
("domain.with.hidden.soa", "ns1.desec.io."),
|
||||
],
|
||||
)
|
||||
def test_get_authoritative_ns(domain, ns, mockdns):
|
||||
assert get_authoritative_ns(domain) == ns
|
||||
|
||||
|
||||
def test_parse_zone_records():
|
||||
text = """
|
||||
; This is a comment
|
||||
some.domain. 3600 IN A 1.1.1.1
|
||||
|
||||
; Another comment
|
||||
www.some.domain. 3600 IN CNAME some.domain.
|
||||
|
||||
; Multi-word rdata
|
||||
some.domain. 3600 IN MX 10 mail.some.domain.
|
||||
|
||||
; DKIM record (single line, multi-word TXT rdata)
|
||||
dkim._domainkey.some.domain. 3600 IN TXT "v=DKIM1;k=rsa;p=MIIBIjANBgkqhkiG" "9w0BAQEFAAOCAQ8AMIIBCgKCAQEA"
|
||||
|
||||
; Another TXT record
|
||||
_dmarc.some.domain. 3600 IN TXT "v=DMARC1;p=reject"
|
||||
"""
|
||||
records = list(parse_zone_records(text))
|
||||
assert records == [
|
||||
("some.domain", "3600", "A", "1.1.1.1"),
|
||||
("www.some.domain", "3600", "CNAME", "some.domain."),
|
||||
("some.domain", "3600", "MX", "10 mail.some.domain."),
|
||||
(
|
||||
"dkim._domainkey.some.domain",
|
||||
"3600",
|
||||
"TXT",
|
||||
'"v=DKIM1;k=rsa;p=MIIBIjANBgkqhkiG" "9w0BAQEFAAOCAQ8AMIIBCgKCAQEA"',
|
||||
),
|
||||
("_dmarc.some.domain", "3600", "TXT", '"v=DMARC1;p=reject"'),
|
||||
]
|
||||
|
||||
|
||||
def test_parse_zone_records_invalid_line():
|
||||
text = "invalid line"
|
||||
with pytest.raises(ValueError, match="Bad zone record line"):
|
||||
list(parse_zone_records(text))
|
||||
|
||||
|
||||
def parse_zonefile_into_dict(zonefile, mockdns_base, only_required=False):
|
||||
for zf_line in zonefile.split("\n"):
|
||||
if zf_line.startswith("#"):
|
||||
if "Recommended" in zf_line and only_required:
|
||||
return
|
||||
continue
|
||||
if not zf_line.strip():
|
||||
continue
|
||||
zf_domain, zf_typ, zf_value = zf_line.split(maxsplit=2)
|
||||
zf_domain = zf_domain.rstrip(".")
|
||||
zf_value = zf_value.strip()
|
||||
mockdns_base.setdefault(zf_typ, {})[zf_domain] = zf_value
|
||||
if only_required:
|
||||
zonefile = zonefile.split("; Recommended")[0]
|
||||
for name, ttl, rtype, rdata in parse_zone_records(zonefile):
|
||||
mockdns_base.setdefault(rtype, {})[name] = rdata
|
||||
|
||||
|
||||
class MockSSHExec:
|
||||
|
||||
238
cmdeploy/src/cmdeploy/tests/test_dovecot_deployer.py
Normal file
238
cmdeploy/src/cmdeploy/tests/test_dovecot_deployer.py
Normal file
@@ -0,0 +1,238 @@
|
||||
from contextlib import nullcontext
|
||||
from types import SimpleNamespace
|
||||
|
||||
import pytest
|
||||
from pyinfra.facts.deb import DebPackages
|
||||
|
||||
from cmdeploy.dovecot import deployer as dovecot_deployer
|
||||
|
||||
|
||||
def make_host(*fact_pairs):
|
||||
"""Build a mock host; get_fact(cls) dispatches to the provided facts mapping.
|
||||
|
||||
Args:
|
||||
*fact_pairs: tuples of (fact_class, fact_value) to register
|
||||
|
||||
Returns:
|
||||
SimpleNamespace with get_fact that raises a clear error if an
|
||||
unexpected fact type is requested.
|
||||
"""
|
||||
facts = dict(fact_pairs)
|
||||
|
||||
def get_fact(cls):
|
||||
if cls not in facts:
|
||||
registered = ", ".join(c.__name__ for c in facts)
|
||||
raise LookupError(
|
||||
f"unexpected get_fact({cls.__name__}); "
|
||||
f"only registered: {registered}"
|
||||
)
|
||||
return facts[cls]
|
||||
|
||||
return SimpleNamespace(get_fact=get_fact)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def deployer():
|
||||
return dovecot_deployer.DovecotDeployer(
|
||||
SimpleNamespace(mail_domain="chat.example.org"),
|
||||
disable_mail=False,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def patch_blocked(monkeypatch):
|
||||
monkeypatch.setattr(dovecot_deployer, "blocked_service_startup", nullcontext)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_files_put(monkeypatch):
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer.files,
|
||||
"put",
|
||||
lambda **kwargs: SimpleNamespace(changed=False),
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def track_shell(monkeypatch):
|
||||
calls = []
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer.server,
|
||||
"shell",
|
||||
lambda **kwargs: calls.append(kwargs) or SimpleNamespace(changed=False),
|
||||
)
|
||||
return calls
|
||||
|
||||
|
||||
def test_download_dovecot_package_skips_epoch_matched_install(monkeypatch):
|
||||
epoch_version = dovecot_deployer.DOVECOT_PACKAGE_VERSION
|
||||
downloads = []
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"host",
|
||||
make_host((DebPackages, {"dovecot-core": [epoch_version]})),
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"_pick_url",
|
||||
lambda primary, fallback: primary,
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer.files,
|
||||
"download",
|
||||
lambda **kwargs: downloads.append(kwargs),
|
||||
)
|
||||
|
||||
deb, changed = dovecot_deployer._download_dovecot_package("core", "amd64")
|
||||
|
||||
assert deb is None, f"expected no deb path when version matches, got {deb!r}"
|
||||
assert changed is False, "should not flag changed when version already installed"
|
||||
assert downloads == [], "should not download when version already installed"
|
||||
|
||||
|
||||
def test_download_dovecot_package_uses_archive_version_for_url_and_filename(
|
||||
monkeypatch,
|
||||
):
|
||||
downloads = []
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"host",
|
||||
make_host((DebPackages, {})),
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"_pick_url",
|
||||
lambda primary, fallback: primary,
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer.files,
|
||||
"download",
|
||||
lambda **kwargs: downloads.append(kwargs),
|
||||
)
|
||||
|
||||
deb, changed = dovecot_deployer._download_dovecot_package("core", "amd64")
|
||||
|
||||
archive_version = dovecot_deployer.DOVECOT_ARCHIVE_VERSION.replace("+", "%2B")
|
||||
expected_deb = f"/root/dovecot-core_{archive_version}_amd64.deb"
|
||||
|
||||
# Verify the returned path uses archive version, not package version (with epoch)
|
||||
assert changed is True, "should flag changed when package not yet installed"
|
||||
assert deb == expected_deb, f"deb path mismatch: {deb!r} != {expected_deb!r}"
|
||||
assert dovecot_deployer.DOVECOT_PACKAGE_VERSION not in deb, (
|
||||
f"deb path should use archive version (no epoch), got {deb!r}"
|
||||
)
|
||||
assert len(downloads) == 1, "files.download should be called exactly once"
|
||||
|
||||
|
||||
def test_install_skips_dpkg_path_when_epoch_matched_packages_present(
|
||||
deployer, patch_blocked, mock_files_put, track_shell, monkeypatch
|
||||
):
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"host",
|
||||
make_host(
|
||||
(
|
||||
dovecot_deployer.DebPackages,
|
||||
{
|
||||
"dovecot-core": [dovecot_deployer.DOVECOT_PACKAGE_VERSION],
|
||||
"dovecot-imapd": [dovecot_deployer.DOVECOT_PACKAGE_VERSION],
|
||||
"dovecot-lmtpd": [dovecot_deployer.DOVECOT_PACKAGE_VERSION],
|
||||
},
|
||||
),
|
||||
(dovecot_deployer.Arch, "x86_64"),
|
||||
),
|
||||
)
|
||||
downloads = []
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer.files,
|
||||
"download",
|
||||
lambda **kwargs: downloads.append(kwargs),
|
||||
)
|
||||
|
||||
deployer.install()
|
||||
|
||||
assert downloads == [], "should not download when all packages epoch-matched"
|
||||
assert track_shell == [], "should not run dpkg when all packages epoch-matched"
|
||||
assert deployer.need_restart is False, (
|
||||
"need_restart should be False when nothing changed"
|
||||
)
|
||||
|
||||
|
||||
def test_install_unsupported_arch_falls_back_to_apt(
|
||||
deployer, patch_blocked, mock_files_put, track_shell, monkeypatch
|
||||
):
|
||||
# For unsupported architectures, all fact lookups return the arch string.
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"host",
|
||||
SimpleNamespace(get_fact=lambda cls: "riscv64"),
|
||||
)
|
||||
apt_calls = []
|
||||
|
||||
# Mirrors apt.packages() return value: OperationMeta with .changed property.
|
||||
# Only lmtpd triggers a change to verify |= accumulation of changed flags.
|
||||
def fake_apt(**kwargs):
|
||||
apt_calls.append(kwargs)
|
||||
changed = "lmtpd" in kwargs["packages"][0]
|
||||
return SimpleNamespace(changed=changed)
|
||||
|
||||
monkeypatch.setattr(dovecot_deployer.apt, "packages", fake_apt)
|
||||
|
||||
deployer.install()
|
||||
|
||||
actual_pkgs = [c["packages"] for c in apt_calls]
|
||||
assert actual_pkgs == [["dovecot-core"], ["dovecot-imapd"], ["dovecot-lmtpd"]], (
|
||||
f"expected apt install of core/imapd/lmtpd, got {actual_pkgs}"
|
||||
)
|
||||
assert track_shell == [], "should not run dpkg for unsupported arch"
|
||||
assert deployer.need_restart is True, (
|
||||
"need_restart should be True when apt installed a package"
|
||||
)
|
||||
|
||||
|
||||
def test_install_runs_dpkg_when_packages_need_download(
|
||||
deployer, patch_blocked, mock_files_put, track_shell, monkeypatch
|
||||
):
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"host",
|
||||
make_host(
|
||||
(dovecot_deployer.DebPackages, {}),
|
||||
(dovecot_deployer.Arch, "x86_64"),
|
||||
),
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"_pick_url",
|
||||
lambda primary, fallback: primary,
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer.files,
|
||||
"download",
|
||||
lambda **kwargs: SimpleNamespace(changed=True),
|
||||
)
|
||||
|
||||
deployer.install()
|
||||
|
||||
assert len(track_shell) == 1, (
|
||||
f"expected one server.shell() call for dpkg install, got {len(track_shell)}"
|
||||
)
|
||||
cmds = track_shell[0]["commands"]
|
||||
assert len(cmds) == 3, f"expected 3 dpkg/apt commands, got: {cmds}"
|
||||
assert cmds[0].startswith("dpkg --force-confdef --force-confold -i ")
|
||||
assert "apt-get -y --fix-broken install" in cmds[1]
|
||||
assert cmds[2].startswith("dpkg --force-confdef --force-confold -i ")
|
||||
assert deployer.need_restart is True, (
|
||||
"need_restart should be True after dpkg install"
|
||||
)
|
||||
|
||||
|
||||
def test_pick_url_falls_back_on_primary_error(monkeypatch):
|
||||
def raise_error(req, timeout):
|
||||
raise OSError("connection timeout")
|
||||
|
||||
monkeypatch.setattr(dovecot_deployer.urllib.request, "urlopen", raise_error)
|
||||
result = dovecot_deployer._pick_url("http://primary", "http://fallback")
|
||||
assert result == "http://fallback", (
|
||||
f"should fall back when primary fails, got {result!r}"
|
||||
)
|
||||
78
cmdeploy/src/cmdeploy/tests/test_external_tls.py
Normal file
78
cmdeploy/src/cmdeploy/tests/test_external_tls.py
Normal file
@@ -0,0 +1,78 @@
|
||||
"""Functional tests for tls_external_cert_and_key option."""
|
||||
|
||||
import json
|
||||
|
||||
import chatmaild.newemail
|
||||
import pytest
|
||||
from chatmaild.config import read_config, write_initial_config
|
||||
|
||||
|
||||
def make_external_config(tmp_path, cert_key=None):
|
||||
inipath = tmp_path / "chatmail.ini"
|
||||
overrides = {}
|
||||
if cert_key is not None:
|
||||
overrides["tls_external_cert_and_key"] = cert_key
|
||||
write_initial_config(inipath, "chat.example.org", overrides=overrides)
|
||||
return inipath
|
||||
|
||||
|
||||
def test_external_tls_config_reads_paths(tmp_path):
|
||||
inipath = make_external_config(
|
||||
tmp_path,
|
||||
cert_key=(
|
||||
"/etc/letsencrypt/live/chat.example.org/fullchain.pem"
|
||||
" /etc/letsencrypt/live/chat.example.org/privkey.pem"
|
||||
),
|
||||
)
|
||||
config = read_config(inipath)
|
||||
assert config.tls_cert_mode == "external"
|
||||
assert (
|
||||
config.tls_cert_path == "/etc/letsencrypt/live/chat.example.org/fullchain.pem"
|
||||
)
|
||||
assert config.tls_key_path == "/etc/letsencrypt/live/chat.example.org/privkey.pem"
|
||||
|
||||
|
||||
def test_external_tls_missing_option_uses_acme(tmp_path):
|
||||
config = read_config(make_external_config(tmp_path))
|
||||
assert config.tls_cert_mode == "acme"
|
||||
|
||||
|
||||
def test_external_tls_bad_format_raises(tmp_path):
|
||||
inipath = make_external_config(tmp_path, cert_key="/only/one/path.pem")
|
||||
with pytest.raises(ValueError, match="two space-separated"):
|
||||
read_config(inipath)
|
||||
|
||||
|
||||
def test_external_tls_three_paths_raises(tmp_path):
|
||||
inipath = make_external_config(tmp_path, cert_key="/a /b /c")
|
||||
with pytest.raises(ValueError, match="two space-separated"):
|
||||
read_config(inipath)
|
||||
|
||||
|
||||
def test_external_tls_no_dclogin_url(tmp_path, capsys, monkeypatch):
|
||||
inipath = make_external_config(
|
||||
tmp_path, cert_key="/certs/fullchain.pem /certs/privkey.pem"
|
||||
)
|
||||
monkeypatch.setattr(chatmaild.newemail, "CONFIG_PATH", str(inipath))
|
||||
chatmaild.newemail.print_new_account()
|
||||
out, _ = capsys.readouterr()
|
||||
lines = out.split("\n")
|
||||
dic = json.loads(lines[2])
|
||||
assert "dclogin_url" not in dic
|
||||
|
||||
|
||||
def test_external_tls_selects_correct_deployer(tmp_path):
|
||||
from cmdeploy.deployers import get_tls_deployer
|
||||
from cmdeploy.external.deployer import ExternalTlsDeployer
|
||||
from cmdeploy.selfsigned.deployer import SelfSignedTlsDeployer
|
||||
|
||||
inipath = make_external_config(
|
||||
tmp_path, cert_key="/certs/fullchain.pem /certs/privkey.pem"
|
||||
)
|
||||
config = read_config(inipath)
|
||||
deployer = get_tls_deployer(config, "chat.example.org")
|
||||
|
||||
assert isinstance(deployer, ExternalTlsDeployer)
|
||||
assert not isinstance(deployer, SelfSignedTlsDeployer)
|
||||
assert deployer.cert_path == "/certs/fullchain.pem"
|
||||
assert deployer.key_path == "/certs/privkey.pem"
|
||||
@@ -1,11 +1,10 @@
|
||||
import importlib.resources
|
||||
from pathlib import Path
|
||||
|
||||
from cmdeploy.www import build_webpages
|
||||
|
||||
|
||||
def test_build_webpages(tmp_path, make_config):
|
||||
pkgroot = importlib.resources.files("cmdeploy")
|
||||
src_dir = pkgroot.joinpath("../../../www/src").resolve()
|
||||
src_dir = (Path(__file__).resolve() / "../../../../../www/src").resolve()
|
||||
assert src_dir.exists(), src_dir
|
||||
config = make_config("chat.example.org")
|
||||
build_dir = tmp_path.joinpath("build")
|
||||
|
||||
68
cmdeploy/src/cmdeploy/tests/test_rshell.py
Normal file
68
cmdeploy/src/cmdeploy/tests/test_rshell.py
Normal file
@@ -0,0 +1,68 @@
|
||||
from unittest.mock import patch
|
||||
|
||||
from cmdeploy.remote.rshell import dovecot_recalc_quota
|
||||
|
||||
|
||||
def test_dovecot_recalc_quota_normal_output():
|
||||
"""Normal doveadm output returns parsed dict."""
|
||||
normal_output = (
|
||||
"Quota name Type Value Limit %\n"
|
||||
"User quota STORAGE 5 102400 0\n"
|
||||
"User quota MESSAGE 2 - 0\n"
|
||||
)
|
||||
|
||||
with patch("cmdeploy.remote.rshell.shell", return_value=normal_output):
|
||||
result = dovecot_recalc_quota("user@example.org")
|
||||
|
||||
# shell is called twice (recalc + get), patch returns same for both
|
||||
assert result == {"value": 5, "limit": 102400, "percent": 0}
|
||||
|
||||
|
||||
def test_dovecot_recalc_quota_empty_output():
|
||||
"""Empty doveadm output (trailing newline) must not IndexError."""
|
||||
call_count = [0]
|
||||
|
||||
def mock_shell(cmd):
|
||||
call_count[0] += 1
|
||||
if "recalc" in cmd:
|
||||
return ""
|
||||
# quota get returns only empty lines
|
||||
return "\n\n"
|
||||
|
||||
with patch("cmdeploy.remote.rshell.shell", side_effect=mock_shell):
|
||||
result = dovecot_recalc_quota("user@example.org")
|
||||
|
||||
assert result is None
|
||||
|
||||
|
||||
def test_dovecot_recalc_quota_malformed_output():
|
||||
"""Malformed output with too few columns must not crash."""
|
||||
call_count = [0]
|
||||
|
||||
def mock_shell(cmd):
|
||||
call_count[0] += 1
|
||||
if "recalc" in cmd:
|
||||
return ""
|
||||
# partial line, fewer than 6 parts
|
||||
return "Quota name\nUser quota STORAGE\n"
|
||||
|
||||
with patch("cmdeploy.remote.rshell.shell", side_effect=mock_shell):
|
||||
result = dovecot_recalc_quota("user@example.org")
|
||||
|
||||
assert result is None
|
||||
|
||||
|
||||
def test_dovecot_recalc_quota_header_only():
|
||||
"""Only header line, no data rows."""
|
||||
call_count = [0]
|
||||
|
||||
def mock_shell(cmd):
|
||||
call_count[0] += 1
|
||||
if "recalc" in cmd:
|
||||
return ""
|
||||
return "Quota name Type Value Limit %\n"
|
||||
|
||||
with patch("cmdeploy.remote.rshell.shell", side_effect=mock_shell):
|
||||
result = dovecot_recalc_quota("user@example.org")
|
||||
|
||||
assert result is None
|
||||
@@ -1,5 +1,4 @@
|
||||
import hashlib
|
||||
import importlib.resources
|
||||
import re
|
||||
import time
|
||||
import traceback
|
||||
@@ -37,7 +36,7 @@ def prepare_template(source):
|
||||
|
||||
|
||||
def get_paths(config) -> (Path, Path, Path):
|
||||
reporoot = importlib.resources.files(__package__).joinpath("../../../").resolve()
|
||||
reporoot = (Path(__file__).resolve() / "../../../../").resolve()
|
||||
www_path = Path(config.www_folder)
|
||||
# if www_folder was not set, use default directory
|
||||
if config.www_folder == "":
|
||||
@@ -133,8 +132,7 @@ def find_merge_conflict(src_dir) -> Path:
|
||||
|
||||
|
||||
def main():
|
||||
path = importlib.resources.files(__package__)
|
||||
reporoot = path.joinpath("../../../").resolve()
|
||||
reporoot = (Path(__file__).resolve() / "../../../../").resolve()
|
||||
inipath = reporoot.joinpath("chatmail.ini")
|
||||
config = read_config(inipath)
|
||||
config.webdev = True
|
||||
|
||||
@@ -4,12 +4,14 @@
|
||||
|
||||
You can use the `make` command and `make html` to build web pages.
|
||||
|
||||
You need a Python environment where the following install was excuted:
|
||||
|
||||
pip install furo sphinx-autobuild
|
||||
You need a Python environment with `sphinx` and other
|
||||
dependencies, you can create it by running `scripts/initenv.sh`
|
||||
from the repository root.
|
||||
|
||||
To develop/change documentation, you can then do:
|
||||
|
||||
. venv/bin/activate
|
||||
cd doc
|
||||
make auto
|
||||
|
||||
A page will open at https://127.0.0.1:8000/ serving the docs and it will
|
||||
|
||||
@@ -47,6 +47,14 @@ steps. Please substitute it with your own domain.
|
||||
www.chat.example.org. 3600 IN CNAME chat.example.org.
|
||||
mta-sts.chat.example.org. 3600 IN CNAME chat.example.org.
|
||||
|
||||
.. note::
|
||||
|
||||
For experimental deployments using self-signed certificates,
|
||||
use a domain name starting with ``_``
|
||||
(e.g. ``_chat.example.org``).
|
||||
The ``mta-sts`` CNAME and ``_mta-sts`` TXT records
|
||||
are not needed for such domains.
|
||||
|
||||
2. On your local PC, clone the repository and bootstrap the Python
|
||||
virtualenv.
|
||||
|
||||
@@ -63,6 +71,16 @@ steps. Please substitute it with your own domain.
|
||||
|
||||
scripts/cmdeploy init chat.example.org # <-- use your domain
|
||||
|
||||
To use self-signed TLS certificates
|
||||
instead of Let's Encrypt,
|
||||
use a domain name starting with ``_``
|
||||
(e.g. ``scripts/cmdeploy init _chat.example.org``).
|
||||
Domains starting with ``_`` cannot obtain WebPKI certificates,
|
||||
so self-signed mode is derived automatically.
|
||||
This is useful for private or test deployments.
|
||||
See the :doc:`overview`
|
||||
for details on certificate provisioning.
|
||||
|
||||
4. Verify that SSH root login to the deployment server server works:
|
||||
|
||||
::
|
||||
@@ -80,13 +98,6 @@ steps. Please substitute it with your own domain.
|
||||
configure at your DNS provider (it can take some time until they are
|
||||
public).
|
||||
|
||||
Docker installation
|
||||
-------------------
|
||||
|
||||
We have experimental support for `docker compose <https://github.com/chatmail/relay/blob/docker-rebase/docs/DOCKER_INSTALLATION_EN.md>`_,
|
||||
but it is not covered by automated tests yet,
|
||||
so don't expect everything to work.
|
||||
|
||||
Other helpful commands
|
||||
----------------------
|
||||
|
||||
@@ -176,6 +187,55 @@ creating addresses, login with ssh to the deployment machine and run:
|
||||
Chatmail address creation will be denied while this file is present.
|
||||
|
||||
|
||||
Running a relay with self-signed certificates
|
||||
----------------------------------------------
|
||||
|
||||
Use a domain name starting with ``_`` (e.g. ``_chat.example.org``)
|
||||
to run a relay with self-signed certificates.
|
||||
Domains starting with ``_`` cannot obtain WebPKI certificates
|
||||
so the relay automatically uses self-signed certificates
|
||||
and all other relays will accept connections from it
|
||||
without requiring certificate verification.
|
||||
This is useful for experimental setups and testing.
|
||||
|
||||
.. _external-tls:
|
||||
|
||||
Running a relay with externally managed certificates
|
||||
-----------------------------------------------------
|
||||
|
||||
If you already have a TLS certificate manager
|
||||
(e.g. Traefik, certbot, or another ACME client)
|
||||
running on the deployment server,
|
||||
you can configure the relay to use those certificates
|
||||
instead of the built-in ``acmetool``.
|
||||
|
||||
Set the following in ``chatmail.ini``::
|
||||
|
||||
tls_external_cert_and_key = /path/to/fullchain.pem /path/to/privkey.pem
|
||||
|
||||
The paths must point to certificate and key files
|
||||
on the deployment server.
|
||||
During ``cmdeploy run``, these paths are written into
|
||||
the Postfix, Dovecot, and Nginx configurations.
|
||||
No certificate files are transferred from the build machine —
|
||||
they must already exist on the server,
|
||||
managed by your external certificate tool.
|
||||
|
||||
The deploy will verify that both files exist on the server.
|
||||
``acmetool`` is **not** installed or run in this mode.
|
||||
|
||||
.. note::
|
||||
|
||||
You are responsible for certificate renewal.
|
||||
When the certificate file changes on disk,
|
||||
all relay services pick up the new certificate automatically
|
||||
via a systemd path watcher installed during deploy.
|
||||
The watcher uses inotify, which does not cross bind-mount boundaries.
|
||||
If you use such a setup, you must trigger the reload explicitly after renewal::
|
||||
|
||||
systemctl start tls-cert-reload.service
|
||||
|
||||
|
||||
Migrating to a new build machine
|
||||
----------------------------------
|
||||
|
||||
|
||||
@@ -16,5 +16,6 @@ Contributions and feedback welcome through the https://github.com/chatmail/relay
|
||||
proxy
|
||||
migrate
|
||||
overview
|
||||
reverse_dns
|
||||
related
|
||||
faq
|
||||
|
||||
@@ -102,17 +102,17 @@ short overview of ``chatmaild`` services:
|
||||
Apple/Google/Huawei.
|
||||
|
||||
- `chatmail-expire <https://github.com/chatmail/relay/blob/main/chatmaild/src/chatmaild/expire.py>`_
|
||||
deletes users if they have not logged in for a longer while.
|
||||
The timeframe can be configured in ``chatmail.ini``.
|
||||
deletes old messages, large messages, and entire mailboxes
|
||||
of users who have not logged in for longer than
|
||||
``delete_inactive_users_after`` days.
|
||||
|
||||
- ``chatmail-quota-expire`` is called by Dovecot's ``quota_warning`` mechanism
|
||||
and will automatically remove oldest messages to keep mailboxes well under ``max_mailbox_size``.
|
||||
|
||||
- `lastlogin <https://github.com/chatmail/relay/blob/main/chatmaild/src/chatmaild/lastlogin.py>`_
|
||||
is contacted by Dovecot when a user logs in and stores the date of
|
||||
the login.
|
||||
|
||||
- `metrics <https://github.com/chatmail/relay/blob/main/chatmaild/src/chatmaild/metrics.py>`_
|
||||
collects some metrics and displays them at
|
||||
``https://example.org/metrics``.
|
||||
|
||||
``www/``
|
||||
~~~~~~~~~
|
||||
|
||||
@@ -142,11 +142,9 @@ Chatmail relay dependency diagram
|
||||
nginx-internal --- autoconfig.xml;
|
||||
certs-nginx[("`TLS certs
|
||||
/var/lib/acme`")] --> nginx-internal;
|
||||
systemd-timer --- chatmail-metrics;
|
||||
systemd-timer --- acmetool;
|
||||
systemd-timer --- chatmail-expire-daily;
|
||||
systemd-timer --- chatmail-fsreport-daily;
|
||||
chatmail-metrics --- website;
|
||||
acmetool --> certs[("`TLS certs
|
||||
/var/lib/acme`")];
|
||||
nginx-external --- |993|dovecot;
|
||||
@@ -155,6 +153,7 @@ Chatmail relay dependency diagram
|
||||
autoconfig.xml --- dovecot;
|
||||
postfix --- |10080|filtermail-outgoing;
|
||||
postfix --- |10081|filtermail-incoming;
|
||||
postfix --- |10083|filtermail-transport;
|
||||
filtermail-outgoing --- |10025 reinject|postfix;
|
||||
filtermail-incoming --- |10026 reinject|postfix;
|
||||
dovecot --- |doveauth.socket|doveauth;
|
||||
@@ -162,6 +161,8 @@ Chatmail relay dependency diagram
|
||||
/home/vmail/.../user"];
|
||||
dovecot --- |lastlogin.socket|lastlogin;
|
||||
dovecot --- chatmail-metadata;
|
||||
dovecot --- |quota-warning|chatmail-quota-expire;
|
||||
chatmail-quota-expire --- maildir;
|
||||
lastlogin --- maildir;
|
||||
doveauth --- maildir;
|
||||
chatmail-expire-daily --- maildir;
|
||||
@@ -295,10 +296,7 @@ ensured by ``filtermail`` proxy.
|
||||
TLS requirements
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
Postfix is configured to require valid TLS by setting
|
||||
`smtp_tls_security_level <https://www.postfix.org/postconf.5.html#smtp_tls_security_level>`_
|
||||
to ``verify``. If emails don’t arrive at your chatmail relay server, the
|
||||
problem is likely that your relay does not have a valid TLS certificate.
|
||||
Filtermail (used for delivery) requires a valid TLS.
|
||||
|
||||
You can test it by resolving ``MX`` records of your relay domain and
|
||||
then connecting to MX relays (e.g ``mx.example.org``) with
|
||||
@@ -309,6 +307,11 @@ When providing a TLS certificate to your chatmail relay server, make
|
||||
sure to provide the full certificate chain and not just the last
|
||||
certificate.
|
||||
|
||||
If you use an external certificate manager (e.g. Traefik or certbot),
|
||||
set ``tls_external_cert_and_key`` in ``chatmail.ini``
|
||||
to provide the certificate and key paths.
|
||||
See :ref:`external-tls` for details.
|
||||
|
||||
If you are running an Exim server and don’t see incoming connections
|
||||
from a chatmail relay server in the logs, make sure ``smtp_no_mail`` log
|
||||
item is enabled in the config with ``log_selector = +smtp_no_mail``. By
|
||||
@@ -317,6 +320,14 @@ default Exim does not log sessions that are closed before sending the
|
||||
by Postfix, so you might think that connection is not established while
|
||||
actually it is a problem with your TLS certificate.
|
||||
|
||||
If emails don’t arrive at your chatmail relay server, the
|
||||
problem is likely that your relay does not have a valid TLS certificate.
|
||||
|
||||
Note that connections to relays with underscore-prefixed test domains
|
||||
(e.g. ``_chat.example.org``) use ``encrypt`` tls security level,
|
||||
because such domains cannot obtain valid Let's Encrypt certificates
|
||||
and run with self-signed certificates.
|
||||
|
||||
|
||||
.. _dovecot: https://dovecot.org
|
||||
.. _postfix: https://www.postfix.org
|
||||
|
||||
64
doc/source/reverse_dns.rst
Normal file
64
doc/source/reverse_dns.rst
Normal file
@@ -0,0 +1,64 @@
|
||||
Configuring reverse DNS
|
||||
=======================
|
||||
|
||||
Some email servers reject the emails
|
||||
if they don't pass `FCrDNS`_ check, also known as `iprev`_ check.
|
||||
|
||||
.. _FCrDNS: https://en.wikipedia.org/wiki/Forward-confirmed_reverse_DNS
|
||||
.. _iprev: https://datatracker.ietf.org/doc/html/rfc8601#section-3
|
||||
|
||||
Passing the check requires that the IP address that email is sent from
|
||||
should have a ``PTR`` record pointing to the domain name of the server,
|
||||
and domain name record should have an ``A/AAAA`` record
|
||||
pointing to the IP address.
|
||||
|
||||
Modern email relies on DKIM and SPF for authentication,
|
||||
while iprev check exists for
|
||||
`historical reasons <https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-reverse-mapping-considerations-06#section-2.1>`_.
|
||||
Chatmail relays don't resolve ``PTR`` records,
|
||||
so you can ignore this section if configuring ``PTR`` records
|
||||
is difficult and federation with legacy email servers that don't accept
|
||||
valid DKIM signature for authentication is not important.
|
||||
|
||||
Multi-homed setups
|
||||
------------------
|
||||
|
||||
If you have a server with multiple IP addresses,
|
||||
also known as multi-homed setup,
|
||||
and don't publish all IP addresses in DNS,
|
||||
you need to make sure you are using
|
||||
the published address when making outgoing connections.
|
||||
|
||||
For example, your server may have a static IP
|
||||
address, and a so-called Floating IP or Virtual IP
|
||||
that can be moved between servers in case of
|
||||
migration or for failover.
|
||||
By using Floating IP you can avoid downtime
|
||||
and keep the IP address reputation
|
||||
for destinatinons that rely on IP reputation and IP blocklists.
|
||||
In this case you will only publish
|
||||
the Floating IP to DNS and only use the static IP
|
||||
to SSH into the server.
|
||||
|
||||
If you have such setup, make sure that
|
||||
you not only set ``PTR`` records for the Floating IP,
|
||||
but make outgoing connections using the Floating IP.
|
||||
Otherwise reverse DNS check succeed,
|
||||
but forward check making sure your domain name points
|
||||
to the IP address will fail.
|
||||
Such setup is indistinguishable from someone
|
||||
setting IP address ``PTR`` with the domain they don't own
|
||||
and as a result don't succeed.
|
||||
|
||||
On Linux you can configure source IP address with ``ip route`` command,
|
||||
for example:
|
||||
::
|
||||
|
||||
ip route change default via <default-gateway> dev eth0 src <source-address>
|
||||
|
||||
Make sure to persist the change after verifying it is working.
|
||||
You can check what your outgoing IP address is
|
||||
with ``curl icanhazip.com``.
|
||||
Check both the IPv4 and IPv6 addresses.
|
||||
For IPv4 address use ``curl ipv4.icanhazip.com`` or ``curl -4 icanhazip.com``
|
||||
and similarly for IPv6 if you have it.
|
||||
@@ -1,52 +0,0 @@
|
||||
services:
|
||||
chatmail:
|
||||
build:
|
||||
context: ./
|
||||
dockerfile: docker/chatmail_relay.dockerfile
|
||||
image: chatmail-relay:latest
|
||||
restart: unless-stopped
|
||||
container_name: chatmail
|
||||
# Required for systemd — use only one of the following:
|
||||
cgroup: host # compose v2 only
|
||||
# privileged: true # compose v1 (not tested)
|
||||
tty: true # required for logs
|
||||
tmpfs: # required for systemd
|
||||
- /tmp
|
||||
- /run
|
||||
- /run/lock
|
||||
logging:
|
||||
driver: json-file
|
||||
options:
|
||||
max-size: "10m"
|
||||
max-file: "3"
|
||||
environment:
|
||||
CHANGE_KERNEL_SETTINGS: "False"
|
||||
MAIL_DOMAIN: $MAIL_DOMAIN
|
||||
ACME_EMAIL: $ACME_EMAIL
|
||||
RECREATE_VENV: $RECREATE_VENV
|
||||
MAX_MESSAGE_SIZE: $MAX_MESSAGE_SIZE
|
||||
DEBUG_COMMANDS_ENABLED: $DEBUG_COMMANDS_ENABLED
|
||||
FORCE_REINIT_INI_FILE: $FORCE_REINIT_INI_FILE
|
||||
USE_FOREIGN_CERT_MANAGER: $USE_FOREIGN_CERT_MANAGER
|
||||
ENABLE_CERTS_MONITORING: $ENABLE_CERTS_MONITORING
|
||||
CERTS_MONITORING_TIMEOUT: $CERTS_MONITORING_TIMEOUT
|
||||
IS_DEVELOPMENT_INSTANCE: $IS_DEVELOPMENT_INSTANCE
|
||||
CMDEPLOY_STAGES: ${CMDEPLOY_STAGES:-}
|
||||
network_mode: "host"
|
||||
volumes:
|
||||
## system
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:rw # required for systemd
|
||||
- ./:/opt/chatmail
|
||||
|
||||
## data
|
||||
- ./data/chatmail:/home
|
||||
- ./data/chatmail-dkimkeys:/etc/dkimkeys
|
||||
- ./data/chatmail-acme:/var/lib/acme
|
||||
|
||||
## custom resources
|
||||
# - ./custom/www/src/index.md:/opt/chatmail/www/src/index.md
|
||||
|
||||
## debug
|
||||
# - ./docker/files/setup_chatmail_docker.sh:/setup_chatmail_docker.sh
|
||||
# - ./docker/files/entrypoint.sh:/entrypoint.sh
|
||||
# - ./docker/files/update_ini.sh:/update_ini.sh
|
||||
@@ -1,100 +0,0 @@
|
||||
FROM jrei/systemd-debian:12 AS base
|
||||
|
||||
ENV LANG=en_US.UTF-8
|
||||
|
||||
RUN echo 'APT::Install-Recommends "0";' > /etc/apt/apt.conf.d/01norecommend && \
|
||||
echo 'APT::Install-Suggests "0";' >> /etc/apt/apt.conf.d/01norecommend && \
|
||||
apt-get update && \
|
||||
apt-get install -y \
|
||||
ca-certificates && \
|
||||
DEBIAN_FRONTEND=noninteractive \
|
||||
TZ=Europe/London \
|
||||
apt-get install -y tzdata && \
|
||||
apt-get install -y locales && \
|
||||
sed -i -e "s/# $LANG.*/$LANG UTF-8/" /etc/locale.gen && \
|
||||
dpkg-reconfigure --frontend=noninteractive locales && \
|
||||
update-locale LANG=$LANG \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y \
|
||||
git \
|
||||
python3 \
|
||||
python3-venv \
|
||||
python3-virtualenv \
|
||||
gcc \
|
||||
python3-dev \
|
||||
opendkim \
|
||||
opendkim-tools \
|
||||
curl \
|
||||
rsync \
|
||||
unbound \
|
||||
unbound-anchor \
|
||||
dnsutils \
|
||||
postfix \
|
||||
acl \
|
||||
nginx \
|
||||
libnginx-mod-stream \
|
||||
fcgiwrap \
|
||||
cron \
|
||||
&& for pkg in core imapd lmtpd; do \
|
||||
case "$pkg" in \
|
||||
core) sha256="43f593332e22ac7701c62d58b575d2ca409e0f64857a2803be886c22860f5587" ;; \
|
||||
imapd) sha256="8d8dc6fc00bbb6cdb25d345844f41ce2f1c53f764b79a838eb2a03103eebfa86" ;; \
|
||||
lmtpd) sha256="2f69ba5e35363de50962d42cccbfe4ed8495265044e244007d7ccddad77513ab" ;; \
|
||||
esac; \
|
||||
url="https://download.delta.chat/dovecot/dovecot-${pkg}_2.3.21%2Bdfsg1-3_amd64.deb"; \
|
||||
file="/tmp/$(basename "$url")"; \
|
||||
curl -fsSL "$url" -o "$file"; \
|
||||
echo "$sha256 $file" | sha256sum -c -; \
|
||||
apt-get install -y "$file"; \
|
||||
rm -f "$file"; \
|
||||
done \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /opt/chatmail
|
||||
|
||||
# --- Build-time install stage ---
|
||||
# Bake the "install" deployer stage into the image; we can't use
|
||||
# scripts/initenv.sh because /opt/chatmail is empty at build time as
|
||||
# source arrives at runtime via volume mount., so we use a throwaway venv.
|
||||
# On container start only "configure,activate" stages run.
|
||||
COPY . /tmp/chatmail-src/
|
||||
WORKDIR /tmp/chatmail-src
|
||||
|
||||
# Dummy config — deploy_chatmail() needs a parseable ini to instantiate deployers
|
||||
RUN printf '[params]\nmail_domain = build.local\n' > /tmp/chatmail.ini
|
||||
|
||||
# Do what initenv.sh would do without the docs
|
||||
RUN python3 -m venv /tmp/build-venv && \
|
||||
/tmp/build-venv/bin/pip install --no-cache-dir \
|
||||
-e chatmaild -e cmdeploy
|
||||
|
||||
RUN CMDEPLOY_STAGES=install \
|
||||
CHATMAIL_INI=/tmp/chatmail.ini \
|
||||
CHATMAIL_DOCKER=True \
|
||||
/tmp/build-venv/bin/pyinfra @local \
|
||||
/tmp/chatmail-src/cmdeploy/src/cmdeploy/run.py -y
|
||||
|
||||
RUN rm -rf /tmp/chatmail-src /tmp/build-venv /tmp/chatmail.ini
|
||||
|
||||
WORKDIR /opt/chatmail
|
||||
# --- End build-time install stage ---
|
||||
|
||||
ARG SETUP_CHATMAIL_SERVICE_PATH=/lib/systemd/system/setup_chatmail.service
|
||||
COPY ./docker/files/setup_chatmail.service "$SETUP_CHATMAIL_SERVICE_PATH"
|
||||
RUN ln -sf "$SETUP_CHATMAIL_SERVICE_PATH" "/etc/systemd/system/multi-user.target.wants/setup_chatmail.service"
|
||||
|
||||
COPY --chmod=555 ./docker/files/setup_chatmail_docker.sh /setup_chatmail_docker.sh
|
||||
COPY --chmod=555 ./docker/files/update_ini.sh /update_ini.sh
|
||||
COPY --chmod=555 ./docker/files/entrypoint.sh /entrypoint.sh
|
||||
|
||||
VOLUME ["/sys/fs/cgroup", "/home"]
|
||||
|
||||
STOPSIGNAL SIGRTMIN+3
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
|
||||
CMD [ "--default-standard-output=journal+console", \
|
||||
"--default-standard-error=journal+console" ]
|
||||
|
||||
@@ -1,84 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Convert a chatmail.ini to a Docker .env file.
|
||||
|
||||
Usage: python docker/cm_ini_to_env.py [chatmail.ini] [.env]
|
||||
|
||||
Reads the ini file, extracts all non-default key=value pairs,
|
||||
and writes them as UPPER_CASE env vars suitable for docker-compose.
|
||||
"""
|
||||
|
||||
import configparser
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Keys that only make sense for bare-metal deploys or are handled
|
||||
# separately by the Docker setup and should not appear in .env.
|
||||
SKIP_KEYS = set()
|
||||
|
||||
# Keys that exist in .env but have a different name than the ini key.
|
||||
# ini_key -> env_key
|
||||
RENAMES = {}
|
||||
|
||||
|
||||
def read_ini(path):
|
||||
"""Return dict of key=value from [params] section."""
|
||||
cp = configparser.ConfigParser()
|
||||
cp.read(path)
|
||||
if not cp.has_section("params"):
|
||||
sys.exit(f"Error: {path} has no [params] section")
|
||||
return dict(cp.items("params"))
|
||||
|
||||
|
||||
def read_defaults():
|
||||
"""Return dict of default values from the ini template."""
|
||||
template = Path(__file__).resolve().parent.parent / "chatmaild/src/chatmaild/ini/chatmail.ini.f"
|
||||
if not template.exists():
|
||||
return {}
|
||||
cp = configparser.ConfigParser()
|
||||
cp.read(template)
|
||||
if not cp.has_section("params"):
|
||||
return {}
|
||||
defaults = {}
|
||||
for key, value in cp.items("params"):
|
||||
# Template placeholders like {mail_domain} aren't real defaults.
|
||||
if "{" not in value:
|
||||
defaults[key] = value
|
||||
return defaults
|
||||
|
||||
|
||||
def ini_to_env(ini_path, only_non_default=True):
|
||||
"""Yield (ENV_KEY, value) pairs from an ini file."""
|
||||
params = read_ini(ini_path)
|
||||
defaults = read_defaults() if only_non_default else {}
|
||||
|
||||
for key, value in sorted(params.items()):
|
||||
if key in SKIP_KEYS:
|
||||
continue
|
||||
if only_non_default and key in defaults and value.strip() == defaults[key].strip():
|
||||
continue
|
||||
env_key = RENAMES.get(key, key.upper())
|
||||
yield env_key, value.strip()
|
||||
|
||||
|
||||
def main():
|
||||
ini_path = sys.argv[1] if len(sys.argv) > 1 else "chatmail.ini"
|
||||
env_path = sys.argv[2] if len(sys.argv) > 2 else None
|
||||
|
||||
if not Path(ini_path).exists():
|
||||
sys.exit(f"Error: {ini_path} not found")
|
||||
|
||||
lines = []
|
||||
for env_key, value in ini_to_env(ini_path):
|
||||
lines.append(f'{env_key}="{value}"')
|
||||
|
||||
output = "\n".join(lines) + "\n"
|
||||
|
||||
if env_path:
|
||||
Path(env_path).write_text(output)
|
||||
print(f"Wrote {len(lines)} variables to {env_path}")
|
||||
else:
|
||||
print(output, end="")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,11 +0,0 @@
|
||||
MAIL_DOMAIN="chat.example.com"
|
||||
# ACME_EMAIL=""
|
||||
# RECREATE_VENV="false"
|
||||
# MAX_MESSAGE_SIZE="50M"
|
||||
# DEBUG_COMMANDS_ENABLED="true"
|
||||
# FORCE_REINIT_INI_FILE="true"
|
||||
# USE_FOREIGN_CERT_MANAGER="True"
|
||||
# ENABLE_CERTS_MONITORING="true"
|
||||
# CERTS_MONITORING_TIMEOUT="10"
|
||||
# IS_DEVELOPMENT_INSTANCE="True"
|
||||
# CMDEPLOY_STAGES - default: "configure,activate". Set to "install,configure,activate" to force full reinstall.
|
||||
@@ -1,11 +0,0 @@
|
||||
#!/bin/bash
# Container entrypoint: prepare nginx and the setup unit, then hand
# off to systemd as PID 1.
set -eo pipefail

# Remove the stock nginx site so chatmail's own config is the only one.
unlink /etc/nginx/sites-enabled/default || true

SETUP_CHATMAIL_SERVICE_PATH="${SETUP_CHATMAIL_SERVICE_PATH:-/lib/systemd/system/setup_chatmail.service}"

# Inject the names of all current environment variables into the unit's
# PassEnvironment= placeholder so the setup script sees the container env.
env_vars=$(printenv | cut -d= -f1 | xargs)
# Quote the path: the unquoted form breaks on paths containing spaces.
sed -i "s|<envs_list>|$env_vars|g" "$SETUP_CHATMAIL_SERVICE_PATH"

# Replace this shell with systemd; "$@" forwards arguments unsplit
# (the original bare $@ word-split any argument containing spaces).
exec /lib/systemd/systemd "$@"
|
||||
@@ -1,14 +0,0 @@
|
||||
[Unit]
Description=Run container setup commands
# Run late in boot, and only when the setup script exists in the image.
After=multi-user.target
ConditionPathExists=/setup_chatmail_docker.sh

[Service]
Type=oneshot
ExecStart=/bin/bash /setup_chatmail_docker.sh
# Keep the unit reported "active" after the one-shot script finishes.
RemainAfterExit=true
WorkingDirectory=/opt/chatmail
# <envs_list> is a placeholder: the container entrypoint sed-replaces it
# with the names of all container environment variables at startup.
PassEnvironment=<envs_list>

[Install]
WantedBy=multi-user.target
|
||||
@@ -1,84 +0,0 @@
|
||||
#!/bin/bash
# Setup script run inside the container: applies configurable defaults,
# deploys chatmail, and starts certificate monitoring.

set -eo pipefail

# Tunable settings; each may be overridden via container environment.
export INI_FILE="${INI_FILE:-chatmail.ini}"
export ENABLE_CERTS_MONITORING="${ENABLE_CERTS_MONITORING:-true}"
export CERTS_MONITORING_TIMEOUT="${CERTS_MONITORING_TIMEOUT:-60}"
# NOTE: this default expands MAIL_DOMAIN before the check below; the
# script still exits right after if MAIL_DOMAIN is unset.
export PATH_TO_SSL="${PATH_TO_SSL:-/var/lib/acme/live/${MAIL_DOMAIN}}"
export CHANGE_KERNEL_SETTINGS="${CHANGE_KERNEL_SETTINGS:-False}"
export RECREATE_VENV="${RECREATE_VENV:-false}"

# MAIL_DOMAIN has no sane default; refuse to run without it.
if [ -z "$MAIL_DOMAIN" ]; then
    echo "ERROR: Environment variable 'MAIL_DOMAIN' must be set!" >&2
    exit 1
fi
|
||||
|
||||
# Hook for ad-hoc debug commands; invoked when DEBUG_COMMANDS_ENABLED=true.
debug_commands() {
    printf '%s\n' "Executing debug commands"
    # git config --global --add safe.directory /opt/chatmail
    # ./scripts/initenv.sh
}
|
||||
|
||||
# Produce a single sha1 fingerprint covering the content of every file
# under $PATH_TO_SSL (sorted so the result is order-independent).
calculate_hash() {
    find "$PATH_TO_SSL" -type f -exec sha1sum {} \; \
        | sort \
        | sha1sum \
        | cut -d' ' -f1
}
|
||||
|
||||
# Watch the certificate directory and reload the TLS-consuming services
# whenever its content hash changes.  Runs forever; meant to be
# backgrounded by the caller.  Exits immediately when monitoring is
# disabled via ENABLE_CERTS_MONITORING.
monitor_certificates() {
    if [ "$ENABLE_CERTS_MONITORING" != "true" ]; then
        echo "Certs monitoring disabled."
        exit 0
    fi

    current_hash=$(calculate_hash)
    previous_hash=$current_hash

    while true; do
        current_hash=$(calculate_hash)
        if [[ "$current_hash" != "$previous_hash" ]]; then
            # TODO: add an option to restart at a specific time interval
            echo "[INFO] Certificate's folder hash was changed, reloading nginx, dovecot and postfix services."
            # NOTE(review): under `set -e` a failed reload terminates the
            # monitor loop — confirm this is the intended behavior.
            systemctl reload nginx.service
            systemctl reload dovecot.service
            systemctl reload postfix.service
            previous_hash=$current_hash
        fi
        # Quoted: the original bare $CERTS_MONITORING_TIMEOUT was subject
        # to word splitting/globbing.
        sleep "$CERTS_MONITORING_TIMEOUT"
    done
}
|
||||
|
||||
### MAIN

if [ "$DEBUG_COMMANDS_ENABLED" = true ]; then
    debug_commands
fi

if [ "$FORCE_REINIT_INI_FILE" = true ]; then
    INI_CMD_ARGS=--force
fi

# Generate a DKIM key pair on first boot only; always (re)assert ownership.
if [ ! -f /etc/dkimkeys/opendkim.private ]; then
    # Quoted: a bare $MAIL_DOMAIN would word-split (it is validated
    # non-empty above, but could still contain stray whitespace).
    /usr/sbin/opendkim-genkey -D /etc/dkimkeys -d "$MAIL_DOMAIN" -s opendkim
fi
chown opendkim:opendkim /etc/dkimkeys/opendkim.private
chown opendkim:opendkim /etc/dkimkeys/opendkim.txt

# TODO: Move to debug_commands after git clone is moved to dockerfile.
git config --global --add safe.directory /opt/chatmail
if [ "$RECREATE_VENV" = true ]; then
    rm -rf venv
fi
# Skip venv creation if it already exists
if [ ! -x venv/bin/python ] || [ ! -x venv/bin/cmdeploy ]; then
    ./scripts/initenv.sh
fi

# $INI_CMD_ARGS stays unquoted on purpose: empty when unset, "--force"
# otherwise.  `|| true` keeps going when the ini file already exists.
./scripts/cmdeploy init --config "${INI_FILE}" $INI_CMD_ARGS "$MAIL_DOMAIN" || true
bash /update_ini.sh

export CMDEPLOY_STAGES="${CMDEPLOY_STAGES:-configure,activate}"
./scripts/cmdeploy run --ssh-host @docker

# Mirror the journal to the container console so `docker logs` shows it.
echo "ForwardToConsole=yes" >> /etc/systemd/journald.conf
systemctl restart systemd-journald

monitor_certificates &
|
||||
@@ -1,79 +0,0 @@
|
||||
#!/bin/bash
# Rewrite $INI_FILE, overriding any "key = value" line whose upper-cased
# key is set as an environment variable.
set -eo pipefail

INI_FILE="${INI_FILE:-chatmail.ini}"

if [ ! -f "$INI_FILE" ]; then
    echo "Error: file $INI_FILE not found." >&2
    exit 1
fi

# Build the new file in a temp location; it is swapped in at the end.
TMP_FILE="$(mktemp)"
|
||||
|
||||
# Convert a size like "50M" or "1g" (units K/M/G/T, powers of 1024) to
# bytes on stdout.  Plain integers pass through unchanged.  Prints an
# error and returns 1 on any other format.
convert_to_bytes() {
    local raw="$1"

    # Plain byte count: nothing to convert.
    if [[ "$raw" =~ ^[0-9]+$ ]]; then
        echo "$raw"
        return 0
    fi

    if [[ "$raw" =~ ^([0-9]+)([KkMmGgTt])$ ]]; then
        local amount="${BASH_REMATCH[1]}"
        local suffix="${BASH_REMATCH[2]}"
        local factor=1
        case "$suffix" in
            [Kk]) factor=1024 ;;
            [Mm]) factor=$((1024 ** 2)) ;;
            [Gg]) factor=$((1024 ** 3)) ;;
            [Tt]) factor=$((1024 ** 4)) ;;
        esac
        echo $((amount * factor))
        return 0
    fi

    echo "Error: incorrect size format: $raw." >&2
    return 1
}
|
||||
|
||||
# Append "key = value" to the destination file.  For max_message_size the
# value is converted to bytes, and a "## <human> = <bytes> bytes" comment
# is maintained (updated in place if present, appended otherwise).
process_specific_params() {
    local key=$1
    local value=$2
    local destination_file=$3

    if [[ "$key" == "max_message_size" ]]; then
        converted=$(convert_to_bytes "$value") || exit 1
        if grep -q -e "## .* = .* bytes" "$destination_file"; then
            # BUG FIX: the original sed lacked -i, so it only printed the
            # whole file to stdout and never updated the comment in place.
            sed -i "s|## .* = .* bytes|## $value = $converted bytes|g" "$destination_file"
        else
            echo "## $value = $converted bytes" >> "$destination_file"
        fi
        echo "$key = $converted" >> "$destination_file"
    else
        echo "$key = $value" >> "$destination_file"
    fi
}
|
||||
|
||||
# Copy $INI_FILE line by line into $TMP_FILE, substituting the value of
# any "key = value" line whose upper-cased key is set in the environment.
while IFS= read -r line; do
    # Comments and blank lines pass through untouched.
    if [[ "$line" =~ ^[[:space:]]*#.* || "$line" =~ ^[[:space:]]*$ ]]; then
        echo "$line" >> "$TMP_FILE"
        continue
    fi

    if [[ "$line" =~ ^([a-z0-9_]+)[[:space:]]*=[[:space:]]*(.*)$ ]]; then
        key="${BASH_REMATCH[1]}"
        # NOTE(review): current_value is captured but never used below.
        current_value="${BASH_REMATCH[2]}"
        # Override convention: ini key "max_message_size" -> MAX_MESSAGE_SIZE.
        env_var_name=$(echo "$key" | tr 'a-z' 'A-Z')
        # Indirect expansion: value of the variable named by $env_var_name.
        env_value="${!env_var_name}"

        if [[ -n "$env_value" ]]; then
            process_specific_params "$key" "$env_value" "$TMP_FILE"
        else
            echo "$line" >> "$TMP_FILE"
        fi
    else
        # Lines not matching "key = value" (e.g. [section]) pass through.
        echo "$line" >> "$TMP_FILE"
    fi
done < "$INI_FILE"
|
||||
|
||||
# Give the replacement file the original's mode and ownership, then
# swap it into place.
read -r PERMS OWNER GROUP <<< "$(stat -c '%a %u %g' "$INI_FILE")"

chmod "$PERMS" "$TMP_FILE"
chown "$OWNER:$GROUP" "$TMP_FILE"

mv "$TMP_FILE" "$INI_FILE"
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user