mirror of
https://github.com/chatmail/relay.git
synced 2026-05-10 16:04:37 +00:00
Compare commits
403 Commits
missytake/
...
hpk/fixver
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2a676c7e35 | ||
|
|
ba3d86c9c7 | ||
|
|
b53fd912d6 | ||
|
|
c819ee20ad | ||
|
|
7138fc7f55 | ||
|
|
d14f384de3 | ||
|
|
155c1221b8 | ||
|
|
3393d071e5 | ||
|
|
52c73658f9 | ||
|
|
b022a61955 | ||
|
|
2e383a8e94 | ||
|
|
9908a4c88c | ||
|
|
c04f4a4b44 | ||
|
|
6d0ce061bc | ||
|
|
a48c525455 | ||
|
|
2786b60658 | ||
|
|
fe46b573a6 | ||
|
|
674e496a53 | ||
|
|
86e5708709 | ||
|
|
04ac2cf700 | ||
|
|
e2ec0cf2c5 | ||
|
|
ee9d54f7d6 | ||
|
|
1604321d5b | ||
|
|
4ab04fa6c4 | ||
|
|
8b6829b906 | ||
|
|
cf2cb57cca | ||
|
|
693c3f8555 | ||
|
|
371efdfafb | ||
|
|
23765a5ed8 | ||
|
|
0adeefbdd7 | ||
|
|
624838eedd | ||
|
|
1abdc407af | ||
|
|
ff541b81ea | ||
|
|
ed9b4092a8 | ||
|
|
1b8ad3ca12 | ||
|
|
f85d304e65 | ||
|
|
4d1856d8f1 | ||
|
|
ae2ab52aa9 | ||
|
|
d0c396538b | ||
|
|
78a4e28408 | ||
|
|
2432d4f498 | ||
|
|
31301abb42 | ||
|
|
6b4edd8502 | ||
|
|
9c467ab3e8 | ||
|
|
774350778b | ||
|
|
06d53503e5 | ||
|
|
b128935940 | ||
|
|
2e38c61ca2 | ||
|
|
9dd8ce8ce1 | ||
|
|
0ae3f94ecc | ||
|
|
4481a12369 | ||
|
|
a47016e9f2 | ||
|
|
4e6ba7378d | ||
|
|
e428c646d1 | ||
|
|
dbd5cd16f5 | ||
|
|
e21f2a0fa2 | ||
|
|
8ca0909fa5 | ||
|
|
2c99cc84aa | ||
|
|
73309778c2 | ||
|
|
50ecc2b315 | ||
|
|
7b5b180b4b | ||
|
|
193624e522 | ||
|
|
437287fadc | ||
|
|
0ad679997a | ||
|
|
38cc1c7cd6 | ||
|
|
7a6ed8340e | ||
|
|
2ce9e5fe78 | ||
|
|
cf96be2cbb | ||
|
|
36eb63faa1 | ||
|
|
91df11015e | ||
|
|
d4f8a29243 | ||
|
|
0144fc3ea8 | ||
|
|
e7ce6679b9 | ||
|
|
d1adf52f89 | ||
|
|
56d0e2ca27 | ||
|
|
2613558db6 | ||
|
|
6843fcb1a0 | ||
|
|
ff54ad88d8 | ||
|
|
cce2b27ae7 | ||
|
|
87022e3681 | ||
|
|
06560dd071 | ||
|
|
1b0337a5f7 | ||
|
|
dfcaf415b1 | ||
|
|
c0718325ef | ||
|
|
7d72b0e592 | ||
|
|
8f1e23d98e | ||
|
|
56aaf2649b | ||
|
|
2660b4d24c | ||
|
|
ea60ecfb57 | ||
|
|
2a3a224cc2 | ||
|
|
e42139e97b | ||
|
|
65b660c413 | ||
|
|
dd2beb226a | ||
|
|
9c7508cc33 | ||
|
|
ab3492d9a1 | ||
|
|
032faf0a94 | ||
|
|
c45fe03652 | ||
|
|
08bf4c234b | ||
|
|
2d0ccdb4a3 | ||
|
|
3abba6f2fa | ||
|
|
f9aaeb0f42 | ||
|
|
e0c44bf04f | ||
|
|
8ff53d12cb | ||
|
|
0aa0324c81 | ||
|
|
bfcfc9b090 | ||
|
|
e101c36ab4 | ||
|
|
be7aa21039 | ||
|
|
4906b82e44 | ||
|
|
5d49b4c0fd | ||
|
|
56c8f9faae | ||
|
|
203a7da3f4 | ||
|
|
a1667ca54d | ||
|
|
6401bbb32c | ||
|
|
325cc7a7b4 | ||
|
|
c2acbad802 | ||
|
|
0e7ab96dc8 | ||
|
|
d1f9523836 | ||
|
|
bcf2fdb5d0 | ||
|
|
77a6f49c9b | ||
|
|
99630e4d1b | ||
|
|
2f8199a7c6 | ||
|
|
4eeead2826 | ||
|
|
0d890274fd | ||
|
|
7191329a9f | ||
|
|
1ae4c8451a | ||
|
|
f04a624e19 | ||
|
|
24e3f33acd | ||
|
|
610843a44a | ||
|
|
966754a346 | ||
|
|
87153667ed | ||
|
|
abe0cb5d08 | ||
|
|
8c8c37c822 | ||
|
|
e7bed4d2a1 | ||
|
|
df21076e9b | ||
|
|
70da217442 | ||
|
|
40fd62c562 | ||
|
|
d76b33def1 | ||
|
|
bab3de9768 | ||
|
|
49c66116bf | ||
|
|
9bf99cc8a9 | ||
|
|
1188aed061 | ||
|
|
e15b8ebf11 | ||
|
|
c84ddf69e8 | ||
|
|
96fc3d9ff6 | ||
|
|
4b5e8feb96 | ||
|
|
c98853570b | ||
|
|
bad356503e | ||
|
|
dba48e88d1 | ||
|
|
3ae8834cbe | ||
|
|
81391f4066 | ||
|
|
55cfd00505 | ||
|
|
b000213c68 | ||
|
|
51d16b6bb8 | ||
|
|
2beba8c455 | ||
|
|
33c67d22fa | ||
|
|
166bf68915 | ||
|
|
abb70a6b14 | ||
|
|
96108bbaba | ||
|
|
8f68672e31 | ||
|
|
9e6e3af534 | ||
|
|
fa5a6a64b3 | ||
|
|
6b7c002e24 | ||
|
|
4b2f98788d | ||
|
|
13faa42abd | ||
|
|
7c12136991 | ||
|
|
3637bba5dc | ||
|
|
e2b157bd96 | ||
|
|
83abb3a3e1 | ||
|
|
2e3e3101b6 | ||
|
|
213d68ed02 | ||
|
|
68cc6676ef | ||
|
|
14ca95d25a | ||
|
|
3524b055db | ||
|
|
7b16f1330d | ||
|
|
7a907b138c | ||
|
|
0ff0159a89 | ||
|
|
81d2bf89c7 | ||
|
|
514a911529 | ||
|
|
fc7240a1ad | ||
|
|
bdcccd858c | ||
|
|
af30d2b55d | ||
|
|
5664b97db4 | ||
|
|
81364bd523 | ||
|
|
3c3e54fceb | ||
|
|
ae96b752a3 | ||
|
|
33b69fac95 | ||
|
|
165dc10f59 | ||
|
|
3df3c031d4 | ||
|
|
5515dc4c4b | ||
|
|
50b986a265 | ||
|
|
f24bc99c6f | ||
|
|
a0ebb2bdbc | ||
|
|
132bdcb5e5 | ||
|
|
7d593841bb | ||
|
|
83e7caeaf8 | ||
|
|
1cff4a94f1 | ||
|
|
ded9dd470d | ||
|
|
b94ad729fd | ||
|
|
b60267f37f | ||
|
|
a0aa2912dd | ||
|
|
76108c1c03 | ||
|
|
61b8dc4637 | ||
|
|
d42f579291 | ||
|
|
dd3cf4d449 | ||
|
|
7361cc9350 | ||
|
|
00f199816d | ||
|
|
8d7e1dad0e | ||
|
|
c0da7bb3bf | ||
|
|
863ded6480 | ||
|
|
d75321b355 | ||
|
|
9148b16d81 | ||
|
|
fa9aa5b015 | ||
|
|
0155f32df6 | ||
|
|
9ddd5d8b2b | ||
|
|
4cfe228a1f | ||
|
|
741a20450c | ||
|
|
b7fadcd4be | ||
|
|
7db26f33d9 | ||
|
|
2b90f7db37 | ||
|
|
e37dd5153a | ||
|
|
f21e4ff55b | ||
|
|
21258a267a | ||
|
|
e7ddf6dc32 | ||
|
|
e3c77a5b37 | ||
|
|
8256080ad1 | ||
|
|
248b225665 | ||
|
|
79591adca4 | ||
|
|
185757cf40 | ||
|
|
87a3adec03 | ||
|
|
4f5719f590 | ||
|
|
9787b63cbb | ||
|
|
6f600fa329 | ||
|
|
20b6e0c528 | ||
|
|
262e98f0ba | ||
|
|
d720b8107d | ||
|
|
d7f50183ea | ||
|
|
248603ab0a | ||
|
|
123531f1eb | ||
|
|
1170adc1d4 | ||
|
|
a6f7ff3652 | ||
|
|
d39076f0d6 | ||
|
|
65c0bf13f2 | ||
|
|
0ed7c360a9 | ||
|
|
af272545dd | ||
|
|
7725a73cf5 | ||
|
|
e65311c0df | ||
|
|
d091b865c7 | ||
|
|
6e28cf9ca1 | ||
|
|
9b6dfa9cdc | ||
|
|
44ab006dca | ||
|
|
c56805211f | ||
|
|
05ec64bf4a | ||
|
|
290e80e795 | ||
|
|
56fab1b071 | ||
|
|
00ab53800e | ||
|
|
fc65072edb | ||
|
|
7bf2dfd62e | ||
|
|
b801838b69 | ||
|
|
abd50e20ed | ||
|
|
d6fb38750a | ||
|
|
3b73457de3 | ||
|
|
ba06a4ff70 | ||
|
|
7fdaffe829 | ||
|
|
73831c74d9 | ||
|
|
d8cbe9d6af | ||
|
|
180ddb8168 | ||
|
|
a1eeea4632 | ||
|
|
a49aa0e655 | ||
|
|
7e81495b51 | ||
|
|
6fde062613 | ||
|
|
84e0376762 | ||
|
|
d690c22c06 | ||
|
|
5410c1bebc | ||
|
|
915bd39dd5 | ||
|
|
2de8b155c2 | ||
|
|
c975aa3bd1 | ||
|
|
6b73f6933a | ||
|
|
3ce350de9e | ||
|
|
1e05974970 | ||
|
|
577c04d537 | ||
|
|
d880937d44 | ||
|
|
46d2334e9c | ||
|
|
0ba94dc613 | ||
|
|
d379feea4f | ||
|
|
e82abee1b9 | ||
|
|
94060ff254 | ||
|
|
1b5cbfbc3d | ||
|
|
f1dcecaa8f | ||
|
|
650338925a | ||
|
|
44f653ccca | ||
|
|
6c686da937 | ||
|
|
387532cfca | ||
|
|
68904f8f61 | ||
|
|
740fe8b146 | ||
|
|
162dc85635 | ||
|
|
b699be3ac8 | ||
|
|
b4122beec4 | ||
|
|
1596b2517c | ||
|
|
1f5b2e947c | ||
|
|
8a59d94105 | ||
|
|
96a1dbac08 | ||
|
|
5215e1dc2b | ||
|
|
624a33a61e | ||
|
|
6bc751213f | ||
|
|
4b721bfcd4 | ||
|
|
4a6aa446cd | ||
|
|
e0140bbad5 | ||
|
|
6cede707ac | ||
|
|
b27937a16d | ||
|
|
30b6df20a9 | ||
|
|
6c27eaa506 | ||
|
|
0c28310861 | ||
|
|
0125dda6d7 | ||
|
|
fe38fcbeba | ||
|
|
b4af6df55c | ||
|
|
15244f6462 | ||
|
|
23655df08a | ||
|
|
b925f3b5ab | ||
|
|
823bc90eb1 | ||
|
|
ed93678c9d | ||
|
|
2b4e18d16f | ||
|
|
09ff56e5b9 | ||
|
|
b35e84e479 | ||
|
|
0638bea363 | ||
|
|
ab9ec98bcc | ||
|
|
b9a4471ee4 | ||
|
|
5f29c53232 | ||
|
|
1d4aa3d205 | ||
|
|
a78c903521 | ||
|
|
a0a1dd65a6 | ||
|
|
046552061e | ||
|
|
1fba4a3cdf | ||
|
|
44ff6da5d2 | ||
|
|
71160b8f65 | ||
|
|
9f74d0a608 | ||
|
|
c9078d7c92 | ||
|
|
aa4259477f | ||
|
|
21f9885ffe | ||
|
|
f9e885c442 | ||
|
|
b45be700a8 | ||
|
|
9c381e1fbf | ||
|
|
3cc9bc3ceb | ||
|
|
2a89be8209 | ||
|
|
c848b61346 | ||
|
|
49787044ff | ||
|
|
04ae0b86fb | ||
|
|
b0434dc927 | ||
|
|
7578c5f1d3 | ||
|
|
5ba99dc782 | ||
|
|
6d898d5431 | ||
|
|
fc3fb93432 | ||
|
|
c4f0146e16 | ||
|
|
194030a456 | ||
|
|
ce240083c4 | ||
|
|
0722876603 | ||
|
|
724020ec2a | ||
|
|
b01348d313 | ||
|
|
46e31bbce3 | ||
|
|
a4f4627a75 | ||
|
|
8d34e036ec | ||
|
|
e004a5e2f6 | ||
|
|
acf6e862d0 | ||
|
|
31faf2c78e | ||
|
|
f8c28d8b9f | ||
|
|
f69a2355f6 | ||
|
|
388c01105c | ||
|
|
f8996e1d7d | ||
|
|
6b3d5025d9 | ||
|
|
ed271189d2 | ||
|
|
65f8a9a652 | ||
|
|
6c5b9fde1f | ||
|
|
258436442f | ||
|
|
05a32efa50 | ||
|
|
1142d06fdb | ||
|
|
35fe189be7 | ||
|
|
a78e8e10d2 | ||
|
|
9af37ccfbf | ||
|
|
803f3e6181 | ||
|
|
f188aef11e | ||
|
|
76d7e60018 | ||
|
|
fe749159e4 | ||
|
|
3c3532a292 | ||
|
|
710ca0070f | ||
|
|
4038fefefd | ||
|
|
cdcdc0b724 | ||
|
|
2313093b55 | ||
|
|
3f2ec54725 | ||
|
|
e928a33f95 | ||
|
|
2780f53d3b | ||
|
|
c3f1bdca52 | ||
|
|
f4e371676b | ||
|
|
8ec6e6e985 | ||
|
|
f4fc1a3f93 | ||
|
|
42bfb9f22f | ||
|
|
1a35cdc7a9 | ||
|
|
2daac76574 | ||
|
|
5633582d31 | ||
|
|
667a987dfc | ||
|
|
49907c78a3 | ||
|
|
5cfdb0698f | ||
|
|
7e6f8ddfba | ||
|
|
4d915f9800 | ||
|
|
9e6ba1a164 |
33
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
33
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Report something that isn't working.
|
||||
title: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
<!--
|
||||
Please fill out as much of this form as you can (leaving out stuff that is not applicable is ok).
|
||||
-->
|
||||
|
||||
- Server OS (Operating System) - preferably Debian 12:
|
||||
- On which OS you run cmdeploy:
|
||||
- chatmail/relay version: `git rev-parse HEAD`
|
||||
|
||||
## Expected behavior
|
||||
|
||||
*What did you try to achieve?*
|
||||
|
||||
## Actual behavior
|
||||
|
||||
*What happened instead?*
|
||||
|
||||
### Steps to reproduce the problem:
|
||||
|
||||
1.
|
||||
2.
|
||||
|
||||
### Screenshots
|
||||
|
||||
### Logs
|
||||
|
||||
1
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
1
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1 @@
|
||||
blank_issues_enabled: true
|
||||
7
.github/workflows/ci.yaml
vendored
7
.github/workflows/ci.yaml
vendored
@@ -10,7 +10,12 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
# Checkout pull request HEAD commit instead of merge commit
|
||||
# Otherwise `test_deployed_state` will be unhappy.
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- name: download filtermail
|
||||
run: curl -L https://github.com/chatmail/filtermail/releases/download/v0.5.2/filtermail-x86_64 -o /usr/local/bin/filtermail && chmod +x /usr/local/bin/filtermail
|
||||
- name: run chatmaild tests
|
||||
working-directory: chatmaild
|
||||
run: pipx run tox
|
||||
|
||||
53
.github/workflows/docs-preview.yaml
vendored
Normal file
53
.github/workflows/docs-preview.yaml
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
name: documentation preview
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- 'doc/**'
|
||||
- 'scripts/build-docs.sh'
|
||||
- '.github/workflows/docs-preview.yaml'
|
||||
|
||||
jobs:
|
||||
scripts:
|
||||
name: build
|
||||
runs-on: ubuntu-latest
|
||||
environment:
|
||||
name: 'staging.chatmail.at/doc/relay/'
|
||||
url: https://staging.chatmail.at/doc/relay/${{ steps.prepare.outputs.prid }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: initenv
|
||||
run: scripts/initenv.sh
|
||||
|
||||
- name: append venv/bin to PATH
|
||||
run: echo `pwd`/venv/bin >>$GITHUB_PATH
|
||||
|
||||
- name: build documentation
|
||||
working-directory: doc
|
||||
run: sphinx-build source build
|
||||
|
||||
- name: build documentation second time (for TOC)
|
||||
working-directory: doc
|
||||
run: sphinx-build source build
|
||||
|
||||
- name: Get Pullrequest ID
|
||||
id: prepare
|
||||
run: |
|
||||
export PULLREQUEST_ID=$(echo "${{ github.ref }}" | cut -d "/" -f3)
|
||||
echo "prid=$PULLREQUEST_ID" >> $GITHUB_OUTPUT
|
||||
if [ $(expr length "${{ secrets.USERNAME }}") -gt "1" ]; then echo "uploadtoserver=true" >> $GITHUB_OUTPUT; fi
|
||||
- run: |
|
||||
echo "baseurl: /${{ steps.prepare.outputs.prid }}" >> _config.yml
|
||||
|
||||
- name: Upload preview
|
||||
run: |
|
||||
mkdir -p "$HOME/.ssh"
|
||||
echo "${{ secrets.CHATMAIL_STAGING_SSHKEY }}" > "$HOME/.ssh/key"
|
||||
chmod 600 "$HOME/.ssh/key"
|
||||
rsync -rILvh -e "ssh -i $HOME/.ssh/key -o StrictHostKeyChecking=no" $GITHUB_WORKSPACE/doc/build/ "${{ secrets.USERNAME }}@chatmail.at:/var/www/html/staging.chatmail.at/doc/relay/${{ steps.prepare.outputs.prid }}/"
|
||||
|
||||
- name: check links
|
||||
working-directory: doc
|
||||
run: sphinx-build --builder linkcheck source build
|
||||
|
||||
47
.github/workflows/docs.yaml
vendored
Normal file
47
.github/workflows/docs.yaml
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
name: build and upload documentation
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- 'missytake/docs-ci'
|
||||
paths:
|
||||
- 'doc/**'
|
||||
- 'scripts/build-docs.sh'
|
||||
- '.github/workflows/docs.yaml'
|
||||
|
||||
jobs:
|
||||
scripts:
|
||||
name: build
|
||||
runs-on: ubuntu-latest
|
||||
environment:
|
||||
name: 'chatmail.at/doc/relay/'
|
||||
url: https://chatmail.at/doc/relay/
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: initenv
|
||||
run: scripts/initenv.sh
|
||||
|
||||
- name: append venv/bin to PATH
|
||||
run: echo `pwd`/venv/bin >>$GITHUB_PATH
|
||||
|
||||
- name: build documentation
|
||||
working-directory: doc
|
||||
run: sphinx-build source build
|
||||
|
||||
- name: build documentation second time (for TOC)
|
||||
working-directory: doc
|
||||
run: sphinx-build source build
|
||||
|
||||
- name: check links
|
||||
working-directory: doc
|
||||
run: sphinx-build --builder linkcheck source build
|
||||
|
||||
- name: upload documentation
|
||||
run: |
|
||||
mkdir -p "$HOME/.ssh"
|
||||
echo "${{ secrets.CHATMAIL_STAGING_SSHKEY }}" > "$HOME/.ssh/key"
|
||||
chmod 600 "$HOME/.ssh/key"
|
||||
rsync -rILvh -e "ssh -i $HOME/.ssh/key -o StrictHostKeyChecking=no" $GITHUB_WORKSPACE/doc/build/ "${{ secrets.USERNAME }}@chatmail.at:/var/www/html/chatmail.at/doc/relay/"
|
||||
|
||||
54
.github/workflows/test-and-deploy-ipv4only.yaml
vendored
54
.github/workflows/test-and-deploy-ipv4only.yaml
vendored
@@ -16,13 +16,11 @@ jobs:
|
||||
name: deploy on staging-ipv4.testrun.org, and run tests
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
concurrency:
|
||||
group: ci-ipv4-${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: ${{ !contains(github.ref, '$GITHUB_REF') }}
|
||||
environment:
|
||||
name: staging-ipv4.testrun.org
|
||||
url: https://staging-ipv4.testrun.org/
|
||||
concurrency: staging-ipv4.testrun.org
|
||||
steps:
|
||||
- uses: jsok/serialize-workflow-action@v1
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: prepare SSH
|
||||
@@ -38,8 +36,8 @@ jobs:
|
||||
if [ -f dkimkeys-ipv4/dkimkeys/opendkim.private ]; then rsync -avz -e "ssh -o StrictHostKeyChecking=accept-new" dkimkeys-ipv4 root@ns.testrun.org:/tmp/ || true; fi
|
||||
if [ "$(ls -A acme-ipv4/acme/certs)" ]; then rsync -avz -e "ssh -o StrictHostKeyChecking=accept-new" acme-ipv4 root@ns.testrun.org:/tmp/ || true; fi
|
||||
# make sure CAA record isn't set
|
||||
scp .github/workflows/staging-ipv4.testrun.org-default.zone root@ns.testrun.org:/etc/nsd/staging-ipv4.testrun.org.zone
|
||||
ssh -o StrictHostKeyChecking=accept-new root@ns.testrun.org sed -i '/CAA/d' /etc/nsd/staging-ipv4.testrun.org.zone
|
||||
scp -o StrictHostKeyChecking=accept-new .github/workflows/staging-ipv4.testrun.org-default.zone root@ns.testrun.org:/etc/nsd/staging-ipv4.testrun.org.zone
|
||||
ssh root@ns.testrun.org sed -i '/CAA/d' /etc/nsd/staging-ipv4.testrun.org.zone
|
||||
ssh root@ns.testrun.org nsd-checkzone staging-ipv4.testrun.org /etc/nsd/staging-ipv4.testrun.org.zone
|
||||
ssh root@ns.testrun.org systemctl reload nsd
|
||||
|
||||
@@ -49,7 +47,7 @@ jobs:
|
||||
-H "Authorization: Bearer ${{ secrets.HETZNER_API_TOKEN }}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"image":"debian-12"}' \
|
||||
"https://api.hetzner.cloud/v1/servers/${{ secrets.STAGING_SERVER_ID }}/actions/rebuild"
|
||||
"https://api.hetzner.cloud/v1/servers/${{ secrets.STAGING_IPV4_SERVER_ID }}/actions/rebuild"
|
||||
|
||||
- run: scripts/initenv.sh
|
||||
|
||||
@@ -63,38 +61,44 @@ jobs:
|
||||
while ! ssh -o ConnectTimeout=180 -o StrictHostKeyChecking=accept-new -v root@staging-ipv4.testrun.org id -u ; do sleep 1 ; done
|
||||
ssh -o StrictHostKeyChecking=accept-new -v root@staging-ipv4.testrun.org id -u
|
||||
# download acme & dkim state from ns.testrun.org
|
||||
rsync -e "ssh -o StrictHostKeyChecking=accept-new" -avz root@ns.testrun.org:/tmp/acme-ipv4 acme-restore || true
|
||||
rsync -avz root@ns.testrun.org:/tmp/dkimkeys-ipv4 dkimkeys-restore || true
|
||||
rsync -e "ssh -o StrictHostKeyChecking=accept-new" -avz root@ns.testrun.org:/tmp/acme-ipv4/acme acme-restore || true
|
||||
rsync -avz root@ns.testrun.org:/tmp/dkimkeys-ipv4/dkimkeys dkimkeys-restore || true
|
||||
# restore acme & dkim state to staging2.testrun.org
|
||||
rsync -avz acme-restore/acme-ipv4/acme root@staging-ipv4.testrun.org:/var/lib/ || true
|
||||
rsync -avz dkimkeys-restore/dkimkeys-ipv4/dkimkeys root@staging-ipv4.testrun.org:/etc/ || true
|
||||
rsync -avz acme-restore/acme root@staging-ipv4.testrun.org:/var/lib/ || true
|
||||
rsync -avz dkimkeys-restore/dkimkeys root@staging-ipv4.testrun.org:/etc/ || true
|
||||
ssh -o StrictHostKeyChecking=accept-new -v root@staging-ipv4.testrun.org chown root:root -R /var/lib/acme || true
|
||||
|
||||
- name: run formatting checks
|
||||
run: cmdeploy fmt -v
|
||||
|
||||
- name: run deploy-chatmail offline tests
|
||||
run: pytest --pyargs cmdeploy
|
||||
|
||||
- run: |
|
||||
cmdeploy init staging-ipv4.testrun.org
|
||||
sed -i 's#disable_ipv6 = False#disable_ipv6 = True#' chatmail.ini
|
||||
- name: setup dependencies
|
||||
run: |
|
||||
ssh root@staging-ipv4.testrun.org apt update
|
||||
ssh root@staging-ipv4.testrun.org apt install -y git python3.11-venv python3-dev gcc
|
||||
ssh root@staging-ipv4.testrun.org git clone https://github.com/chatmail/relay
|
||||
ssh root@staging-ipv4.testrun.org "cd relay && git checkout " ${{ github.head_ref }}
|
||||
ssh root@staging-ipv4.testrun.org "cd relay && scripts/initenv.sh"
|
||||
|
||||
- run: cmdeploy run
|
||||
- name: initialize config
|
||||
run: |
|
||||
ssh root@staging-ipv4.testrun.org "cd relay && scripts/cmdeploy init staging-ipv4.testrun.org"
|
||||
ssh root@staging-ipv4.testrun.org "sed -i 's#disable_ipv6 = False#disable_ipv6 = True#' relay/chatmail.ini"
|
||||
ssh root@staging-ipv4.testrun.org "sed -i 's/#\s*mtail_address/mtail_address/' relay/chatmail.ini"
|
||||
|
||||
- run: ssh root@staging-ipv4.testrun.org "cd relay && scripts/cmdeploy run --verbose --skip-dns-check --ssh-host localhost"
|
||||
|
||||
- name: set DNS entries
|
||||
run: |
|
||||
ssh -o StrictHostKeyChecking=accept-new -v root@staging-ipv4.testrun.org chown opendkim:opendkim -R /etc/dkimkeys
|
||||
cmdeploy dns --zonefile staging-generated.zone
|
||||
cat staging-generated.zone >> .github/workflows/staging-ipv4.testrun.org-default.zone
|
||||
ssh root@staging-ipv4.testrun.org "cd relay && scripts/cmdeploy dns --zonefile staging-generated.zone --ssh-host localhost"
|
||||
ssh root@staging-ipv4.testrun.org cat relay/staging-generated.zone >> .github/workflows/staging-ipv4.testrun.org-default.zone
|
||||
cat .github/workflows/staging-ipv4.testrun.org-default.zone
|
||||
scp .github/workflows/staging-ipv4.testrun.org-default.zone root@ns.testrun.org:/etc/nsd/staging-ipv4.testrun.org.zone
|
||||
ssh root@ns.testrun.org nsd-checkzone staging-ipv4.testrun.org /etc/nsd/staging-ipv4.testrun.org.zone
|
||||
ssh root@ns.testrun.org systemctl reload nsd
|
||||
|
||||
- name: cmdeploy test
|
||||
run: CHATMAIL_DOMAIN2=nine.testrun.org cmdeploy test --slow
|
||||
run: ssh root@staging-ipv4.testrun.org "cd relay && CHATMAIL_DOMAIN2=ci-chatmail.testrun.org scripts/cmdeploy test --slow --ssh-host localhost"
|
||||
|
||||
- name: cmdeploy dns
|
||||
run: cmdeploy dns -v
|
||||
run: ssh root@staging-ipv4.testrun.org "cd relay && scripts/cmdeploy dns -v --ssh-host localhost"
|
||||
|
||||
|
||||
23
.github/workflows/test-and-deploy.yaml
vendored
23
.github/workflows/test-and-deploy.yaml
vendored
@@ -16,13 +16,11 @@ jobs:
|
||||
name: deploy on staging2.testrun.org, and run tests
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
concurrency:
|
||||
group: ci-${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: ${{ !contains(github.ref, '$GITHUB_REF') }}
|
||||
environment:
|
||||
name: staging2.testrun.org
|
||||
url: https://staging2.testrun.org/
|
||||
concurrency: staging2.testrun.org
|
||||
steps:
|
||||
- uses: jsok/serialize-workflow-action@v1
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: prepare SSH
|
||||
@@ -70,19 +68,20 @@ jobs:
|
||||
rsync -avz dkimkeys-restore/dkimkeys root@staging2.testrun.org:/etc/ || true
|
||||
ssh -o StrictHostKeyChecking=accept-new -v root@staging2.testrun.org chown root:root -R /var/lib/acme || true
|
||||
|
||||
- name: run formatting checks
|
||||
run: cmdeploy fmt -v
|
||||
- name: add hpk42 key to staging server
|
||||
run: ssh root@staging2.testrun.org 'curl -s https://github.com/hpk42.keys >> .ssh/authorized_keys'
|
||||
|
||||
- name: run deploy-chatmail offline tests
|
||||
run: pytest --pyargs cmdeploy
|
||||
|
||||
- run: cmdeploy init staging2.testrun.org
|
||||
- run: |
|
||||
cmdeploy init staging2.testrun.org
|
||||
sed -i 's/#\s*mtail_address/mtail_address/' chatmail.ini
|
||||
|
||||
- run: cmdeploy run --verbose
|
||||
- run: cmdeploy run --verbose --skip-dns-check
|
||||
|
||||
- name: set DNS entries
|
||||
run: |
|
||||
ssh -o StrictHostKeyChecking=accept-new root@staging2.testrun.org chown opendkim:opendkim -R /etc/dkimkeys
|
||||
cmdeploy dns --zonefile staging-generated.zone --verbose
|
||||
cat staging-generated.zone >> .github/workflows/staging.testrun.org-default.zone
|
||||
cat .github/workflows/staging.testrun.org-default.zone
|
||||
@@ -91,7 +90,7 @@ jobs:
|
||||
ssh root@ns.testrun.org systemctl reload nsd
|
||||
|
||||
- name: cmdeploy test
|
||||
run: CHATMAIL_DOMAIN2=nine.testrun.org cmdeploy test --slow
|
||||
run: CHATMAIL_DOMAIN2=ci-chatmail.testrun.org cmdeploy test --slow
|
||||
|
||||
- name: cmdeploy dns
|
||||
run: cmdeploy dns -v
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -4,7 +4,8 @@ __pycache__/
|
||||
*$py.class
|
||||
*.swp
|
||||
*qr-*.png
|
||||
chatmail.ini
|
||||
chatmail*.ini
|
||||
lxconfigs/
|
||||
|
||||
|
||||
# C extensions
|
||||
|
||||
408
CHANGELOG.md
408
CHANGELOG.md
@@ -1,43 +1,293 @@
|
||||
# Changelog for chatmail deployment
|
||||
|
||||
## untagged
|
||||
## 1.9.0 2025-12-18
|
||||
|
||||
### Documentation
|
||||
|
||||
- Add RELEASE.md and CONTRIBUTING.md
|
||||
- README update, mention Chatmail Cookbook project
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- Expire messages also from IMAP subfolders
|
||||
- Use absolute path instead of relative path in message expiration script
|
||||
- Restart Postfix and Dovecot automatically on failure
|
||||
- acmetool: Use a fixed name and `reconcile` instead of `want`
|
||||
|
||||
### Features
|
||||
|
||||
- Report DKIM error code in SMTP response
|
||||
- Remove development notice from the web pages
|
||||
|
||||
### Miscellaneous Tasks
|
||||
|
||||
- Update the heading in the CHANGELOG.md
|
||||
- Setup git-cliff
|
||||
- Run tests against ci-chatmail.testrun.org instead of nine.testrun.org
|
||||
- Cleanup remaining echobot code, remove echobot user from deployment and passthrough recipients
|
||||
|
||||
## 1.8.0 2025-12-12
|
||||
|
||||
- Add imap_compress option to chatmail.ini
|
||||
([#760](https://github.com/chatmail/relay/pull/760))
|
||||
|
||||
- Remove echobot from relays
|
||||
([#753](https://github.com/chatmail/relay/pull/753))
|
||||
|
||||
- Fix `cmdeploy webdev`
|
||||
([#743](https://github.com/chatmail/relay/pull/743))
|
||||
|
||||
- Add robots.txt to exclude all web crawlers
|
||||
([#732](https://github.com/chatmail/relay/pull/732))
|
||||
|
||||
- acmetool: accept new Let's Encrypt ToS: https://letsencrypt.org/documents/LE-SA-v1.6-August-18-2025.pdf
|
||||
([#729](https://github.com/chatmail/relay/pull/729))
|
||||
|
||||
- Organized cmdeploy into install, configure, and activate stages
|
||||
([#695](https://github.com/chatmail/relay/pull/695))
|
||||
|
||||
- docs: move readme.md docs to sphinx documentation rendered at https://chatmail.at/doc/relay
|
||||
([#711](https://github.com/chatmail/relay/pull/711))
|
||||
|
||||
- acmetool: replace cronjob with a systemd timer
|
||||
([#719](https://github.com/chatmail/relay/pull/719))
|
||||
|
||||
- remove xstore@testrun.org from default passthrough recipients
|
||||
([#722](https://github.com/chatmail/relay/pull/722))
|
||||
|
||||
- don't deploy the website if there are merge conflicts in the www folder
|
||||
([#714](https://github.com/chatmail/relay/pull/714))
|
||||
|
||||
- acmetool: use ECDSA keys instead of RSA
|
||||
([#689](https://github.com/chatmail/relay/pull/689))
|
||||
|
||||
- Require TLS 1.2 for outgoing SMTP connections
|
||||
([#685](https://github.com/chatmail/relay/pull/685), [#730](https://github.com/chatmail/relay/pull/730))
|
||||
|
||||
- require STARTTLS for incoming port 25 connections
|
||||
([#684](https://github.com/chatmail/relay/pull/684), [#730](https://github.com/chatmail/relay/pull/730))
|
||||
|
||||
- filtermail: run CPU-intensive handle_DATA in a thread pool executor
|
||||
([#676](https://github.com/chatmail/relay/pull/676))
|
||||
|
||||
- don't use the complicated logging module in filtermail to exclude a potential source of errors.
|
||||
([#674](https://github.com/chatmail/relay/pull/674))
|
||||
|
||||
- Specify nginx.conf to only handle `mail_domain`, www, and mta-sts domains
|
||||
([#636](https://github.com/chatmail/relay/pull/636))
|
||||
|
||||
- Setup TURN server
|
||||
([#621](https://github.com/chatmail/relay/pull/621))
|
||||
|
||||
- cmdeploy: make --ssh-host work with localhost
|
||||
([#659](https://github.com/chatmail/relay/pull/659))
|
||||
|
||||
- Update iroh-relay to 0.35.0
|
||||
([#650](https://github.com/chatmail/relay/pull/650))
|
||||
|
||||
- filtermail: accept mails from Protonmail
|
||||
([#616](https://github.com/chatmail/relay/pull/616))
|
||||
|
||||
- Ignore all RCPT TO: parameters
|
||||
([#651](https://github.com/chatmail/relay/pull/651))
|
||||
|
||||
- Increase opendkim DNS Timeout from 5 to 60 seconds
|
||||
([#672](https://github.com/chatmail/relay/pull/672))
|
||||
|
||||
- Add config parameter for Let's Encrypt ACME email
|
||||
([#663](https://github.com/chatmail/relay/pull/663))
|
||||
|
||||
- Use max username length in newemail.py, not min
|
||||
([#648](https://github.com/chatmail/relay/pull/648))
|
||||
|
||||
- Add startup for `fcgiwrap.service` because sometimes it did not start automatically.
|
||||
([#657](https://github.com/chatmail/relay/pull/657))
|
||||
|
||||
- Add `cmdeploy init --force` command for recreating chatmail.ini
|
||||
([#656](https://github.com/chatmail/relay/pull/656))
|
||||
|
||||
- Increase maxproc for reinjecting ports from 10 to 100
|
||||
([#646](https://github.com/chatmail/relay/pull/646))
|
||||
|
||||
- Allow ports 143 and 993 to be used by `dovecot` process
|
||||
([#639](https://github.com/chatmail/relay/pull/639))
|
||||
|
||||
- Add `--skip-dns-check` argument to `cmdeploy run` command, which disables DNS record checking before installation.
|
||||
([#661](https://github.com/chatmail/relay/pull/661))
|
||||
|
||||
- Rework expiry of message files and mailboxes in Python
|
||||
to only do a single iteration over sometimes millions of messages
|
||||
instead of doing "find" commands that iterate 9 times over the messages.
|
||||
Provide an "fsreport" CLI for more fine grained analysis of message files.
|
||||
([#637](https://github.com/chatmail/relay/pull/637))
|
||||
|
||||
|
||||
## 1.7.0 2025-09-11
|
||||
|
||||
- Make www upload path configurable
|
||||
([#618](https://github.com/chatmail/relay/pull/618))
|
||||
|
||||
- Check whether GCC is installed in initenv.sh
|
||||
([#608](https://github.com/chatmail/relay/pull/608))
|
||||
|
||||
- Expire push notification tokens after 90 days
|
||||
([#583](https://github.com/chatmail/relay/pull/583))
|
||||
|
||||
- Use official `mtail` binary instead of `mtail` package
|
||||
([#581](https://github.com/chatmail/relay/pull/581))
|
||||
|
||||
- dovecot: install from download.delta.chat instead of openSUSE Build Service
|
||||
([#590](https://github.com/chatmail/relay/pull/590))
|
||||
|
||||
- Reconfigure Dovecot imap-login service to high-performance mode
|
||||
([#578](https://github.com/chatmail/relay/pull/578))
|
||||
|
||||
- Set timezone to improve dovecot performance
|
||||
([#584](https://github.com/chatmail/relay/pull/584))
|
||||
|
||||
- Increase nginx connection limits
|
||||
([#576](https://github.com/chatmail/relay/pull/576))
|
||||
|
||||
- If `dns-utils` needs to be installed before cmdeploy run, apt update to make sure it works
|
||||
([#560](https://github.com/chatmail/relay/pull/560))
|
||||
|
||||
- filtermail: respect config message size limit
|
||||
([#572](https://github.com/chatmail/relay/pull/572))
|
||||
|
||||
- Don't deploy if one of the ports used for chatmail relay services is occupied by an unexpected process
|
||||
([#568](https://github.com/chatmail/relay/pull/568))
|
||||
|
||||
- Add config value after how many days large files are deleted
|
||||
([#555](https://github.com/chatmail/relay/pull/555))
|
||||
|
||||
- cmdeploy: push relay version to /etc/chatmail-version
|
||||
([#573](https://github.com/chatmail/relay/pull/573))
|
||||
|
||||
- filtermail: allow partial body length in OpenPGP payloads
|
||||
([#570](https://github.com/chatmail/relay/pull/570))
|
||||
|
||||
- chatmaild: allow echobot to receive unencrypted messages by default
|
||||
([#556](https://github.com/chatmail/relay/pull/556))
|
||||
|
||||
|
||||
## 1.6.0 2025-04-11
|
||||
|
||||
- Handle Port-25 connect errors more gracefully (common with VPNs)
|
||||
([#552](https://github.com/chatmail/relay/pull/552))
|
||||
|
||||
- Avoid "acmetool not found" during initial run
|
||||
([#550](https://github.com/chatmail/relay/pull/550))
|
||||
|
||||
- Fix timezone handling such that client/servers do not need to use
|
||||
same timezone.
|
||||
([#553](https://github.com/chatmail/relay/pull/553))
|
||||
|
||||
- Enforce end-to-end encryption for incoming messages.
|
||||
New user address mailboxes now get a `enforceE2EEincoming` file
|
||||
which prohibits incoming cleartext messages from other domains.
|
||||
An outside MTA trying to submit a cleartext message will
|
||||
get a "523 Encryption Needed" response, see RFC5248.
|
||||
If the file does not exist (as it the case for all existing accounts)
|
||||
incoming cleartext messages are accepted.
|
||||
([#538](https://github.com/chatmail/server/pull/538))
|
||||
|
||||
- Enforce end-to-end encryption between local addresses
|
||||
([#535](https://github.com/chatmail/server/pull/535))
|
||||
|
||||
- unbound: check that port 53 is not occupied by a different process
|
||||
([#537](https://github.com/chatmail/server/pull/537))
|
||||
|
||||
- unbound: before unbound is there, use 9.9.9.9 for resolving
|
||||
([#518](https://github.com/chatmail/relay/pull/518))
|
||||
|
||||
- Limit the bind for the HTTPS server on 8443 to 127.0.0.1
|
||||
([#522](https://github.com/chatmail/server/pull/522))
|
||||
([#532](https://github.com/chatmail/server/pull/532))
|
||||
|
||||
- Send SNI when connecting to outside servers
|
||||
([#524](https://github.com/chatmail/server/pull/524))
|
||||
|
||||
- postfix master.cf: use 127.0.0.1 for consistency
|
||||
([#544](https://github.com/chatmail/relay/pull/544))
|
||||
|
||||
- Pass through `original_content` instead of `content` in filtermail
|
||||
([#509](https://github.com/chatmail/server/pull/509))
|
||||
|
||||
- Document TLS requirements in the readme
|
||||
([#514](https://github.com/chatmail/server/pull/514))
|
||||
|
||||
- Remove cleanup service from submission ports
|
||||
([#512](https://github.com/chatmail/server/pull/512))
|
||||
|
||||
- cmdeploy dovecot: delete big messages after 7 days
|
||||
([#504](https://github.com/chatmail/server/pull/504))
|
||||
|
||||
- mtail: fix getting logs from STDIN
|
||||
([#502](https://github.com/chatmail/server/pull/502))
|
||||
|
||||
- filtermail: don't require exactly 2 lines after openPGP payload
|
||||
([#497](https://github.com/chatmail/server/pull/497))
|
||||
|
||||
- cmdeploy dns: offer alternative DKIM record format for some web interfaces
|
||||
([#470](https://github.com/deltachat/chatmail/pull/470))
|
||||
([#470](https://github.com/chatmail/server/pull/470))
|
||||
|
||||
- journald: remove old logs from disk
|
||||
([#490](https://github.com/chatmail/server/pull/490))
|
||||
|
||||
- opendkim: restart once every day to mend RAM leaks
|
||||
([#498](https://github.com/chatmail/server/pull/498)
|
||||
|
||||
- migration guide: let opendkim own the DKIM keys directory
|
||||
([#468](https://github.com/deltachat/chatmail/pull/468))
|
||||
([#468](https://github.com/chatmail/server/pull/468))
|
||||
|
||||
- improve secure-join message detection
|
||||
([#473](https://github.com/chatmail/server/pull/473))
|
||||
|
||||
- use old crypt lib in python < 3.11
|
||||
([#483](https://github.com/chatmail/server/pull/483))
|
||||
|
||||
- chatmaild: set umask to 0700 for doveauth + metadata
|
||||
([#490](https://github.com/chatmail/server/pull/492))
|
||||
|
||||
- remove MTA-STS daemon
|
||||
([#488](https://github.com/chatmail/server/pull/488))
|
||||
|
||||
- replace `Subject` with `[...]` for all outgoing mails.
|
||||
([#481](https://github.com/chatmail/server/pull/481))
|
||||
|
||||
- opendkim: use su instead of sudo
|
||||
([#491](https://github.com/chatmail/server/pull/491))
|
||||
|
||||
## 1.5.0 2024-12-20
|
||||
|
||||
- cmdeploy dns: always show recommended DNS records
|
||||
([#463](https://github.com/deltachat/chatmail/pull/463))
|
||||
([#463](https://github.com/chatmail/server/pull/463))
|
||||
|
||||
- add `--all` to `cmdeploy dns`
|
||||
([#462](https://github.com/deltachat/chatmail/pull/462))
|
||||
([#462](https://github.com/chatmail/server/pull/462))
|
||||
|
||||
- fix `_mta-sts` TXT DNS record
|
||||
([#461](https://github.com/deltachat/chatmail/pull/461)
|
||||
([#461](https://github.com/chatmail/server/pull/461)
|
||||
|
||||
- deploy `iroh-relay` and also update "realtime relay services" in privacy policy.
|
||||
([#434](https://github.com/deltachat/chatmail/pull/434))
|
||||
([#451](https://github.com/deltachat/chatmail/pull/451))
|
||||
([#434](https://github.com/chatmail/server/pull/434))
|
||||
([#451](https://github.com/chatmail/server/pull/451))
|
||||
|
||||
- add guide to migrate chatmail to a new server
|
||||
([#429](https://github.com/deltachat/chatmail/pull/429))
|
||||
([#429](https://github.com/chatmail/server/pull/429))
|
||||
|
||||
- disable anvil authentication penalty
|
||||
([#414](https://github.com/deltachat/chatmail/pull/444)
|
||||
([#414](https://github.com/chatmail/server/pull/444)
|
||||
|
||||
- increase `request_queue_size` for UNIX sockets to 1000.
|
||||
([#437](https://github.com/deltachat/chatmail/pull/437))
|
||||
([#437](https://github.com/chatmail/server/pull/437))
|
||||
|
||||
- add argument to `cmdeploy run` for specifying
|
||||
a different SSH host than `mail_domain`
|
||||
([#439](https://github.com/deltachat/chatmail/pull/439))
|
||||
([#439](https://github.com/chatmail/server/pull/439))
|
||||
|
||||
- query autoritative nameserver to bypass DNS cache
|
||||
([#424](https://github.com/deltachat/chatmail/pull/424))
|
||||
([#424](https://github.com/chatmail/server/pull/424))
|
||||
|
||||
- add mtail support (new optional `mtail_address` ini value)
|
||||
This defines the address on which [`mtail`](https://google.github.io/mtail/)
|
||||
@@ -47,195 +297,195 @@
|
||||
and assign an IP address from this network to the host.
|
||||
If you do not plan to collect metrics,
|
||||
keep this setting unset.
|
||||
([#388](https://github.com/deltachat/chatmail/pull/388))
|
||||
([#388](https://github.com/chatmail/server/pull/388))
|
||||
|
||||
- fix checking for required DNS records
|
||||
([#412](https://github.com/deltachat/chatmail/pull/412))
|
||||
([#412](https://github.com/chatmail/server/pull/412))
|
||||
|
||||
- add support for specifying whole domains for recipient passthrough list
|
||||
([#408](https://github.com/deltachat/chatmail/pull/408))
|
||||
([#408](https://github.com/chatmail/server/pull/408))
|
||||
|
||||
- add a paragraph about "account deletion" to info page
|
||||
([#405](https://github.com/deltachat/chatmail/pull/405))
|
||||
([#405](https://github.com/chatmail/server/pull/405))
|
||||
|
||||
- avoid nginx listening on ipv6 if v6 is disabled
|
||||
([#402](https://github.com/deltachat/chatmail/pull/402))
|
||||
([#402](https://github.com/chatmail/server/pull/402))
|
||||
|
||||
- refactor ssh-based execution to allow organizing remote functions in
|
||||
modules.
|
||||
([#396](https://github.com/deltachat/chatmail/pull/396))
|
||||
([#396](https://github.com/chatmail/server/pull/396))
|
||||
|
||||
- trigger "apt upgrade" during "cmdeploy run"
|
||||
([#398](https://github.com/deltachat/chatmail/pull/398))
|
||||
([#398](https://github.com/chatmail/server/pull/398))
|
||||
|
||||
- drop hispanilandia passthrough address
|
||||
([#401](https://github.com/deltachat/chatmail/pull/401))
|
||||
([#401](https://github.com/chatmail/server/pull/401))
|
||||
|
||||
- set CAA record flags to 0
|
||||
|
||||
- add IMAP capabilities instead of overwriting them
|
||||
([#413](https://github.com/deltachat/chatmail/pull/413))
|
||||
([#413](https://github.com/chatmail/server/pull/413))
|
||||
|
||||
- fix OpenPGP payload check
|
||||
([#435](https://github.com/deltachat/chatmail/pull/435))
|
||||
([#435](https://github.com/chatmail/server/pull/435))
|
||||
|
||||
- fix Dovecot quota_max_mail_size to use max_message_size config value
|
||||
([#438](https://github.com/deltachat/chatmail/pull/438))
|
||||
([#438](https://github.com/chatmail/server/pull/438))
|
||||
|
||||
|
||||
## 1.4.1 2024-07-31
|
||||
|
||||
- fix metadata dictproxy which would confuse transactions
|
||||
resulting in missed notifications and other issues.
|
||||
([#393](https://github.com/deltachat/chatmail/pull/393))
|
||||
([#394](https://github.com/deltachat/chatmail/pull/394))
|
||||
([#393](https://github.com/chatmail/server/pull/393))
|
||||
([#394](https://github.com/chatmail/server/pull/394))
|
||||
|
||||
- add optional "imap_rawlog" config option. If true,
|
||||
.in/.out files are created in user home dirs
|
||||
containing the imap protocol messages.
|
||||
([#389](https://github.com/deltachat/chatmail/pull/389))
|
||||
([#389](https://github.com/chatmail/server/pull/389))
|
||||
|
||||
## 1.4.0 2024-07-28
|
||||
|
||||
- Add `disable_ipv6` config option to chatmail.ini.
|
||||
Required if the server doesn't have IPv6 connectivity.
|
||||
([#312](https://github.com/deltachat/chatmail/pull/312))
|
||||
([#312](https://github.com/chatmail/server/pull/312))
|
||||
|
||||
- allow current K9/Thunderbird-mail releases to send encrypted messages
|
||||
outside by accepting their localized "encrypted subject" strings.
|
||||
([#370](https://github.com/deltachat/chatmail/pull/370))
|
||||
([#370](https://github.com/chatmail/server/pull/370))
|
||||
|
||||
- Migrate and remove sqlite database in favor of password/lastlogin tracking
|
||||
in a user's maildir.
|
||||
([#379](https://github.com/deltachat/chatmail/pull/379))
|
||||
([#379](https://github.com/chatmail/server/pull/379))
|
||||
|
||||
- Require pyinfra V3 installed on the client side,
|
||||
run `./scripts/initenv.sh` to upgrade locally.
|
||||
([#378](https://github.com/deltachat/chatmail/pull/378))
|
||||
([#378](https://github.com/chatmail/server/pull/378))
|
||||
|
||||
- don't hardcode "/home/vmail" paths but rather set them
|
||||
once in the config object and use it everywhere else,
|
||||
thereby also improving testability.
|
||||
([#351](https://github.com/deltachat/chatmail/pull/351))
|
||||
([#351](https://github.com/chatmail/server/pull/351))
|
||||
temporarily introduced obligatory "passdb_path" and "mailboxes_dir"
|
||||
settings but they were removed/obsoleted in
|
||||
([#380](https://github.com/deltachat/chatmail/pull/380))
|
||||
([#380](https://github.com/chatmail/server/pull/380))
|
||||
|
||||
- BREAKING: new required chatmail.ini value 'delete_inactive_users_after = 100'
|
||||
which removes users from database and mails after 100 days without any login.
|
||||
([#350](https://github.com/deltachat/chatmail/pull/350))
|
||||
([#350](https://github.com/chatmail/server/pull/350))
|
||||
|
||||
- Refine DNS checking to distinguish between "required" and "recommended" settings
|
||||
([#372](https://github.com/deltachat/chatmail/pull/372))
|
||||
([#372](https://github.com/chatmail/server/pull/372))
|
||||
|
||||
- reload nginx in the acmetool cronjob
|
||||
([#360](https://github.com/deltachat/chatmail/pull/360))
|
||||
([#360](https://github.com/chatmail/server/pull/360))
|
||||
|
||||
- remove checking of reverse-DNS PTR records. Chatmail-servers don't
|
||||
depend on it and even in the wider e-mail system it's not common anymore.
|
||||
If it's an issue, a chatmail operator can still care to properly set reverse DNS.
|
||||
([#348](https://github.com/deltachat/chatmail/pull/348))
|
||||
([#348](https://github.com/chatmail/server/pull/348))
|
||||
|
||||
- Make DNS-checking faster and more interactive, run it fully during "cmdeploy run",
|
||||
also introducing a generic mechanism for rapid remote ssh-based python function execution.
|
||||
([#346](https://github.com/deltachat/chatmail/pull/346))
|
||||
([#346](https://github.com/chatmail/server/pull/346))
|
||||
|
||||
- Don't fix file ownership of /home/vmail
|
||||
([#345](https://github.com/deltachat/chatmail/pull/345))
|
||||
([#345](https://github.com/chatmail/server/pull/345))
|
||||
|
||||
- Support iterating over all users with doveadm commands
|
||||
([#344](https://github.com/deltachat/chatmail/pull/344))
|
||||
([#344](https://github.com/chatmail/server/pull/344))
|
||||
|
||||
- Test and fix for attempts to create inadmissible accounts
|
||||
([#333](https://github.com/deltachat/chatmail/pull/321))
|
||||
([#333](https://github.com/chatmail/server/pull/321))
|
||||
|
||||
- check that OpenPGP has only PKESK, SKESK and SEIPD packets
|
||||
([#323](https://github.com/deltachat/chatmail/pull/323),
|
||||
[#324](https://github.com/deltachat/chatmail/pull/324))
|
||||
([#323](https://github.com/chatmail/server/pull/323),
|
||||
[#324](https://github.com/chatmail/server/pull/324))
|
||||
|
||||
- improve filtermail checks for encrypted messages and drop support for unencrypted MDNs
|
||||
([#320](https://github.com/deltachat/chatmail/pull/320))
|
||||
([#320](https://github.com/chatmail/server/pull/320))
|
||||
|
||||
- replace `bash` with `/bin/sh`
|
||||
([#334](https://github.com/deltachat/chatmail/pull/334))
|
||||
([#334](https://github.com/chatmail/server/pull/334))
|
||||
|
||||
- Increase number of logged in IMAP sessions to 50000
|
||||
([#335](https://github.com/deltachat/chatmail/pull/335))
|
||||
([#335](https://github.com/chatmail/server/pull/335))
|
||||
|
||||
- filtermail: do not allow ASCII armor without actual payload
|
||||
([#325](https://github.com/deltachat/chatmail/pull/325))
|
||||
([#325](https://github.com/chatmail/server/pull/325))
|
||||
|
||||
- Remove sieve to enable hardlink deduplication in LMTP
|
||||
([#343](https://github.com/deltachat/chatmail/pull/343))
|
||||
([#343](https://github.com/chatmail/server/pull/343))
|
||||
|
||||
- dovecot: enable gzip compression on disk
|
||||
([#341](https://github.com/deltachat/chatmail/pull/341))
|
||||
([#341](https://github.com/chatmail/server/pull/341))
|
||||
|
||||
- DKIM-sign Content-Type and oversign all signed headers
|
||||
([#296](https://github.com/deltachat/chatmail/pull/296))
|
||||
([#296](https://github.com/chatmail/server/pull/296))
|
||||
|
||||
- Add nonci_accounts metric
|
||||
([#347](https://github.com/deltachat/chatmail/pull/347))
|
||||
([#347](https://github.com/chatmail/server/pull/347))
|
||||
|
||||
- doveauth: log when a new account is created
|
||||
([#349](https://github.com/deltachat/chatmail/pull/349))
|
||||
([#349](https://github.com/chatmail/server/pull/349))
|
||||
|
||||
- Multiplex HTTPS, IMAP and SMTP on port 443
|
||||
([#357](https://github.com/deltachat/chatmail/pull/357))
|
||||
([#357](https://github.com/chatmail/server/pull/357))
|
||||
|
||||
## 1.3.0 - 2024-06-06
|
||||
|
||||
- don't check necessary DNS records on cmdeploy init anymore
|
||||
([#316](https://github.com/deltachat/chatmail/pull/316))
|
||||
([#316](https://github.com/chatmail/server/pull/316))
|
||||
|
||||
- ensure cron and acl are installed
|
||||
([#293](https://github.com/deltachat/chatmail/pull/293),
|
||||
[#310](https://github.com/deltachat/chatmail/pull/310))
|
||||
([#293](https://github.com/chatmail/server/pull/293),
|
||||
[#310](https://github.com/chatmail/server/pull/310))
|
||||
|
||||
- change default for delete_mails_after from 40 to 20 days
|
||||
([#300](https://github.com/deltachat/chatmail/pull/300))
|
||||
([#300](https://github.com/chatmail/server/pull/300))
|
||||
|
||||
- save journald logs only to memory and save nginx logs to journald instead of file
|
||||
([#299](https://github.com/deltachat/chatmail/pull/299))
|
||||
([#299](https://github.com/chatmail/server/pull/299))
|
||||
|
||||
- fix writing of multiple obs repositories in `/etc/apt/sources.list`
|
||||
([#290](https://github.com/deltachat/chatmail/pull/290))
|
||||
([#290](https://github.com/chatmail/server/pull/290))
|
||||
|
||||
- metadata: add support for `/shared/vendor/deltachat/irohrelay`
|
||||
([#284](https://github.com/deltachat/chatmail/pull/284))
|
||||
([#284](https://github.com/chatmail/server/pull/284))
|
||||
|
||||
- Emit "XCHATMAIL" capability from IMAP server
|
||||
([#278](https://github.com/deltachat/chatmail/pull/278))
|
||||
([#278](https://github.com/chatmail/server/pull/278))
|
||||
|
||||
- Move echobot `into /var/lib/echobot`
|
||||
([#281](https://github.com/deltachat/chatmail/pull/281))
|
||||
([#281](https://github.com/chatmail/server/pull/281))
|
||||
|
||||
- Accept Let's Encrypt's new Terms of Services
|
||||
([#275](https://github.com/deltachat/chatmail/pull/276))
|
||||
([#275](https://github.com/chatmail/server/pull/276))
|
||||
|
||||
- Reload Dovecot and Postfix when TLS certificate updates
|
||||
([#271](https://github.com/deltachat/chatmail/pull/271))
|
||||
([#271](https://github.com/chatmail/server/pull/271))
|
||||
|
||||
- Use forked version of dovecot without hardcoded delays
|
||||
([#270](https://github.com/deltachat/chatmail/pull/270))
|
||||
([#270](https://github.com/chatmail/server/pull/270))
|
||||
|
||||
## 1.2.0 - 2024-04-04
|
||||
|
||||
- Install dig on the server to resolve DNS records
|
||||
([#267](https://github.com/deltachat/chatmail/pull/267))
|
||||
([#267](https://github.com/chatmail/server/pull/267))
|
||||
|
||||
- preserve notification order and exponentially backoff with
|
||||
retries for tokens where we didn't get a successful return
|
||||
([#265](https://github.com/deltachat/chatmail/pull/263))
|
||||
([#265](https://github.com/chatmail/server/pull/263))
|
||||
|
||||
- Run chatmail-metadata and doveauth as vmail
|
||||
([#261](https://github.com/deltachat/chatmail/pull/261))
|
||||
([#261](https://github.com/chatmail/server/pull/261))
|
||||
|
||||
- Apply systemd restrictions to echobot
|
||||
([#259](https://github.com/deltachat/chatmail/pull/259))
|
||||
([#259](https://github.com/chatmail/server/pull/259))
|
||||
|
||||
- re-enable running the CI in pull requests, but not concurrently
|
||||
([#258](https://github.com/deltachat/chatmail/pull/258))
|
||||
([#258](https://github.com/chatmail/server/pull/258))
|
||||
|
||||
|
||||
## 1.1.0 - 2024-03-28
|
||||
@@ -243,27 +493,27 @@
|
||||
### The changelog starts to record changes from March 15th, 2024
|
||||
|
||||
- Move systemd unit templates to cmdeploy package
|
||||
([#255](https://github.com/deltachat/chatmail/pull/255))
|
||||
([#255](https://github.com/chatmail/server/pull/255))
|
||||
|
||||
- Persist push tokens and support multiple devices per address
|
||||
([#254](https://github.com/deltachat/chatmail/pull/254))
|
||||
([#254](https://github.com/chatmail/server/pull/254))
|
||||
|
||||
- Avoid warning for regular doveauth protocol's hello message.
|
||||
([#250](https://github.com/deltachat/chatmail/pull/250))
|
||||
([#250](https://github.com/chatmail/server/pull/250))
|
||||
|
||||
- Fix various tests to pass again with "cmdeploy test".
|
||||
([#245](https://github.com/deltachat/chatmail/pull/245),
|
||||
[#242](https://github.com/deltachat/chatmail/pull/242)
|
||||
([#245](https://github.com/chatmail/server/pull/245),
|
||||
[#242](https://github.com/chatmail/server/pull/242)
|
||||
|
||||
- Ensure lets-encrypt certificates are reloaded after renewal
|
||||
([#244](https://github.com/deltachat/chatmail/pull/244))
|
||||
([#244](https://github.com/chatmail/server/pull/244))
|
||||
|
||||
- Persist tokens to avoid iOS users losing push-notifications when the
|
||||
chatmail metadata service is restarted (happens regularly during deploys)
|
||||
([#238](https://github.com/deltachat/chatmail/pull/239)
|
||||
([#238](https://github.com/chatmail/server/pull/239)
|
||||
|
||||
- Fix failing sieve-script compile errors on incoming messages
|
||||
([#237](https://github.com/deltachat/chatmail/pull/239)
|
||||
([#237](https://github.com/chatmail/server/pull/239)
|
||||
|
||||
- Fix quota reporting after expunging of old mails
|
||||
([#233](https://github.com/deltachat/chatmail/pull/239)
|
||||
([#233](https://github.com/chatmail/server/pull/239)
|
||||
|
||||
7
CONTRIBUTING.md
Normal file
7
CONTRIBUTING.md
Normal file
@@ -0,0 +1,7 @@
|
||||
# Contributing to the chatmail relay
|
||||
|
||||
Commit messages follow the [Conventional Commits] notation.
|
||||
We use [git-cliff] to generate the changelog from commit messages before the release.
|
||||
|
||||
[Conventional Commits]: https://www.conventionalcommits.org/
|
||||
[git-cliff]: https://git-cliff.org/
|
||||
442
README.md
442
README.md
@@ -1,440 +1,20 @@
|
||||
|
||||
<img width="800px" src="www/src/collage-top.png"/>
|
||||
# Chatmail relays for end-to-end encrypted email
|
||||
|
||||
# Chatmail services optimized for Delta Chat apps
|
||||
Chatmail relay servers are interoperable Mail Transport Agents (MTAs) designed for:
|
||||
|
||||
This repository helps to setup a ready-to-use chatmail server
|
||||
comprised of a minimal setup of the battle-tested
|
||||
[postfix smtp](https://www.postfix.org) and [dovecot imap](https://www.dovecot.org) services.
|
||||
- **Zero State:** no private data or metadata collected, messages are auto-deleted, low disk usage
|
||||
|
||||
The setup is designed and optimized for providing chatmail accounts
|
||||
for use by [Delta Chat apps](https://delta.chat).
|
||||
- **Instant/Realtime:** sub-second message delivery, realtime P2P
|
||||
streaming, privacy-preserving Push Notifications for Apple, Google, and Huawei;
|
||||
|
||||
Chatmail accounts are automatically created by a first login,
|
||||
after which the initially specified password is required for using them.
|
||||
- **Security Enforcement**: only strict TLS, DKIM and OpenPGP with minimized metadata accepted
|
||||
|
||||
## Deploying your own chatmail server
|
||||
- **Reliable Federation and Decentralization:** No spam or IP reputation checks, federating
|
||||
depends on established IETF standards and protocols.
|
||||
|
||||
To deploy chatmail on your own server, you must have set-up ssh authentication and need to use an ed25519 key, due to an [upstream bug in paramiko](https://github.com/paramiko/paramiko/issues/2191). You also need to add your private key to the local ssh-agent, because you can't type in your password during deployment.
|
||||
This repository contains everything needed to setup a ready-to-use chatmail relay on an ssh-reachable host.
|
||||
For getting started and more information please refer to the web version of this repositories' documentation at
|
||||
|
||||
We use `chat.example.org` as the chatmail domain in the following steps.
|
||||
Please substitute it with your own domain.
|
||||
[https://chatmail.at/doc/relay](https://chatmail.at/doc/relay)
|
||||
|
||||
1. Install the `cmdeploy` command in a virtualenv
|
||||
|
||||
```
|
||||
git clone https://github.com/deltachat/chatmail
|
||||
cd chatmail
|
||||
scripts/initenv.sh
|
||||
```
|
||||
|
||||
2. Create chatmail configuration file `chatmail.ini`:
|
||||
|
||||
```
|
||||
scripts/cmdeploy init chat.example.org # <-- use your domain
|
||||
```
|
||||
|
||||
3. Point your domain to the server's IP address,
|
||||
if you haven't done so already.
|
||||
Verify that SSH root login works:
|
||||
|
||||
```
|
||||
ssh root@chat.example.org # <-- use your domain
|
||||
```
|
||||
|
||||
4. Deploy to the remote chatmail server:
|
||||
|
||||
```
|
||||
scripts/cmdeploy run
|
||||
```
|
||||
This script will check that you have all necessary DNS records.
|
||||
If DNS records are missing, it will recommend
|
||||
which you should configure at your DNS provider
|
||||
(it can take some time until they are public).
|
||||
|
||||
### Other helpful commands:
|
||||
|
||||
To check the status of your remotely running chatmail service:
|
||||
|
||||
```
|
||||
scripts/cmdeploy status
|
||||
```
|
||||
|
||||
To display and check all recommended DNS records:
|
||||
|
||||
```
|
||||
scripts/cmdeploy dns
|
||||
```
|
||||
|
||||
To test whether your chatmail service is working correctly:
|
||||
|
||||
```
|
||||
scripts/cmdeploy test
|
||||
```
|
||||
|
||||
To measure the performance of your chatmail service:
|
||||
|
||||
```
|
||||
scripts/cmdeploy bench
|
||||
```
|
||||
|
||||
## Overview of this repository
|
||||
|
||||
This repository has four directories:
|
||||
|
||||
- [cmdeploy](https://github.com/deltachat/chatmail/tree/main/cmdeploy)
|
||||
is a collection of configuration files
|
||||
and a [pyinfra](https://pyinfra.com)-based deployment script.
|
||||
|
||||
- [chatmaild](https://github.com/deltachat/chatmail/tree/main/chatmaild)
|
||||
is a python package containing several small services
|
||||
which handle authentication,
|
||||
trigger push notifications on new messages,
|
||||
ensure that outbound mails are encrypted,
|
||||
delete inactive users,
|
||||
and some other minor things.
|
||||
chatmaild can also be installed as a stand-alone python package.
|
||||
|
||||
- [www](https://github.com/deltachat/chatmail/tree/main/www)
|
||||
contains the html, css, and markdown files
|
||||
which make up a chatmail server's web page.
|
||||
Edit them before deploying to make your chatmail server stand out.
|
||||
|
||||
- [scripts](https://github.com/deltachat/chatmail/tree/main/scripts)
|
||||
offers two convenience tools for beginners;
|
||||
`initenv.sh` installs the necessary dependencies to a local virtual environment,
|
||||
and the `scripts/cmdeploy` script enables you
|
||||
to run the `cmdeploy` command line tool in the local virtual environment.
|
||||
|
||||
### cmdeploy
|
||||
|
||||
The `cmdeploy/src/cmdeploy/cmdeploy.py` command line tool
|
||||
helps with setting up and managing the chatmail service.
|
||||
`cmdeploy init` creates the `chatmail.ini` config file.
|
||||
`cmdeploy run` uses a [pyinfra](https://pyinfra.com/)-based [script](`cmdeploy/src/cmdeploy/__init__.py`)
|
||||
to automatically install or upgrade all chatmail components on a server,
|
||||
according to the `chatmail.ini` config.
|
||||
|
||||
The components of chatmail are:
|
||||
|
||||
- [postfix smtp server](https://www.postfix.org) accepts sent messages (both from your users and from other servers)
|
||||
|
||||
- [dovecot imap server](https://www.dovecot.org) stores messages for your users until they download them
|
||||
|
||||
- [nginx](https://nginx.org/) shows the web page with your privacy policy and additional information
|
||||
|
||||
- [acmetool](https://hlandau.github.io/acmetool/) manages TLS certificates for dovecot, postfix, and nginx
|
||||
|
||||
- [opendkim](http://www.opendkim.org/) for signing messages with DKIM and rejecting inbound messages without DKIM
|
||||
|
||||
- [mtail](https://google.github.io/mtail/) for collecting anonymized metrics in case you have monitoring
|
||||
|
||||
- and the chatmaild services, explained in the next section:
|
||||
|
||||
### chatmaild
|
||||
|
||||
chatmaild offers several commands
|
||||
which differentiate a *chatmail* server from a classic mail server.
|
||||
If you deploy them with cmdeploy,
|
||||
they are run by systemd services in the background.
|
||||
A short overview:
|
||||
|
||||
- [`doveauth`](https://github.com/deltachat/chatmail/blob/main/chatmaild/src/chatmaild/doveauth.py) implements
|
||||
create-on-login account creation semantics and is used
|
||||
by Dovecot during login authentication and by Postfix
|
||||
which in turn uses [Dovecot SASL](https://doc.dovecot.org/configuration_manual/authentication/dict/#complete-example-for-authenticating-via-a-unix-socket)
|
||||
to authenticate users
|
||||
to send mails for them.
|
||||
|
||||
- [`filtermail`](https://github.com/deltachat/chatmail/blob/main/chatmaild/src/chatmaild/filtermail.py) prevents
|
||||
unencrypted e-mail from leaving the chatmail service
|
||||
and is integrated into postfix's outbound mail pipelines.
|
||||
|
||||
- [`chatmail-metadata`](https://github.com/deltachat/chatmail/blob/main/chatmaild/src/chatmaild/metadata.py) is contacted by a
|
||||
[dovecot lua script](https://github.com/deltachat/chatmail/blob/main/cmdeploy/src/cmdeploy/dovecot/push_notification.lua)
|
||||
to store user-specific server-side config.
|
||||
On new messages,
|
||||
it [passes the user's push notification token](https://github.com/deltachat/chatmail/blob/main/chatmaild/src/chatmaild/notifier.py)
|
||||
to [notifications.delta.chat](https://delta.chat/help#instant-delivery)
|
||||
so the push notifications on the user's phone can be triggered
|
||||
by Apple/Google.
|
||||
|
||||
- [`delete_inactive_users`](https://github.com/deltachat/chatmail/blob/main/chatmaild/src/chatmaild/delete_inactive_users.py)
|
||||
deletes users if they have not logged in for a very long time.
|
||||
The timeframe can be configured in `chatmail.ini`.
|
||||
|
||||
- [`lastlogin`](https://github.com/deltachat/chatmail/blob/main/chatmaild/src/chatmaild/lastlogin.py)
|
||||
is contacted by dovecot when a user logs in
|
||||
and stores the date of the login.
|
||||
|
||||
- [`echobot`](https://github.com/deltachat/chatmail/blob/main/chatmaild/src/chatmaild/echo.py)
|
||||
is a small bot for test purposes.
|
||||
It simply echoes back messages from users.
|
||||
|
||||
- [`chatmail-metrics`](https://github.com/deltachat/chatmail/blob/main/chatmaild/src/chatmaild/metrics.py)
|
||||
collects some metrics and displays them at `https://example.org/metrics`.
|
||||
|
||||
### Home page and getting started for users
|
||||
|
||||
`cmdeploy run` also creates default static Web pages and deploys them
|
||||
to a nginx web server with:
|
||||
|
||||
- a default `index.html` along with a QR code that users can click to
|
||||
create accounts on your chatmail provider,
|
||||
|
||||
- a default `info.html` that is linked from the home page,
|
||||
|
||||
- a default `policy.html` that is linked from the home page.
|
||||
|
||||
All `.html` files are generated
|
||||
by the according markdown `.md` file in the `www/src` directory.
|
||||
|
||||
|
||||
### Refining the web pages
|
||||
|
||||
|
||||
```
|
||||
scripts/cmdeploy webdev
|
||||
```
|
||||
|
||||
This starts a local live development cycle for chatmail Web pages:
|
||||
|
||||
- uses the `www/src/page-layout.html` file for producing static
|
||||
HTML pages from `www/src/*.md` files
|
||||
|
||||
- continuously builds the web presence reading files from `www/src` directory
|
||||
and generating html files and copying assets to the `www/build` directory.
|
||||
|
||||
- Starts a browser window automatically where you can "refresh" as needed.
|
||||
|
||||
|
||||
## Emergency Commands to disable automatic account creation
|
||||
|
||||
If you need to stop account creation,
|
||||
e.g. because some script is wildly creating accounts,
|
||||
login to the server with ssh and run:
|
||||
|
||||
```
|
||||
touch /etc/chatmail-nocreate
|
||||
```
|
||||
|
||||
While this file is present, account creation will be blocked.
|
||||
|
||||
### Ports
|
||||
|
||||
[Postfix](http://www.postfix.org/) listens on ports 25 (smtp) and 587 (submission) and 465 (submissions).
|
||||
[Dovecot](https://www.dovecot.org/) listens on ports 143 (imap) and 993 (imaps).
|
||||
[nginx](https://www.nginx.com/) listens on port 8443 (https-alt) and 443 (https).
|
||||
Port 443 multiplexes HTTPS, IMAP and SMTP using ALPN to redirect connections to ports 8443, 465 or 993.
|
||||
[acmetool](https://hlandau.github.io/acmetool/) listens on port 80 (http).
|
||||
|
||||
Delta Chat apps will, however, discover all ports and configurations
|
||||
automatically by reading the [autoconfig XML file](https://www.ietf.org/archive/id/draft-bucksch-autoconfig-00.html) from the chatmail service.
|
||||
|
||||
## Email authentication
|
||||
|
||||
chatmail servers rely on [DKIM](https://www.rfc-editor.org/rfc/rfc6376)
|
||||
to authenticate incoming emails.
|
||||
Incoming emails must have a valid DKIM signature with
|
||||
Signing Domain Identifier (SDID, `d=` parameter in the DKIM-Signature header)
|
||||
equal to the `From:` header domain.
|
||||
This property is checked by OpenDKIM screen policy script
|
||||
before validating the signatures.
|
||||
This corresponds to strict [DMARC](https://www.rfc-editor.org/rfc/rfc7489) alignment (`adkim=s`),
|
||||
but chatmail does not rely on DMARC and does not consult the sender policy published in DMARC records.
|
||||
Other legacy authentication mechanisms such as [iprev](https://www.rfc-editor.org/rfc/rfc8601#section-2.7.3)
|
||||
and [SPF](https://www.rfc-editor.org/rfc/rfc7208) are also not taken into account.
|
||||
If there is no valid DKIM signature on the incoming email,
|
||||
the sender receives a "5.7.1 No valid DKIM signature found" error.
|
||||
|
||||
Outgoing emails must be sent over authenticated connection
|
||||
with envelope MAIL FROM (return path) corresponding to the login.
|
||||
This is ensured by Postfix which maps login username
|
||||
to MAIL FROM with
|
||||
[`smtpd_sender_login_maps`](https://www.postfix.org/postconf.5.html#smtpd_sender_login_maps)
|
||||
and rejects incorrectly authenticated emails with [`reject_sender_login_mismatch`](reject_sender_login_mismatch) policy.
|
||||
`From:` header must correspond to envelope MAIL FROM,
|
||||
this is ensured by `filtermail` proxy.
|
||||
|
||||
## Migrating chatmail server to a new host
|
||||
|
||||
If you want to migrate chatmail from an old machine
|
||||
to a new machine,
|
||||
you can use these steps.
|
||||
They were tested with a linux laptop;
|
||||
you might need to adjust some of the steps to your environment.
|
||||
|
||||
Let's assume that your `mail_domain` is `mail.example.org`,
|
||||
all involved machines run Debian 12,
|
||||
your old server's IP address is `13.37.13.37`,
|
||||
and your new server's IP address is `13.12.23.42`.
|
||||
|
||||
During the guide, you might get a warning about changed SSH Host keys;
|
||||
in this case, just run `ssh-keygen -R "mail.example.org"` as recommended
|
||||
to make sure you can connect with SSH.
|
||||
|
||||
1. First, copy `/var/lib/acme` to the new server with
|
||||
`ssh root@13.37.13.37 tar c /var/lib/acme | ssh root@13.12.23.42 tar x -C /var/lib/`.
|
||||
This transfers your TLS certificate.
|
||||
|
||||
2. You should also copy `/etc/dkimkeys` to the new server with
|
||||
`ssh root@13.37.13.37 tar c /etc/dkimkeys | ssh root@13.12.23.42 tar x -C /etc/`
|
||||
so the DKIM DNS record stays correct.
|
||||
|
||||
3. On the new server, run `chown root: -R /var/lib/acme` and `chown opendkim: -R /etc/dkimkeys` to make sure the permissions are correct.
|
||||
|
||||
4. Run `cmdeploy run --disable-mail --ssh-host 13.12.23.42` to install chatmail on the new machine.
|
||||
postfix and dovecot are disabled for now,
|
||||
we will enable them later.
|
||||
|
||||
5. Now, point DNS to the new IP addresses.
|
||||
|
||||
You can already remove the old IP addresses from DNS.
|
||||
Existing Delta Chat users will still be able to connect
|
||||
to the old server, send and receive messages,
|
||||
but new users will fail to create new profiles
|
||||
with your chatmail server.
|
||||
|
||||
If other servers try to deliver messages to your new server they will fail,
|
||||
but normally email servers will retry delivering messages
|
||||
for at least a week, so messages will not be lost.
|
||||
|
||||
6. Now you can run `cmdeploy run --disable-mail --ssh-host 13.37.13.37` to disable your old server.
|
||||
|
||||
Now your users will notice the migration
|
||||
and will not be able to send or receive messages
|
||||
until the migration is completed.
|
||||
|
||||
7. After everything is stopped,
|
||||
you can copy the `/home/vmail/mail` directory to the new server.
|
||||
It includes all user data, messages, password hashes, etc.
|
||||
|
||||
Just run: `ssh root@13.37.13.37 tar c /home/vmail/mail | ssh root@13.12.23.42 tar x -C /home/vmail/`
|
||||
|
||||
After this, your new server has all the necessary files to start operating :)
|
||||
|
||||
8. To be sure the permissions are still fine,
|
||||
run `chown vmail: -R /home/vmail` on the new server.
|
||||
|
||||
9. Finally, you can run `cmdeploy run` to turn on chatmail on the new server.
|
||||
Your users can continue using the chatmail server,
|
||||
and messages which were sent after step 6. should arrive now.
|
||||
Voilà!
|
||||
|
||||
## Setting up a reverse proxy
|
||||
|
||||
A chatmail server does not depend on the client IP address
|
||||
for its operation, so it can be run behind a reverse proxy.
|
||||
This will not even affect incoming mail authentication
|
||||
as DKIM only checks the cryptographic signature
|
||||
of the message and does not use the IP address as the input.
|
||||
|
||||
For example, you may want to self-host your chatmail server
|
||||
and only use hosted VPS to provide a public IP address
|
||||
for client connections and incoming mail.
|
||||
You can connect chatmail server to VPS
|
||||
using a tunnel protocol
|
||||
such as [WireGuard](https://www.wireguard.com/)
|
||||
and setup a reverse proxy on a VPS
|
||||
to forward connections to the chatmail server
|
||||
over the tunnel.
|
||||
You can also setup multiple reverse proxies
|
||||
for your chatmail server in different networks
|
||||
to ensure your server is reachable even when
|
||||
one of the IPs becomes inaccessible due to
|
||||
hosting or routing problems.
|
||||
|
||||
Note that your server still needs
|
||||
to be able to make outgoing connections on port 25
|
||||
to send messages outside.
|
||||
|
||||
To setup a reverse proxy
|
||||
(or rather Destination NAT, DNAT)
|
||||
for your chatmail server,
|
||||
put the following configuration in `/etc/nftables.conf`:
|
||||
```
|
||||
#!/usr/sbin/nft -f
|
||||
|
||||
flush ruleset
|
||||
|
||||
define wan = eth0
|
||||
|
||||
# Which ports to proxy.
|
||||
#
|
||||
# Note that SSH is not proxied
|
||||
# so it is possible to log into the proxy server
|
||||
# and not the original one.
|
||||
define ports = { smtp, http, https, imap, imaps, submission, submissions }
|
||||
|
||||
# The host we want to proxy to.
|
||||
define ipv4_address = AAA.BBB.CCC.DDD
|
||||
define ipv6_address = [XXX::1]
|
||||
|
||||
table ip nat {
|
||||
chain prerouting {
|
||||
type nat hook prerouting priority dstnat; policy accept;
|
||||
iif $wan tcp dport $ports dnat to $ipv4_address
|
||||
}
|
||||
|
||||
chain postrouting {
|
||||
type nat hook postrouting priority 0;
|
||||
|
||||
oifname $wan masquerade
|
||||
}
|
||||
}
|
||||
|
||||
table ip6 nat {
|
||||
chain prerouting {
|
||||
type nat hook prerouting priority dstnat; policy accept;
|
||||
iif $wan tcp dport $ports dnat to $ipv6_address
|
||||
}
|
||||
|
||||
chain postrouting {
|
||||
type nat hook postrouting priority 0;
|
||||
|
||||
oifname $wan masquerade
|
||||
}
|
||||
}
|
||||
|
||||
table inet filter {
|
||||
chain input {
|
||||
type filter hook input priority filter; policy drop;
|
||||
|
||||
# Accept ICMP.
|
||||
# It is especially important to accept ICMPv6 ND messages,
|
||||
# otherwise IPv6 connectivity breaks.
|
||||
icmp type { echo-request } accept
|
||||
icmpv6 type { echo-request, nd-neighbor-solicit, nd-router-advert, nd-neighbor-advert } accept
|
||||
|
||||
# Allow incoming SSH connections.
|
||||
tcp dport { ssh } accept
|
||||
|
||||
ct state established accept
|
||||
}
|
||||
chain forward {
|
||||
type filter hook forward priority filter; policy drop;
|
||||
|
||||
ct state established accept
|
||||
ip daddr $ipv4_address counter accept
|
||||
ip6 daddr $ipv6_address counter accept
|
||||
}
|
||||
chain output {
|
||||
type filter hook output priority filter;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Run `systemctl enable nftables.service`
|
||||
to ensure configuration is reloaded when the proxy server reboots.
|
||||
|
||||
Uncomment in `/etc/sysctl.conf` the following two lines:
|
||||
|
||||
```
|
||||
net.ipv4.ip_forward=1
|
||||
net.ipv6.conf.all.forwarding=1
|
||||
```
|
||||
|
||||
Then reboot the server or do `sysctl -p` and `nft -f /etc/nftables.conf`.
|
||||
|
||||
Once proxy server is set up,
|
||||
you can add its IP address to the DNS.
|
||||
|
||||
15
RELEASE.md
Normal file
15
RELEASE.md
Normal file
@@ -0,0 +1,15 @@
|
||||
# Releasing a new version of chatmail relay
|
||||
|
||||
For example, to release version 1.9.0 of chatmail relay, do the following steps.
|
||||
|
||||
1. Update the changelog: `git cliff --unreleased --tag 1.9.0 --prepend CHANGELOG.md` or `git cliff -u -t 1.9.0 -p CHANGELOG.md`.
|
||||
|
||||
2. Open the changelog in the editor, edit it if required.
|
||||
|
||||
3. Commit the changes to the changelog with a commit message `chore(release): prepare for 1.9.0`.
|
||||
|
||||
3. Tag the release: `git tag --annotate 1.9.0`.
|
||||
|
||||
4. Push the release tag: `git push origin 1.9.0`.
|
||||
|
||||
5. Create a GitHub release: `gh release create 1.9.0`.
|
||||
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
|
||||
|
||||
[project]
|
||||
name = "chatmaild"
|
||||
version = "0.2"
|
||||
version = "0.3"
|
||||
dependencies = [
|
||||
"aiosmtpd",
|
||||
"iniconfig",
|
||||
@@ -12,7 +12,7 @@ dependencies = [
|
||||
"deltachat-rpc-client",
|
||||
"filelock",
|
||||
"requests",
|
||||
"crypt-r",
|
||||
"crypt-r >= 3.13.1 ; python_version >= '3.11'",
|
||||
]
|
||||
|
||||
[tool.setuptools]
|
||||
@@ -24,11 +24,10 @@ where = ['src']
|
||||
[project.scripts]
|
||||
doveauth = "chatmaild.doveauth:main"
|
||||
chatmail-metadata = "chatmaild.metadata:main"
|
||||
filtermail = "chatmaild.filtermail:main"
|
||||
echobot = "chatmaild.echo:main"
|
||||
chatmail-metrics = "chatmaild.metrics:main"
|
||||
delete_inactive_users = "chatmaild.delete_inactive_users:main"
|
||||
chatmail-expire = "chatmaild.expire:main"
|
||||
chatmail-fsreport = "chatmaild.fsreport:main"
|
||||
lastlogin = "chatmaild.lastlogin:main"
|
||||
turnserver = "chatmaild.turnserver:main"
|
||||
|
||||
[project.entry-points.pytest11]
|
||||
"chatmaild.testplugin" = "chatmaild.tests.plugin"
|
||||
@@ -48,6 +47,9 @@ lint.select = [
|
||||
"PLE", # Pylint Error
|
||||
"PLW", # Pylint Warning
|
||||
]
|
||||
lint.ignore = [
|
||||
"PLC0415" # import-outside-top-level
|
||||
]
|
||||
|
||||
[tool.tox]
|
||||
legacy_tox_ini = """
|
||||
@@ -67,5 +69,7 @@ commands =
|
||||
[testenv]
|
||||
deps = pytest
|
||||
pdbpp
|
||||
pytest-localserver
|
||||
execnet
|
||||
commands = pytest -v -rsXx {posargs}
|
||||
"""
|
||||
|
||||
@@ -1,59 +0,0 @@
|
||||
"""Generated from deltachat, draft-ietf-lamps-header-protection, and
|
||||
encrypted_subject localizations in
|
||||
https://github.com/thunderbird/thunderbird-android/
|
||||
|
||||
"""
|
||||
|
||||
common_encrypted_subjects = {
|
||||
"...",
|
||||
"[...]",
|
||||
"암호화된 메시지",
|
||||
"Ĉifrita mesaĝo",
|
||||
"Courriel chiffré",
|
||||
"Dulrituð skilaboð",
|
||||
"Encrypted Message",
|
||||
"Fersifere berjocht",
|
||||
"Kemennadenn enrineget",
|
||||
"Krüptitud kiri",
|
||||
"Krypterat meddelande",
|
||||
"Krypteret besked",
|
||||
"Kryptert melding",
|
||||
"Mensagem criptografada",
|
||||
"Mensagem encriptada",
|
||||
"Mensaje cifrado",
|
||||
"Mensaxe cifrada",
|
||||
"Mesaj Criptat",
|
||||
"Mesazh i Fshehtëzuar",
|
||||
"Messaggio criptato",
|
||||
"Messaghju cifratu",
|
||||
"Missatge encriptat",
|
||||
"Neges wedi'i Hamgryptio",
|
||||
"Pesan terenkripsi",
|
||||
"Salattu viesti",
|
||||
"Şifreli İleti",
|
||||
"Šifrēta ziņa",
|
||||
"Šifrirana poruka",
|
||||
"Šifrirano sporočilo",
|
||||
"Šifruotas laiškas",
|
||||
"Tin nhắn được mã hóa",
|
||||
"Titkosított üzenet",
|
||||
"Verschlüsselte Nachricht",
|
||||
"Versleuteld bericht",
|
||||
"Zašifrovaná zpráva",
|
||||
"Zaszyfrowana wiadomość",
|
||||
"Zifratu mezua",
|
||||
"Κρυπτογραφημένο μήνυμα",
|
||||
"Зашифроване повідомлення",
|
||||
"Зашифрованное сообщение",
|
||||
"Зашыфраваны ліст",
|
||||
"Криптирано съобщение",
|
||||
"Шифрована порука",
|
||||
"დაშიფრული წერილი",
|
||||
"הודעה מוצפנת",
|
||||
"پیام رمزنگاریشده",
|
||||
"رسالة مشفّرة",
|
||||
"എൻക്രിപ്റ്റുചെയ്ത സന്ദേശം",
|
||||
"加密邮件",
|
||||
"已加密的訊息",
|
||||
"暗号化されたメッセージ",
|
||||
}
|
||||
@@ -1,38 +1,54 @@
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import iniconfig
|
||||
|
||||
from chatmaild.user import User
|
||||
|
||||
echobot_password_path = Path("/run/echobot/password")
|
||||
|
||||
|
||||
def read_config(inipath):
|
||||
assert Path(inipath).exists(), inipath
|
||||
cfg = iniconfig.IniConfig(inipath)
|
||||
params = cfg.sections["params"]
|
||||
return Config(inipath, params=params)
|
||||
default_config_content = get_default_config_content(params["mail_domain"])
|
||||
df_params = iniconfig.IniConfig("ini", data=default_config_content)["params"]
|
||||
new_params = dict(df_params.items())
|
||||
new_params.update(params)
|
||||
return Config(inipath, params=new_params)
|
||||
|
||||
|
||||
class Config:
|
||||
def __init__(self, inipath, params):
|
||||
self._inipath = inipath
|
||||
self.mail_domain = params["mail_domain"]
|
||||
self.max_user_send_per_minute = int(params["max_user_send_per_minute"])
|
||||
self.max_user_send_per_minute = int(params.get("max_user_send_per_minute", 60))
|
||||
self.max_user_send_burst_size = int(params.get("max_user_send_burst_size", 10))
|
||||
self.max_mailbox_size = params["max_mailbox_size"]
|
||||
self.max_message_size = int(params.get("max_message_size", "31457280"))
|
||||
self.delete_mails_after = params["delete_mails_after"]
|
||||
self.delete_large_after = params["delete_large_after"]
|
||||
self.delete_inactive_users_after = int(params["delete_inactive_users_after"])
|
||||
self.username_min_length = int(params["username_min_length"])
|
||||
self.username_max_length = int(params["username_max_length"])
|
||||
self.password_min_length = int(params["password_min_length"])
|
||||
self.passthrough_senders = params["passthrough_senders"].split()
|
||||
self.passthrough_recipients = params["passthrough_recipients"].split()
|
||||
self.filtermail_smtp_port = int(params["filtermail_smtp_port"])
|
||||
self.postfix_reinject_port = int(params["postfix_reinject_port"])
|
||||
self.www_folder = params.get("www_folder", "")
|
||||
self.filtermail_smtp_port = int(params.get("filtermail_smtp_port", "10080"))
|
||||
self.filtermail_smtp_port_incoming = int(
|
||||
params.get("filtermail_smtp_port_incoming", "10081")
|
||||
)
|
||||
self.postfix_reinject_port = int(params.get("postfix_reinject_port", "10025"))
|
||||
self.postfix_reinject_port_incoming = int(
|
||||
params.get("postfix_reinject_port_incoming", "10026")
|
||||
)
|
||||
self.mtail_address = params.get("mtail_address")
|
||||
self.disable_ipv6 = params.get("disable_ipv6", "false").lower() == "true"
|
||||
self.addr_v4 = os.environ.get("CHATMAIL_ADDR_V4", "")
|
||||
self.addr_v6 = os.environ.get("CHATMAIL_ADDR_V6", "")
|
||||
self.acme_email = params.get("acme_email", "")
|
||||
self.imap_rawlog = params.get("imap_rawlog", "false").lower() == "true"
|
||||
self.imap_compress = params.get("imap_compress", "false").lower() == "true"
|
||||
if "iroh_relay" not in params:
|
||||
self.iroh_relay = "https://" + params["mail_domain"]
|
||||
self.enable_iroh_relay = True
|
||||
@@ -44,6 +60,31 @@ class Config:
|
||||
self.privacy_pdo = params.get("privacy_pdo")
|
||||
self.privacy_supervisor = params.get("privacy_supervisor")
|
||||
|
||||
# TLS certificate management.
|
||||
# If tls_external_cert_and_key is set, use externally managed certs.
|
||||
# Otherwise derived from the domain name:
|
||||
# - Domains starting with "_" use self-signed certificates
|
||||
# - All other domains use ACME.
|
||||
external = params.get("tls_external_cert_and_key", "").strip()
|
||||
|
||||
if external:
|
||||
parts = external.split()
|
||||
if len(parts) != 2:
|
||||
raise ValueError(
|
||||
"tls_external_cert_and_key must have two space-separated"
|
||||
" paths: CERT_PATH KEY_PATH"
|
||||
)
|
||||
self.tls_cert_mode = "external"
|
||||
self.tls_cert_path, self.tls_key_path = parts
|
||||
elif self.mail_domain.startswith("_"):
|
||||
self.tls_cert_mode = "self"
|
||||
self.tls_cert_path = "/etc/ssl/certs/mailserver.pem"
|
||||
self.tls_key_path = "/etc/ssl/private/mailserver.key"
|
||||
else:
|
||||
self.tls_cert_mode = "acme"
|
||||
self.tls_cert_path = f"/var/lib/acme/live/{self.mail_domain}/fullchain"
|
||||
self.tls_key_path = f"/var/lib/acme/live/{self.mail_domain}/privkey"
|
||||
|
||||
# deprecated option
|
||||
mbdir = params.get("mailboxes_dir", f"/home/vmail/mail/{self.mail_domain}")
|
||||
self.mailboxes_dir = Path(mbdir.strip())
|
||||
@@ -54,21 +95,23 @@ class Config:
|
||||
def _getbytefile(self):
|
||||
return open(self._inipath, "rb")
|
||||
|
||||
def get_user(self, addr):
|
||||
def get_user(self, addr) -> User:
|
||||
if not addr or "@" not in addr or "/" in addr:
|
||||
raise ValueError(f"invalid address {addr!r}")
|
||||
|
||||
maildir = self.mailboxes_dir.joinpath(addr)
|
||||
if addr.startswith("echo@"):
|
||||
password_path = echobot_password_path
|
||||
else:
|
||||
password_path = maildir.joinpath("password")
|
||||
password_path = maildir.joinpath("password")
|
||||
|
||||
return User(maildir, addr, password_path, uid="vmail", gid="vmail")
|
||||
|
||||
|
||||
def write_initial_config(inipath, mail_domain, overrides):
|
||||
"""Write out default config file, using the specified config value overrides."""
|
||||
content = get_default_config_content(mail_domain, **overrides)
|
||||
inipath.write_text(content)
|
||||
|
||||
|
||||
def get_default_config_content(mail_domain, **overrides):
|
||||
from importlib.resources import files
|
||||
|
||||
inidir = files(__package__).joinpath("ini")
|
||||
@@ -100,7 +143,7 @@ def write_initial_config(inipath, mail_domain, overrides):
|
||||
lines = []
|
||||
for line in content.split("\n"):
|
||||
for key, value in privacy.items():
|
||||
value_lines = value.strip().split("\n")
|
||||
value_lines = value.format(mail_domain=mail_domain).strip().split("\n")
|
||||
if not line.startswith(f"{key} =") or not value_lines:
|
||||
continue
|
||||
if len(value_lines) == 1:
|
||||
@@ -113,5 +156,4 @@ def write_initial_config(inipath, mail_domain, overrides):
|
||||
else:
|
||||
lines.append(line)
|
||||
content = "\n".join(lines)
|
||||
|
||||
inipath.write_text(content)
|
||||
return content
|
||||
|
||||
@@ -1,31 +0,0 @@
|
||||
"""
|
||||
Remove inactive users
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
|
||||
from .config import read_config
|
||||
|
||||
|
||||
def delete_inactive_users(config):
|
||||
cutoff_date = time.time() - config.delete_inactive_users_after * 86400
|
||||
for addr in os.listdir(config.mailboxes_dir):
|
||||
try:
|
||||
user = config.get_user(addr)
|
||||
except ValueError:
|
||||
continue
|
||||
|
||||
read_timestamp = user.get_last_login_timestamp()
|
||||
if read_timestamp and read_timestamp < cutoff_date:
|
||||
path = config.mailboxes_dir.joinpath(addr)
|
||||
assert path == user.maildir
|
||||
shutil.rmtree(path, ignore_errors=True)
|
||||
|
||||
|
||||
def main():
|
||||
(cfgpath,) = sys.argv[1:]
|
||||
config = read_config(cfgpath)
|
||||
delete_inactive_users(config)
|
||||
@@ -22,7 +22,7 @@ class DictProxy:
|
||||
wfile.flush()
|
||||
|
||||
def handle_dovecot_request(self, msg, transactions):
|
||||
# see https://doc.dovecot.org/developer_manual/design/dict_protocol/#dovecot-dict-protocol
|
||||
# see https://doc.dovecot.org/2.3/developer_manual/design/dict_protocol/#dovecot-dict-protocol
|
||||
short_command = msg[0]
|
||||
parts = msg[1:].split("\t")
|
||||
|
||||
|
||||
@@ -1,19 +1,26 @@
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
import crypt_r
|
||||
import filelock
|
||||
|
||||
try:
|
||||
import crypt_r
|
||||
except ImportError:
|
||||
import crypt as crypt_r
|
||||
|
||||
from .config import Config, read_config
|
||||
from .dictproxy import DictProxy
|
||||
from .migrate_db import migrate_from_db_to_maildir
|
||||
|
||||
NOCREATE_FILE = "/etc/chatmail-nocreate"
|
||||
VALID_LOCALPART_RE = re.compile(r"^[a-z0-9._-]+$")
|
||||
|
||||
|
||||
def encrypt_password(password: str):
|
||||
# https://doc.dovecot.org/configuration_manual/authentication/password_schemes/
|
||||
# https://doc.dovecot.org/2.3/configuration_manual/authentication/password_schemes/
|
||||
passhash = crypt_r.crypt(password, crypt_r.METHOD_SHA512)
|
||||
return "{SHA512-CRYPT}" + passhash
|
||||
|
||||
@@ -37,10 +44,6 @@ def is_allowed_to_create(config: Config, user, cleartext_password) -> bool:
|
||||
return False
|
||||
localpart, domain = parts
|
||||
|
||||
if localpart == "echo":
|
||||
# echobot account should not be created in the database
|
||||
return False
|
||||
|
||||
if (
|
||||
len(localpart) > config.username_max_length
|
||||
or len(localpart) < config.username_min_length
|
||||
@@ -53,6 +56,10 @@ def is_allowed_to_create(config: Config, user, cleartext_password) -> bool:
|
||||
)
|
||||
return False
|
||||
|
||||
if not VALID_LOCALPART_RE.match(localpart):
|
||||
logging.warning("localpart %r contains invalid characters", localpart)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
@@ -141,8 +148,13 @@ class AuthDictProxy(DictProxy):
|
||||
if not is_allowed_to_create(self.config, addr, cleartext_password):
|
||||
return
|
||||
|
||||
user.set_password(encrypt_password(cleartext_password))
|
||||
print(f"Created address: {addr}", file=sys.stderr)
|
||||
lock = filelock.FileLock(str(user.password_path) + ".lock", timeout=5)
|
||||
with lock:
|
||||
userdata = user.get_userdb_dict()
|
||||
if userdata:
|
||||
return userdata
|
||||
user.set_password(encrypt_password(cleartext_password))
|
||||
print(f"Created address: {addr}", file=sys.stderr)
|
||||
return user.get_userdb_dict()
|
||||
|
||||
|
||||
|
||||
@@ -1,104 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Advanced echo bot example.
|
||||
|
||||
it will echo back any message that has non-empty text and also supports the /help command.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from deltachat_rpc_client import Bot, DeltaChat, EventType, Rpc, events
|
||||
|
||||
from chatmaild.config import echobot_password_path, read_config
|
||||
from chatmaild.doveauth import encrypt_password
|
||||
from chatmaild.newemail import create_newemail_dict
|
||||
|
||||
hooks = events.HookCollection()
|
||||
|
||||
|
||||
@hooks.on(events.RawEvent)
|
||||
def log_event(event):
|
||||
if event.kind == EventType.INFO:
|
||||
logging.info(event.msg)
|
||||
elif event.kind == EventType.WARNING:
|
||||
logging.warning(event.msg)
|
||||
|
||||
|
||||
@hooks.on(events.RawEvent(EventType.ERROR))
|
||||
def log_error(event):
|
||||
logging.error("%s", event.msg)
|
||||
|
||||
|
||||
@hooks.on(events.MemberListChanged)
|
||||
def on_memberlist_changed(event):
|
||||
logging.info(
|
||||
"member %s was %s", event.member, "added" if event.member_added else "removed"
|
||||
)
|
||||
|
||||
|
||||
@hooks.on(events.GroupImageChanged)
|
||||
def on_group_image_changed(event):
|
||||
logging.info("group image %s", "deleted" if event.image_deleted else "changed")
|
||||
|
||||
|
||||
@hooks.on(events.GroupNameChanged)
|
||||
def on_group_name_changed(event):
|
||||
logging.info(f"group name changed, old name: {event.old_name}")
|
||||
|
||||
|
||||
@hooks.on(events.NewMessage(func=lambda e: not e.command))
|
||||
def echo(event):
|
||||
snapshot = event.message_snapshot
|
||||
if snapshot.is_info:
|
||||
# Ignore info messages
|
||||
return
|
||||
if snapshot.text or snapshot.file:
|
||||
snapshot.chat.send_message(text=snapshot.text, file=snapshot.file)
|
||||
|
||||
|
||||
@hooks.on(events.NewMessage(command="/help"))
|
||||
def help_command(event):
|
||||
snapshot = event.message_snapshot
|
||||
snapshot.chat.send_text("Send me any message and I will echo it back")
|
||||
|
||||
|
||||
def main():
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
path = os.environ.get("PATH")
|
||||
venv_path = sys.argv[0].strip("echobot")
|
||||
os.environ["PATH"] = path + ":" + venv_path
|
||||
with Rpc() as rpc:
|
||||
deltachat = DeltaChat(rpc)
|
||||
system_info = deltachat.get_system_info()
|
||||
logging.info(f"Running deltachat core {system_info.deltachat_core_version}")
|
||||
|
||||
accounts = deltachat.get_all_accounts()
|
||||
account = accounts[0] if accounts else deltachat.add_account()
|
||||
|
||||
bot = Bot(account, hooks)
|
||||
|
||||
config = read_config(sys.argv[1])
|
||||
addr = "echo@" + config.mail_domain
|
||||
|
||||
# Create password file
|
||||
if bot.is_configured():
|
||||
password = bot.account.get_config("mail_pw")
|
||||
else:
|
||||
password = create_newemail_dict(config)["password"]
|
||||
|
||||
echobot_password_path.write_text(encrypt_password(password))
|
||||
# Give the user which doveauth runs as access to the password file.
|
||||
subprocess.check_call(
|
||||
["/usr/bin/setfacl", "-m", "user:vmail:r", echobot_password_path],
|
||||
)
|
||||
|
||||
if not bot.is_configured():
|
||||
bot.configure(addr, password)
|
||||
|
||||
bot.run_forever()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
206
chatmaild/src/chatmaild/expire.py
Normal file
206
chatmaild/src/chatmaild/expire.py
Normal file
@@ -0,0 +1,206 @@
|
||||
"""
|
||||
Expire old messages and addresses.
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
from argparse import ArgumentParser
|
||||
from collections import namedtuple
|
||||
from datetime import datetime
|
||||
from stat import S_ISREG
|
||||
|
||||
from chatmaild.config import read_config
|
||||
|
||||
FileEntry = namedtuple("FileEntry", ("path", "mtime", "size"))
|
||||
|
||||
|
||||
def iter_mailboxes(basedir, maxnum):
|
||||
if not os.path.exists(basedir):
|
||||
print_info(f"no mailboxes found at: {basedir}")
|
||||
return
|
||||
|
||||
for name in os_listdir_if_exists(basedir)[:maxnum]:
|
||||
if "@" in name:
|
||||
yield MailboxStat(basedir + "/" + name)
|
||||
|
||||
|
||||
def get_file_entry(path):
|
||||
"""return a FileEntry or None if the path does not exist or is not a regular file."""
|
||||
try:
|
||||
st = os.stat(path)
|
||||
except FileNotFoundError:
|
||||
return None
|
||||
if not S_ISREG(st.st_mode):
|
||||
return None
|
||||
return FileEntry(path, st.st_mtime, st.st_size)
|
||||
|
||||
|
||||
def os_listdir_if_exists(path):
|
||||
"""return a list of names obtained from os.listdir or an empty list if the path does not exist."""
|
||||
try:
|
||||
return os.listdir(path)
|
||||
except FileNotFoundError:
|
||||
return []
|
||||
|
||||
|
||||
class MailboxStat:
|
||||
last_login = None
|
||||
|
||||
def __init__(self, basedir):
|
||||
self.basedir = str(basedir)
|
||||
self.messages = []
|
||||
self.extrafiles = []
|
||||
self.scandir(self.basedir)
|
||||
|
||||
def scandir(self, folderdir):
|
||||
for name in os_listdir_if_exists(folderdir):
|
||||
path = f"{folderdir}/{name}"
|
||||
if name in ("cur", "new", "tmp"):
|
||||
for msg_name in os_listdir_if_exists(path):
|
||||
entry = get_file_entry(f"{path}/{msg_name}")
|
||||
if entry is not None:
|
||||
self.messages.append(entry)
|
||||
elif os.path.isdir(path):
|
||||
self.scandir(path)
|
||||
else:
|
||||
entry = get_file_entry(path)
|
||||
if entry is not None:
|
||||
self.extrafiles.append(entry)
|
||||
if name == "password":
|
||||
self.last_login = entry.mtime
|
||||
self.extrafiles.sort(key=lambda x: -x.size)
|
||||
|
||||
|
||||
def print_info(msg):
|
||||
print(msg, file=sys.stderr)
|
||||
|
||||
|
||||
class Expiry:
|
||||
def __init__(self, config, dry, now, verbose):
|
||||
self.config = config
|
||||
self.dry = dry
|
||||
self.now = now
|
||||
self.verbose = verbose
|
||||
self.del_mboxes = 0
|
||||
self.all_mboxes = 0
|
||||
self.del_files = 0
|
||||
self.all_files = 0
|
||||
self.start = time.time()
|
||||
|
||||
def remove_mailbox(self, mboxdir):
|
||||
if self.verbose:
|
||||
print_info(f"removing {mboxdir}")
|
||||
if not self.dry:
|
||||
shutil.rmtree(mboxdir)
|
||||
self.del_mboxes += 1
|
||||
|
||||
def remove_file(self, path, mtime=None):
|
||||
if self.verbose:
|
||||
if mtime is not None:
|
||||
date = datetime.fromtimestamp(mtime).strftime("%b %d")
|
||||
print_info(f"removing {date} {path}")
|
||||
else:
|
||||
print_info(f"removing {path}")
|
||||
if not self.dry:
|
||||
try:
|
||||
os.unlink(path)
|
||||
except FileNotFoundError:
|
||||
print_info(f"file not found/vanished {path}")
|
||||
self.del_files += 1
|
||||
|
||||
def process_mailbox_stat(self, mbox):
|
||||
cutoff_without_login = (
|
||||
self.now - int(self.config.delete_inactive_users_after) * 86400
|
||||
)
|
||||
cutoff_mails = self.now - int(self.config.delete_mails_after) * 86400
|
||||
cutoff_large_mails = self.now - int(self.config.delete_large_after) * 86400
|
||||
|
||||
self.all_mboxes += 1
|
||||
changed = False
|
||||
if mbox.last_login and mbox.last_login < cutoff_without_login:
|
||||
self.remove_mailbox(mbox.basedir)
|
||||
return
|
||||
|
||||
mboxname = os.path.basename(mbox.basedir)
|
||||
if self.verbose:
|
||||
date = datetime.fromtimestamp(mbox.last_login) if mbox.last_login else None
|
||||
if date:
|
||||
print_info(f"checking mailbox {date.strftime('%b %d')} {mboxname}")
|
||||
else:
|
||||
print_info(f"checking mailbox (no last_login) {mboxname}")
|
||||
self.all_files += len(mbox.messages)
|
||||
for message in mbox.messages:
|
||||
if message.mtime < cutoff_mails:
|
||||
self.remove_file(message.path, mtime=message.mtime)
|
||||
elif message.size > 200000 and message.mtime < cutoff_large_mails:
|
||||
# we only remove noticed large files (not unnoticed ones in new/)
|
||||
parts = message.path.split("/")
|
||||
if len(parts) >= 2 and parts[-2] == "cur":
|
||||
self.remove_file(message.path, mtime=message.mtime)
|
||||
else:
|
||||
continue
|
||||
changed = True
|
||||
if changed:
|
||||
self.remove_file(f"{mbox.basedir}/maildirsize")
|
||||
|
||||
def get_summary(self):
|
||||
return (
|
||||
f"Removed {self.del_mboxes} out of {self.all_mboxes} mailboxes "
|
||||
f"and {self.del_files} out of {self.all_files} files in existing mailboxes "
|
||||
f"in {time.time() - self.start:2.2f} seconds"
|
||||
)
|
||||
|
||||
|
||||
def main(args=None):
|
||||
"""Expire mailboxes and messages according to chatmail config"""
|
||||
parser = ArgumentParser(description=main.__doc__)
|
||||
ini = "/usr/local/lib/chatmaild/chatmail.ini"
|
||||
parser.add_argument(
|
||||
"chatmail_ini",
|
||||
action="store",
|
||||
nargs="?",
|
||||
help=f"path pointing to chatmail.ini file, default: {ini}",
|
||||
default=ini,
|
||||
)
|
||||
parser.add_argument(
|
||||
"--days", action="store", help="assume date to be days older than now"
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--maxnum",
|
||||
default=None,
|
||||
action="store",
|
||||
help="maximum number of mailboxes to iterate on",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-v",
|
||||
dest="verbose",
|
||||
action="store_true",
|
||||
help="print out removed files and mailboxes",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--remove",
|
||||
dest="remove",
|
||||
action="store_true",
|
||||
help="actually remove all expired files and dirs",
|
||||
)
|
||||
args = parser.parse_args(args)
|
||||
|
||||
config = read_config(args.chatmail_ini)
|
||||
now = datetime.utcnow().timestamp()
|
||||
if args.days:
|
||||
now = now - 86400 * int(args.days)
|
||||
|
||||
maxnum = int(args.maxnum) if args.maxnum else None
|
||||
exp = Expiry(config, dry=not args.remove, now=now, verbose=args.verbose)
|
||||
for mailbox in iter_mailboxes(str(config.mailboxes_dir), maxnum=maxnum):
|
||||
exp.process_mailbox_stat(mailbox)
|
||||
print(exp.get_summary())
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(sys.argv[1:])
|
||||
@@ -1,253 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
import asyncio
|
||||
import base64
|
||||
import binascii
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
from email import policy
|
||||
from email.parser import BytesParser
|
||||
from email.utils import parseaddr
|
||||
from smtplib import SMTP as SMTPClient
|
||||
|
||||
from aiosmtpd.controller import Controller
|
||||
|
||||
from .common_encrypted_subjects import common_encrypted_subjects
|
||||
from .config import read_config
|
||||
|
||||
|
||||
def check_openpgp_payload(payload: bytes):
|
||||
"""Checks the OpenPGP payload.
|
||||
|
||||
OpenPGP payload must consist only of PKESK and SKESK packets
|
||||
terminated by a single SEIPD packet.
|
||||
|
||||
Returns True if OpenPGP payload is correct,
|
||||
False otherwise.
|
||||
|
||||
May raise IndexError while trying to read OpenPGP packet header
|
||||
if it is truncated.
|
||||
"""
|
||||
i = 0
|
||||
while i < len(payload):
|
||||
# Only OpenPGP format is allowed.
|
||||
if payload[i] & 0xC0 != 0xC0:
|
||||
return False
|
||||
|
||||
packet_type_id = payload[i] & 0x3F
|
||||
i += 1
|
||||
if payload[i] < 192:
|
||||
# One-octet length.
|
||||
body_len = payload[i]
|
||||
i += 1
|
||||
elif payload[i] < 224:
|
||||
# Two-octet length.
|
||||
body_len = ((payload[i] - 192) << 8) + payload[i + 1] + 192
|
||||
i += 2
|
||||
elif payload[i] == 255:
|
||||
# Five-octet length.
|
||||
body_len = (
|
||||
(payload[i + 1] << 24)
|
||||
| (payload[i + 2] << 16)
|
||||
| (payload[i + 3] << 8)
|
||||
| payload[i + 4]
|
||||
)
|
||||
i += 5
|
||||
else:
|
||||
# Partial body length is not allowed.
|
||||
return False
|
||||
|
||||
i += body_len
|
||||
|
||||
if i == len(payload):
|
||||
# Last packet should be
|
||||
# Symmetrically Encrypted and Integrity Protected Data Packet (SEIPD)
|
||||
#
|
||||
# This is the only place where this function may return `True`.
|
||||
return packet_type_id == 18
|
||||
elif packet_type_id not in [1, 3]:
|
||||
# All packets except the last one must be either
|
||||
# Public-Key Encrypted Session Key Packet (PKESK)
|
||||
# or
|
||||
# Symmetric-Key Encrypted Session Key Packet (SKESK)
|
||||
return False
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def check_armored_payload(payload: str):
|
||||
prefix = "-----BEGIN PGP MESSAGE-----\r\n\r\n"
|
||||
if not payload.startswith(prefix):
|
||||
return False
|
||||
payload = payload.removeprefix(prefix)
|
||||
|
||||
suffix = "-----END PGP MESSAGE-----\r\n\r\n"
|
||||
if not payload.endswith(suffix):
|
||||
return False
|
||||
payload = payload.removesuffix(suffix)
|
||||
|
||||
# Remove CRC24.
|
||||
payload = payload.rpartition("=")[0]
|
||||
|
||||
try:
|
||||
payload = base64.b64decode(payload)
|
||||
except binascii.Error:
|
||||
return False
|
||||
|
||||
try:
|
||||
return check_openpgp_payload(payload)
|
||||
except IndexError:
|
||||
return False
|
||||
|
||||
|
||||
def check_encrypted(message):
|
||||
"""Check that the message is an OpenPGP-encrypted message.
|
||||
|
||||
MIME structure of the message must correspond to <https://www.rfc-editor.org/rfc/rfc3156>.
|
||||
"""
|
||||
if not message.is_multipart():
|
||||
return False
|
||||
if message.get("subject") not in common_encrypted_subjects:
|
||||
return False
|
||||
if message.get_content_type() != "multipart/encrypted":
|
||||
return False
|
||||
parts_count = 0
|
||||
for part in message.iter_parts():
|
||||
# We explicitly check Content-Type of each part later,
|
||||
# but this is to be absolutely sure `get_payload()` returns string and not list.
|
||||
if part.is_multipart():
|
||||
return False
|
||||
|
||||
if parts_count == 0:
|
||||
if part.get_content_type() != "application/pgp-encrypted":
|
||||
return False
|
||||
|
||||
payload = part.get_payload()
|
||||
if payload.strip() != "Version: 1":
|
||||
return False
|
||||
elif parts_count == 1:
|
||||
if part.get_content_type() != "application/octet-stream":
|
||||
return False
|
||||
|
||||
if not check_armored_payload(part.get_payload()):
|
||||
return False
|
||||
else:
|
||||
return False
|
||||
parts_count += 1
|
||||
return True
|
||||
|
||||
|
||||
async def asyncmain_beforequeue(config):
|
||||
port = config.filtermail_smtp_port
|
||||
Controller(BeforeQueueHandler(config), hostname="127.0.0.1", port=port).start()
|
||||
|
||||
|
||||
def recipient_matches_passthrough(recipient, passthrough_recipients):
|
||||
for addr in passthrough_recipients:
|
||||
if recipient == addr:
|
||||
return True
|
||||
if addr[0] == "@" and recipient.endswith(addr):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
class BeforeQueueHandler:
    """aiosmtpd handler running before Postfix queueing.

    Rate-limits senders per MAIL FROM, filters each DATA payload via
    check_DATA, and re-injects accepted mail into Postfix on the
    configured reinject port.
    """

    def __init__(self, config):
        self.config = config
        # tracks per-address send timestamps for rate limiting
        self.send_rate_limiter = SendRateLimiter()

    async def handle_MAIL(self, server, session, envelope, address, mail_options):
        """Accept or reject the envelope sender (rate limit + basic syntax)."""
        logging.info(f"handle_MAIL from {address}")
        envelope.mail_from = address
        max_sent = self.config.max_user_send_per_minute
        if not self.send_rate_limiter.is_sending_allowed(address, max_sent):
            # 450 is a temporary failure: the client may retry later
            return f"450 4.7.1: Too much mail from {address}"

        # require exactly one "@" in the sender address
        parts = envelope.mail_from.split("@")
        if len(parts) != 2:
            return f"500 Invalid from address <{envelope.mail_from!r}>"

        return "250 OK"

    async def handle_DATA(self, server, session, envelope):
        """Filter the full message; on success re-inject it into Postfix."""
        logging.info("handle_DATA before-queue")
        error = self.check_DATA(envelope)
        if error:
            return error
        logging.info("re-injecting the mail that passed checks")
        # NOTE(review): the SMTP client is never quit/closed here —
        # consider a `with` block or client.quit(); confirm intended.
        client = SMTPClient("localhost", self.config.postfix_reinject_port)
        client.sendmail(envelope.mail_from, envelope.rcpt_tos, envelope.content)
        return "250 OK"

    def check_DATA(self, envelope):
        """The central filtering function for e-mails.

        Returns None when the mail is acceptable, otherwise an SMTP error
        string ("5xx ...") that is sent back to the submitting client.
        """
        logging.info(f"Processing DATA message from {envelope.mail_from}")

        message = BytesParser(policy=policy.default).parsebytes(envelope.content)
        mail_encrypted = check_encrypted(message)

        # NOTE(review): message.get("from") may be None for a message
        # without a From header, which would crash .strip() — confirm
        # upstream guarantees the header exists.
        _, from_addr = parseaddr(message.get("from").strip())
        # take the part after the last "@"; for an address without "@"
        # this yields the whole string
        envelope_from_domain = from_addr.split("@").pop()

        # the MIME From header must match the envelope sender (case-insensitive)
        logging.info(f"mime-from: {from_addr} envelope-from: {envelope.mail_from!r}")
        if envelope.mail_from.lower() != from_addr.lower():
            return f"500 Invalid FROM <{from_addr!r}> for <{envelope.mail_from!r}>"

        if mail_encrypted:
            print("Filtering encrypted mail.", file=sys.stderr)
        else:
            print("Filtering unencrypted mail.", file=sys.stderr)

        # configured senders bypass all further checks
        if envelope.mail_from in self.config.passthrough_senders:
            return

        passthrough_recipients = self.config.passthrough_recipients

        # securejoin handshake messages are allowed unencrypted
        is_securejoin = message.get("secure-join") in [
            "vc-request",
            "vg-request",
        ]
        if is_securejoin:
            return

        for recipient in envelope.rcpt_tos:
            if envelope.mail_from == recipient:
                # Always allow sending emails to self.
                continue
            if recipient_matches_passthrough(recipient, passthrough_recipients):
                continue
            res = recipient.split("@")
            if len(res) != 2:
                return f"500 Invalid address <{recipient}>"
            _recipient_addr, recipient_domain = res

            # mail leaving the sender's domain must be encrypted
            is_outgoing = recipient_domain != envelope_from_domain
            if is_outgoing and not mail_encrypted:
                print("Rejected unencrypted mail.", file=sys.stderr)
                return f"500 Invalid unencrypted mail to <{recipient}>"
|
||||
|
||||
|
||||
class SendRateLimiter:
    """Sliding-window (60 second) per-address rate limiter for outgoing mail."""

    def __init__(self):
        # maps sender address -> list of send timestamps within the last minute
        self.addr2timestamps = {}

    def is_sending_allowed(self, mail_from, max_send_per_minute):
        """Record one send attempt and return True if it is within the limit.

        At most *max_send_per_minute* sends are allowed per rolling
        60-second window; further attempts return False and are not
        recorded.
        """
        last = self.addr2timestamps.setdefault(mail_from, [])
        now = time.time()
        # drop timestamps that fell out of the 60-second window
        last[:] = [ts for ts in last if ts >= (now - 60)]
        # strict "<": the previous "<=" comparison admitted one send
        # more than max_send_per_minute per window (off-by-one)
        if len(last) < max_send_per_minute:
            last.append(now)
            return True
        return False
|
||||
|
||||
|
||||
def main():
    """Entry point: run the before-queue filtermail service forever.

    Expects exactly one command line argument: the path to chatmail.ini.
    """
    args = sys.argv[1:]
    # explicit usage error instead of `assert`, which is silently
    # stripped when Python runs with -O
    if len(args) != 1:
        raise SystemExit("usage: filtermail <path-to-chatmail.ini>")
    config = read_config(args[0])
    logging.basicConfig(level=logging.WARN)
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    task = asyncmain_beforequeue(config)
    loop.create_task(task)
    loop.run_forever()
|
||||
287
chatmaild/src/chatmaild/fsreport.py
Normal file
287
chatmaild/src/chatmaild/fsreport.py
Normal file
@@ -0,0 +1,287 @@
|
||||
"""
|
||||
command line tool to analyze mailbox message storage
|
||||
|
||||
example invocation:
|
||||
|
||||
python -m chatmaild.fsreport /path/to/chatmail.ini
|
||||
|
||||
to show storage summaries for all "cur" folders
|
||||
|
||||
python -m chatmaild.fsreport /path/to/chatmail.ini --mdir cur
|
||||
|
||||
to show storage summaries only for first 1000 mailboxes
|
||||
|
||||
python -m chatmaild.fsreport /path/to/chatmail.ini --maxnum 1000
|
||||
|
||||
to write Prometheus textfile for node_exporter
|
||||
|
||||
python -m chatmaild.fsreport --textfile /var/lib/prometheus/node-exporter/
|
||||
|
||||
writes to /var/lib/prometheus/node-exporter/fsreport.prom
|
||||
|
||||
to also write legacy metrics.py style output (default: /var/www/html/metrics):
|
||||
|
||||
python -m chatmaild.fsreport --textfile /var/lib/prometheus/node-exporter/ --legacy-metrics
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import tempfile
|
||||
from argparse import ArgumentParser
|
||||
from datetime import datetime
|
||||
|
||||
from chatmaild.config import read_config
|
||||
from chatmaild.expire import iter_mailboxes
|
||||
|
||||
DAYSECONDS = 24 * 60 * 60
|
||||
MONTHSECONDS = DAYSECONDS * 30
|
||||
|
||||
|
||||
def HSize(size: int):
    """Format a size integer as a human-readable K/M/G string (width 5 + unit)."""
    kilo = 1000
    if size < 10 * kilo:
        return f"{size / kilo:5.2f}K"
    if size < kilo**2:
        return f"{size / kilo:5.0f}K"
    if size < kilo**3:
        # truncate (not round) to whole megabytes, as before
        return f"{int(size / kilo**2):5.0f}M"
    return f"{size / kilo**3:5.2f}G"
|
||||
|
||||
|
||||
class Report:
    """Accumulates per-mailbox statistics and renders them as text or metrics.

    Feed mailboxes via process_mailbox_stat(), then emit results with
    dump_summary() (human-readable), dump_textfile() (Prometheus
    exposition format) or dump_compat_textfile() (legacy metrics.py
    format).
    """

    def __init__(self, now, min_login_age, mdir):
        # total bytes of non-message files across processed mailboxes
        self.size_extra = 0
        # total bytes of message files across processed mailboxes
        self.size_messages = 0
        # reference timestamp (epoch seconds) for all age computations
        self.now = now
        # only count message sizes for accounts whose last login is at
        # least this many days old
        self.min_login_age = min_login_age
        # optional Maildir subdirectory filter ("cur"/"new"/"tmp")
        self.mdir = mdir

        self.num_ci_logins = self.num_all_logins = 0
        # cumulative "active within N days" counters
        self.login_buckets = {x: 0 for x in (1, 10, 30, 40, 80, 100, 150)}

        KiB = 1024
        MiB = 1024 * KiB
        self.message_size_thresholds = (
            0,
            100 * KiB,
            MiB // 2,
            1 * MiB,
            2 * MiB,
            5 * MiB,
            10 * MiB,
        )
        # cumulative buckets: a message contributes to every threshold
        # it is >= to (bytes and message counts, respectively)
        self.message_buckets = {x: 0 for x in self.message_size_thresholds}
        self.message_count_buckets = {x: 0 for x in self.message_size_thresholds}

    def process_mailbox_stat(self, mailbox):
        """Fold one mailbox's login time and message sizes into the totals."""
        # categorize login times
        last_login = mailbox.last_login
        if last_login:
            self.num_all_logins += 1
            # CI accounts are recognized by their "ci-" directory prefix
            # and excluded from the activity buckets
            if os.path.basename(mailbox.basedir)[:3] == "ci-":
                self.num_ci_logins += 1
            else:
                for days in self.login_buckets:
                    if last_login >= self.now - days * DAYSECONDS:
                        self.login_buckets[days] += 1

        cutoff_login_date = self.now - self.min_login_age * DAYSECONDS
        if last_login and last_login <= cutoff_login_date:
            # categorize message sizes
            for size in self.message_buckets:
                for msg in mailbox.messages:
                    if msg.size >= size:
                        # honor the optional Maildir subdirectory filter
                        if self.mdir and f"/{self.mdir}/" not in msg.path:
                            continue
                        self.message_buckets[size] += msg.size
                        self.message_count_buckets[size] += 1

        # grand totals are unconditional (independent of login age / mdir)
        self.size_messages += sum(entry.size for entry in mailbox.messages)
        self.size_extra += sum(entry.size for entry in mailbox.extrafiles)

    def dump_summary(self):
        """Print a human-readable storage and login report to stdout."""
        all_messages = self.size_messages
        print()
        print("## Mailbox storage use analysis")
        print(f"Mailbox data total size: {HSize(self.size_extra + all_messages)}")
        print(f"Messages total size    : {HSize(all_messages)}")
        try:
            percent = self.size_extra / (self.size_extra + all_messages) * 100
        except ZeroDivisionError:
            # no data at all: report extra files as 100%
            percent = 100
        print(f"Extra files            : {HSize(self.size_extra)} ({percent:.2f}%)")

        print()
        if self.min_login_age:
            print(f"### Message storage for {self.min_login_age} days old logins")

        pref = f"[{self.mdir}] " if self.mdir else ""
        for minsize, sumsize in self.message_buckets.items():
            count = self.message_count_buckets[minsize]
            percent = (sumsize / all_messages * 100) if all_messages else 0
            print(
                f"{pref}larger than {HSize(minsize)}: {HSize(sumsize)} ({percent:.2f}%), {count} msgs"
            )

        user_logins = self.num_all_logins - self.num_ci_logins

        def p(num):
            # percentage of non-CI logins; degenerate case when none exist
            return f"({num / user_logins * 100:2.2f}%)" if user_logins else "100%"

        print()
        print(f"## Login stats, from date reference {datetime.fromtimestamp(self.now)}")
        # NOTE(review): HSize is applied to account *counts* here, not
        # byte sizes — presumably for compact display; confirm intended.
        print(f"all: {HSize(self.num_all_logins)}")
        print(f"non-ci: {HSize(user_logins)}")
        print(f"ci: {HSize(self.num_ci_logins)}")
        for days, active in self.login_buckets.items():
            print(f"last {days:3} days: {HSize(active)} {p(active)}")

    def _write_atomic(self, filepath, content):
        """Atomically write content to filepath via tmp+rename."""
        dirpath = os.path.dirname(os.path.abspath(filepath))
        # tmp file in the same directory so rename() stays on one filesystem
        fd, tmppath = tempfile.mkstemp(dir=dirpath, suffix=".tmp")
        try:
            with os.fdopen(fd, "w") as f:
                f.write(content)
            # mkstemp creates 0600; make the result world-readable
            os.chmod(tmppath, 0o644)
            os.rename(tmppath, filepath)
        except BaseException:
            # best-effort cleanup of the temp file, then re-raise
            try:
                os.unlink(tmppath)
            except OSError:
                pass
            raise

    def dump_textfile(self, filepath):
        """Dump metrics in Prometheus exposition format."""
        lines = []

        lines.append("# HELP chatmail_storage_bytes Mailbox storage in bytes.")
        lines.append("# TYPE chatmail_storage_bytes gauge")
        lines.append(f'chatmail_storage_bytes{{kind="messages"}} {self.size_messages}')
        lines.append(f'chatmail_storage_bytes{{kind="extra"}} {self.size_extra}')
        total = self.size_extra + self.size_messages
        lines.append(f'chatmail_storage_bytes{{kind="total"}} {total}')

        lines.append("# HELP chatmail_messages_bytes Sum of msg bytes >= threshold.")
        lines.append("# TYPE chatmail_messages_bytes gauge")
        for minsize, sumsize in self.message_buckets.items():
            lines.append(f'chatmail_messages_bytes{{min_size="{minsize}"}} {sumsize}')

        lines.append("# HELP chatmail_messages_count Number of msgs >= size threshold.")
        lines.append("# TYPE chatmail_messages_count gauge")
        for minsize, count in self.message_count_buckets.items():
            lines.append(f'chatmail_messages_count{{min_size="{minsize}"}} {count}')

        lines.append("# HELP chatmail_accounts Number of accounts.")
        lines.append("# TYPE chatmail_accounts gauge")
        user_logins = self.num_all_logins - self.num_ci_logins
        lines.append(f'chatmail_accounts{{kind="all"}} {self.num_all_logins}')
        lines.append(f'chatmail_accounts{{kind="ci"}} {self.num_ci_logins}')
        lines.append(f'chatmail_accounts{{kind="user"}} {user_logins}')

        lines.append(
            "# HELP chatmail_accounts_active Non-CI accounts active within N days."
        )
        lines.append("# TYPE chatmail_accounts_active gauge")
        for days, active in self.login_buckets.items():
            lines.append(f'chatmail_accounts_active{{days="{days}"}} {active}')

        self._write_atomic(filepath, "\n".join(lines) + "\n")

    def dump_compat_textfile(self, filepath):
        """Dump legacy metrics.py style metrics."""
        user_logins = self.num_all_logins - self.num_ci_logins
        lines = [
            "# HELP total number of accounts",
            "# TYPE accounts gauge",
            f"accounts {self.num_all_logins}",
            "# HELP number of CI accounts",
            "# TYPE ci_accounts gauge",
            f"ci_accounts {self.num_ci_logins}",
            "# HELP number of non-CI accounts",
            "# TYPE nonci_accounts gauge",
            f"nonci_accounts {user_logins}",
        ]
        self._write_atomic(filepath, "\n".join(lines) + "\n")
|
||||
|
||||
|
||||
def main(args=None):
    """Report about filesystem storage usage of all mailboxes and messages"""
    parser = ArgumentParser(description=main.__doc__)
    ini = "/usr/local/lib/chatmaild/chatmail.ini"
    parser.add_argument(
        "chatmail_ini",
        action="store",
        nargs="?",
        help=f"path pointing to chatmail.ini file, default: {ini}",
        default=ini,
    )
    parser.add_argument(
        "--days",
        default=0,
        action="store",
        help="assume date to be DAYS older than now",
    )
    parser.add_argument(
        "--min-login-age",
        default=0,
        metavar="DAYS",
        dest="min_login_age",
        action="store",
        help="only sum up message size if last login is at least DAYS days old",
    )
    parser.add_argument(
        "--mdir",
        metavar="{cur,new,tmp}",
        action="store",
        help="only consider messages in specified Maildir subdirectory for summary",
    )

    parser.add_argument(
        "--maxnum",
        default=None,
        action="store",
        help="maximum number of mailboxes to iterate on",
    )
    parser.add_argument(
        "--textfile",
        metavar="PATH",
        default=None,
        help="write Prometheus textfile to PATH (directory or file); "
        "if PATH is a directory, writes 'fsreport.prom' inside it",
    )
    parser.add_argument(
        "--legacy-metrics",
        metavar="FILENAME",
        nargs="?",
        const="/var/www/html/metrics",
        default=None,
        help="write legacy metrics.py textfile (default: /var/www/html/metrics)",
    )

    args = parser.parse_args(args)

    config = read_config(args.chatmail_ini)

    # Use a timezone-aware UTC "now": datetime.utcnow() returns a naive
    # datetime whose .timestamp() is interpreted as *local* time, which
    # yields a wrong epoch value on non-UTC hosts (utcnow() is also
    # deprecated since Python 3.12).
    from datetime import timezone

    now = datetime.now(timezone.utc).timestamp()
    if args.days:
        now = now - 86400 * int(args.days)

    maxnum = int(args.maxnum) if args.maxnum else None
    rep = Report(now=now, min_login_age=int(args.min_login_age), mdir=args.mdir)
    for mbox in iter_mailboxes(str(config.mailboxes_dir), maxnum=maxnum):
        rep.process_mailbox_stat(mbox)
    if args.textfile:
        path = args.textfile
        # a directory argument gets the default file name appended
        if os.path.isdir(path):
            path = os.path.join(path, "fsreport.prom")
        rep.dump_textfile(path)
    if args.legacy_metrics:
        rep.dump_compat_textfile(args.legacy_metrics)
    # without any textfile target, fall back to the human-readable summary
    if not args.textfile and not args.legacy_metrics:
        rep.dump_summary()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -11,11 +11,14 @@ mail_domain = {mail_domain}
|
||||
# Restrictions on user addresses
|
||||
#
|
||||
|
||||
# how many mails a user can send out per minute
|
||||
# email sending rate per user and minute
|
||||
max_user_send_per_minute = 60
|
||||
|
||||
# per-user max burst size for sending rate limiting (GCRA bucket capacity)
|
||||
max_user_send_burst_size = 10
|
||||
|
||||
# maximum mailbox size of a chatmail address
|
||||
max_mailbox_size = 100M
|
||||
max_mailbox_size = 500M
|
||||
|
||||
# maximum message size for an e-mail in bytes
|
||||
max_message_size = 31457280
|
||||
@@ -23,6 +26,9 @@ max_message_size = 31457280
|
||||
# days after which mails are unconditionally deleted
|
||||
delete_mails_after = 20
|
||||
|
||||
# days after which large messages (>200k) are unconditionally deleted
|
||||
delete_large_after = 7
|
||||
|
||||
# days after which users without a successful login are deleted (database and mails)
|
||||
delete_inactive_users_after = 90
|
||||
|
||||
@@ -40,21 +46,36 @@ passthrough_senders =
|
||||
|
||||
# list of e-mail recipients for which to accept outbound un-encrypted mails
|
||||
# (space-separated, item may start with "@" to whitelist whole recipient domains)
|
||||
passthrough_recipients = xstore@testrun.org
|
||||
passthrough_recipients =
|
||||
|
||||
# Use externally managed TLS certificates instead of built-in acmetool.
|
||||
# Paths refer to files on the deployment server (not the build machine).
|
||||
# Both files must already exist before running cmdeploy.
|
||||
# Certificate renewal is your responsibility; changed files are
|
||||
# picked up automatically by all relay services.
|
||||
# tls_external_cert_and_key = /path/to/fullchain.pem /path/to/privkey.pem
|
||||
|
||||
# path to www directory - documented here: https://chatmail.at/doc/relay/getting_started.html#custom-web-pages
|
||||
#www_folder = www
|
||||
|
||||
#
|
||||
# Deployment Details
|
||||
#
|
||||
|
||||
# where the filtermail SMTP service listens
|
||||
# SMTP outgoing filtermail and reinjection
|
||||
filtermail_smtp_port = 10080
|
||||
|
||||
# postfix accepts on the localhost reinject SMTP port
|
||||
postfix_reinject_port = 10025
|
||||
|
||||
# SMTP incoming filtermail and reinjection
|
||||
filtermail_smtp_port_incoming = 10081
|
||||
postfix_reinject_port_incoming = 10026
|
||||
|
||||
# if set to "True" IPv6 is disabled
|
||||
disable_ipv6 = False
|
||||
|
||||
# Your email address, which will be used in acmetool to manage Let's Encrypt SSL certificates
|
||||
acme_email =
|
||||
|
||||
# Defaults to https://iroh.{{mail_domain}} and running `iroh-relay` on the chatmail
|
||||
# service.
|
||||
# If you set it to anything else, the service will be disabled
|
||||
@@ -88,6 +109,12 @@ disable_ipv6 = False
|
||||
# so use this option with caution on production servers.
|
||||
imap_rawlog = false
|
||||
|
||||
# set to true if you want to enable the IMAP COMPRESS Extension,
|
||||
# which allows IMAP connections to be efficiently compressed.
|
||||
# WARNING: Enabling this makes it impossible to hibernate IMAP
|
||||
# processes which will result in much higher memory/RAM usage.
|
||||
imap_compress = false
|
||||
|
||||
|
||||
#
|
||||
# Privacy Policy
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
|
||||
[privacy]
|
||||
|
||||
passthrough_recipients = privacy@testrun.org xstore@testrun.org
|
||||
passthrough_recipients = privacy@testrun.org echo@{mail_domain}
|
||||
|
||||
privacy_postal =
|
||||
Merlinux GmbH, Represented by the managing director H. Krekel,
|
||||
|
||||
@@ -13,8 +13,6 @@ class LastLoginDictProxy(DictProxy):
|
||||
keyname = parts[1].split("/")
|
||||
value = parts[2] if len(parts) > 2 else ""
|
||||
if keyname[0] == "shared" and keyname[1] == "last-login":
|
||||
if addr.startswith("echo@"):
|
||||
return True
|
||||
addr = keyname[2]
|
||||
timestamp = int(value)
|
||||
user = self.config.get_user(addr)
|
||||
|
||||
@@ -1,14 +1,24 @@
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
from contextlib import contextmanager
|
||||
|
||||
from .config import read_config
|
||||
from .dictproxy import DictProxy
|
||||
from .filedict import FileDict
|
||||
from .notifier import Notifier
|
||||
from .turnserver import turn_credentials
|
||||
|
||||
|
||||
def _is_valid_token_timestamp(timestamp, now):
|
||||
# Token if invalid after 90 days
|
||||
# or if the timestamp is in the future.
|
||||
return timestamp > now - 3600 * 24 * 90 and timestamp < now + 60
|
||||
|
||||
|
||||
class Metadata:
|
||||
# each SETMETADATA on this key appends to a list of unique device tokens
|
||||
# each SETMETADATA on this key appends to dictionary
|
||||
# mapping of unique device tokens
|
||||
# which only ever get removed if the upstream indicates the token is invalid
|
||||
DEVICETOKEN_KEY = "devicetoken"
|
||||
|
||||
@@ -18,29 +28,60 @@ class Metadata:
|
||||
def get_metadata_dict(self, addr):
|
||||
return FileDict(self.vmail_dir / addr / "metadata.json")
|
||||
|
||||
def add_token_to_addr(self, addr, token):
|
||||
@contextmanager
|
||||
def _modify_tokens(self, addr):
|
||||
with self.get_metadata_dict(addr).modify() as data:
|
||||
tokens = data.setdefault(self.DEVICETOKEN_KEY, [])
|
||||
if token not in tokens:
|
||||
tokens.append(token)
|
||||
tokens = data.setdefault(self.DEVICETOKEN_KEY, {})
|
||||
now = int(time.time())
|
||||
if isinstance(tokens, list):
|
||||
data[self.DEVICETOKEN_KEY] = tokens = {t: now for t in tokens}
|
||||
|
||||
expired_tokens = [
|
||||
token
|
||||
for token, timestamp in tokens.items()
|
||||
if not _is_valid_token_timestamp(tokens[token], now)
|
||||
]
|
||||
for expired_token in expired_tokens:
|
||||
del tokens[expired_token]
|
||||
|
||||
yield tokens
|
||||
|
||||
def add_token_to_addr(self, addr, token):
|
||||
with self._modify_tokens(addr) as tokens:
|
||||
tokens[token] = int(time.time())
|
||||
|
||||
def remove_token_from_addr(self, addr, token):
|
||||
with self.get_metadata_dict(addr).modify() as data:
|
||||
tokens = data.get(self.DEVICETOKEN_KEY, [])
|
||||
with self._modify_tokens(addr) as tokens:
|
||||
if token in tokens:
|
||||
tokens.remove(token)
|
||||
del tokens[token]
|
||||
|
||||
def get_tokens_for_addr(self, addr):
|
||||
mdict = self.get_metadata_dict(addr).read()
|
||||
return mdict.get(self.DEVICETOKEN_KEY, [])
|
||||
tokens = mdict.get(self.DEVICETOKEN_KEY, {})
|
||||
|
||||
now = int(time.time())
|
||||
if isinstance(tokens, dict):
|
||||
token_list = [
|
||||
token
|
||||
for token, timestamp in tokens.items()
|
||||
if _is_valid_token_timestamp(timestamp, now)
|
||||
]
|
||||
if len(token_list) < len(tokens):
|
||||
# Some tokens have expired, remove them.
|
||||
with self._modify_tokens(addr) as _tokens:
|
||||
pass
|
||||
else:
|
||||
token_list = []
|
||||
return token_list
|
||||
|
||||
|
||||
class MetadataDictProxy(DictProxy):
|
||||
def __init__(self, notifier, metadata, iroh_relay=None):
|
||||
def __init__(self, notifier, metadata, iroh_relay=None, turn_hostname=None):
|
||||
super().__init__()
|
||||
self.notifier = notifier
|
||||
self.metadata = metadata
|
||||
self.iroh_relay = iroh_relay
|
||||
self.turn_hostname = turn_hostname
|
||||
|
||||
def handle_lookup(self, parts):
|
||||
# Lpriv/43f5f508a7ea0366dff30200c15250e3/devicetoken\tlkj123poi@c2.testrun.org
|
||||
@@ -59,6 +100,15 @@ class MetadataDictProxy(DictProxy):
|
||||
):
|
||||
# Handle `GETMETADATA "" /shared/vendor/deltachat/irohrelay`
|
||||
return f"O{self.iroh_relay}\n"
|
||||
elif keyname == "vendor/vendor.dovecot/pvt/server/vendor/deltachat/turn":
|
||||
try:
|
||||
res = turn_credentials()
|
||||
except Exception:
|
||||
logging.exception("failed to get TURN credentials")
|
||||
return "N\n"
|
||||
port = 3478
|
||||
return f"O{self.turn_hostname}:{port}:{res}\n"
|
||||
|
||||
logging.warning(f"lookup ignored: {parts!r}")
|
||||
return "N\n"
|
||||
|
||||
@@ -82,6 +132,7 @@ def main():
|
||||
|
||||
config = read_config(config_path)
|
||||
iroh_relay = config.iroh_relay
|
||||
mail_domain = config.mail_domain
|
||||
|
||||
vmail_dir = config.mailboxes_dir
|
||||
if not vmail_dir.exists():
|
||||
@@ -95,7 +146,10 @@ def main():
|
||||
notifier.start_notification_threads(metadata.remove_token_from_addr)
|
||||
|
||||
dictproxy = MetadataDictProxy(
|
||||
notifier=notifier, metadata=metadata, iroh_relay=iroh_relay
|
||||
notifier=notifier,
|
||||
metadata=metadata,
|
||||
iroh_relay=iroh_relay,
|
||||
turn_hostname=mail_domain,
|
||||
)
|
||||
|
||||
dictproxy.serve_forever_from_socket(socket)
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def main(vmail_dir=None):
    """Print Prometheus-style account-count metrics for a vmail directory.

    vmail_dir: path to the mailboxes directory; when omitted it is taken
    from the first command line argument.
    """
    if vmail_dir is None:
        vmail_dir = sys.argv[1]

    accounts = 0
    ci_accounts = 0

    for path in Path(vmail_dir).iterdir():
        accounts += 1
        # CI/test accounts are recognized by their directory-name prefix
        if path.name[:3] in ("ci-", "ac_"):
            ci_accounts += 1

    print("# HELP total number of accounts")
    print("# TYPE accounts gauge")
    print(f"accounts {accounts}")
    print("# HELP number of CI accounts")
    print("# TYPE ci_accounts gauge")
    print(f"ci_accounts {ci_accounts}")
    print("# HELP number of non-CI accounts")
    print("# TYPE nonci_accounts gauge")
    print(f"nonci_accounts {accounts - ci_accounts}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -3,9 +3,9 @@
|
||||
"""CGI script for creating new accounts."""
|
||||
|
||||
import json
|
||||
import random
|
||||
import secrets
|
||||
import string
|
||||
from urllib.parse import quote
|
||||
|
||||
from chatmaild.config import Config, read_config
|
||||
|
||||
@@ -15,7 +15,9 @@ ALPHANUMERIC_PUNCT = string.ascii_letters + string.digits + string.punctuation
|
||||
|
||||
|
||||
def create_newemail_dict(config: Config):
|
||||
user = "".join(random.choices(ALPHANUMERIC, k=config.username_min_length))
|
||||
user = "".join(
|
||||
secrets.choice(ALPHANUMERIC) for _ in range(config.username_max_length)
|
||||
)
|
||||
password = "".join(
|
||||
secrets.choice(ALPHANUMERIC_PUNCT)
|
||||
for _ in range(config.password_min_length + 3)
|
||||
@@ -23,13 +25,26 @@ def create_newemail_dict(config: Config):
|
||||
return dict(email=f"{user}@{config.mail_domain}", password=f"{password}")
|
||||
|
||||
|
||||
def create_dclogin_url(email, password):
    """Return a dclogin: URL carrying credentials for one-tap account setup.

    Uses ic=3 (AcceptInvalidCertificates) so chatmail clients
    can connect to servers with self-signed TLS certificates.
    """
    encoded_email = quote(email, safe="@")
    encoded_password = quote(password, safe="")
    return f"dclogin:{encoded_email}?p={encoded_password}&v=1&ic=3"
|
||||
|
||||
|
||||
def print_new_account():
|
||||
config = read_config(CONFIG_PATH)
|
||||
creds = create_newemail_dict(config)
|
||||
|
||||
result = dict(email=creds["email"], password=creds["password"])
|
||||
if config.tls_cert_mode == "self":
|
||||
result["dclogin_url"] = create_dclogin_url(creds["email"], creds["password"])
|
||||
|
||||
print("Content-Type: application/json")
|
||||
print("")
|
||||
print(json.dumps(creds))
|
||||
print(json.dumps(result))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -17,11 +17,11 @@ and which are scheduled for retry using exponential back-off timing.
|
||||
If a token notification would be scheduled more than DROP_DEADLINE seconds
|
||||
after its first attempt, it is dropped with a log error.
|
||||
|
||||
Note that tokens are completely opaque to the notification machinery here
|
||||
and will in the future be encrypted foreclosing all ability to distinguish
|
||||
Note that tokens are opaque to the notification machinery here
|
||||
and are encrypted foreclosing all ability to distinguish
|
||||
which device token ultimately goes to which phone-provider notification service,
|
||||
or to understand the relation of "device tokens" and chatmail addresses.
|
||||
The meaning and format of tokens is basically a matter of Delta-Chat Core and
|
||||
The meaning and format of tokens is basically a matter of chatmail Core and
|
||||
the `notification.delta.chat` service.
|
||||
"""
|
||||
|
||||
@@ -95,7 +95,12 @@ class Notifier:
|
||||
logging.warning(f"removing spurious queue item: {queue_path!r}")
|
||||
queue_path.unlink()
|
||||
continue
|
||||
queue_item = PersistentQueueItem.read_from_path(queue_path)
|
||||
try:
|
||||
queue_item = PersistentQueueItem.read_from_path(queue_path)
|
||||
except ValueError:
|
||||
logging.warning(f"removing spurious queue item: {queue_path!r}")
|
||||
queue_path.unlink()
|
||||
continue
|
||||
self.queue_for_retry(queue_item)
|
||||
|
||||
def queue_for_retry(self, queue_item, retry_num=0):
|
||||
|
||||
56
chatmaild/src/chatmaild/tests/mail-data/asm.eml
Normal file
56
chatmaild/src/chatmaild/tests/mail-data/asm.eml
Normal file
@@ -0,0 +1,56 @@
|
||||
From: {from_addr}
|
||||
To: {to_addr}
|
||||
Autocrypt-Setup-Message: v1
|
||||
Subject: Autocrypt Setup Message
|
||||
Date: Tue, 22 Jan 2019 12:56:29 +0100
|
||||
Content-type: multipart/mixed; boundary="Y6fyGi9SoGeH8WwRaEdC6bbBcYOedDzrQ"
|
||||
|
||||
--Y6fyGi9SoGeH8WwRaEdC6bbBcYOedDzrQ
|
||||
Content-Type: text/plain
|
||||
|
||||
This message contains all information to transfer your Autocrypt
|
||||
settings along with your secret key securely from your original
|
||||
device.
|
||||
|
||||
To set up your new device for Autocrypt, please follow the
|
||||
instructions that should be presented by your new device.
|
||||
|
||||
You can keep this message and use it as a backup for your secret
|
||||
key. If you want to do this, you should write down the Setup Code
|
||||
and store it securely.
|
||||
--Y6fyGi9SoGeH8WwRaEdC6bbBcYOedDzrQ
|
||||
Content-Type: application/autocrypt-setup
|
||||
Content-Disposition: attachment; filename="autocrypt-setup-message.html"
|
||||
|
||||
<html><body>
|
||||
<p>
|
||||
This is the Autocrypt setup file used to transfer settings and
|
||||
keys between clients. You can decrypt it using the Setup Code
|
||||
presented on your old device, and then import the contained key
|
||||
into your keyring.
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
-----BEGIN PGP MESSAGE-----
|
||||
Passphrase-Format: numeric9x4
|
||||
Passphrase-Begin: 17
|
||||
|
||||
jA0EBwMCFAxADoCdzeX/0ukBlqI5+pfpKb751qd/7nLNbkpy3gVcaf1QwRPZYt40
|
||||
Ynp08UqRQ2g48ZlnzHLSwlTGOPTuv2Jt8ka+pgZ45xzvJSG2gau03xP4VsC271kR
|
||||
VmCjdb0Y6Rk96mAwfGzrkbaRQ9Z7fIoL866GOv6h9neiVIkp+JYlTV6ISD0ZQJ4Q
|
||||
I6dOQkB/TWZyVjtiJDOQHdfNWliA6NtqaLq19wlu9L5xXjuNpY95KwR8EJXWe0+o
|
||||
Y3d2U/KxOAkXKghP2Qg1GtlPVeGC5T4p03TGI6pzKT+kHX6Rrm9wK6sM9aTquMmF
|
||||
Vok84Jg1DFnwivWC2RILR81rXi7k/+Y6MUbveFgJ9cQduqpxnmD7TjOblYu7M6zp
|
||||
YGAUxh8DRKlIMn2QsA++DBYQ6ACZvwuY8qTDLkqPDo4WqM313dsMJbyGjDdVE7EM
|
||||
PESS+RlABETpZXz8g/ycr6DIUNdlbPcmYlsBfHWDOuR2GFFTwmlv5slWS39dJv38
|
||||
E0eIe1CwdxI801Se7t7dUUS/ZF8wb6GlmxOcqGbF8eko1Z0S64IAm7/h13MRQCxI
|
||||
geQnHfGYVJ2FOimoCMEKwfa9x++RFTDW0u7spDC2uWvK/1viV8OfRppFhLr/kmKb
|
||||
18lWXuAz80DAjUDUsVqEq2MvJBJGoCJUEyjuRsLkHYRM5jYk4v50LyyR0Om73nWF
|
||||
nZBqmqNzdr7Xb9PHHdFhnEc0VvoYbrcM0RVYcEMW3YbmejM891j1d6Iv+/n/qND/
|
||||
NdebGrfWJMmFLf/iEkzTZ3/v5inW9LpWoRc94ioCjJTaEo8Rib6ARRFaJVIsmNXi
|
||||
YicFGO98D+zX+a2t9Yz6IpPajVslnOp6ScpmXgts/2XWD7oE+JgxSAqo/dLVsHgP
|
||||
Ufo=
|
||||
=pulM
|
||||
-----END PGP MESSAGE-----
|
||||
</pre></body></html>
|
||||
--Y6fyGi9SoGeH8WwRaEdC6bbBcYOedDzrQ--
|
||||
46
chatmaild/src/chatmaild/tests/mail-data/mailer-daemon.eml
Normal file
46
chatmaild/src/chatmaild/tests/mail-data/mailer-daemon.eml
Normal file
@@ -0,0 +1,46 @@
|
||||
Date: Fri, 8 Jul 1994 09:21:47 -0400
|
||||
From: Mail Delivery Subsystem <MAILER-DAEMON@example.org>
|
||||
Subject: Returned mail: User unknown
|
||||
To: <owner-ups-mib@CS.UTK.EDU>
|
||||
Auto-Submitted: auto-replied
|
||||
MIME-Version: 1.0
|
||||
Content-Type: multipart/report; report-type=delivery-status;
|
||||
boundary="JAA13167.773673707/CS.UTK.EDU"
|
||||
|
||||
--JAA13167.773673707/CS.UTK.EDU
|
||||
content-type: text/plain; charset=us-ascii
|
||||
|
||||
----- The following addresses had delivery problems -----
|
||||
<arathib@vnet.ibm.com> (unrecoverable error)
|
||||
<wsnell@sdcc13.ucsd.edu> (unrecoverable error)
|
||||
|
||||
--JAA13167.773673707/CS.UTK.EDU
|
||||
content-type: message/delivery-status
|
||||
|
||||
Reporting-MTA: dns; cs.utk.edu
|
||||
|
||||
Original-Recipient: rfc822;arathib@vnet.ibm.com
|
||||
Final-Recipient: rfc822;arathib@vnet.ibm.com
|
||||
Action: failed
|
||||
Status: 5.0.0 (permanent failure)
|
||||
Diagnostic-Code: smtp;
|
||||
550 'arathib@vnet.IBM.COM' is not a registered gateway user
|
||||
Remote-MTA: dns; vnet.ibm.com
|
||||
|
||||
Original-Recipient: rfc822;johnh@hpnjld.njd.hp.com
|
||||
Final-Recipient: rfc822;johnh@hpnjld.njd.hp.com
|
||||
Action: delayed
|
||||
Status: 4.0.0 (hpnjld.njd.jp.com: host name lookup failure)
|
||||
|
||||
Original-Recipient: rfc822;wsnell@sdcc13.ucsd.edu
|
||||
Final-Recipient: rfc822;wsnell@sdcc13.ucsd.edu
|
||||
Action: failed
|
||||
Status: 5.0.0
|
||||
Diagnostic-Code: smtp; 550 user unknown
|
||||
Remote-MTA: dns; sdcc13.ucsd.edu
|
||||
|
||||
--JAA13167.773673707/CS.UTK.EDU
|
||||
content-type: message/rfc822
|
||||
|
||||
[original message goes here]
|
||||
--JAA13167.773673707/CS.UTK.EDU--
|
||||
@@ -0,0 +1,21 @@
|
||||
Subject: Message from {from_addr}
|
||||
From: <{from_addr}>
|
||||
To: <{to_addr}>
|
||||
Date: Sun, 15 Oct 2023 16:43:25 +0000
|
||||
Message-ID: <Mr.78MWtlV7RAi.goCFzBhCYfy@c2.testrun.org>
|
||||
Chat-Version: 1.0
|
||||
Secure-Join: vc-request
|
||||
Secure-Join-Invitenumber: RANDOM-TOKEN
|
||||
MIME-Version: 1.0
|
||||
Content-Type: multipart/mixed; boundary="Gl92xgZjOShJ5PGHntqYkoo2OK2Dvi"
|
||||
|
||||
|
||||
--Gl92xgZjOShJ5PGHntqYkoo2OK2Dvi
|
||||
Content-Type: text/plain; charset=utf-8
|
||||
|
||||
Buy viagra!
|
||||
|
||||
|
||||
--Gl92xgZjOShJ5PGHntqYkoo2OK2Dvi--
|
||||
|
||||
|
||||
21
chatmaild/src/chatmaild/tests/mail-data/securejoin-vc.eml
Normal file
21
chatmaild/src/chatmaild/tests/mail-data/securejoin-vc.eml
Normal file
@@ -0,0 +1,21 @@
|
||||
Subject: Message from {from_addr}
|
||||
From: <{from_addr}>
|
||||
To: <{to_addr}>
|
||||
Date: Sun, 15 Oct 2023 16:43:25 +0000
|
||||
Message-ID: <Mr.78MWtlV7RAi.goCFzBhCYfy@c2.testrun.org>
|
||||
Chat-Version: 1.0
|
||||
Secure-Join: vc-request
|
||||
Secure-Join-Invitenumber: RANDOM-TOKEN
|
||||
MIME-Version: 1.0
|
||||
Content-Type: multipart/mixed; boundary="Gl92xgZjOShJ5PGHntqYkoo2OK2Dvi"
|
||||
|
||||
|
||||
--Gl92xgZjOShJ5PGHntqYkoo2OK2Dvi
|
||||
Content-Type: text/plain; charset=utf-8
|
||||
|
||||
Secure-Join: vc-request
|
||||
|
||||
|
||||
--Gl92xgZjOShJ5PGHntqYkoo2OK2Dvi--
|
||||
|
||||
|
||||
@@ -69,12 +69,11 @@ def maildata(request):
|
||||
|
||||
assert datadir.exists(), datadir
|
||||
|
||||
def maildata(name, from_addr, to_addr, subject="..."):
|
||||
def maildata(name, from_addr, to_addr, subject="[...]"):
|
||||
# Using `.read_bytes().decode()` instead of `.read_text()` to preserve newlines.
|
||||
data = datadir.joinpath(name).read_bytes().decode()
|
||||
|
||||
text = data.format(from_addr=from_addr, to_addr=to_addr, subject=subject)
|
||||
return BytesParser(policy=policy.default).parsebytes(text.encode())
|
||||
return BytesParser(policy=policy.SMTP).parsebytes(text.encode())
|
||||
|
||||
return maildata
|
||||
|
||||
@@ -86,13 +85,13 @@ def mockout():
|
||||
captured_green = []
|
||||
captured_plain = []
|
||||
|
||||
def red(self, msg):
|
||||
def red(self, msg, **kw):
|
||||
self.captured_red.append(msg)
|
||||
|
||||
def green(self, msg):
|
||||
def green(self, msg, **kw):
|
||||
self.captured_green.append(msg)
|
||||
|
||||
def __call__(self, msg):
|
||||
def print(self, msg="", **kw):
|
||||
self.captured_plain.append(msg)
|
||||
|
||||
return MockOut()
|
||||
|
||||
@@ -15,6 +15,14 @@ def test_read_config_basic(example_config):
|
||||
assert example_config.mail_domain == "chat.example.org"
|
||||
|
||||
|
||||
def test_read_config_basic_using_defaults(tmp_path, maildomain):
|
||||
inipath = tmp_path.joinpath("chatmail.ini")
|
||||
inipath.write_text(f"[params]\nmail_domain = {maildomain}")
|
||||
example_config = read_config(inipath)
|
||||
assert example_config.max_user_send_per_minute == 60
|
||||
assert example_config.filtermail_smtp_port_incoming == 10081
|
||||
|
||||
|
||||
def test_read_config_testrun(make_config):
|
||||
config = make_config("something.testrun.org")
|
||||
assert config.mail_domain == "something.testrun.org"
|
||||
@@ -25,8 +33,9 @@ def test_read_config_testrun(make_config):
|
||||
assert config.filtermail_smtp_port == 10080
|
||||
assert config.postfix_reinject_port == 10025
|
||||
assert config.max_user_send_per_minute == 60
|
||||
assert config.max_mailbox_size == "100M"
|
||||
assert config.max_mailbox_size == "500M"
|
||||
assert config.delete_mails_after == "20"
|
||||
assert config.delete_large_after == "7"
|
||||
assert config.username_min_length == 9
|
||||
assert config.username_max_length == 9
|
||||
assert config.password_min_length == 9
|
||||
@@ -64,3 +73,51 @@ def test_config_userstate_paths(make_config, tmp_path):
|
||||
def test_config_max_message_size(make_config, tmp_path):
|
||||
config = make_config("something.testrun.org", dict(max_message_size="10000"))
|
||||
assert config.max_message_size == 10000
|
||||
|
||||
|
||||
def test_config_tls_default_acme(make_config):
|
||||
config = make_config("chat.example.org")
|
||||
assert config.tls_cert_mode == "acme"
|
||||
assert config.tls_cert_path == "/var/lib/acme/live/chat.example.org/fullchain"
|
||||
assert config.tls_key_path == "/var/lib/acme/live/chat.example.org/privkey"
|
||||
|
||||
|
||||
def test_config_tls_self(make_config):
|
||||
config = make_config("_test.example.org")
|
||||
assert config.tls_cert_mode == "self"
|
||||
assert config.tls_cert_path == "/etc/ssl/certs/mailserver.pem"
|
||||
assert config.tls_key_path == "/etc/ssl/private/mailserver.key"
|
||||
|
||||
|
||||
def test_config_tls_external(make_config):
|
||||
config = make_config(
|
||||
"chat.example.org",
|
||||
{
|
||||
"tls_external_cert_and_key": "/custom/fullchain.pem /custom/privkey.pem",
|
||||
},
|
||||
)
|
||||
assert config.tls_cert_mode == "external"
|
||||
assert config.tls_cert_path == "/custom/fullchain.pem"
|
||||
assert config.tls_key_path == "/custom/privkey.pem"
|
||||
|
||||
|
||||
def test_config_tls_external_overrides_underscore(make_config):
|
||||
config = make_config(
|
||||
"_test.example.org",
|
||||
{
|
||||
"tls_external_cert_and_key": "/certs/fullchain.pem /certs/privkey.pem",
|
||||
},
|
||||
)
|
||||
assert config.tls_cert_mode == "external"
|
||||
assert config.tls_cert_path == "/certs/fullchain.pem"
|
||||
assert config.tls_key_path == "/certs/privkey.pem"
|
||||
|
||||
|
||||
def test_config_tls_external_bad_format(make_config):
|
||||
with pytest.raises(ValueError, match="two space-separated"):
|
||||
make_config(
|
||||
"chat.example.org",
|
||||
{
|
||||
"tls_external_cert_and_key": "/only/one/path.pem",
|
||||
},
|
||||
)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import time
|
||||
|
||||
from chatmaild.delete_inactive_users import delete_inactive_users
|
||||
from chatmaild.doveauth import AuthDictProxy
|
||||
from chatmaild.expire import main as main_expire
|
||||
|
||||
|
||||
def test_login_timestamps(example_config):
|
||||
@@ -45,7 +45,12 @@ def test_delete_inactive_users(example_config):
|
||||
for addr in to_remove:
|
||||
assert example_config.get_user(addr).maildir.exists()
|
||||
|
||||
delete_inactive_users(example_config)
|
||||
main_expire(
|
||||
args=[
|
||||
"--remove",
|
||||
str(example_config._inipath),
|
||||
]
|
||||
)
|
||||
|
||||
for p in example_config.mailboxes_dir.iterdir():
|
||||
assert not p.name.startswith("old")
|
||||
|
||||
@@ -120,6 +120,60 @@ def test_handle_dovecot_protocol_iterate(gencreds, example_config):
|
||||
assert not lines[2]
|
||||
|
||||
|
||||
def test_invalid_localpart_characters(make_config):
|
||||
"""Test that is_allowed_to_create rejects localparts with invalid characters."""
|
||||
config = make_config("chat.example.org", {"username_min_length": "3"})
|
||||
password = "zequ0Aimuchoodaechik"
|
||||
domain = config.mail_domain
|
||||
|
||||
# valid localparts
|
||||
assert is_allowed_to_create(config, f"abc123@{domain}", password)
|
||||
assert is_allowed_to_create(config, f"a.b-c_d@{domain}", password)
|
||||
|
||||
# uppercase rejected
|
||||
assert not is_allowed_to_create(config, f"Abc123@{domain}", password)
|
||||
assert not is_allowed_to_create(config, f"ABCDEFG@{domain}", password)
|
||||
|
||||
# spaces and special chars rejected
|
||||
assert not is_allowed_to_create(config, f"a b cde@{domain}", password)
|
||||
assert not is_allowed_to_create(config, f"abc+def@{domain}", password)
|
||||
assert not is_allowed_to_create(config, f"abc!def@{domain}", password)
|
||||
assert not is_allowed_to_create(config, f"ab@cdef@{domain}", password)
|
||||
assert not is_allowed_to_create(config, f"abc/def@{domain}", password)
|
||||
assert not is_allowed_to_create(config, f"abc\\def@{domain}", password)
|
||||
|
||||
|
||||
def test_concurrent_creation_same_account(dictproxy):
|
||||
"""Test that concurrent creation of the same account doesn't corrupt password."""
|
||||
addr = "racetest1@chat.example.org"
|
||||
password = "zequ0Aimuchoodaechik"
|
||||
num_threads = 10
|
||||
results = queue.Queue()
|
||||
|
||||
def create():
|
||||
try:
|
||||
res = dictproxy.lookup_passdb(addr, password)
|
||||
results.put(("ok", res))
|
||||
except Exception:
|
||||
results.put(("err", traceback.format_exc()))
|
||||
|
||||
threads = [threading.Thread(target=create, daemon=True) for _ in range(num_threads)]
|
||||
for t in threads:
|
||||
t.start()
|
||||
for t in threads:
|
||||
t.join(timeout=10)
|
||||
|
||||
passwords_seen = set()
|
||||
for _ in range(num_threads):
|
||||
status, res = results.get()
|
||||
if status == "err":
|
||||
pytest.fail(f"concurrent creation failed\n{res}")
|
||||
passwords_seen.add(res["password"])
|
||||
|
||||
# all threads must see the same password hash
|
||||
assert len(passwords_seen) == 1
|
||||
|
||||
|
||||
def test_50_concurrent_lookups_different_accounts(gencreds, dictproxy):
|
||||
num_threads = 50
|
||||
req_per_thread = 5
|
||||
|
||||
198
chatmaild/src/chatmaild/tests/test_expire.py
Normal file
198
chatmaild/src/chatmaild/tests/test_expire.py
Normal file
@@ -0,0 +1,198 @@
|
||||
import os
|
||||
import random
|
||||
from datetime import datetime
|
||||
from fnmatch import fnmatch
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from chatmaild.expire import (
|
||||
FileEntry,
|
||||
MailboxStat,
|
||||
get_file_entry,
|
||||
iter_mailboxes,
|
||||
os_listdir_if_exists,
|
||||
)
|
||||
from chatmaild.expire import main as expiry_main
|
||||
from chatmaild.fsreport import main as report_main
|
||||
|
||||
|
||||
def fill_mbox(folderdir):
|
||||
password = folderdir.joinpath("password")
|
||||
password.write_text("xxx")
|
||||
folderdir.joinpath("maildirsize").write_text("xxx")
|
||||
|
||||
garbagedir = folderdir.joinpath("garbagedir")
|
||||
garbagedir.mkdir()
|
||||
garbagedir.joinpath("bimbum").write_text("hello")
|
||||
|
||||
create_new_messages(folderdir, ["cur/msg1"], size=500)
|
||||
create_new_messages(folderdir, ["new/msg2"], size=600)
|
||||
|
||||
|
||||
def create_new_messages(basedir, relpaths, size=1000, days=0):
|
||||
now = datetime.utcnow().timestamp()
|
||||
|
||||
for relpath in relpaths:
|
||||
msg_path = Path(basedir).joinpath(relpath)
|
||||
msg_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
msg_path.write_text("x" * size)
|
||||
# accessed now, modified N days ago
|
||||
os.utime(msg_path, (now, now - days * 86400))
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mbox1(example_config):
|
||||
mboxdir = example_config.mailboxes_dir.joinpath("mailbox1@example.org")
|
||||
mboxdir.mkdir()
|
||||
fill_mbox(mboxdir)
|
||||
return MailboxStat(mboxdir)
|
||||
|
||||
|
||||
def test_deltachat_folder(example_config):
|
||||
"""Test old setups that might have a .DeltaChat folder where messages also need to get removed."""
|
||||
mboxdir = example_config.mailboxes_dir.joinpath("mailbox1@example.org")
|
||||
mboxdir.mkdir()
|
||||
mbox2dir = mboxdir.joinpath(".DeltaChat")
|
||||
mbox2dir.mkdir()
|
||||
fill_mbox(mbox2dir)
|
||||
mb = MailboxStat(mboxdir)
|
||||
assert len(mb.messages) == 2
|
||||
|
||||
|
||||
def test_filentry_ordering(tmp_path):
|
||||
l = [FileEntry(f"x{i}", size=i + 10, mtime=1000 - i) for i in range(10)]
|
||||
sorted = list(l)
|
||||
random.shuffle(l)
|
||||
l.sort(key=lambda x: x.size)
|
||||
assert l == sorted
|
||||
|
||||
|
||||
def test_no_mailbxoes(tmp_path, capsys):
|
||||
assert [] == list(iter_mailboxes(str(tmp_path.joinpath("notexists")), maxnum=10))
|
||||
out, err = capsys.readouterr()
|
||||
assert "no mailboxes" in err
|
||||
|
||||
|
||||
def test_stats_mailbox(mbox1):
|
||||
password = Path(mbox1.basedir).joinpath("password")
|
||||
assert mbox1.last_login == password.stat().st_mtime
|
||||
assert len(mbox1.messages) == 2
|
||||
|
||||
msgs = list(sorted(mbox1.messages, key=lambda x: x.size))
|
||||
assert len(msgs) == 2
|
||||
assert msgs[0].size == 500 # cur
|
||||
assert msgs[1].size == 600 # new
|
||||
|
||||
create_new_messages(mbox1.basedir, ["large-extra"], size=1000)
|
||||
create_new_messages(mbox1.basedir, ["index-something"], size=3)
|
||||
mbox2 = MailboxStat(mbox1.basedir)
|
||||
assert len(mbox2.extrafiles) == 5
|
||||
assert mbox2.extrafiles[0].size == 1000
|
||||
|
||||
# cope well with mailbox dirs that have no password (for whatever reason)
|
||||
Path(mbox1.basedir).joinpath("password").unlink()
|
||||
mbox3 = MailboxStat(mbox1.basedir)
|
||||
assert mbox3.last_login is None
|
||||
|
||||
|
||||
def test_report_no_mailboxes(example_config):
|
||||
args = (str(example_config._inipath),)
|
||||
report_main(args)
|
||||
|
||||
|
||||
def test_report(mbox1, example_config):
|
||||
args = (str(example_config._inipath),)
|
||||
report_main(args)
|
||||
args = list(args) + "--days 1".split()
|
||||
report_main(args)
|
||||
args = list(args) + "--min-login-age 1".split()
|
||||
report_main(args)
|
||||
args = list(args) + "--mdir cur".split()
|
||||
report_main(args)
|
||||
|
||||
|
||||
def test_report_mdir_filters_by_path(mbox1, example_config):
|
||||
"""Test that Report with mdir='cur' only counts messages in cur/ subdirectory."""
|
||||
from chatmaild.fsreport import Report
|
||||
|
||||
now = datetime.utcnow().timestamp()
|
||||
|
||||
# Set password mtime to old enough so min_login_age check passes
|
||||
password = Path(mbox1.basedir).joinpath("password")
|
||||
old_time = now - 86400 * 10 # 10 days ago
|
||||
os.utime(password, (old_time, old_time))
|
||||
|
||||
# Reload mailbox with updated mtime
|
||||
from chatmaild.expire import MailboxStat
|
||||
|
||||
mbox = MailboxStat(mbox1.basedir)
|
||||
|
||||
# Report without mdir — should count all messages
|
||||
rep_all = Report(now=now, min_login_age=1, mdir=None)
|
||||
rep_all.process_mailbox_stat(mbox)
|
||||
total_all = rep_all.message_buckets[0]
|
||||
|
||||
# Report with mdir='cur' — should only count cur/ messages
|
||||
rep_cur = Report(now=now, min_login_age=1, mdir="cur")
|
||||
rep_cur.process_mailbox_stat(mbox)
|
||||
total_cur = rep_cur.message_buckets[0]
|
||||
|
||||
# Report with mdir='new' — should only count new/ messages
|
||||
rep_new = Report(now=now, min_login_age=1, mdir="new")
|
||||
rep_new.process_mailbox_stat(mbox)
|
||||
total_new = rep_new.message_buckets[0]
|
||||
|
||||
# cur has 500-byte msg, new has 600-byte msg (from fill_mbox)
|
||||
assert total_cur == 500
|
||||
assert total_new == 600
|
||||
assert total_all == 500 + 600
|
||||
|
||||
|
||||
def test_expiry_cli_basic(example_config, mbox1):
|
||||
args = (str(example_config._inipath),)
|
||||
expiry_main(args)
|
||||
|
||||
|
||||
def test_expiry_cli_old_files(capsys, example_config, mbox1):
|
||||
relpaths_old = ["cur/msg_old1", "cur/msg_old1"]
|
||||
cutoff_days = int(example_config.delete_mails_after) + 1
|
||||
create_new_messages(mbox1.basedir, relpaths_old, size=1000, days=cutoff_days)
|
||||
|
||||
relpaths_large = ["cur/msg_old_large1", "new/msg_old_large2"]
|
||||
cutoff_days = int(example_config.delete_large_after) + 1
|
||||
create_new_messages(
|
||||
mbox1.basedir, relpaths_large, size=1000 * 300, days=cutoff_days
|
||||
)
|
||||
|
||||
create_new_messages(mbox1.basedir, ["cur/shouldstay"], size=1000 * 300, days=1)
|
||||
|
||||
args = str(example_config._inipath), "--remove", "-v"
|
||||
expiry_main(args)
|
||||
out, err = capsys.readouterr()
|
||||
|
||||
allpaths = relpaths_old + relpaths_large + ["maildirsize"]
|
||||
for path in allpaths:
|
||||
for line in err.split("\n"):
|
||||
if fnmatch(line, f"removing*{path}"):
|
||||
break
|
||||
else:
|
||||
if path != "new/msg_old_large2":
|
||||
pytest.fail(f"failed to remove {path}\n{err}")
|
||||
|
||||
assert "shouldstay" not in err
|
||||
|
||||
|
||||
def test_get_file_entry(tmp_path):
|
||||
assert get_file_entry(str(tmp_path.joinpath("123123"))) is None
|
||||
p = tmp_path.joinpath("x")
|
||||
p.write_text("hello")
|
||||
entry = get_file_entry(str(p))
|
||||
assert entry.size == 5
|
||||
assert entry.mtime
|
||||
|
||||
|
||||
def test_os_listdir_if_exists(tmp_path):
|
||||
tmp_path.joinpath("x").write_text("hello")
|
||||
assert len(os_listdir_if_exists(str(tmp_path))) == 1
|
||||
assert len(os_listdir_if_exists(str(tmp_path.joinpath("123123")))) == 0
|
||||
@@ -1,217 +0,0 @@
|
||||
import pytest
|
||||
|
||||
from chatmaild.filtermail import (
|
||||
BeforeQueueHandler,
|
||||
SendRateLimiter,
|
||||
check_armored_payload,
|
||||
check_encrypted,
|
||||
common_encrypted_subjects,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def maildomain():
|
||||
# let's not depend on a real chatmail instance for the offline tests below
|
||||
return "chatmail.example.org"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def handler(make_config, maildomain):
|
||||
config = make_config(maildomain)
|
||||
return BeforeQueueHandler(config)
|
||||
|
||||
|
||||
def test_reject_forged_from(maildata, gencreds, handler):
|
||||
class env:
|
||||
mail_from = gencreds()[0]
|
||||
rcpt_tos = [gencreds()[0]]
|
||||
|
||||
# test that the filter lets good mail through
|
||||
to_addr = gencreds()[0]
|
||||
env.content = maildata(
|
||||
"plain.eml", from_addr=env.mail_from, to_addr=to_addr
|
||||
).as_bytes()
|
||||
|
||||
assert not handler.check_DATA(envelope=env)
|
||||
|
||||
# test that the filter rejects forged mail
|
||||
env.content = maildata(
|
||||
"plain.eml", from_addr="forged@c3.testrun.org", to_addr=to_addr
|
||||
).as_bytes()
|
||||
error = handler.check_DATA(envelope=env)
|
||||
assert "500" in error
|
||||
|
||||
|
||||
def test_filtermail_no_encryption_detection(maildata):
|
||||
msg = maildata(
|
||||
"plain.eml", from_addr="some@example.org", to_addr="other@example.org"
|
||||
)
|
||||
assert not check_encrypted(msg)
|
||||
|
||||
# https://xkcd.com/1181/
|
||||
msg = maildata(
|
||||
"fake-encrypted.eml", from_addr="some@example.org", to_addr="other@example.org"
|
||||
)
|
||||
assert not check_encrypted(msg)
|
||||
|
||||
|
||||
def test_filtermail_encryption_detection(maildata):
|
||||
for subject in common_encrypted_subjects:
|
||||
msg = maildata(
|
||||
"encrypted.eml",
|
||||
from_addr="1@example.org",
|
||||
to_addr="2@example.org",
|
||||
subject=subject,
|
||||
)
|
||||
assert check_encrypted(msg)
|
||||
|
||||
# if the subject is not a known encrypted subject value, it is not considered ac-encrypted
|
||||
msg.replace_header("Subject", "Click this link")
|
||||
assert not check_encrypted(msg)
|
||||
|
||||
|
||||
def test_filtermail_no_literal_packets(maildata):
|
||||
"""Test that literal OpenPGP packet is not considered an encrypted mail."""
|
||||
msg = maildata("literal.eml", from_addr="1@example.org", to_addr="2@example.org")
|
||||
assert not check_encrypted(msg)
|
||||
|
||||
|
||||
def test_filtermail_unencrypted_mdn(maildata, gencreds):
|
||||
"""Unencrypted MDNs should not pass."""
|
||||
from_addr = gencreds()[0]
|
||||
to_addr = gencreds()[0] + ".other"
|
||||
msg = maildata("mdn.eml", from_addr=from_addr, to_addr=to_addr)
|
||||
|
||||
assert not check_encrypted(msg)
|
||||
|
||||
|
||||
def test_send_rate_limiter():
|
||||
limiter = SendRateLimiter()
|
||||
for i in range(100):
|
||||
if limiter.is_sending_allowed("some@example.org", 10):
|
||||
if i <= 10:
|
||||
continue
|
||||
pytest.fail("limiter didn't work")
|
||||
else:
|
||||
assert i == 11
|
||||
break
|
||||
|
||||
|
||||
def test_excempt_privacy(maildata, gencreds, handler):
|
||||
from_addr = gencreds()[0]
|
||||
to_addr = "privacy@testrun.org"
|
||||
handler.config.passthrough_recipients = [to_addr]
|
||||
false_to = "privacy@something.org"
|
||||
|
||||
msg = maildata("plain.eml", from_addr=from_addr, to_addr=to_addr)
|
||||
|
||||
class env:
|
||||
mail_from = from_addr
|
||||
rcpt_tos = [to_addr]
|
||||
content = msg.as_bytes()
|
||||
|
||||
# assert that None/no error is returned
|
||||
assert not handler.check_DATA(envelope=env)
|
||||
|
||||
class env2:
|
||||
mail_from = from_addr
|
||||
rcpt_tos = [to_addr, false_to]
|
||||
content = msg.as_bytes()
|
||||
|
||||
assert "500" in handler.check_DATA(envelope=env2)
|
||||
|
||||
|
||||
def test_passthrough_domains(maildata, gencreds, handler):
|
||||
from_addr = gencreds()[0]
|
||||
to_addr = "privacy@x.y.z"
|
||||
handler.config.passthrough_recipients = ["@x.y.z"]
|
||||
false_to = "something@x.y"
|
||||
|
||||
msg = maildata("plain.eml", from_addr=from_addr, to_addr=to_addr)
|
||||
|
||||
class env:
|
||||
mail_from = from_addr
|
||||
rcpt_tos = [to_addr]
|
||||
content = msg.as_bytes()
|
||||
|
||||
# assert that None/no error is returned
|
||||
assert not handler.check_DATA(envelope=env)
|
||||
|
||||
class env2:
|
||||
mail_from = from_addr
|
||||
rcpt_tos = [to_addr, false_to]
|
||||
content = msg.as_bytes()
|
||||
|
||||
assert "500" in handler.check_DATA(envelope=env2)
|
||||
|
||||
|
||||
def test_passthrough_senders(gencreds, handler, maildata):
|
||||
acc1 = gencreds()[0]
|
||||
to_addr = "recipient@something.org"
|
||||
handler.config.passthrough_senders = [acc1]
|
||||
|
||||
msg = maildata("plain.eml", from_addr=acc1, to_addr=to_addr)
|
||||
|
||||
class env:
|
||||
mail_from = acc1
|
||||
rcpt_tos = to_addr
|
||||
content = msg.as_bytes()
|
||||
|
||||
# assert that None/no error is returned
|
||||
assert not handler.check_DATA(envelope=env)
|
||||
|
||||
|
||||
def test_check_armored_payload():
|
||||
payload = """-----BEGIN PGP MESSAGE-----\r
|
||||
\r
|
||||
wU4DSqFx0d1yqAoSAQdAYkX/ZN/Az4B0k7X47zKyWrXxlDEdS3WOy0Yf2+GJTFgg\r
|
||||
Zk5ql0mLG8Ze+ZifCS0XMO4otlemSyJ0K1ZPdFMGzUDBTgNqzkFabxXoXRIBB0AM\r
|
||||
755wlX41X6Ay3KhnwBq7yEqSykVH6F3x11iHPKraLCAGZoaS8bKKNy/zg5slda1X\r
|
||||
pt14b4aC1VwtSnYhcRRELNLD/wE2TFif+g7poMmFY50VyMPLYjVP96Z5QCT4+z4H\r
|
||||
Ikh/pRRN8S3JNMrRJHc6prooSJmLcx47Y5un7VFy390MsJ+LiUJuQMDdYWRAinfs\r
|
||||
Ebm89Ezjm7F03qbFPXE0X4ZNzVXS/eKO0uhJQdiov/vmbn41rNtHmNpqjaO0vi5+\r
|
||||
sS9tR7yDUrIXiCUCN78eBLVioxtktsPZm5cDORbQWzv+7nmCEz9/JowCUcBVdCGn\r
|
||||
1ofOaH82JCAX/cRx08pLaDNj6iolVBsi56Dd+2bGxJOZOG2AMcEyz0pXY0dOAJCD\r
|
||||
iUThcQeGIdRnU3j8UBcnIEsjLu2+C+rrwMZQESMWKnJ0rnqTk0pK5kXScr6F/L0L\r
|
||||
UE49ccIexNm3xZvYr5drszr6wz3Tv5fdue87P4etBt90gF/Vzknck+g1LLlkzZkp\r
|
||||
d8dI0k2tOSPjUbDPnSy1x+X73WGpPZmj0kWT+RGvq0nH6UkJj3AQTG2qf1T8jK+3\r
|
||||
rTp3LR9vDkMwDjX4R8SA9c0wdnUzzr79OYQC9lTnzcx+fM6BBmgQ2GrS33jaFLp7\r
|
||||
L6/DFpCl5zhnPjM/2dKvMkw/Kd6XS/vjwsO405FQdjSDiQEEAZA+ZvAfcjdccbbU\r
|
||||
yCO+x0QNdeBsufDVnh3xvzuWy4CICdTQT4s1AWRPCzjOj+SGmx5WqCLWfsd8Ma0+\r
|
||||
w/C7SfTYu1FDQILLM+llpq1M/9GPley4QZ8JQjo262AyPXsPF/OW48uuZz0Db1xT\r
|
||||
Yh4iHBztj4VSdy7l2+IyaIf7cnL4EEBFxv/MwmVDXvDlxyvfAfIsd3D9SvJESzKZ\r
|
||||
VWDYwaocgeCN+ojKu1p885lu1EfRbX3fr3YO02K5/c2JYDkc0Py0W3wUP/J1XUax\r
|
||||
pbKpzwlkxEgtmzsGqsOfMJqBV3TNDrOA2uBsa+uBqP5MGYLZ49S/4v/bW9I01Cr1\r
|
||||
D2ZkV510Y1Vgo66WlP8mRqOTyt/5WRhPD+MxXdk67BNN/PmO6tMlVoJDuk+XwWPR\r
|
||||
t2TvNaND/yabT9eYI55Og4fzKD6RIjouUX8DvKLkm+7aXxVs2uuLQ3Jco3O82z55\r
|
||||
dbShU1jYsrw9oouXUz06MHPbkdhNbF/2hfhZ2qA31sNeovJw65iUv7sDKX3LVWgJ\r
|
||||
10jlywcDwqlU8CO7WC9lGixYTbnOkYZpXCGEl8e6Jbs79l42YFo4ogYpFK1NXFhV\r
|
||||
kOXRmDf/wmfj+c/ld3L2PkvwlgofhCudOQknZbo3ub1gjiTn7L+lMGHIj/3suMIl\r
|
||||
ID4EUxAXScIM1ZEz2fjtW5jATlqYcLjLTbf/olw6HFyPNH+9IssqXeZNKnGwPUB9\r
|
||||
3lTXsg0tpzl+x7F/2WjEw1DSNhjC0KnHt1vEYNMkUGDGFdN9y3ERLqX/FIgiASUb\r
|
||||
bTvAVupnAK3raBezGmhrs6LsQtLS9P0VvQiLU3uDhMqw8Z4SISLpcD+NnVBHzQqm\r
|
||||
6W5Qn/8xsCL6av18yUVTi2G3igt3QCNoYx9evt2ZcIkNoyyagUVjfZe5GHXh8Dnz\r
|
||||
GaBXW/hg3HlXLRGaQu4RYCzBMJILcO25OhZOg6jbkCLiEexQlm2e9krB5cXR49Al\r
|
||||
UN4fiB0KR9JyG2ayUdNJVkXZSZLnHyRgiaadlpUo16LVvw==\r
|
||||
=b5Kp\r
|
||||
-----END PGP MESSAGE-----\r
|
||||
\r
|
||||
"""
|
||||
|
||||
assert check_armored_payload(payload) == True
|
||||
|
||||
payload = """-----BEGIN PGP MESSAGE-----\r
|
||||
\r
|
||||
HELLOWORLD
|
||||
-----END PGP MESSAGE-----\r
|
||||
\r
|
||||
"""
|
||||
assert check_armored_payload(payload) == False
|
||||
|
||||
payload = """-----BEGIN PGP MESSAGE-----\r
|
||||
\r
|
||||
=njUN
|
||||
-----END PGP MESSAGE-----\r
|
||||
\r
|
||||
"""
|
||||
assert check_armored_payload(payload) == False
|
||||
90
chatmaild/src/chatmaild/tests/test_filtermail_blackbox.py
Normal file
90
chatmaild/src/chatmaild/tests/test_filtermail_blackbox.py
Normal file
@@ -0,0 +1,90 @@
|
||||
import shutil
|
||||
import smtplib
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
import pytest
|
||||
|
||||
pytestmark = pytest.mark.skipif(
|
||||
shutil.which("filtermail") is None,
|
||||
reason="filtermail binary not found",
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def smtpserver():
|
||||
from pytest_localserver import smtp
|
||||
|
||||
server = smtp.Server("127.0.0.1")
|
||||
server.start()
|
||||
yield server
|
||||
server.stop()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def make_popen(request):
|
||||
def popen(cmdargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kw):
|
||||
p = subprocess.Popen(
|
||||
cmdargs,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
)
|
||||
|
||||
def fin():
|
||||
p.terminate()
|
||||
out, err = p.communicate()
|
||||
print(out.decode("ascii"))
|
||||
print(err.decode("ascii"), file=sys.stderr)
|
||||
|
||||
request.addfinalizer(fin)
|
||||
return p
|
||||
|
||||
return popen
|
||||
|
||||
|
||||
@pytest.mark.parametrize("filtermail_mode", ["outgoing", "incoming"])
|
||||
def test_one_mail(
|
||||
make_config, make_popen, smtpserver, maildata, filtermail_mode, monkeypatch
|
||||
):
|
||||
monkeypatch.setenv("PYTHONUNBUFFERED", "1")
|
||||
# DKIM is tested by cmdeploy tests.
|
||||
monkeypatch.setenv("FILTERMAIL_SKIP_DKIM", "1")
|
||||
smtp_inject_port = 20025
|
||||
if filtermail_mode == "outgoing":
|
||||
settings = dict(
|
||||
postfix_reinject_port=smtpserver.port,
|
||||
filtermail_smtp_port=smtp_inject_port,
|
||||
)
|
||||
else:
|
||||
settings = dict(
|
||||
postfix_reinject_port_incoming=smtpserver.port,
|
||||
filtermail_smtp_port_incoming=smtp_inject_port,
|
||||
)
|
||||
|
||||
config = make_config("example.org", settings=settings)
|
||||
path = str(config._inipath)
|
||||
|
||||
popen = make_popen(["filtermail", path, filtermail_mode])
|
||||
line = popen.stderr.readline().strip()
|
||||
|
||||
# skip a warning that FILTERMAIL_SKIP_DKIM shouldn't be used in prod
|
||||
if b"DKIM verification DISABLED!" in line:
|
||||
line = popen.stderr.readline().strip()
|
||||
if b"loop" not in line:
|
||||
print(line.decode("ascii"), file=sys.stderr)
|
||||
pytest.fail("starting filtermail failed")
|
||||
|
||||
addr = f"user1@{config.mail_domain}"
|
||||
config.get_user(addr).set_password("l1k2j3l1k2j3l")
|
||||
|
||||
# send encrypted mail
|
||||
data = str(maildata("encrypted.eml", from_addr=addr, to_addr=addr))
|
||||
client = smtplib.SMTP("localhost", smtp_inject_port)
|
||||
client.sendmail(addr, [addr], data)
|
||||
assert len(smtpserver.outbox) == 1
|
||||
|
||||
# send un-encrypted mail that errors
|
||||
data = str(maildata("fake-encrypted.eml", from_addr=addr, to_addr=addr))
|
||||
with pytest.raises(smtplib.SMTPDataError) as e:
|
||||
client.sendmail(addr, [addr], data)
|
||||
assert e.value.smtp_code == 523
|
||||
@@ -36,29 +36,3 @@ def test_handle_dovecot_request_last_login(testaddr, example_config):
|
||||
res = dictproxy.handle_dovecot_request(msg, dictproxy_transactions)
|
||||
assert res == "O\n"
|
||||
assert len(dictproxy_transactions) == 0
|
||||
|
||||
|
||||
def test_handle_dovecot_request_last_login_echobot(example_config):
|
||||
dictproxy = LastLoginDictProxy(config=example_config)
|
||||
|
||||
authproxy = AuthDictProxy(config=example_config)
|
||||
testaddr = f"echo@{example_config.mail_domain}"
|
||||
authproxy.lookup_passdb(testaddr, "ignore")
|
||||
user = dictproxy.config.get_user(testaddr)
|
||||
|
||||
transactions = {}
|
||||
|
||||
# set last-login info for user
|
||||
tx = "1111"
|
||||
msg = f"B{tx}\t{testaddr}"
|
||||
res = dictproxy.handle_dovecot_request(msg, transactions)
|
||||
assert not res
|
||||
assert transactions == {tx: dict(addr=testaddr, res="O\n")}
|
||||
|
||||
timestamp = int(time.time())
|
||||
msg = f"S{tx}\tshared/last-login/{testaddr}\t{timestamp}"
|
||||
res = dictproxy.handle_dovecot_request(msg, transactions)
|
||||
assert not res
|
||||
assert len(transactions) == 1
|
||||
read_timestamp = user.get_last_login_timestamp()
|
||||
assert read_timestamp is None
|
||||
|
||||
@@ -242,6 +242,22 @@ def test_requeue_removes_tmp_files(notifier, metadata, testaddr, caplog):
|
||||
assert queue_item.addr == testaddr
|
||||
|
||||
|
||||
def test_requeue_removes_invalid_files(notifier, metadata, testaddr, caplog):
|
||||
metadata.add_token_to_addr(testaddr, "01234")
|
||||
notifier.new_message_for_addr(testaddr, metadata)
|
||||
# empty/invalid files should be ignored
|
||||
p = notifier.queue_dir.joinpath("1203981203")
|
||||
p.touch()
|
||||
notifier2 = notifier.__class__(notifier.queue_dir)
|
||||
notifier2.requeue_persistent_queue_items()
|
||||
assert "spurious" in caplog.records[0].msg
|
||||
assert not p.exists()
|
||||
assert notifier2.retry_queues[0].qsize() == 1
|
||||
when, queue_item = notifier2.retry_queues[0].get()
|
||||
assert when <= int(time.time())
|
||||
assert queue_item.addr == testaddr
|
||||
|
||||
|
||||
def test_start_and_stop_notification_threads(notifier, testaddr):
|
||||
threads = notifier.start_notification_threads(None)
|
||||
for retry_num, threadlist in threads.items():
|
||||
@@ -298,6 +314,51 @@ def test_persistent_queue_items(tmp_path, testaddr, token):
|
||||
assert not queue_item < item2 and not item2 < queue_item
|
||||
|
||||
|
||||
def test_turn_credentials_exception_returns_N(notifier, metadata, monkeypatch):
|
||||
"""Test that turn_credentials() failure returns N\\n instead of crashing."""
|
||||
import chatmaild.metadata
|
||||
|
||||
dictproxy = MetadataDictProxy(
|
||||
notifier=notifier,
|
||||
metadata=metadata,
|
||||
turn_hostname="turn.example.org",
|
||||
)
|
||||
|
||||
def mock_turn_credentials():
|
||||
raise ConnectionRefusedError("socket not available")
|
||||
|
||||
monkeypatch.setattr(chatmaild.metadata, "turn_credentials", mock_turn_credentials)
|
||||
|
||||
transactions = {}
|
||||
res = dictproxy.handle_dovecot_request(
|
||||
"Lshared/0123/vendor/vendor.dovecot/pvt/server/vendor/deltachat/turn"
|
||||
"\tuser@example.org",
|
||||
transactions,
|
||||
)
|
||||
assert res == "N\n"
|
||||
|
||||
|
||||
def test_turn_credentials_success(notifier, metadata, monkeypatch):
|
||||
"""Test that valid turn_credentials() returns TURN URI."""
|
||||
import chatmaild.metadata
|
||||
|
||||
dictproxy = MetadataDictProxy(
|
||||
notifier=notifier,
|
||||
metadata=metadata,
|
||||
turn_hostname="turn.example.org",
|
||||
)
|
||||
|
||||
monkeypatch.setattr(chatmaild.metadata, "turn_credentials", lambda: "user:pass")
|
||||
|
||||
transactions = {}
|
||||
res = dictproxy.handle_dovecot_request(
|
||||
"Lshared/0123/vendor/vendor.dovecot/pvt/server/vendor/deltachat/turn"
|
||||
"\tuser@example.org",
|
||||
transactions,
|
||||
)
|
||||
assert res == "Oturn.example.org:3478:user:pass\n"
|
||||
|
||||
|
||||
def test_iroh_relay(dictproxy):
|
||||
rfile = io.BytesIO(
|
||||
b"\n".join(
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
from chatmaild.metrics import main
|
||||
|
||||
|
||||
def test_main(tmp_path, capsys):
|
||||
for x in ("ci-asllkj", "ac_12l3kj", "qweqwe", "ci-l1k2j31l2k3"):
|
||||
tmp_path.joinpath(x).mkdir()
|
||||
main(tmp_path)
|
||||
out, _ = capsys.readouterr()
|
||||
d = {}
|
||||
for line in out.split("\n"):
|
||||
if line.strip() and not line.startswith("#"):
|
||||
name, num = line.split()
|
||||
d[name] = int(num)
|
||||
|
||||
assert d["accounts"] == 4
|
||||
assert d["ci_accounts"] == 3
|
||||
assert d["nonci_accounts"] == 1
|
||||
@@ -1,7 +1,11 @@
|
||||
import json
|
||||
|
||||
import chatmaild
|
||||
from chatmaild.newemail import create_newemail_dict, print_new_account
|
||||
from chatmaild.newemail import (
|
||||
create_dclogin_url,
|
||||
create_newemail_dict,
|
||||
print_new_account,
|
||||
)
|
||||
|
||||
|
||||
def test_create_newemail_dict(example_config):
|
||||
@@ -15,6 +19,18 @@ def test_create_newemail_dict(example_config):
|
||||
assert ac1["password"] != ac2["password"]
|
||||
|
||||
|
||||
def test_create_dclogin_url():
|
||||
url = create_dclogin_url("user@example.org", "p@ss w+rd")
|
||||
assert url.startswith("dclogin:")
|
||||
assert "v=1" in url
|
||||
assert "ic=3" in url
|
||||
|
||||
assert "user@example.org" in url
|
||||
# password special chars must be encoded
|
||||
assert "p%40ss" in url
|
||||
assert "w%2Brd" in url
|
||||
|
||||
|
||||
def test_print_new_account(capsys, monkeypatch, maildomain, tmpdir, example_config):
|
||||
monkeypatch.setattr(chatmaild.newemail, "CONFIG_PATH", str(example_config._inipath))
|
||||
print_new_account()
|
||||
@@ -25,3 +41,20 @@ def test_print_new_account(capsys, monkeypatch, maildomain, tmpdir, example_conf
|
||||
dic = json.loads(lines[2])
|
||||
assert dic["email"].endswith(f"@{example_config.mail_domain}")
|
||||
assert len(dic["password"]) >= 10
|
||||
# default tls_cert=acme should not include dclogin_url
|
||||
assert "dclogin_url" not in dic
|
||||
|
||||
|
||||
def test_print_new_account_self_signed(capsys, monkeypatch, make_config):
|
||||
config = make_config("_test.example.org")
|
||||
monkeypatch.setattr(chatmaild.newemail, "CONFIG_PATH", str(config._inipath))
|
||||
print_new_account()
|
||||
out, err = capsys.readouterr()
|
||||
lines = out.split("\n")
|
||||
dic = json.loads(lines[2])
|
||||
assert "dclogin_url" in dic
|
||||
url = dic["dclogin_url"]
|
||||
assert url.startswith("dclogin:")
|
||||
assert "ic=3" in url
|
||||
|
||||
assert dic["email"].split("@")[0] in url
|
||||
|
||||
73
chatmaild/src/chatmaild/tests/test_turnserver.py
Normal file
73
chatmaild/src/chatmaild/tests/test_turnserver.py
Normal file
@@ -0,0 +1,73 @@
|
||||
import socket
|
||||
import threading
|
||||
import time
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from chatmaild.turnserver import turn_credentials
|
||||
|
||||
SOCKET_PATH = "/run/chatmail-turn/turn.socket"
|
||||
|
||||
|
||||
@pytest.fixture
def turn_socket(tmp_path):
    """Yield (path, server) for a Unix stream socket listening in a temp dir."""
    listen_path = str(tmp_path / "turn.socket")
    listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    listener.bind(listen_path)
    listener.listen(1)
    yield listen_path, listener
    listener.close()
|
||||
|
||||
|
||||
def _call_turn_credentials(sock_path):
    """Invoke turn_credentials() with its hardcoded socket path rerouted to sock_path."""
    real_connect = socket.socket.connect

    def rerouting_connect(sock, address):
        # transparently redirect only the production socket path
        target = sock_path if address == SOCKET_PATH else address
        return real_connect(sock, target)

    with patch.object(socket.socket, "connect", rerouting_connect):
        return turn_credentials()
|
||||
|
||||
|
||||
def test_turn_credentials_timeout(turn_socket):
    """A server that accepts but stays silent must trigger socket.timeout."""
    sock_path, server = turn_socket

    def hang_after_accept():
        conn, _ = server.accept()
        time.sleep(30)
        conn.close()

    threading.Thread(target=hang_after_accept, daemon=True).start()

    with pytest.raises(socket.timeout):
        _call_turn_credentials(sock_path)
|
||||
|
||||
|
||||
def test_turn_credentials_connection_refused(tmp_path):
    """A missing socket file must surface as ConnectionRefusedError or FileNotFoundError."""
    absent_path = str(tmp_path / "nonexistent.socket")
    expected_errors = (ConnectionRefusedError, FileNotFoundError)
    with pytest.raises(expected_errors):
        _call_turn_credentials(absent_path)
|
||||
|
||||
|
||||
def test_turn_credentials_success(turn_socket):
    """Credentials sent by the server come back as a stripped string."""
    sock_path, server = turn_socket

    def serve_one_line():
        conn, _ = server.accept()
        conn.sendall(b"testuser:testpass\n")
        conn.close()

    threading.Thread(target=serve_one_line, daemon=True).start()

    assert _call_turn_credentials(sock_path) == "testuser:testpass"
|
||||
@@ -40,3 +40,17 @@ def test_no_mailboxes_dir(testaddr, example_config, tmp_path):
|
||||
user.set_password("someeqkjwelkqwjleqwe")
|
||||
user.set_last_login_timestamp(100000)
|
||||
assert user.get_last_login_timestamp() == 86400
|
||||
|
||||
|
||||
def test_set_get_cleartext_flag(testaddr, example_config, tmp_path):
    """New users enforce incoming E2EE until cleartext is explicitly allowed."""
    example_config.mailboxes_dir = tmp_path.joinpath("a", "mailboxes")
    user = example_config.get_user(testaddr)

    user.set_password("someeqkjwelkqwjleqwe")
    user.set_last_login_timestamp(100000)
    # login timestamps are tracked with daily granularity
    assert user.get_last_login_timestamp() == 86400

    assert not user.is_incoming_cleartext_ok()
    user.allow_incoming_cleartext()
    assert user.is_incoming_cleartext_ok()
|
||||
|
||||
10
chatmaild/src/chatmaild/turnserver.py
Normal file
10
chatmaild/src/chatmaild/turnserver.py
Normal file
@@ -0,0 +1,10 @@
|
||||
#!/usr/bin/env python3
"""Client helper for querying the chatmail TURN-credentials daemon."""
import socket

# default location of the daemon's Unix stream socket
TURN_SOCKET_PATH = "/run/chatmail-turn/turn.socket"


def turn_credentials(socket_path: str = TURN_SOCKET_PATH, timeout: float = 5.0) -> str:
    """Return one credentials line from the TURN daemon.

    :param socket_path: path of the daemon's Unix socket
        (defaults to the production path, so existing callers are unchanged).
    :param timeout: seconds to wait for connect/read before ``socket.timeout``.
    :raises FileNotFoundError | ConnectionRefusedError: if the daemon is absent.
    :raises socket.timeout: if the daemon does not answer in time.
    """
    with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as client_socket:
        client_socket.settimeout(timeout)
        client_socket.connect(socket_path)
        # the daemon's protocol is a single newline-terminated line
        with client_socket.makefile("rb") as file:
            return file.readline().decode("utf-8").strip()
|
||||
@@ -13,12 +13,13 @@ class User:
|
||||
self.maildir = maildir
|
||||
self.addr = addr
|
||||
self.password_path = password_path
|
||||
self.enforce_E2EE_path = maildir.joinpath("enforceE2EEincoming")
|
||||
self.uid = uid
|
||||
self.gid = gid
|
||||
|
||||
@property
|
||||
def can_track(self):
|
||||
return "@" in self.addr and not self.addr.startswith("echo@")
|
||||
return "@" in self.addr
|
||||
|
||||
def get_userdb_dict(self):
|
||||
"""Return a non-empty dovecot 'userdb' style dict
|
||||
@@ -35,6 +36,13 @@ class User:
|
||||
home = str(self.maildir)
|
||||
return dict(addr=self.addr, home=home, uid=self.uid, gid=self.gid, password=pw)
|
||||
|
||||
    def is_incoming_cleartext_ok(self):
        """Return True unless the E2EE-enforcement marker file exists for this user."""
        return not self.enforce_E2EE_path.exists()
|
||||
|
||||
def allow_incoming_cleartext(self):
|
||||
if self.enforce_E2EE_path.exists():
|
||||
self.enforce_E2EE_path.unlink()
|
||||
|
||||
def set_password(self, enc_password):
|
||||
"""Set the specified password for this user.
|
||||
|
||||
@@ -47,9 +55,9 @@ class User:
|
||||
try:
|
||||
write_bytes_atomic(self.password_path, password)
|
||||
except PermissionError:
|
||||
if not self.addr.startswith("echo@"):
|
||||
logging.error(f"could not write password for: {self.addr}")
|
||||
raise
|
||||
logging.error(f"could not write password for: {self.addr}")
|
||||
raise
|
||||
self.enforce_E2EE_path.touch()
|
||||
|
||||
def set_last_login_timestamp(self, timestamp):
|
||||
"""Track login time with daily granularity
|
||||
|
||||
94
cliff.toml
Normal file
94
cliff.toml
Normal file
@@ -0,0 +1,94 @@
|
||||
# git-cliff ~ configuration file
|
||||
# https://git-cliff.org/docs/configuration
|
||||
|
||||
|
||||
[changelog]
|
||||
# A Tera template to be rendered for each release in the changelog.
|
||||
# See https://keats.github.io/tera/docs/#introduction
|
||||
body = """
|
||||
{% if version %}\
|
||||
## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
|
||||
{% else %}\
|
||||
## [unreleased]
|
||||
{% endif %}\
|
||||
{% for group, commits in commits | group_by(attribute="group") %}
|
||||
### {{ group | striptags | trim | upper_first }}
|
||||
{% for commit in commits %}
|
||||
- {% if commit.scope %}*({{ commit.scope }})* {% endif %}\
|
||||
{% if commit.breaking %}[**breaking**] {% endif %}\
|
||||
{{ commit.message | upper_first }}\
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
"""
|
||||
# Remove leading and trailing whitespaces from the changelog's body.
|
||||
trim = true
|
||||
# Render body even when there are no releases to process.
|
||||
render_always = true
|
||||
# An array of regex based postprocessors to modify the changelog.
|
||||
postprocessors = [
|
||||
# Replace the placeholder <REPO> with a URL.
|
||||
#{ pattern = '<REPO>', replace = "https://github.com/orhun/git-cliff" },
|
||||
]
|
||||
# render body even when there are no releases to process
|
||||
# render_always = true
|
||||
# output file path
|
||||
# output = "test.md"
|
||||
|
||||
[git]
|
||||
# Parse commits according to the conventional commits specification.
|
||||
# See https://www.conventionalcommits.org
|
||||
conventional_commits = true
|
||||
# Exclude commits that do not match the conventional commits specification.
|
||||
filter_unconventional = true
|
||||
# Require all commits to be conventional.
|
||||
# Takes precedence over filter_unconventional.
|
||||
require_conventional = false
|
||||
# Split commits on newlines, treating each line as an individual commit.
|
||||
split_commits = false
|
||||
# An array of regex based parsers to modify commit messages prior to further processing.
|
||||
commit_preprocessors = [
|
||||
# Replace issue numbers with link templates to be updated in `changelog.postprocessors`.
|
||||
#{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](<REPO>/issues/${2}))"},
|
||||
# Check spelling of the commit message using https://github.com/crate-ci/typos.
|
||||
# If the spelling is incorrect, it will be fixed automatically.
|
||||
#{ pattern = '.*', replace_command = 'typos --write-changes -' },
|
||||
]
|
||||
# Prevent commits that are breaking from being excluded by commit parsers.
|
||||
protect_breaking_commits = false
|
||||
# An array of regex based parsers for extracting data from the commit message.
|
||||
# Assigns commits to groups.
|
||||
# Optionally sets the commit's scope and can decide to exclude commits from further processing.
|
||||
commit_parsers = [
|
||||
{ message = "^feat", group = "Features" },
|
||||
{ message = "^fix", group = "Bug Fixes" },
|
||||
{ message = "^docs", group = "Documentation" },
|
||||
{ message = "^perf", group = "Performance" },
|
||||
{ message = "^refactor", group = "Refactor" },
|
||||
{ message = "^style", group = "Styling" },
|
||||
{ message = "^test", group = "Testing" },
|
||||
{ message = "^chore\\(release\\): prepare for", skip = true },
|
||||
{ message = "^chore\\(deps.*\\)", skip = true },
|
||||
{ message = "^chore\\(pr\\)", skip = true },
|
||||
{ message = "^chore\\(pull\\)", skip = true },
|
||||
{ message = "^chore|^ci", group = "Miscellaneous Tasks" },
|
||||
{ body = ".*security", group = "Security" },
|
||||
{ message = "^revert", group = "Revert" },
|
||||
{ message = ".*", group = "Other" },
|
||||
]
|
||||
# Exclude commits that are not matched by any commit parser.
|
||||
filter_commits = false
|
||||
# Fail on a commit that is not matched by any commit parser.
|
||||
fail_on_unmatched_commit = false
|
||||
# An array of link parsers for extracting external references, and turning them into URLs, using regex.
|
||||
link_parsers = []
|
||||
# Include only the tags that belong to the current branch.
|
||||
use_branch_tags = false
|
||||
# Order releases topologically instead of chronologically.
|
||||
topo_order = false
|
||||
# Order commits topologically instead of chronologically.
|
||||
topo_order_commits = true
|
||||
# Order of commits in each group/release within the changelog.
|
||||
# Allowed values: newest, oldest
|
||||
sort_commits = "oldest"
|
||||
# Process submodules commits
|
||||
recurse_submodules = false
|
||||
@@ -20,6 +20,7 @@ dependencies = [
|
||||
"pytest-xdist",
|
||||
"execnet",
|
||||
"imap_tools",
|
||||
"deltachat-rpc-client",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
@@ -41,3 +42,6 @@ lint.select = [
|
||||
"PLE", # Pylint Error
|
||||
"PLW", # Pylint Warning
|
||||
]
|
||||
lint.ignore = [
|
||||
"PLC0415" # import-outside-top-level
|
||||
]
|
||||
|
||||
@@ -1,744 +0,0 @@
|
||||
"""
|
||||
Chat Mail pyinfra deploy.
|
||||
"""
|
||||
|
||||
import importlib.resources
|
||||
import io
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
from chatmaild.config import Config, read_config
|
||||
from pyinfra import facts, host
|
||||
from pyinfra.facts.files import File
|
||||
from pyinfra.facts.systemd import SystemdEnabled
|
||||
from pyinfra.operations import apt, files, pip, server, systemd
|
||||
|
||||
from .acmetool import deploy_acmetool
|
||||
|
||||
|
||||
def _build_chatmaild(dist_dir) -> Path:
    """Build a chatmaild sdist into *dist_dir* and return the artifact path.

    Any pre-existing *dist_dir* is wiped first so that exactly one
    artifact remains after the build.

    :param dist_dir: directory (str or Path) to build the sdist into.
    :returns: path of the single built sdist file.
    """
    # NOTE: the original annotated this as ``-> None`` although it
    # returns the built artifact; fixed to ``-> Path``.
    dist_dir = Path(dist_dir).resolve()
    if dist_dir.exists():
        shutil.rmtree(dist_dir)
    dist_dir.mkdir()
    subprocess.check_output(
        [sys.executable, "-m", "build", "-n"]
        + ["--sdist", "chatmaild", "--outdir", str(dist_dir)]
    )
    entries = list(dist_dir.iterdir())
    # a clean dir plus one build must yield exactly one file
    assert len(entries) == 1
    return entries[0]
|
||||
|
||||
|
||||
def remove_legacy_artifacts():
    """Stop and disable systemd units left behind by older chatmail releases."""
    # disable legacy doveauth-dictproxy.service
    if host.get_fact(SystemdEnabled).get("doveauth-dictproxy.service"):
        systemd.service(
            name="Disable legacy doveauth-dictproxy.service",
            service="doveauth-dictproxy.service",
            running=False,
            enabled=False,
        )
|
||||
|
||||
|
||||
def _install_remote_venv_with_chatmaild(config) -> None:
    """Build chatmaild locally and install it into a venv on the remote host.

    Also uploads chatmail.ini, the metrics cron job and the chatmaild
    systemd units, (re)starting each service.

    :param config: parsed chatmail.ini configuration.
    """
    remove_legacy_artifacts()
    dist_file = _build_chatmaild(dist_dir=Path("chatmaild/dist"))
    remote_base_dir = "/usr/local/lib/chatmaild"
    remote_dist_file = f"{remote_base_dir}/dist/{dist_file.name}"
    remote_venv_dir = f"{remote_base_dir}/venv"
    remote_chatmail_inipath = f"{remote_base_dir}/chatmail.ini"
    root_owned = dict(user="root", group="root", mode="644")

    apt.packages(
        name="apt install python3-virtualenv",
        packages=["python3-virtualenv"],
    )

    files.put(
        name="Upload chatmaild source package",
        src=dist_file.open("rb"),
        dest=remote_dist_file,
        create_remote_dir=True,
        **root_owned,
    )

    files.put(
        name=f"Upload {remote_chatmail_inipath}",
        src=config._getbytefile(),
        dest=remote_chatmail_inipath,
        **root_owned,
    )

    pip.virtualenv(
        name=f"chatmaild virtualenv {remote_venv_dir}",
        path=remote_venv_dir,
        always_copy=True,
    )

    apt.packages(
        name="install python3-dev to build crypt_r source package",
        packages=["python3-dev"],
    )

    # --force-reinstall so an unchanged version string still gets redeployed
    server.shell(
        name=f"forced pip-install {dist_file.name}",
        commands=[
            f"{remote_venv_dir}/bin/pip install --force-reinstall {remote_dist_file}"
        ],
    )

    files.template(
        src=importlib.resources.files(__package__).joinpath("metrics.cron.j2"),
        dest="/etc/cron.d/chatmail-metrics",
        user="root",
        group="root",
        mode="644",
        config={
            "mailboxes_dir": config.mailboxes_dir,
            "execpath": f"{remote_venv_dir}/bin/chatmail-metrics",
        },
    )

    # install systemd units
    for fn in (
        "doveauth",
        "filtermail",
        "echobot",
        "chatmail-metadata",
        "lastlogin",
    ):
        params = dict(
            execpath=f"{remote_venv_dir}/bin/{fn}",
            config_path=remote_chatmail_inipath,
            remote_venv_dir=remote_venv_dir,
            mail_domain=config.mail_domain,
        )
        source_path = importlib.resources.files(__package__).joinpath(
            "service", f"{fn}.service.f"
        )
        # unit files are str.format templates (".service.f"), rendered locally
        content = source_path.read_text().format(**params).encode()

        files.put(
            name=f"Upload {fn}.service",
            src=io.BytesIO(content),
            dest=f"/etc/systemd/system/{fn}.service",
            **root_owned,
        )
        systemd.service(
            name=f"Setup {fn} service",
            service=f"{fn}.service",
            running=True,
            enabled=True,
            restarted=True,
            daemon_reload=True,
        )
|
||||
|
||||
|
||||
def _configure_opendkim(domain: str, dkim_selector: str = "dkim") -> bool:
    """Configures OpenDKIM.

    Uploads the main config, Lua screen/final scripts and the key/signing
    tables, installs the packages and generates a domain key if none
    exists yet.

    :param domain: mail domain to sign for.
    :param dkim_selector: DNS selector under which the public key is published.
    :returns: True if any uploaded file changed and opendkim needs a restart.
    """
    need_restart = False

    main_config = files.template(
        src=importlib.resources.files(__package__).joinpath("opendkim/opendkim.conf"),
        dest="/etc/opendkim.conf",
        user="root",
        group="root",
        mode="644",
        config={"domain_name": domain, "opendkim_selector": dkim_selector},
    )
    need_restart |= main_config.changed

    screen_script = files.put(
        src=importlib.resources.files(__package__).joinpath("opendkim/screen.lua"),
        dest="/etc/opendkim/screen.lua",
        user="root",
        group="root",
        mode="644",
    )
    need_restart |= screen_script.changed

    final_script = files.put(
        src=importlib.resources.files(__package__).joinpath("opendkim/final.lua"),
        dest="/etc/opendkim/final.lua",
        user="root",
        group="root",
        mode="644",
    )
    need_restart |= final_script.changed

    files.directory(
        name="Add opendkim directory to /etc",
        path="/etc/opendkim",
        user="opendkim",
        group="opendkim",
        mode="750",
        present=True,
    )

    keytable = files.template(
        src=importlib.resources.files(__package__).joinpath("opendkim/KeyTable"),
        dest="/etc/dkimkeys/KeyTable",
        user="opendkim",
        group="opendkim",
        mode="644",
        config={"domain_name": domain, "opendkim_selector": dkim_selector},
    )
    need_restart |= keytable.changed

    signing_table = files.template(
        src=importlib.resources.files(__package__).joinpath("opendkim/SigningTable"),
        dest="/etc/dkimkeys/SigningTable",
        user="opendkim",
        group="opendkim",
        mode="644",
        config={"domain_name": domain, "opendkim_selector": dkim_selector},
    )
    need_restart |= signing_table.changed
    # socket dir lives under the postfix spool so postfix can reach the milter
    files.directory(
        name="Add opendkim socket directory to /var/spool/postfix",
        path="/var/spool/postfix/opendkim",
        user="opendkim",
        group="opendkim",
        mode="750",
        present=True,
    )

    apt.packages(
        name="apt install opendkim opendkim-tools",
        packages=["opendkim", "opendkim-tools"],
    )

    # generate the signing key only once; regeneration would break DNS records
    if not host.get_fact(File, f"/etc/dkimkeys/{dkim_selector}.private"):
        server.shell(
            name="Generate OpenDKIM domain keys",
            commands=[
                f"opendkim-genkey -D /etc/dkimkeys -d {domain} -s {dkim_selector}"
            ],
            _sudo=True,
            _sudo_user="opendkim",
        )

    return need_restart
|
||||
|
||||
|
||||
def _install_mta_sts_daemon() -> bool:
    """Install and configure the postfix-mta-sts-resolver daemon.

    :returns: True if the daemon config or systemd unit changed and the
        service should be restarted.
    """
    need_restart = False

    config = files.put(
        name="upload postfix-mta-sts-resolver config",
        src=importlib.resources.files(__package__).joinpath(
            "postfix/mta-sts-daemon.yml"
        ),
        dest="/etc/mta-sts-daemon.yml",
        user="root",
        group="root",
        mode="644",
    )
    need_restart |= config.changed

    # installed into a dedicated virtualenv, isolated from chatmaild's venv
    server.shell(
        name="install postfix-mta-sts-resolver with pip",
        commands=[
            "python3 -m virtualenv /usr/local/lib/postfix-mta-sts-resolver",
            "/usr/local/lib/postfix-mta-sts-resolver/bin/pip install postfix-mta-sts-resolver",
        ],
    )

    systemd_unit = files.put(
        name="upload mta-sts-daemon systemd unit",
        src=importlib.resources.files(__package__).joinpath(
            "postfix/mta-sts-daemon.service"
        ),
        dest="/etc/systemd/system/mta-sts-daemon.service",
        user="root",
        group="root",
        mode="644",
    )
    need_restart |= systemd_unit.changed

    return need_restart
|
||||
|
||||
|
||||
def _configure_postfix(config: Config, debug: bool = False) -> bool:
    """Configures Postfix SMTP server.

    :param config: parsed chatmail.ini configuration.
    :param debug: render master.cf with debug settings enabled.
    :returns: True if any rendered file changed and postfix needs a restart.
    """
    need_restart = False

    main_config = files.template(
        src=importlib.resources.files(__package__).joinpath("postfix/main.cf.j2"),
        dest="/etc/postfix/main.cf",
        user="root",
        group="root",
        mode="644",
        config=config,
        disable_ipv6=config.disable_ipv6,
    )
    need_restart |= main_config.changed

    master_config = files.template(
        src=importlib.resources.files(__package__).joinpath("postfix/master.cf.j2"),
        dest="/etc/postfix/master.cf",
        user="root",
        group="root",
        mode="644",
        debug=debug,
        config=config,
    )
    need_restart |= master_config.changed

    header_cleanup = files.put(
        src=importlib.resources.files(__package__).joinpath(
            "postfix/submission_header_cleanup"
        ),
        dest="/etc/postfix/submission_header_cleanup",
        user="root",
        group="root",
        mode="644",
    )
    need_restart |= header_cleanup.changed

    # Login map that 1:1 maps email address to login.
    login_map = files.put(
        src=importlib.resources.files(__package__).joinpath("postfix/login_map"),
        dest="/etc/postfix/login_map",
        user="root",
        group="root",
        mode="644",
    )
    need_restart |= login_map.changed

    return need_restart
|
||||
|
||||
|
||||
def _configure_dovecot(config: Config, debug: bool = False) -> bool:
    """Configures Dovecot IMAP server.

    :param config: parsed chatmail.ini configuration.
    :param debug: render dovecot.conf with debug settings enabled.
    :returns: True if any uploaded config changed and dovecot needs a restart.
    """
    need_restart = False

    main_config = files.template(
        src=importlib.resources.files(__package__).joinpath("dovecot/dovecot.conf.j2"),
        dest="/etc/dovecot/dovecot.conf",
        user="root",
        group="root",
        mode="644",
        config=config,
        debug=debug,
        disable_ipv6=config.disable_ipv6,
    )
    need_restart |= main_config.changed
    auth_config = files.put(
        src=importlib.resources.files(__package__).joinpath("dovecot/auth.conf"),
        dest="/etc/dovecot/auth.conf",
        user="root",
        group="root",
        mode="644",
    )
    need_restart |= auth_config.changed
    lua_push_notification_script = files.put(
        src=importlib.resources.files(__package__).joinpath(
            "dovecot/push_notification.lua"
        ),
        dest="/etc/dovecot/push_notification.lua",
        user="root",
        group="root",
        mode="644",
    )
    need_restart |= lua_push_notification_script.changed

    # cron-driven expunge of old mail; changes here need no dovecot restart
    files.template(
        src=importlib.resources.files(__package__).joinpath("dovecot/expunge.cron.j2"),
        dest="/etc/cron.d/expunge",
        user="root",
        group="root",
        mode="644",
        config=config,
    )

    # as per https://doc.dovecot.org/configuration_manual/os/
    # it is recommended to set the following inotify limits
    for name in ("max_user_instances", "max_user_watches"):
        key = f"fs.inotify.{name}"
        server.sysctl(
            name=f"Change {key}",
            key=key,
            value=65535,
            persist=True,
        )

    return need_restart
|
||||
|
||||
|
||||
def _configure_nginx(config: Config, debug: bool = False) -> bool:
    """Configures nginx HTTP server.

    Renders the main config, autoconfig XML and MTA-STS policy, and
    installs the CGI account-creation script.

    :param config: parsed chatmail.ini configuration.
    :param debug: currently unused here; kept for signature parity with
        the other _configure_* helpers.
    :returns: True if any rendered config changed and nginx needs a restart.
    """
    need_restart = False

    main_config = files.template(
        src=importlib.resources.files(__package__).joinpath("nginx/nginx.conf.j2"),
        dest="/etc/nginx/nginx.conf",
        user="root",
        group="root",
        mode="644",
        config={"domain_name": config.mail_domain},
        disable_ipv6=config.disable_ipv6,
    )
    need_restart |= main_config.changed

    autoconfig = files.template(
        src=importlib.resources.files(__package__).joinpath("nginx/autoconfig.xml.j2"),
        dest="/var/www/html/.well-known/autoconfig/mail/config-v1.1.xml",
        user="root",
        group="root",
        mode="644",
        config={"domain_name": config.mail_domain},
    )
    need_restart |= autoconfig.changed

    mta_sts_config = files.template(
        src=importlib.resources.files(__package__).joinpath("nginx/mta-sts.txt.j2"),
        dest="/var/www/html/.well-known/mta-sts.txt",
        user="root",
        group="root",
        mode="644",
        config={"domain_name": config.mail_domain},
    )
    need_restart |= mta_sts_config.changed

    # install CGI newemail script
    #
    cgi_dir = "/usr/lib/cgi-bin"
    files.directory(
        name=f"Ensure {cgi_dir} exists",
        path=cgi_dir,
        user="root",
        group="root",
    )

    files.put(
        name="Upload cgi newemail.py script",
        src=importlib.resources.files("chatmaild").joinpath("newemail.py").open("rb"),
        dest=f"{cgi_dir}/newemail.py",
        user="root",
        group="root",
        mode="755",
    )

    return need_restart
|
||||
|
||||
|
||||
def _remove_rspamd() -> None:
    """Ensure the rspamd package is absent."""
    apt.packages(
        name="Remove rspamd",
        packages="rspamd",
        present=False,
    )
|
||||
|
||||
|
||||
def check_config(config):
    """Validate a parsed chatmail config and return it.

    Raises ValueError when a non-testrun.org deployment still contains
    the upstream placeholder privacy contacts.
    """
    domain = config.mail_domain
    # testrun.org deployments may keep the shipped example contacts
    is_testrun = domain == "testrun.org" or domain.endswith(".testrun.org")
    if not is_testrun:
        blocked_words = "merlinux schmieder testrun.org".split()
        for key, value in vars(config).items():
            if not key.startswith("privacy"):
                continue
            if any(word in str(value) for word in blocked_words):
                raise ValueError(
                    f"please set your own privacy contacts/addresses in {config._inipath}"
                )
    return config
|
||||
|
||||
|
||||
def deploy_mtail(config):
    """Install and configure mtail for exporting mail-delivery metrics.

    The unit is always installed, but the exporter only runs/is enabled
    when ``config.mtail_address`` is set.
    """
    apt.packages(
        name="Install mtail",
        packages=["mtail"],
    )

    # Using our own systemd unit instead of `/usr/lib/systemd/system/mtail.service`.
    # This allows to read from journalctl instead of log files.
    files.template(
        src=importlib.resources.files(__package__).joinpath("mtail/mtail.service.j2"),
        dest="/etc/systemd/system/mtail.service",
        user="root",
        group="root",
        mode="644",
        address=config.mtail_address or "127.0.0.1",
        port=3903,
    )

    mtail_conf = files.put(
        name="Mtail configuration",
        src=importlib.resources.files(__package__).joinpath(
            "mtail/delivered_mail.mtail"
        ),
        dest="/etc/mtail/delivered_mail.mtail",
        user="root",
        group="root",
        mode="644",
    )

    systemd.service(
        name="Start and enable mtail",
        service="mtail.service",
        running=bool(config.mtail_address),
        enabled=bool(config.mtail_address),
        restarted=mtail_conf.changed,
    )
|
||||
|
||||
|
||||
def deploy_iroh_relay(config) -> None:
    """Download a pinned iroh-relay binary and manage its systemd service.

    The binary is checksum-pinned per architecture; the service is only
    enabled when ``config.enable_iroh_relay`` is set.
    """
    (url, sha256sum) = {
        "x86_64": (
            "https://github.com/n0-computer/iroh/releases/download/v0.28.1/iroh-relay-v0.28.1-x86_64-unknown-linux-musl.tar.gz",
            "2ffacf7c0622c26b67a5895ee8e07388769599f60e5f52a3bd40a3258db89b2c",
        ),
        "aarch64": (
            "https://github.com/n0-computer/iroh/releases/download/v0.28.1/iroh-relay-v0.28.1-aarch64-unknown-linux-musl.tar.gz",
            "b915037bcc1ff1110cc9fcb5de4a17c00ff576fd2f568cd339b3b2d54c420dc4",
        ),
    }[host.get_fact(facts.server.Arch)]

    apt.packages(
        name="Install curl",
        packages=["curl"],
    )

    # download only if the installed binary's checksum does not match,
    # writing to a .new file first and renaming for an atomic replace
    server.shell(
        name="Download iroh-relay",
        commands=[
            f"(echo '{sha256sum} /usr/local/bin/iroh-relay' | sha256sum -c) || (curl -L {url} | gunzip | tar -x -f - ./iroh-relay -O >/usr/local/bin/iroh-relay.new && mv /usr/local/bin/iroh-relay.new /usr/local/bin/iroh-relay)",
            "chmod 755 /usr/local/bin/iroh-relay",
        ],
    )

    need_restart = False

    systemd_unit = files.put(
        name="Upload iroh-relay systemd unit",
        src=importlib.resources.files(__package__).joinpath("iroh-relay.service"),
        dest="/etc/systemd/system/iroh-relay.service",
        user="root",
        group="root",
        mode="644",
    )
    need_restart |= systemd_unit.changed

    iroh_config = files.put(
        name="Upload iroh-relay config",
        src=importlib.resources.files(__package__).joinpath("iroh-relay.toml"),
        dest="/etc/iroh-relay.toml",
        user="root",
        group="root",
        mode="644",
    )
    need_restart |= iroh_config.changed

    systemd.service(
        name="Start and enable iroh-relay",
        service="iroh-relay.service",
        running=True,
        enabled=config.enable_iroh_relay,
        restarted=need_restart,
    )
|
||||
|
||||
|
||||
def deploy_chatmail(config_path: Path, disable_mail: bool) -> None:
    """Deploy a chat-mail instance.

    :param config_path: path to chatmail.ini
    :param disable_mail: whether to disable postfix & dovecot
    """
    config = read_config(config_path)
    # fail early on placeholder privacy settings
    check_config(config)
    mail_domain = config.mail_domain

    from .www import build_webpages

    server.group(name="Create vmail group", group="vmail", system=True)
    server.user(name="Create vmail user", user="vmail", group="vmail", system=True)
    server.user(name="Create filtermail user", user="filtermail", system=True)
    server.group(name="Create opendkim group", group="opendkim", system=True)
    server.user(
        name="Create opendkim user",
        user="opendkim",
        groups=["opendkim"],
        system=True,
    )
    server.user(
        name="Add postfix user to opendkim group for socket access",
        user="postfix",
        groups=["opendkim"],
        system=True,
    )
    server.user(name="Create echobot user", user="echobot", system=True)
    server.user(name="Create iroh user", user="iroh", system=True)

    # Add our OBS repository for dovecot_no_delay
    files.put(
        name="Add Deltachat OBS GPG key to apt keyring",
        src=importlib.resources.files(__package__).joinpath("obs-home-deltachat.gpg"),
        dest="/etc/apt/keyrings/obs-home-deltachat.gpg",
        user="root",
        group="root",
        mode="644",
    )

    files.line(
        name="Add DeltaChat OBS home repository to sources.list",
        path="/etc/apt/sources.list",
        line="deb [signed-by=/etc/apt/keyrings/obs-home-deltachat.gpg] https://download.opensuse.org/repositories/home:/deltachat/Debian_12/ ./",
        escape_regex_characters=True,
        ensure_newline=True,
    )

    apt.update(name="apt update", cache_time=24 * 3600)
    apt.upgrade(name="upgrade apt packages", auto_remove=True)

    apt.packages(
        name="Install rsync",
        packages=["rsync"],
    )

    # Run local DNS resolver `unbound`.
    # `resolvconf` takes care of setting up /etc/resolv.conf
    # to use 127.0.0.1 as the resolver.
    apt.packages(
        name="Install unbound",
        packages=["unbound", "unbound-anchor", "dnsutils"],
    )
    server.shell(
        name="Generate root keys for validating DNSSEC",
        commands=[
            "unbound-anchor -a /var/lib/unbound/root.key || true",
            "systemctl reset-failed unbound.service",
        ],
    )
    systemd.service(
        name="Start and enable unbound",
        service="unbound.service",
        running=True,
        enabled=True,
    )

    deploy_iroh_relay(config)

    # Deploy acmetool to have TLS certificates.
    tls_domains = [mail_domain, f"mta-sts.{mail_domain}", f"www.{mail_domain}"]
    deploy_acmetool(
        domains=tls_domains,
    )

    apt.packages(
        # required for setfacl for echobot
        name="Install acl",
        packages="acl",
    )

    apt.packages(
        name="Install Postfix",
        packages="postfix",
    )

    apt.packages(
        name="Install Dovecot",
        packages=["dovecot-imapd", "dovecot-lmtpd"],
    )

    apt.packages(
        name="Install nginx",
        packages=["nginx", "libnginx-mod-stream"],
    )

    apt.packages(
        name="Install fcgiwrap",
        packages=["fcgiwrap"],
    )

    # build the static website locally and rsync it to the webroot
    www_path = importlib.resources.files(__package__).joinpath("../../../www").resolve()

    build_dir = www_path.joinpath("build")
    src_dir = www_path.joinpath("src")
    build_webpages(src_dir, build_dir, config)
    files.rsync(f"{build_dir}/", "/var/www/html", flags=["-avz"])

    _install_remote_venv_with_chatmaild(config)
    debug = False
    dovecot_need_restart = _configure_dovecot(config, debug=debug)
    postfix_need_restart = _configure_postfix(config, debug=debug)
    mta_sts_need_restart = _install_mta_sts_daemon()
    nginx_need_restart = _configure_nginx(config)

    _remove_rspamd()
    opendkim_need_restart = _configure_opendkim(mail_domain, "opendkim")

    systemd.service(
        name="Start and enable OpenDKIM",
        service="opendkim.service",
        running=True,
        enabled=True,
        restarted=opendkim_need_restart,
    )

    systemd.service(
        name="Start and enable MTA-STS daemon",
        service="mta-sts-daemon.service",
        daemon_reload=True,
        running=True,
        enabled=True,
        restarted=mta_sts_need_restart,
    )

    # Dovecot should be started before Postfix
    # because it creates authentication socket
    # required by Postfix.
    systemd.service(
        name="disable dovecot for now" if disable_mail else "Start and enable Dovecot",
        service="dovecot.service",
        running=False if disable_mail else True,
        enabled=False if disable_mail else True,
        restarted=dovecot_need_restart if not disable_mail else False,
    )

    systemd.service(
        name="disable postfix for now" if disable_mail else "Start and enable Postfix",
        service="postfix.service",
        running=False if disable_mail else True,
        enabled=False if disable_mail else True,
        restarted=postfix_need_restart if not disable_mail else False,
    )

    systemd.service(
        name="Start and enable nginx",
        service="nginx.service",
        running=True,
        enabled=True,
        restarted=nginx_need_restart,
    )

    # This file is used by auth proxy.
    # https://wiki.debian.org/EtcMailName
    server.shell(
        name="Setup /etc/mailname",
        commands=[f"echo {mail_domain} >/etc/mailname; chmod 644 /etc/mailname"],
    )

    journald_conf = files.put(
        name="Configure journald",
        src=importlib.resources.files(__package__).joinpath("journald.conf"),
        dest="/etc/systemd/journald.conf",
        user="root",
        group="root",
        mode="644",
    )
    systemd.service(
        name="Start and enable journald",
        service="systemd-journald.service",
        running=True,
        enabled=True,
        restarted=journald_conf.changed,
    )

    apt.packages(
        name="Ensure cron is installed",
        packages=["cron"],
    )

    deploy_mtail(config)
|
||||
@@ -1,75 +1,141 @@
|
||||
import importlib.resources
|
||||
|
||||
from pyinfra import host
|
||||
from pyinfra.facts.systemd import SystemdStatus
|
||||
from pyinfra.operations import apt, files, server, systemd
|
||||
|
||||
from ..basedeploy import Deployer
|
||||
|
||||
def deploy_acmetool(email="", domains=[]):
|
||||
"""Deploy acmetool."""
|
||||
apt.packages(
|
||||
name="Install acmetool",
|
||||
packages=["acmetool"],
|
||||
)
|
||||
|
||||
files.put(
|
||||
src=importlib.resources.files(__package__).joinpath("acmetool.cron").open("rb"),
|
||||
dest="/etc/cron.d/acmetool",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
class AcmetoolDeployer(Deployer):
|
||||
def __init__(self, email, domains):
|
||||
self.domains = domains
|
||||
self.email = email
|
||||
self.need_restart_redirector = False
|
||||
self.need_restart_reconcile_service = False
|
||||
self.need_restart_reconcile_timer = False
|
||||
|
||||
files.put(
|
||||
src=importlib.resources.files(__package__).joinpath("acmetool.hook").open("rb"),
|
||||
dest="/usr/lib/acme/hooks/nginx",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="744",
|
||||
)
|
||||
|
||||
files.template(
|
||||
src=importlib.resources.files(__package__).joinpath("response-file.yaml.j2"),
|
||||
dest="/var/lib/acme/conf/responses",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
email=email,
|
||||
)
|
||||
|
||||
files.template(
|
||||
src=importlib.resources.files(__package__).joinpath("target.yaml.j2"),
|
||||
dest="/var/lib/acme/conf/target",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
|
||||
service_file = files.put(
|
||||
src=importlib.resources.files(__package__).joinpath(
|
||||
"acmetool-redirector.service"
|
||||
),
|
||||
dest="/etc/systemd/system/acmetool-redirector.service",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
if host.get_fact(SystemdStatus).get("nginx.service"):
|
||||
systemd.service(
|
||||
name="Stop nginx service to free port 80",
|
||||
service="nginx",
|
||||
running=False,
|
||||
def install(self):
|
||||
apt.packages(
|
||||
name="Install acmetool",
|
||||
packages=["acmetool"],
|
||||
)
|
||||
|
||||
systemd.service(
|
||||
name="Setup acmetool-redirector service",
|
||||
service="acmetool-redirector.service",
|
||||
running=True,
|
||||
enabled=True,
|
||||
restarted=service_file.changed,
|
||||
)
|
||||
files.file(
|
||||
name="Remove old acmetool cronjob, it is replaced with systemd timer.",
|
||||
path="/etc/cron.d/acmetool",
|
||||
present=False,
|
||||
)
|
||||
|
||||
server.shell(
|
||||
name=f"Request certificate for: {', '.join(domains)}",
|
||||
commands=[f"acmetool want --xlog.severity=debug {' '.join(domains)}"],
|
||||
)
|
||||
files.put(
|
||||
name="Install acmetool hook.",
|
||||
src=importlib.resources.files(__package__)
|
||||
.joinpath("acmetool.hook")
|
||||
.open("rb"),
|
||||
dest="/etc/acme/hooks/nginx",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="755",
|
||||
)
|
||||
files.file(
|
||||
name="Remove acmetool hook from the wrong location where it was previously installed.",
|
||||
path="/usr/lib/acme/hooks/nginx",
|
||||
present=False,
|
||||
)
|
||||
|
||||
def configure(self):
|
||||
files.template(
|
||||
src=importlib.resources.files(__package__).joinpath(
|
||||
"response-file.yaml.j2"
|
||||
),
|
||||
dest="/var/lib/acme/conf/responses",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
email=self.email,
|
||||
)
|
||||
|
||||
files.template(
|
||||
src=importlib.resources.files(__package__).joinpath("target.yaml.j2"),
|
||||
dest="/var/lib/acme/conf/target",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
|
||||
server.shell(
|
||||
name=f"Remove old acmetool desired files for {self.domains[0]}",
|
||||
commands=[f"rm -f /var/lib/acme/desired/{self.domains[0]}-*"],
|
||||
)
|
||||
files.template(
|
||||
src=importlib.resources.files(__package__).joinpath("desired.yaml.j2"),
|
||||
dest=f"/var/lib/acme/desired/{self.domains[0]}", # 0 is mailhost TLD
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
domains=self.domains,
|
||||
)
|
||||
|
||||
service_file = files.put(
|
||||
src=importlib.resources.files(__package__).joinpath(
|
||||
"acmetool-redirector.service"
|
||||
),
|
||||
dest="/etc/systemd/system/acmetool-redirector.service",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
self.need_restart_redirector = service_file.changed
|
||||
|
||||
reconcile_service_file = files.put(
|
||||
src=importlib.resources.files(__package__).joinpath(
|
||||
"acmetool-reconcile.service"
|
||||
),
|
||||
dest="/etc/systemd/system/acmetool-reconcile.service",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
self.need_restart_reconcile_service = reconcile_service_file.changed
|
||||
|
||||
reconcile_timer_file = files.put(
|
||||
src=importlib.resources.files(__package__).joinpath(
|
||||
"acmetool-reconcile.timer"
|
||||
),
|
||||
dest="/etc/systemd/system/acmetool-reconcile.timer",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
self.need_restart_reconcile_timer = reconcile_timer_file.changed
|
||||
|
||||
def activate(self):
|
||||
systemd.service(
|
||||
name="Setup acmetool-redirector service",
|
||||
service="acmetool-redirector.service",
|
||||
running=True,
|
||||
enabled=True,
|
||||
restarted=self.need_restart_redirector,
|
||||
)
|
||||
self.need_restart_redirector = False
|
||||
|
||||
systemd.service(
|
||||
name="Setup acmetool-reconcile service",
|
||||
service="acmetool-reconcile.service",
|
||||
running=False,
|
||||
enabled=False,
|
||||
daemon_reload=self.need_restart_reconcile_service,
|
||||
)
|
||||
self.need_restart_reconcile_service = False
|
||||
|
||||
systemd.service(
|
||||
name="Setup acmetool-reconcile timer",
|
||||
service="acmetool-reconcile.timer",
|
||||
running=True,
|
||||
enabled=True,
|
||||
daemon_reload=self.need_restart_reconcile_timer,
|
||||
)
|
||||
self.need_restart_reconcile_timer = False
|
||||
|
||||
server.shell(
|
||||
name=f"Reconcile certificates for: {', '.join(self.domains)}",
|
||||
commands=["acmetool --batch --xlog.severity=debug reconcile"],
|
||||
)
|
||||
|
||||
@@ -0,0 +1,8 @@
|
||||
[Unit]
|
||||
Description=Renew TLS certificates with acmetool
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=/usr/bin/acmetool --batch reconcile
|
||||
|
||||
8
cmdeploy/src/cmdeploy/acmetool/acmetool-reconcile.timer
Normal file
8
cmdeploy/src/cmdeploy/acmetool/acmetool-reconcile.timer
Normal file
@@ -0,0 +1,8 @@
|
||||
[Unit]
|
||||
Description=Renew TLS certificates with acmetool
|
||||
|
||||
[Timer]
|
||||
OnCalendar=*-*-* 16:20:00
|
||||
|
||||
[Install]
|
||||
WantedBy=timers.target
|
||||
@@ -3,7 +3,7 @@ Description=acmetool HTTP redirector
|
||||
|
||||
[Service]
|
||||
Type=notify
|
||||
ExecStart=/usr/bin/acmetool redirector --service.uid=daemon
|
||||
ExecStart=/usr/bin/acmetool redirector --service.uid=daemon --bind=127.0.0.1:402
|
||||
Restart=always
|
||||
RestartSec=30
|
||||
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
SHELL=/bin/sh
|
||||
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin
|
||||
MAILTO=root
|
||||
20 16 * * * root /usr/bin/acmetool --batch reconcile && systemctl reload dovecot && systemctl reload postfix && systemctl reload nginx
|
||||
6
cmdeploy/src/cmdeploy/acmetool/desired.yaml.j2
Normal file
6
cmdeploy/src/cmdeploy/acmetool/desired.yaml.j2
Normal file
@@ -0,0 +1,6 @@
|
||||
satisfy:
|
||||
names:
|
||||
{%- for domain in domains %}
|
||||
- {{ domain }}
|
||||
{%- endfor %}
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
"acme-enter-email": "{{ email }}"
|
||||
"acme-agreement:https://letsencrypt.org/documents/LE-SA-v1.4-April-3-2024.pdf": true
|
||||
"acme-agreement:https://letsencrypt.org/documents/LE-SA-v1.6-August-18-2025.pdf": true
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
request:
|
||||
provider: https://acme-v02.api.letsencrypt.org/directory
|
||||
key:
|
||||
type: rsa
|
||||
type: ecdsa
|
||||
ecdsa-curve: nistp256
|
||||
challenge:
|
||||
webroot-paths:
|
||||
- /var/www/html/.well-known/acme-challenge
|
||||
|
||||
139
cmdeploy/src/cmdeploy/basedeploy.py
Normal file
139
cmdeploy/src/cmdeploy/basedeploy.py
Normal file
@@ -0,0 +1,139 @@
|
||||
import importlib.resources
|
||||
import io
|
||||
import os
|
||||
from contextlib import contextmanager
|
||||
|
||||
from pyinfra.operations import files, server, systemd
|
||||
|
||||
|
||||
def has_systemd():
|
||||
"""Returns False during Docker image builds or any other non-systemd environment."""
|
||||
return os.path.isdir("/run/systemd/system")
|
||||
|
||||
|
||||
@contextmanager
|
||||
def blocked_service_startup():
|
||||
"""Prevent services from auto-starting during package installation.
|
||||
|
||||
Installs a ``/usr/sbin/policy-rc.d`` that exits 101, blocking any
|
||||
service from being started by the package manager. This avoids bind
|
||||
conflicts and CPU/RAM spikes during initial setup. The file is removed
|
||||
when the context exits.
|
||||
"""
|
||||
# For documentation about policy-rc.d, see:
|
||||
# https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
|
||||
files.put(
|
||||
src=get_resource("policy-rc.d"),
|
||||
dest="/usr/sbin/policy-rc.d",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="755",
|
||||
)
|
||||
yield
|
||||
files.file("/usr/sbin/policy-rc.d", present=False)
|
||||
|
||||
|
||||
def get_resource(arg, pkg=__package__):
|
||||
return importlib.resources.files(pkg).joinpath(arg)
|
||||
|
||||
|
||||
def configure_remote_units(mail_domain, units) -> None:
|
||||
remote_base_dir = "/usr/local/lib/chatmaild"
|
||||
remote_venv_dir = f"{remote_base_dir}/venv"
|
||||
remote_chatmail_inipath = f"{remote_base_dir}/chatmail.ini"
|
||||
root_owned = dict(user="root", group="root", mode="644")
|
||||
|
||||
# install systemd units
|
||||
for fn in units:
|
||||
params = dict(
|
||||
execpath=f"{remote_venv_dir}/bin/{fn}",
|
||||
config_path=remote_chatmail_inipath,
|
||||
remote_venv_dir=remote_venv_dir,
|
||||
mail_domain=mail_domain,
|
||||
)
|
||||
|
||||
basename = fn if "." in fn else f"{fn}.service"
|
||||
|
||||
source_path = get_resource(f"service/{basename}.f")
|
||||
content = source_path.read_text().format(**params).encode()
|
||||
|
||||
files.put(
|
||||
name=f"Upload {basename}",
|
||||
src=io.BytesIO(content),
|
||||
dest=f"/etc/systemd/system/{basename}",
|
||||
**root_owned,
|
||||
)
|
||||
|
||||
|
||||
def activate_remote_units(units) -> None:
|
||||
# activate systemd units
|
||||
for fn in units:
|
||||
basename = fn if "." in fn else f"{fn}.service"
|
||||
|
||||
if fn == "chatmail-expire" or fn == "chatmail-fsreport":
|
||||
# don't auto-start but let the corresponding timer trigger execution
|
||||
enabled = False
|
||||
else:
|
||||
enabled = True
|
||||
systemd.service(
|
||||
name=f"Setup {basename}",
|
||||
service=basename,
|
||||
running=enabled,
|
||||
enabled=enabled,
|
||||
restarted=enabled,
|
||||
daemon_reload=True,
|
||||
)
|
||||
|
||||
|
||||
class Deployment:
|
||||
def install(self, deployer):
|
||||
# optional 'required_users' contains a list of (user, group, secondary-group-list) tuples.
|
||||
# If the group is None, no group is created corresponding to that user.
|
||||
# If the secondary group list is not None, all listed groups are created as well.
|
||||
required_users = getattr(deployer, "required_users", [])
|
||||
for user, group, groups in required_users:
|
||||
if group is not None:
|
||||
server.group(
|
||||
name="Create {} group".format(group), group=group, system=True
|
||||
)
|
||||
if groups is not None:
|
||||
for group2 in groups:
|
||||
server.group(
|
||||
name="Create {} group".format(group2), group=group2, system=True
|
||||
)
|
||||
server.user(
|
||||
name="Create {} user".format(user),
|
||||
user=user,
|
||||
group=group,
|
||||
groups=groups,
|
||||
system=True,
|
||||
)
|
||||
|
||||
deployer.install()
|
||||
|
||||
def configure(self, deployer):
|
||||
deployer.configure()
|
||||
|
||||
def activate(self, deployer):
|
||||
deployer.activate()
|
||||
|
||||
def perform_stages(self, deployers):
|
||||
default_stages = "install,configure,activate"
|
||||
stages = os.getenv("CMDEPLOY_STAGES", default_stages).split(",")
|
||||
|
||||
for stage in stages:
|
||||
for deployer in deployers:
|
||||
getattr(self, stage)(deployer)
|
||||
|
||||
|
||||
class Deployer:
|
||||
need_restart = False
|
||||
|
||||
def install(self):
|
||||
pass
|
||||
|
||||
def configure(self):
|
||||
pass
|
||||
|
||||
def activate(self):
|
||||
pass
|
||||
@@ -1,30 +0,0 @@
|
||||
;
|
||||
; Required DNS entries for chatmail servers
|
||||
;
|
||||
{% if A %}
|
||||
{{ mail_domain }}. A {{ A }}
|
||||
{% endif %}
|
||||
{% if AAAA %}
|
||||
{{ mail_domain }}. AAAA {{ AAAA }}
|
||||
{% endif %}
|
||||
{{ mail_domain }}. MX 10 {{ mail_domain }}.
|
||||
_mta-sts.{{ mail_domain }}. TXT "v=STSv1; id={{ sts_id }}"
|
||||
mta-sts.{{ mail_domain }}. CNAME {{ mail_domain }}.
|
||||
www.{{ mail_domain }}. CNAME {{ mail_domain }}.
|
||||
{{ dkim_entry }}
|
||||
|
||||
;
|
||||
; Recommended DNS entries for interoperability and security-hardening
|
||||
;
|
||||
{{ mail_domain }}. TXT "v=spf1 a:{{ mail_domain }} ~all"
|
||||
_dmarc.{{ mail_domain }}. TXT "v=DMARC1;p=reject;adkim=s;aspf=s"
|
||||
|
||||
{% if acme_account_url %}
|
||||
{{ mail_domain }}. CAA 0 issue "letsencrypt.org;accounturi={{ acme_account_url }}"
|
||||
{% endif %}
|
||||
_adsp._domainkey.{{ mail_domain }}. TXT "dkim=discardable"
|
||||
|
||||
_submission._tcp.{{ mail_domain }}. SRV 0 1 587 {{ mail_domain }}.
|
||||
_submissions._tcp.{{ mail_domain }}. SRV 0 1 465 {{ mail_domain }}.
|
||||
_imap._tcp.{{ mail_domain }}. SRV 0 1 143 {{ mail_domain }}.
|
||||
_imaps._tcp.{{ mail_domain }}. SRV 0 1 993 {{ mail_domain }}.
|
||||
@@ -5,7 +5,6 @@ along with command line option and subcommand parsing.
|
||||
|
||||
import argparse
|
||||
import importlib.resources
|
||||
import importlib.util
|
||||
import os
|
||||
import pathlib
|
||||
import shutil
|
||||
@@ -16,10 +15,27 @@ from pathlib import Path
|
||||
import pyinfra
|
||||
from chatmaild.config import read_config, write_initial_config
|
||||
from packaging import version
|
||||
from termcolor import colored
|
||||
|
||||
from . import dns, remote
|
||||
from .sshexec import SSHExec
|
||||
from .lxc.cli import (
|
||||
lxc_start_cmd,
|
||||
lxc_start_cmd_options,
|
||||
lxc_status_cmd,
|
||||
lxc_status_cmd_options,
|
||||
lxc_stop_cmd,
|
||||
lxc_stop_cmd_options,
|
||||
lxc_test_cmd,
|
||||
lxc_test_cmd_options,
|
||||
)
|
||||
from .lxc.incus import DNSConfigurationError
|
||||
from .sshexec import (
|
||||
LocalExec,
|
||||
SSHExec,
|
||||
resolve_host_from_ssh_config,
|
||||
resolve_key_from_ssh_config,
|
||||
)
|
||||
from .util import Out
|
||||
from .www import main as webdev_main
|
||||
|
||||
#
|
||||
# cmdeploy sub commands and options
|
||||
@@ -32,17 +48,30 @@ def init_cmd_options(parser):
|
||||
action="store",
|
||||
help="fully qualified DNS domain name for your chatmail instance",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--force",
|
||||
dest="recreate_ini",
|
||||
action="store_true",
|
||||
help="force reacreate ini file",
|
||||
)
|
||||
|
||||
|
||||
def init_cmd(args, out):
|
||||
"""Initialize chatmail config file."""
|
||||
mail_domain = args.chatmail_domain
|
||||
inipath = args.inipath
|
||||
if args.inipath.exists():
|
||||
print(f"Path exists, not modifying: {args.inipath}")
|
||||
return 1
|
||||
else:
|
||||
write_initial_config(args.inipath, mail_domain, overrides={})
|
||||
out.green(f"created config file for {mail_domain} in {args.inipath}")
|
||||
if not args.recreate_ini:
|
||||
print(f"[WARNING] Path exists, not modifying: {inipath}")
|
||||
return 1
|
||||
else:
|
||||
print(
|
||||
f"[WARNING] Force argument was provided, deleting config file: {inipath}"
|
||||
)
|
||||
inipath.unlink()
|
||||
|
||||
write_initial_config(inipath, mail_domain, overrides={})
|
||||
out.green(f"created config file for {mail_domain} in {inipath}")
|
||||
|
||||
|
||||
def run_cmd_options(parser):
|
||||
@@ -59,43 +88,90 @@ def run_cmd_options(parser):
|
||||
help="install/upgrade the server, but disable postfix & dovecot for now",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--ssh-host",
|
||||
dest="ssh_host",
|
||||
help="specify an SSH host to deploy to; uses mail_domain from chatmail.ini by default",
|
||||
"--website-only",
|
||||
action="store_true",
|
||||
help="only update/deploy the website, skipping full server upgrade/deployment, useful when you only changed/updated the web pages and don't need to re-run a full server upgrade",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--skip-dns-check",
|
||||
dest="dns_check_disabled",
|
||||
action="store_true",
|
||||
help="disable checks nslookup for dns",
|
||||
)
|
||||
add_ssh_host_option(parser)
|
||||
add_ssh_config_option(parser)
|
||||
|
||||
|
||||
def run_cmd(args, out):
|
||||
"""Deploy chatmail services on the remote server."""
|
||||
|
||||
sshexec = args.get_sshexec()
|
||||
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain
|
||||
sshexec = get_sshexec(ssh_host, ssh_config=args.ssh_config)
|
||||
require_iroh = args.config.enable_iroh_relay
|
||||
remote_data = dns.get_initial_remote_data(sshexec, args.config.mail_domain)
|
||||
if not dns.check_initial_remote_data(remote_data, print=out.red):
|
||||
return 1
|
||||
strict_tls = args.config.tls_cert_mode == "acme"
|
||||
if not args.dns_check_disabled:
|
||||
remote_data = dns.get_initial_remote_data(sshexec, args.config.mail_domain)
|
||||
if not dns.check_initial_remote_data(
|
||||
remote_data, strict_tls=strict_tls, print=out.red
|
||||
):
|
||||
return 1
|
||||
|
||||
env = os.environ.copy()
|
||||
env["CHATMAIL_INI"] = args.inipath
|
||||
env["CHATMAIL_WEBSITE_ONLY"] = "True" if args.website_only else ""
|
||||
env["CHATMAIL_DISABLE_MAIL"] = "True" if args.disable_mail else ""
|
||||
env["CHATMAIL_REQUIRE_IROH"] = "True" if require_iroh else ""
|
||||
deploy_path = importlib.resources.files(__package__).joinpath("deploy.py").resolve()
|
||||
if not args.dns_check_disabled:
|
||||
env["CHATMAIL_ADDR_V4"] = remote_data.get("A") or ""
|
||||
env["CHATMAIL_ADDR_V6"] = remote_data.get("AAAA") or ""
|
||||
env["DEBIAN_FRONTEND"] = "noninteractive"
|
||||
env["TERM"] = "linux"
|
||||
deploy_path = importlib.resources.files(__package__).joinpath("run.py").resolve()
|
||||
pyinf = "pyinfra --dry" if args.dry_run else "pyinfra"
|
||||
ssh_host = args.config.mail_domain if not args.ssh_host else args.ssh_host
|
||||
|
||||
cmd = f"{pyinf} --ssh-user root {ssh_host} {deploy_path} -y"
|
||||
ssh_config = args.ssh_config
|
||||
if ssh_config:
|
||||
ssh_config = str(Path(ssh_config).resolve())
|
||||
|
||||
# Use pyinfra's native SSH data keys to configure the connection directly
|
||||
# rather than relying on paramiko config parsing (see also sshexec.py)
|
||||
ip = resolve_host_from_ssh_config(ssh_host, ssh_config)
|
||||
key = resolve_key_from_ssh_config(ssh_host, ssh_config)
|
||||
data_args = f"--data ssh_hostname={ip} --data ssh_known_hosts_file=/dev/null"
|
||||
if key:
|
||||
data_args += f" --data ssh_key={key}"
|
||||
cmd = f"{pyinf} --ssh-user root {ssh_host} {deploy_path} -y {data_args}"
|
||||
if ssh_host in ["localhost", "@docker"]:
|
||||
if ssh_host == "@docker":
|
||||
env["CHATMAIL_NOPORTCHECK"] = "True"
|
||||
env["CHATMAIL_NOSYSCTL"] = "True"
|
||||
cmd = f"{pyinf} @local {deploy_path} -y"
|
||||
|
||||
if version.parse(pyinfra.__version__) < version.parse("3"):
|
||||
out.red("Please re-run scripts/initenv.sh to update pyinfra to version 3.")
|
||||
return 1
|
||||
|
||||
retcode = out.check_call(cmd, env=env)
|
||||
if retcode == 0:
|
||||
out.green("Deploy completed, call `cmdeploy dns` next.")
|
||||
elif not remote_data["acme_account_url"]:
|
||||
out.red("Deploy completed but letsencrypt not configured")
|
||||
out.red("Run 'cmdeploy run' again")
|
||||
retcode = 0
|
||||
else:
|
||||
try:
|
||||
ret = out.shell(cmd, env=env)
|
||||
if ret:
|
||||
out.red("Deploy failed")
|
||||
return 1
|
||||
if args.website_only:
|
||||
out.green("Website deployment completed.")
|
||||
elif (
|
||||
not args.dns_check_disabled
|
||||
and strict_tls
|
||||
and not remote_data["acme_account_url"]
|
||||
):
|
||||
out.red("Deploy completed but letsencrypt not configured")
|
||||
out.red("Run 'cmdeploy run' again")
|
||||
else:
|
||||
out.green("Deploy completed, call `cmdeploy dns` next.")
|
||||
return 0
|
||||
except subprocess.CalledProcessError:
|
||||
out.red("Deploy failed")
|
||||
return retcode
|
||||
return 1
|
||||
|
||||
|
||||
def dns_cmd_options(parser):
|
||||
@@ -104,18 +180,23 @@ def dns_cmd_options(parser):
|
||||
dest="zonefile",
|
||||
type=pathlib.Path,
|
||||
default=None,
|
||||
help="write out a zonefile",
|
||||
help="write DNS records in standard BIND format to the given file",
|
||||
)
|
||||
add_ssh_host_option(parser)
|
||||
add_ssh_config_option(parser)
|
||||
|
||||
|
||||
def dns_cmd(args, out):
|
||||
"""Check DNS entries and optionally generate dns zone file."""
|
||||
sshexec = args.get_sshexec()
|
||||
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain
|
||||
sshexec = get_sshexec(ssh_host, verbose=args.verbose, ssh_config=args.ssh_config)
|
||||
tls_cert_mode = args.config.tls_cert_mode
|
||||
strict_tls = tls_cert_mode == "acme"
|
||||
remote_data = dns.get_initial_remote_data(sshexec, args.config.mail_domain)
|
||||
if not remote_data:
|
||||
if not dns.check_initial_remote_data(remote_data, strict_tls=strict_tls):
|
||||
return 1
|
||||
|
||||
if not remote_data["acme_account_url"]:
|
||||
if strict_tls and not remote_data["acme_account_url"]:
|
||||
out.red("could not get letsencrypt account url, please run 'cmdeploy run'")
|
||||
return 1
|
||||
|
||||
@@ -123,6 +204,7 @@ def dns_cmd(args, out):
|
||||
out.red("could not determine dkim_entry, please run 'cmdeploy run'")
|
||||
return 1
|
||||
|
||||
remote_data["strict_tls"] = strict_tls
|
||||
zonefile = dns.get_filled_zone_file(remote_data)
|
||||
|
||||
if args.zonefile:
|
||||
@@ -136,10 +218,16 @@ def dns_cmd(args, out):
|
||||
return retcode
|
||||
|
||||
|
||||
def status_cmd_options(parser):
|
||||
add_ssh_host_option(parser)
|
||||
add_ssh_config_option(parser)
|
||||
|
||||
|
||||
def status_cmd(args, out):
|
||||
"""Display status for online chatmail instance."""
|
||||
|
||||
sshexec = args.get_sshexec()
|
||||
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain
|
||||
sshexec = get_sshexec(ssh_host, verbose=args.verbose, ssh_config=args.ssh_config)
|
||||
|
||||
out.green(f"chatmail domain: {args.config.mail_domain}")
|
||||
if args.config.privacy_mail:
|
||||
@@ -158,17 +246,15 @@ def test_cmd_options(parser):
|
||||
action="store_true",
|
||||
help="also run slow tests",
|
||||
)
|
||||
add_ssh_host_option(parser)
|
||||
add_ssh_config_option(parser)
|
||||
|
||||
|
||||
def test_cmd(args, out):
|
||||
"""Run local and online tests for chatmail deployment.
|
||||
"""Run local and online tests for chatmail deployment."""
|
||||
|
||||
This will automatically pip-install 'deltachat' if it's not available.
|
||||
"""
|
||||
|
||||
x = importlib.util.find_spec("deltachat")
|
||||
if x is None:
|
||||
out.check_call(f"{sys.executable} -m pip install deltachat")
|
||||
env = os.environ.copy()
|
||||
env["CHATMAIL_INI"] = str(args.inipath.resolve())
|
||||
|
||||
pytest_path = shutil.which("pytest")
|
||||
pytest_args = [
|
||||
@@ -182,7 +268,11 @@ def test_cmd(args, out):
|
||||
]
|
||||
if args.slow:
|
||||
pytest_args.append("--slow")
|
||||
ret = out.run_ret(pytest_args)
|
||||
if args.ssh_host:
|
||||
pytest_args.extend(["--ssh-host", args.ssh_host])
|
||||
if args.ssh_config:
|
||||
pytest_args.extend(["--ssh-config", str(Path(args.ssh_config).resolve())])
|
||||
ret = out.shell(" ".join(pytest_args), env=env)
|
||||
return ret
|
||||
|
||||
|
||||
@@ -198,7 +288,12 @@ def fmt_cmd_options(parser):
|
||||
def fmt_cmd(args, out):
|
||||
"""Run formattting fixes on all chatmail source code."""
|
||||
|
||||
sources = [str(importlib.resources.files(x)) for x in ("chatmaild", "cmdeploy")]
|
||||
chatmaild_dir = importlib.resources.files("chatmaild").resolve()
|
||||
cmdeploy_dir = chatmaild_dir.joinpath(
|
||||
"..", "..", "..", "cmdeploy", "src", "cmdeploy"
|
||||
).resolve()
|
||||
sources = [str(chatmaild_dir), str(cmdeploy_dir)]
|
||||
|
||||
format_args = [shutil.which("ruff"), "format"]
|
||||
check_args = [shutil.which("ruff"), "check"]
|
||||
|
||||
@@ -214,8 +309,8 @@ def fmt_cmd(args, out):
|
||||
format_args.extend(sources)
|
||||
check_args.extend(sources)
|
||||
|
||||
out.check_call(" ".join(format_args), quiet=not args.verbose)
|
||||
out.check_call(" ".join(check_args), quiet=not args.verbose)
|
||||
out.shell(" ".join(format_args), quiet=not args.verbose)
|
||||
out.shell(" ".join(check_args), quiet=not args.verbose)
|
||||
|
||||
|
||||
def bench_cmd(args, out):
|
||||
@@ -228,9 +323,7 @@ def bench_cmd(args, out):
|
||||
|
||||
def webdev_cmd(args, out):
|
||||
"""Run local web development loop for static web pages."""
|
||||
from .www import main
|
||||
|
||||
main()
|
||||
webdev_main()
|
||||
|
||||
|
||||
#
|
||||
@@ -238,30 +331,23 @@ def webdev_cmd(args, out):
|
||||
#
|
||||
|
||||
|
||||
class Out:
|
||||
"""Convenience output printer providing coloring."""
|
||||
def add_ssh_host_option(parser):
|
||||
parser.add_argument(
|
||||
"--ssh-host",
|
||||
dest="ssh_host",
|
||||
help="Run commands on 'localhost', via '@docker', or on a specific SSH host "
|
||||
"instead of chatmail.ini's mail_domain.",
|
||||
)
|
||||
|
||||
def red(self, msg, file=sys.stderr):
|
||||
print(colored(msg, "red"), file=file)
|
||||
|
||||
def green(self, msg, file=sys.stderr):
|
||||
print(colored(msg, "green"), file=file)
|
||||
|
||||
def __call__(self, msg, red=False, green=False, file=sys.stdout):
|
||||
color = "red" if red else ("green" if green else None)
|
||||
print(colored(msg, color), file=file)
|
||||
|
||||
def check_call(self, arg, env=None, quiet=False):
|
||||
if not quiet:
|
||||
self(f"[$ {arg}]", file=sys.stderr)
|
||||
return subprocess.check_call(arg, shell=True, env=env)
|
||||
|
||||
def run_ret(self, args, env=None, quiet=False):
|
||||
if not quiet:
|
||||
cmdstring = " ".join(args)
|
||||
self(f"[$ {cmdstring}]", file=sys.stderr)
|
||||
proc = subprocess.run(args, env=env, check=False)
|
||||
return proc.returncode
|
||||
def add_ssh_config_option(parser):
|
||||
parser.add_argument(
|
||||
"--ssh-config",
|
||||
dest="ssh_config",
|
||||
type=Path,
|
||||
default=None,
|
||||
help="Path to an SSH config file (e.g. lxconfigs/ssh-config).",
|
||||
)
|
||||
|
||||
|
||||
def add_config_option(parser):
|
||||
@@ -269,29 +355,30 @@ def add_config_option(parser):
|
||||
"--config",
|
||||
dest="inipath",
|
||||
action="store",
|
||||
default=Path("chatmail.ini"),
|
||||
default=Path(os.environ.get("CHATMAIL_INI", "chatmail.ini")),
|
||||
type=Path,
|
||||
help="path to the chatmail.ini file",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--verbose",
|
||||
"-v",
|
||||
dest="verbose",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="provide verbose logging",
|
||||
)
|
||||
|
||||
|
||||
def add_subcommand(subparsers, func):
|
||||
def add_subcommand(subparsers, func, add_config=True):
|
||||
name = func.__name__
|
||||
assert name.endswith("_cmd")
|
||||
name = name[:-4]
|
||||
name = name[:-4].replace("_", "-")
|
||||
doc = func.__doc__.strip()
|
||||
help = doc.split("\n")[0].strip(".")
|
||||
p = subparsers.add_parser(name, description=doc, help=help)
|
||||
p.set_defaults(func=func)
|
||||
add_config_option(p)
|
||||
if add_config:
|
||||
add_config_option(p)
|
||||
p.add_argument(
|
||||
"-v",
|
||||
"--verbose",
|
||||
dest="verbose",
|
||||
action="count",
|
||||
default=0,
|
||||
help="increase verbosity (can be repeated: -v, -vv)",
|
||||
)
|
||||
return p
|
||||
|
||||
|
||||
@@ -300,41 +387,60 @@ Setup your chatmail server configuration and
|
||||
deploy it via SSH to your remote location.
|
||||
"""
|
||||
|
||||
# Explicit subcommand registry: (cmd_func, options_func_or_None, needs_config).
|
||||
# LXC commands don't need a chatmail.ini (no config); all others do.
|
||||
SUBCOMMANDS = [
|
||||
(init_cmd, init_cmd_options, True),
|
||||
(run_cmd, run_cmd_options, True),
|
||||
(dns_cmd, dns_cmd_options, True),
|
||||
(status_cmd, status_cmd_options, True),
|
||||
(test_cmd, test_cmd_options, True),
|
||||
(fmt_cmd, fmt_cmd_options, True),
|
||||
(bench_cmd, None, True),
|
||||
(webdev_cmd, None, True),
|
||||
(lxc_start_cmd, lxc_start_cmd_options, False),
|
||||
(lxc_stop_cmd, lxc_stop_cmd_options, False),
|
||||
(lxc_status_cmd, lxc_status_cmd_options, False),
|
||||
(lxc_test_cmd, lxc_test_cmd_options, False),
|
||||
]
|
||||
|
||||
|
||||
def get_parser():
|
||||
"""Return an ArgumentParser for the 'cmdeploy' CLI"""
|
||||
|
||||
parser = argparse.ArgumentParser(description=description.strip())
|
||||
parser.set_defaults(func=None, inipath=None)
|
||||
subparsers = parser.add_subparsers(title="subcommands")
|
||||
|
||||
# find all subcommands in the module namespace
|
||||
glob = globals()
|
||||
for name, func in glob.items():
|
||||
if name.endswith("_cmd"):
|
||||
subparser = add_subcommand(subparsers, func)
|
||||
addopts = glob.get(name + "_options")
|
||||
if addopts is not None:
|
||||
addopts(subparser)
|
||||
for func, addopts, needs_config in SUBCOMMANDS:
|
||||
subparser = add_subcommand(subparsers, func, add_config=needs_config)
|
||||
if addopts is not None:
|
||||
addopts(subparser)
|
||||
|
||||
return parser
|
||||
|
||||
|
||||
def get_sshexec(ssh_host: str, verbose=True, ssh_config=None):
|
||||
if ssh_host in ["localhost", "@local"]:
|
||||
return LocalExec(verbose, docker=False)
|
||||
elif ssh_host == "@docker":
|
||||
return LocalExec(verbose, docker=True)
|
||||
if verbose:
|
||||
print(f"[ssh] login to {ssh_host}")
|
||||
return SSHExec(ssh_host, verbose=verbose, ssh_config=ssh_config)
|
||||
|
||||
|
||||
def main(args=None):
|
||||
"""Provide main entry point for 'cmdeploy' CLI invocation."""
|
||||
parser = get_parser()
|
||||
args = parser.parse_args(args=args)
|
||||
if not hasattr(args, "func"):
|
||||
if args.func is None:
|
||||
return parser.parse_args(["-h"])
|
||||
|
||||
def get_sshexec():
|
||||
print(f"[ssh] login to {args.config.mail_domain}")
|
||||
return SSHExec(args.config.mail_domain, verbose=args.verbose)
|
||||
|
||||
args.get_sshexec = get_sshexec
|
||||
|
||||
out = Out()
|
||||
out = Out(verbosity=args.verbose)
|
||||
kwargs = {}
|
||||
if args.func.__name__ not in ("init_cmd", "fmt_cmd"):
|
||||
|
||||
if args.inipath is not None and args.func.__name__ not in ("init_cmd", "fmt_cmd"):
|
||||
if not args.inipath.exists():
|
||||
out.red(f"expecting {args.inipath} to exist, run init first?")
|
||||
raise SystemExit(1)
|
||||
@@ -349,6 +455,9 @@ def main(args=None):
|
||||
if res is None:
|
||||
res = 0
|
||||
return res
|
||||
except DNSConfigurationError as exc:
|
||||
out.red(str(exc))
|
||||
return 1
|
||||
except KeyboardInterrupt:
|
||||
out.red("KeyboardInterrupt")
|
||||
sys.exit(130)
|
||||
|
||||
667
cmdeploy/src/cmdeploy/deployers.py
Normal file
667
cmdeploy/src/cmdeploy/deployers.py
Normal file
@@ -0,0 +1,667 @@
|
||||
"""
|
||||
Chat Mail pyinfra deploy.
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
from io import BytesIO, StringIO
|
||||
from pathlib import Path
|
||||
|
||||
from chatmaild.config import read_config
|
||||
from pyinfra import facts, host, logger
|
||||
from pyinfra.api import FactBase
|
||||
from pyinfra.facts import hardware
|
||||
from pyinfra.facts.files import Sha256File
|
||||
from pyinfra.facts.systemd import SystemdEnabled
|
||||
from pyinfra.operations import apt, files, pip, server, systemd
|
||||
|
||||
from .acmetool import AcmetoolDeployer
|
||||
from .basedeploy import (
|
||||
Deployer,
|
||||
Deployment,
|
||||
activate_remote_units,
|
||||
blocked_service_startup,
|
||||
configure_remote_units,
|
||||
get_resource,
|
||||
has_systemd,
|
||||
)
|
||||
from .dovecot.deployer import DovecotDeployer
|
||||
from .external.deployer import ExternalTlsDeployer
|
||||
from .filtermail.deployer import FiltermailDeployer
|
||||
from .mtail.deployer import MtailDeployer
|
||||
from .nginx.deployer import NginxDeployer
|
||||
from .opendkim.deployer import OpendkimDeployer
|
||||
from .postfix.deployer import PostfixDeployer
|
||||
from .selfsigned.deployer import SelfSignedTlsDeployer
|
||||
from .util import Out, get_version_string
|
||||
from .www import build_webpages, find_merge_conflict, get_paths
|
||||
|
||||
|
||||
class Port(FactBase):
    """
    Returns the process occupying a port.
    """

    def command(self, port: int) -> str:
        """Build the shell pipeline listing the process name bound to *port*.

        Uses ``ss -lptn`` filtered to the given source port, then strips the
        output down to the bare process name.
        """
        return (
            "ss -lptn 'src :%d' | awk 'NR>1 {print $6,$7}' | sed 's/users:((\"//;s/\".*//'"
            % (port,)
        )

    def process(self, output: list[str]) -> str:
        """Return the first output line (the process name, or empty string).

        FIX: the annotation was ``[str]`` — a list *literal* containing the
        ``str`` type, not a valid type hint; it is now ``list[str]``.
        """
        return output[0]
|
||||
|
||||
|
||||
def _build_chatmaild(dist_dir) -> Path:
    """Build a fresh chatmaild sdist into *dist_dir* and return its path.

    FIX: the return annotation said ``-> None`` although the function
    returns the single built sdist file; it is now ``-> Path``.

    :param dist_dir: output directory; wiped and recreated on every call
        so the directory is guaranteed to contain exactly one artifact.
    :raises AssertionError: if the build produced more or less than one file.
    """
    dist_dir = Path(dist_dir).resolve()
    if dist_dir.exists():
        shutil.rmtree(dist_dir)
    dist_dir.mkdir()
    subprocess.check_output(
        [sys.executable, "-m", "build", "-n"]
        + ["--sdist", "chatmaild", "--outdir", str(dist_dir)]
    )
    entries = list(dist_dir.iterdir())
    assert len(entries) == 1
    return entries[0]
|
||||
|
||||
|
||||
def remove_legacy_artifacts():
    """Disable services left over from earlier chatmail releases.

    No-op on hosts without systemd (e.g. minimal containers).
    """
    if not has_systemd():
        return
    # disable legacy doveauth-dictproxy.service
    if host.get_fact(SystemdEnabled).get("doveauth-dictproxy.service"):
        systemd.service(
            name="Disable legacy doveauth-dictproxy.service",
            service="doveauth-dictproxy.service",
            running=False,
            enabled=False,
        )
|
||||
|
||||
|
||||
def _install_remote_venv_with_chatmaild() -> None:
    """Build chatmaild locally, upload it, and install it into a remote venv.

    Steps: clean up legacy services, build the sdist, upload it to
    /usr/local/lib/chatmaild/dist, create the venv, and force-reinstall the
    package so a re-deploy with the same version still updates the code.
    """
    remove_legacy_artifacts()
    dist_file = _build_chatmaild(dist_dir=Path("chatmaild/dist"))
    remote_base_dir = "/usr/local/lib/chatmaild"
    remote_dist_file = f"{remote_base_dir}/dist/{dist_file.name}"
    remote_venv_dir = f"{remote_base_dir}/venv"
    root_owned = dict(user="root", group="root", mode="644")

    apt.packages(
        name="apt install python3-virtualenv",
        packages=["python3-virtualenv"],
    )

    files.put(
        name="Upload chatmaild source package",
        src=dist_file.open("rb"),
        dest=remote_dist_file,
        create_remote_dir=True,
        **root_owned,
    )

    pip.virtualenv(
        name=f"chatmaild virtualenv {remote_venv_dir}",
        path=remote_venv_dir,
        always_copy=True,
    )

    # crypt_r ships only as an sdist, so a compiler and CPython headers
    # are needed on the target host to build it during pip install.
    apt.packages(
        name="install gcc and headers to build crypt_r source package",
        packages=["gcc", "python3-dev"],
    )

    # --force-reinstall: the sdist version may be unchanged between deploys,
    # yet the code may differ; forcing guarantees the upload is installed.
    server.shell(
        name=f"forced pip-install {dist_file.name}",
        commands=[
            f"{remote_venv_dir}/bin/pip install --force-reinstall {remote_dist_file}"
        ],
    )
|
||||
|
||||
|
||||
def _configure_remote_venv_with_chatmaild(config) -> None:
    """Upload chatmail.ini for the remote chatmaild venv and drop old metrics.

    :param config: parsed chatmail configuration; its serialized bytes are
        written to /usr/local/lib/chatmaild/chatmail.ini on the host.
    """
    remote_base_dir = "/usr/local/lib/chatmaild"
    remote_chatmail_inipath = f"{remote_base_dir}/chatmail.ini"
    root_owned = dict(user="root", group="root", mode="644")

    files.put(
        name=f"Upload {remote_chatmail_inipath}",
        src=config._getbytefile(),
        dest=remote_chatmail_inipath,
        **root_owned,
    )

    # Remove legacy metrics cron job and its generated output.
    files.file(
        path="/etc/cron.d/chatmail-metrics",
        present=False,
    )
    files.file(
        path="/var/www/html/metrics",
        present=False,
    )
|
||||
|
||||
|
||||
class UnboundDeployer(Deployer):
    """Deploy the `unbound` local caching DNS resolver.

    Tracks configuration changes in ``need_restart`` so activation only
    restarts the service when a config file actually changed.
    """

    def __init__(self, config):
        self.config = config
        self.need_restart = False

    def install(self):
        # Run local DNS resolver `unbound`. `resolvconf` takes care of
        # setting up /etc/resolv.conf to use 127.0.0.1 as the resolver.

        # On an IPv4-only system, if unbound is started but not configured,
        # it causes subsequent steps to fail to resolve hosts.
        with blocked_service_startup():
            apt.packages(
                name="Install unbound",
                packages=["unbound", "unbound-anchor", "dnsutils"],
            )

    def configure(self):
        server.shell(
            name="Generate root keys for validating DNSSEC",
            commands=[
                "unbound-anchor -a /var/lib/unbound/root.key || true",
            ],
        )
        if self.config.disable_ipv6:
            files.directory(
                path="/etc/unbound/unbound.conf.d",
                present=True,
                user="root",
                group="root",
                mode="755",
            )
            conf = files.put(
                src=get_resource("unbound/unbound.conf.j2"),
                dest="/etc/unbound/unbound.conf.d/chatmail.conf",
                user="root",
                group="root",
                mode="644",
            )
        else:
            # IPv6 enabled: drop our override so distro defaults apply.
            conf = files.file(
                path="/etc/unbound/unbound.conf.d/chatmail.conf",
                present=False,
            )
        self.need_restart |= conf.changed

    def activate(self):
        # FIX: the operation label previously duplicated the DNSSEC step's
        # name ("Generate root keys for validating DNSSEC") although the
        # command only clears a failed unit state.
        server.shell(
            name="Reset failed state of unbound.service",
            commands=[
                "systemctl reset-failed unbound.service",
            ],
        )

        systemd.service(
            name="Start and enable unbound",
            service="unbound.service",
            running=True,
            enabled=True,
            restarted=self.need_restart,
        )
|
||||
|
||||
|
||||
class MtastsDeployer(Deployer):
    """Remove the legacy MTA-STS daemon, its config and its systemd unit."""

    def configure(self):
        # Remove configuration.
        files.file("/etc/mta-sts-daemon.yml", present=False)
        files.directory("/usr/local/lib/postfix-mta-sts-resolver", present=False)
        files.file("/etc/systemd/system/mta-sts-daemon.service", present=False)

    def activate(self):
        # daemon_reload so systemd forgets the unit file removed above.
        systemd.service(
            name="Stop MTA-STS daemon",
            service="mta-sts-daemon.service",
            daemon_reload=True,
            running=False,
            enabled=False,
        )
|
||||
|
||||
|
||||
class WebsiteDeployer(Deployer):
    """Build (optionally, via hugo) and upload the instance website."""

    def __init__(self, config):
        self.config = config

    def install(self):
        files.directory(
            name="Ensure /var/www exists",
            path="/var/www",
            user="root",
            group="root",
            mode="755",
            present=True,
        )

    def configure(self):
        www_path, src_dir, build_dir = get_paths(self.config)
        # if www_folder was set to a non-existing folder, skip upload
        if not www_path.is_dir():
            logger.warning("Building web pages is disabled in chatmail.ini, skipping")
        elif (path := find_merge_conflict(src_dir)) is not None:
            logger.warning(
                f"Merge conflict found in {path}, skipping website deployment. Fix merge conflict if you want to upload your web page."
            )
        else:
            # if www_folder is a hugo page, build it
            if build_dir:
                www_path = build_webpages(src_dir, build_dir, self.config)
                if www_path is None:
                    logger.warning("Web page build failed, skipping website deployment")
                    return
            # if it is not a hugo page, upload it as is
            # pyinfra files.rsync (experimental) causes problems with ssh-config configuration
            # the stable files.sync should do
            files.sync(
                src=str(www_path),
                dest="/var/www/html",
                user="www-data",
                group="www-data",
                delete=True,
            )
|
||||
|
||||
|
||||
class LegacyRemoveDeployer(Deployer):
    """Remove packages, files and services left over from older releases."""

    def install(self):
        apt.packages(name="Remove rspamd", packages="rspamd", present=False)

        # remove historic expunge script
        # which is now implemented through a systemd timer (chatmail-expire)
        files.file(
            path="/etc/cron.d/expunge",
            present=False,
        )

        # Remove OBS repository key that is no longer used.
        files.file("/etc/apt/keyrings/obs-home-deltachat.gpg", present=False)
        files.line(
            name="Remove DeltaChat OBS home repository from sources.list",
            path="/etc/apt/sources.list",
            line="deb [signed-by=/etc/apt/keyrings/obs-home-deltachat.gpg] https://download.opensuse.org/repositories/home:/deltachat/Debian_12/ ./",
            escape_regex_characters=True,
            present=False,
        )

        # prior relay versions used filelogging
        files.directory(
            name="Ensure old logs on disk are deleted",
            path="/var/log/journal/",
            present=False,
        )
        # remove echobot if it is still running
        if has_systemd() and host.get_fact(SystemdEnabled).get("echobot.service"):
            systemd.service(
                name="Disable echobot.service",
                service="echobot.service",
                running=False,
                enabled=False,
            )
|
||||
|
||||
|
||||
def check_config(config):
    """Validate that privacy settings were customized and return *config*.

    Non-testrun.org deployments must not ship the upstream maintainers'
    contact data, so every ``privacy*`` config value is scanned for a small
    blocklist of upstream-specific words.

    :param config: parsed chatmail configuration object.
    :returns: the same *config*, unchanged (for call chaining).
    :raises ValueError: if a ``privacy*`` setting still contains upstream
        contact data on a non-testrun domain.
    """
    mail_domain = config.mail_domain
    if mail_domain != "testrun.org" and not mail_domain.endswith(".testrun.org"):
        blocked_words = "merlinux schmieder testrun.org".split()
        # Idiom fix: iterate items() instead of keys + per-key lookup.
        for key, value in config.__dict__.items():
            if key.startswith("privacy") and any(
                x in str(value) for x in blocked_words
            ):
                raise ValueError(
                    f"please set your own privacy contacts/addresses in {config._inipath}"
                )
    return config
|
||||
|
||||
|
||||
class TurnDeployer(Deployer):
    """Download the chatmail-turn binary and manage its systemd unit."""

    def __init__(self, mail_domain):
        self.mail_domain = mail_domain
        self.units = ["turnserver"]

    def install(self):
        # Pick the release URL and checksum matching the host architecture;
        # unsupported architectures raise KeyError here, aborting the deploy.
        (url, sha256sum) = {
            "x86_64": (
                "https://github.com/chatmail/chatmail-turn/releases/download/v0.4/chatmail-turn-x86_64-linux",
                "1ec1f5c50122165e858a5a91bcba9037a28aa8cb8b64b8db570aa457c6141a8a",
            ),
            "aarch64": (
                "https://github.com/chatmail/chatmail-turn/releases/download/v0.4/chatmail-turn-aarch64-linux",
                "0fb3e792419494e21ecad536464929dba706bb2c88884ed8f1788141d26fc756",
            ),
        }[host.get_fact(facts.server.Arch)]

        # Skip the download when the installed binary already matches.
        existing_sha256sum = host.get_fact(Sha256File, "/usr/local/bin/chatmail-turn")
        if existing_sha256sum != sha256sum:
            # Download to a .new file, verify the checksum, then rename —
            # the binary is only replaced after successful verification.
            server.shell(
                name="Download chatmail-turn",
                commands=[
                    f"(curl -L {url} >/usr/local/bin/chatmail-turn.new && (echo '{sha256sum} /usr/local/bin/chatmail-turn.new' | sha256sum -c) && mv /usr/local/bin/chatmail-turn.new /usr/local/bin/chatmail-turn)",
                    "chmod 755 /usr/local/bin/chatmail-turn",
                ],
            )

    def configure(self):
        configure_remote_units(self.mail_domain, self.units)

    def activate(self):
        activate_remote_units(self.units)
|
||||
|
||||
|
||||
class IrohDeployer(Deployer):
    """Download iroh-relay, configure it, and manage its systemd unit."""

    def __init__(self, enable_iroh_relay):
        # Whether the unit gets enabled at boot; it is always started.
        self.enable_iroh_relay = enable_iroh_relay

    def install(self):
        # Pick the release tarball and checksum for the host architecture.
        (url, sha256sum) = {
            "x86_64": (
                "https://github.com/n0-computer/iroh/releases/download/v0.35.0/iroh-relay-v0.35.0-x86_64-unknown-linux-musl.tar.gz",
                "45c81199dbd70f8c4c30fef7f3b9727ca6e3cea8f2831333eeaf8aa71bf0fac1",
            ),
            "aarch64": (
                "https://github.com/n0-computer/iroh/releases/download/v0.35.0/iroh-relay-v0.35.0-aarch64-unknown-linux-musl.tar.gz",
                "f8ef27631fac213b3ef668d02acd5b3e215292746a3fc71d90c63115446008b1",
            ),
        }[host.get_fact(facts.server.Arch)]

        # Only download if the installed binary's checksum differs.
        existing_sha256sum = host.get_fact(Sha256File, "/usr/local/bin/iroh-relay")
        if existing_sha256sum != sha256sum:
            # Extract, verify checksum, then atomically rename into place.
            server.shell(
                name="Download iroh-relay",
                commands=[
                    f"(curl -L {url} | gunzip | tar -x -f - ./iroh-relay -O >/usr/local/bin/iroh-relay.new && (echo '{sha256sum} /usr/local/bin/iroh-relay.new' | sha256sum -c) && mv /usr/local/bin/iroh-relay.new /usr/local/bin/iroh-relay)",
                    "chmod 755 /usr/local/bin/iroh-relay",
                ],
            )

        # NOTE(review): set unconditionally (not only when the binary was
        # re-downloaded), so activate() always restarts the service after an
        # install stage — confirm this is intended.
        self.need_restart = True

    def configure(self):
        systemd_unit = files.put(
            name="Upload iroh-relay systemd unit",
            src=get_resource("iroh-relay.service"),
            dest="/etc/systemd/system/iroh-relay.service",
            user="root",
            group="root",
            mode="644",
        )
        self.need_restart |= systemd_unit.changed

        iroh_config = files.put(
            name="Upload iroh-relay config",
            src=get_resource("iroh-relay.toml"),
            dest="/etc/iroh-relay.toml",
            user="root",
            group="root",
            mode="644",
        )
        self.need_restart |= iroh_config.changed

    def activate(self):
        systemd.service(
            name="Start and enable iroh-relay",
            service="iroh-relay.service",
            running=True,
            enabled=self.enable_iroh_relay,
            restarted=self.need_restart,
        )
        self.need_restart = False
|
||||
|
||||
|
||||
class JournaldDeployer(Deployer):
    """Upload journald configuration and restart the journal when it changes."""

    def configure(self):
        journald_conf = files.put(
            name="Configure journald",
            src=get_resource("journald.conf"),
            dest="/etc/systemd/journald.conf",
            user="root",
            group="root",
            mode="644",
        )
        # journald only picks up config changes on restart.
        self.need_restart = journald_conf.changed

    def activate(self):
        systemd.service(
            name="Start and enable journald",
            service="systemd-journald.service",
            running=True,
            enabled=True,
            restarted=self.need_restart,
        )
        self.need_restart = False
|
||||
|
||||
|
||||
class ChatmailVenvDeployer(Deployer):
    """Install and configure the chatmaild virtualenv plus its systemd units."""

    def __init__(self, config):
        self.config = config
        # systemd units (services and timers) shipped with chatmaild.
        self.units = (
            "chatmail-metadata",
            "lastlogin",
            "chatmail-expire",
            "chatmail-expire.timer",
            "chatmail-fsreport",
            "chatmail-fsreport.timer",
        )

    def install(self):
        _install_remote_venv_with_chatmaild()

    def configure(self):
        _configure_remote_venv_with_chatmaild(self.config)
        configure_remote_units(self.config.mail_domain, self.units)

    def activate(self):
        activate_remote_units(self.units)
|
||||
|
||||
|
||||
class ChatmailDeployer(Deployer):
    """Base host setup: apt hygiene, baseline packages and core directories."""

    # (user, group, home) triples.
    # NOTE(review): presumably consumed by the basedeploy framework to create
    # accounts before stages run — confirm against Deployer/Deployment.
    required_users = [
        ("vmail", "vmail", None),
        ("iroh", None, None),
    ]

    def __init__(self, config):
        self.config = config
        self.mail_domain = config.mail_domain

    def install(self):
        files.put(
            name="Disable installing recommended packages globally",
            src=BytesIO(b'APT::Install-Recommends "false";\n'),
            dest="/etc/apt/apt.conf.d/00InstallRecommends",
            user="root",
            group="root",
            mode="644",
        )
        # Refresh the package index at most once per day.
        apt.update(name="apt update", cache_time=24 * 3600)
        apt.upgrade(name="upgrade apt packages", auto_remove=True)

        apt.packages(
            name="Install curl",
            packages=["curl"],
        )

        apt.packages(
            name="Install rsync",
            packages=["rsync"],
        )
        apt.packages(
            name="Ensure cron is installed",
            packages=["cron"],
        )

    def configure(self):
        # Ensure the per-domain mailbox directory exists before
        # chatmail-metadata starts (it crashes without it).
        files.directory(
            name="Ensure vmail mailbox directory exists",
            path=f"/home/vmail/mail/{self.mail_domain}",
            user="vmail",
            group="vmail",
            mode="700",
            present=True,
        )

        # This file is used by auth proxy.
        # https://wiki.debian.org/EtcMailName
        server.shell(
            name="Setup /etc/mailname",
            commands=[
                f"echo {self.mail_domain} >/etc/mailname; chmod 644 /etc/mailname"
            ],
        )

        files.directory(
            name=f"Ensure mailboxes directory {self.config.mailboxes_dir} exists",
            path=str(self.config.mailboxes_dir),
            user="vmail",
            group="vmail",
            mode="700",
            present=True,
        )
|
||||
|
||||
|
||||
class FcgiwrapDeployer(Deployer):
    """Install and run fcgiwrap (FastCGI wrapper used behind nginx)."""

    def install(self):
        apt.packages(
            name="Install fcgiwrap",
            packages=["fcgiwrap"],
        )

    def activate(self):
        systemd.service(
            name="Start and enable fcgiwrap",
            service="fcgiwrap.service",
            running=True,
            enabled=True,
        )
|
||||
|
||||
|
||||
class GithashDeployer(Deployer):
    """Record the deployed relay version on the host in /etc/chatmail-version."""

    def activate(self):
        files.put(
            name="Upload chatmail relay git commit hash",
            src=StringIO(get_version_string()),
            dest="/etc/chatmail-version",
            mode="700",
        )
|
||||
|
||||
|
||||
def get_tls_deployer(config, mail_domain):
    """Select the appropriate TLS deployer based on config.

    Supported ``tls_cert_mode`` values are ``acme`` (acmetool-managed
    certificates), ``self`` (self-signed) and ``external`` (operator-provided
    cert/key paths); any other value raises :class:`ValueError`.
    """
    tls_domains = [mail_domain, f"mta-sts.{mail_domain}", f"www.{mail_domain}"]

    mode = config.tls_cert_mode
    if mode == "acme":
        return AcmetoolDeployer(config.acme_email, tls_domains)
    if mode == "self":
        return SelfSignedTlsDeployer(mail_domain)
    if mode == "external":
        return ExternalTlsDeployer(config.tls_cert_path, config.tls_key_path)
    raise ValueError(f"Unknown tls_cert_mode: {config.tls_cert_mode}")
|
||||
|
||||
|
||||
def deploy_chatmail(config_path: Path, disable_mail: bool, website_only: bool) -> None:
    """Deploy a chat-mail instance.

    Performs pre-flight checks (DNS fallback resolver, mtail interface
    availability, port-occupancy checks), then runs all deployers in order.

    :param config_path: path to chatmail.ini
    :param disable_mail: whether to disable postfix & dovecot
    :param website_only: if True, only deploy the website
    """
    config = read_config(config_path)
    check_config(config)
    mail_domain = config.mail_domain

    if website_only:
        Deployment().perform_stages([WebsiteDeployer(config)])
        return

    # Until unbound serves port 53, make sure a public resolver is available
    # so the following deployment steps can resolve hostnames.
    if host.get_fact(Port, port=53) != "unbound":
        files.line(
            name="Add 9.9.9.9 to resolv.conf",
            path="/etc/resolv.conf",
            # Guard against resolv.conf missing a trailing newline (SolusVM bug).
            line="\nnameserver 9.9.9.9",
        )

    # Check if mtail_address interface is available (if configured)
    if config.mtail_address and config.mtail_address not in (
        "127.0.0.1",
        "::1",
        "localhost",
    ):
        ipv4_addrs = host.get_fact(hardware.Ipv4Addrs)
        all_addresses = [addr for addrs in ipv4_addrs.values() for addr in addrs]
        if config.mtail_address not in all_addresses:
            Out().red(
                f"Deploy failed: mtail_address {config.mtail_address} is not available (VPN up?).\n"
            )
            exit(1)

    # Fail early if a port we need is held by an unexpected process.
    # CHATMAIL_NOPORTCHECK=1 skips this (useful for tests/containers).
    if not os.environ.get("CHATMAIL_NOPORTCHECK"):
        port_services = [
            (["master", "smtpd"], 25),
            ("unbound", 53),
        ]
        if config.tls_cert_mode == "acme":
            port_services.append(("acmetool", 402))
        port_services += [
            (["imap-login", "dovecot"], 143),
            # acmetool previously listened on port 80,
            # so don't complain during upgrade that moved it to port 402
            # and gave the port to nginx.
            (["acmetool", "nginx"], 80),
            ("nginx", 443),
            (["master", "smtpd"], 465),
            (["master", "smtpd"], 587),
            (["imap-login", "dovecot"], 993),
            ("iroh-relay", 3340),
            ("mtail", 3903),
            ("stats", 3904),
            ("nginx", 8443),
            (["master", "smtpd"], config.postfix_reinject_port),
            (["master", "smtpd"], config.postfix_reinject_port_incoming),
            ("filtermail", config.filtermail_smtp_port),
            ("filtermail", config.filtermail_smtp_port_incoming),
        ]
        for service, port in port_services:
            print(f"Checking if port {port} is available for {service}...")
            running_service = host.get_fact(Port, port=port)
            services = [service] if isinstance(service, str) else service
            if running_service:
                if running_service not in services:
                    Out().red(
                        f"Deploy failed: port {port} is occupied by: {running_service}"
                    )
                    exit(1)

    tls_deployer = get_tls_deployer(config, mail_domain)

    # Order matters: later deployers may depend on artifacts of earlier ones.
    all_deployers = [
        ChatmailDeployer(config),
        LegacyRemoveDeployer(),
        FiltermailDeployer(),
        JournaldDeployer(),
        UnboundDeployer(config),
        TurnDeployer(mail_domain),
        IrohDeployer(config.enable_iroh_relay),
        tls_deployer,
        WebsiteDeployer(config),
        ChatmailVenvDeployer(config),
        MtastsDeployer(),
        OpendkimDeployer(mail_domain),
        # Dovecot should be started before Postfix
        # because it creates authentication socket
        # required by Postfix.
        DovecotDeployer(config, disable_mail),
        PostfixDeployer(config, disable_mail),
        FcgiwrapDeployer(),
        NginxDeployer(config),
        MtailDeployer(config.mtail_address),
        GithashDeployer(),
    ]

    Deployment().perform_stages(all_deployers)
|
||||
@@ -1,25 +1,40 @@
|
||||
import datetime
|
||||
import importlib
|
||||
|
||||
from jinja2 import Template
|
||||
|
||||
from . import remote
|
||||
|
||||
|
||||
def parse_zone_records(text):
    """Yield ``(name, ttl, rtype, rdata)`` tuples from BIND-format zone text.

    Blank lines and comment lines (those starting with ``;``) are ignored.
    Every remaining line must look like ``name TTL IN type rdata``; anything
    shorter raises :class:`ValueError`.  The owner name loses its trailing
    dot and the record type is upper-cased.
    """
    for candidate in (entry.strip() for entry in text.splitlines()):
        if not candidate or candidate[0] == ";":
            continue
        fields = candidate.split(None, 4)
        if len(fields) != 5:
            raise ValueError(f"Bad zone record line: {candidate!r}") from None
        owner, ttl, _klass, record_type, record_data = fields
        yield owner.rstrip("."), ttl, record_type.upper(), record_data
|
||||
|
||||
|
||||
def get_initial_remote_data(sshexec, mail_domain):
    """Run the initial DNS checks on the remote host and return the result.

    Delegates to :func:`remote.rdns.perform_initial_checks` via *sshexec*.
    """
    return sshexec.logged(
        call=remote.rdns.perform_initial_checks, kwargs=dict(mail_domain=mail_domain)
    )
|
||||
|
||||
|
||||
def check_initial_remote_data(remote_data, *, print=print):
|
||||
def check_initial_remote_data(remote_data, *, strict_tls=True, print=print):
|
||||
mail_domain = remote_data["mail_domain"]
|
||||
if not remote_data["A"] and not remote_data["AAAA"]:
|
||||
print(f"Missing A and/or AAAA DNS records for {mail_domain}!")
|
||||
elif remote_data["MTA_STS"] != f"{mail_domain}.":
|
||||
elif strict_tls and remote_data["MTA_STS"] != f"{mail_domain}.":
|
||||
print("Missing MTA-STS CNAME record:")
|
||||
print(f"mta-sts.{mail_domain}. CNAME {mail_domain}.")
|
||||
elif remote_data["WWW"] != f"{mail_domain}.":
|
||||
elif strict_tls and remote_data["WWW"] != f"{mail_domain}.":
|
||||
print("Missing www CNAME record:")
|
||||
print(f"www.{mail_domain}. CNAME {mail_domain}.")
|
||||
else:
|
||||
@@ -31,13 +46,36 @@ def get_filled_zone_file(remote_data):
|
||||
if not sts_id:
|
||||
remote_data["sts_id"] = datetime.datetime.now().strftime("%Y%m%d%H%M")
|
||||
|
||||
template = importlib.resources.files(__package__).joinpath("chatmail.zone.j2")
|
||||
content = template.read_text()
|
||||
zonefile = Template(content).render(**remote_data)
|
||||
lines = [x.strip() for x in zonefile.split("\n") if x.strip()]
|
||||
d = remote_data["mail_domain"]
|
||||
lines = ["; Required DNS entries"]
|
||||
if remote_data.get("A"):
|
||||
lines.append(f"{d}. 3600 IN A {remote_data['A']}")
|
||||
if remote_data.get("AAAA"):
|
||||
lines.append(f"{d}. 3600 IN AAAA {remote_data['AAAA']}")
|
||||
lines.append(f"{d}. 3600 IN MX 10 {d}.")
|
||||
if remote_data.get("strict_tls"):
|
||||
lines.append(
|
||||
f'_mta-sts.{d}. 3600 IN TXT "v=STSv1; id={remote_data["sts_id"]}"'
|
||||
)
|
||||
lines.append(f"mta-sts.{d}. 3600 IN CNAME {d}.")
|
||||
lines.append(f"www.{d}. 3600 IN CNAME {d}.")
|
||||
lines.append(remote_data["dkim_entry"])
|
||||
lines.append("")
|
||||
zonefile = "\n".join(lines)
|
||||
return zonefile
|
||||
lines.append("; Recommended DNS entries")
|
||||
lines.append(f'{d}. 3600 IN TXT "v=spf1 a ~all"')
|
||||
lines.append(f'_dmarc.{d}. 3600 IN TXT "v=DMARC1;p=reject;adkim=s;aspf=s"')
|
||||
if remote_data.get("acme_account_url"):
|
||||
lines.append(
|
||||
f"{d}. 3600 IN CAA 0 issue"
|
||||
f' "letsencrypt.org;accounturi={remote_data["acme_account_url"]}"'
|
||||
)
|
||||
lines.append(f'_adsp._domainkey.{d}. 3600 IN TXT "dkim=discardable"')
|
||||
lines.append(f"_submission._tcp.{d}. 3600 IN SRV 0 1 587 {d}.")
|
||||
lines.append(f"_submissions._tcp.{d}. 3600 IN SRV 0 1 465 {d}.")
|
||||
lines.append(f"_imap._tcp.{d}. 3600 IN SRV 0 1 143 {d}.")
|
||||
lines.append(f"_imaps._tcp.{d}. 3600 IN SRV 0 1 993 {d}.")
|
||||
lines.append("")
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def check_full_zone(sshexec, remote_data, out, zonefile) -> int:
|
||||
@@ -46,25 +84,26 @@ def check_full_zone(sshexec, remote_data, out, zonefile) -> int:
|
||||
|
||||
required_diff, recommended_diff = sshexec.logged(
|
||||
remote.rdns.check_zonefile,
|
||||
kwargs=dict(zonefile=zonefile, mail_domain=remote_data["mail_domain"]),
|
||||
kwargs=dict(zonefile=zonefile, verbose=False),
|
||||
)
|
||||
|
||||
returncode = 0
|
||||
if required_diff:
|
||||
out.red("Please set required DNS entries at your DNS provider:\n")
|
||||
for line in required_diff:
|
||||
out(line)
|
||||
out("")
|
||||
out.print(line)
|
||||
out.print()
|
||||
returncode = 1
|
||||
if remote_data.get("dkim_entry") in required_diff:
|
||||
out(
|
||||
"If the DKIM entry above does not work with your DNS provider, you can try this one:\n"
|
||||
out.print(
|
||||
"If the DKIM entry above does not work with your DNS provider,"
|
||||
" you can try this one:\n"
|
||||
)
|
||||
out(remote_data.get("web_dkim_entry") + "\n")
|
||||
out.print(remote_data.get("web_dkim_entry") + "\n")
|
||||
if recommended_diff:
|
||||
out("WARNING: these recommended DNS entries are not set:\n")
|
||||
out.print("WARNING: these recommended DNS entries are not set:\n")
|
||||
for line in recommended_diff:
|
||||
out(line)
|
||||
out.print(line)
|
||||
|
||||
if not (recommended_diff or required_diff):
|
||||
out.green("Great! All your DNS entries are verified and correct.")
|
||||
|
||||
@@ -4,7 +4,7 @@ iterate_prefix = userdb/
|
||||
|
||||
default_pass_scheme = plain
|
||||
# %E escapes characters " (double quote), ' (single quote) and \ (backslash) with \ (backslash).
|
||||
# See <https://doc.dovecot.org/configuration_manual/config_file/config_variables/#modifiers>
|
||||
# See <https://doc.dovecot.org/2.3/configuration_manual/config_file/config_variables/#modifiers>
|
||||
# for documentation.
|
||||
#
|
||||
# We escape user-provided input and use double quote as a separator.
|
||||
|
||||
180
cmdeploy/src/cmdeploy/dovecot/deployer.py
Normal file
180
cmdeploy/src/cmdeploy/dovecot/deployer.py
Normal file
@@ -0,0 +1,180 @@
|
||||
import urllib.request
|
||||
|
||||
from chatmaild.config import Config
|
||||
from pyinfra import host
|
||||
from pyinfra.facts.server import Arch, Command, Sysctl
|
||||
from pyinfra.facts.systemd import SystemdEnabled
|
||||
from pyinfra.operations import apt, files, server, systemd
|
||||
|
||||
from cmdeploy.basedeploy import (
|
||||
Deployer,
|
||||
activate_remote_units,
|
||||
blocked_service_startup,
|
||||
configure_remote_units,
|
||||
get_resource,
|
||||
has_systemd,
|
||||
)
|
||||
|
||||
|
||||
class DovecotDeployer(Deployer):
    """Install, configure and (de)activate the Dovecot IMAP/LMTP server."""

    # Whether systemd must reload unit files before acting on the service.
    daemon_reload = False

    def __init__(self, config, disable_mail):
        self.config = config
        self.disable_mail = disable_mail
        self.units = ["doveauth"]

    def install(self):
        arch = host.get_fact(Arch)
        if has_systemd() and "dovecot.service" in host.get_fact(SystemdEnabled):
            return  # already installed and running

        # Block service startup so dovecot does not start half-configured
        # between the three package installs.
        with blocked_service_startup():
            _install_dovecot_package("core", arch)
            _install_dovecot_package("imapd", arch)
            _install_dovecot_package("lmtpd", arch)

    def configure(self):
        configure_remote_units(self.config.mail_domain, self.units)
        self.need_restart, self.daemon_reload = _configure_dovecot(self.config)

    def activate(self):
        activate_remote_units(self.units)

        # Never restart when mail is disabled; otherwise restart on change.
        restart = False if self.disable_mail else self.need_restart

        systemd.service(
            name="Disable dovecot for now"
            if self.disable_mail
            else "Start and enable Dovecot",
            service="dovecot.service",
            running=False if self.disable_mail else True,
            enabled=False if self.disable_mail else True,
            restarted=restart,
            daemon_reload=self.daemon_reload,
        )
        self.need_restart = False
|
||||
|
||||
|
||||
def _pick_url(primary, fallback):
    """Return *primary* if it answers a HEAD request within 10s, else *fallback*.

    Best-effort mirror selection: any failure (DNS error, timeout, HTTP
    error, bad URL) deliberately falls back to the secondary location
    rather than aborting the deploy.
    """
    try:
        req = urllib.request.Request(primary, method="HEAD")
        urllib.request.urlopen(req, timeout=10)
        return primary
    except Exception:
        return fallback
|
||||
|
||||
|
||||
def _install_dovecot_package(package: str, arch: str):
    """Install a pinned dovecot .deb (2.3.21) for amd64/arm64, or fall back to apt.

    :param package: dovecot component name ("core", "imapd" or "lmtpd").
    :param arch: host architecture as reported by pyinfra's Arch fact;
        "x86_64"/"aarch64" are mapped to Debian's "amd64"/"arm64".
    """
    arch = "amd64" if arch == "x86_64" else arch
    arch = "arm64" if arch == "aarch64" else arch
    primary_url = f"https://download.delta.chat/dovecot/dovecot-{package}_2.3.21%2Bdfsg1-3_{arch}.deb"
    fallback_url = f"https://github.com/chatmail/dovecot/releases/download/upstream%2F2.3.21%2Bdfsg1/dovecot-{package}_2.3.21%2Bdfsg1-3_{arch}.deb"
    url = _pick_url(primary_url, fallback_url)
    deb_filename = "/root/" + url.split("/")[-1]

    # Known checksums per (component, arch); any other combination installs
    # the distro package instead of the pinned build.
    match (package, arch):
        case ("core", "amd64"):
            sha256 = "dd060706f52a306fa863d874717210b9fe10536c824afe1790eec247ded5b27d"
        case ("core", "arm64"):
            sha256 = "e7548e8a82929722e973629ecc40fcfa886894cef3db88f23535149e7f730dc9"
        case ("imapd", "amd64"):
            sha256 = "8d8dc6fc00bbb6cdb25d345844f41ce2f1c53f764b79a838eb2a03103eebfa86"
        case ("imapd", "arm64"):
            sha256 = "178fa877ddd5df9930e8308b518f4b07df10e759050725f8217a0c1fb3fd707f"
        case ("lmtpd", "amd64"):
            sha256 = "2f69ba5e35363de50962d42cccbfe4ed8495265044e244007d7ccddad77513ab"
        case ("lmtpd", "arm64"):
            sha256 = "89f52fb36524f5877a177dff4a713ba771fd3f91f22ed0af7238d495e143b38f"
        case _:
            apt.packages(packages=[f"dovecot-{package}"])
            return

    files.download(
        name=f"Download dovecot-{package}",
        src=url,
        dest=deb_filename,
        sha256sum=sha256,
        cache_time=60 * 60 * 24 * 365 * 10,  # never redownload the package
    )

    apt.deb(name=f"Install dovecot-{package}", src=deb_filename)
|
||||
|
||||
|
||||
def _configure_dovecot(config: Config, debug: bool = False) -> tuple[bool, bool]:
    """Configure the Dovecot IMAP/LMTP server.

    Deploys the templated main configuration, the auth configuration and
    the Lua push-notification script, raises kernel inotify limits where
    possible, pins the TZ environment variable, and installs a systemd
    drop-in that restarts dovecot on failure.

    Note: the return annotation was fixed from ``(bool, bool)`` (a tuple
    object, not a type) to ``tuple[bool, bool]``.

    :param config: chatmail deployment configuration object.
    :param debug: render the configuration with debug settings enabled.
    :returns: ``(need_restart, daemon_reload)`` flags for the caller.
    """
    need_restart = False
    daemon_reload = False

    main_config = files.template(
        src=get_resource("dovecot/dovecot.conf.j2"),
        dest="/etc/dovecot/dovecot.conf",
        user="root",
        group="root",
        mode="644",
        config=config,
        debug=debug,
        disable_ipv6=config.disable_ipv6,
    )
    need_restart |= main_config.changed

    auth_config = files.put(
        src=get_resource("dovecot/auth.conf"),
        dest="/etc/dovecot/auth.conf",
        user="root",
        group="root",
        mode="644",
    )
    need_restart |= auth_config.changed

    lua_push_notification_script = files.put(
        src=get_resource("dovecot/push_notification.lua"),
        dest="/etc/dovecot/push_notification.lua",
        user="root",
        group="root",
        mode="644",
    )
    need_restart |= lua_push_notification_script.changed

    # as per https://doc.dovecot.org/2.3/configuration_manual/os/
    # it is recommended to set the following inotify limits
    can_modify = host.get_fact(Command, "systemd-detect-virt -c || true") == "none"
    for name in ("max_user_instances", "max_user_watches"):
        key = f"fs.inotify.{name}"
        value = host.get_fact(Sysctl)[key]
        if value > 65534:
            # Already large enough; leave it alone.
            continue
        if not can_modify:
            # Shared-kernel containers (LXC etc.) cannot change sysctls;
            # warn the operator instead of failing the deploy.
            print(
                "\n!!!! refusing to attempt sysctl setting in shared-kernel containers\n"
                f"!!!! dovecot: sysctl {key!r}={value}, should be >65534 for production setups\n"
                "!!!!"
            )
            continue
        server.sysctl(
            name=f"Change {key}",
            key=key,
            value=65535,
            persist=True,
        )

    timezone_env = files.line(
        name="Set TZ environment variable",
        path="/etc/environment",
        line="TZ=:/etc/localtime",
    )
    need_restart |= timezone_env.changed

    restart_conf = files.put(
        name="dovecot: restart automatically on failure",
        src=get_resource("service/10_restart.conf"),
        dest="/etc/systemd/system/dovecot.service.d/10_restart.conf",
    )
    daemon_reload |= restart_conf.changed

    # Validate dovecot configuration before restart
    if need_restart:
        server.shell(
            name="Validate dovecot configuration",
            commands=["doveconf -n >/dev/null"],
        )

    return need_restart, daemon_reload
|
||||
@@ -1,7 +1,7 @@
|
||||
## Dovecot configuration file
|
||||
|
||||
{% if disable_ipv6 %}
|
||||
listen = *
|
||||
listen = 0.0.0.0
|
||||
{% endif %}
|
||||
|
||||
protocols = imap lmtp
|
||||
@@ -26,7 +26,7 @@ default_client_limit = 20000
|
||||
# Increase number of logged in IMAP connections.
|
||||
# Each connection is handled by a separate `imap` process.
|
||||
# `imap` process should have `client_limit=1` as described in
|
||||
# <https://doc.dovecot.org/configuration_manual/service_configuration/#service-limits>
|
||||
# <https://doc.dovecot.org/2.3/configuration_manual/service_configuration/#service-limits>
|
||||
# so each logged in IMAP session will need its own `imap` process.
|
||||
#
|
||||
# If this limit is reached,
|
||||
@@ -44,11 +44,11 @@ mail_server_comment = Chatmail server
|
||||
|
||||
# `zlib` enables compressing messages stored in the maildir.
|
||||
# See
|
||||
# <https://doc.dovecot.org/configuration_manual/zlib_plugin/>
|
||||
# <https://doc.dovecot.org/2.3/configuration_manual/zlib_plugin/>
|
||||
# for documentation.
|
||||
#
|
||||
# quota plugin documentation:
|
||||
# <https://doc.dovecot.org/configuration_manual/quota_plugin/>
|
||||
# <https://doc.dovecot.org/2.3/configuration_manual/quota_plugin/>
|
||||
mail_plugins = zlib quota
|
||||
|
||||
imap_capability = +XDELTAPUSH XCHATMAIL
|
||||
@@ -70,6 +70,12 @@ userdb {
|
||||
# Mailboxes are stored in the "mail" directory of the vmail user home.
|
||||
mail_location = maildir:{{ config.mailboxes_dir }}/%u
|
||||
|
||||
# index/cache files are not very useful for chatmail relay operations
|
||||
# but it's not clear how to disable them completely.
|
||||
# According to https://doc.dovecot.org/2.3/settings/advanced/#core_setting-mail_cache_max_size
|
||||
# if the cache file becomes larger than the specified size, it is truncated by dovecot
|
||||
mail_cache_max_size = 500K
|
||||
|
||||
namespace inbox {
|
||||
inbox = yes
|
||||
|
||||
@@ -107,7 +113,7 @@ mail_attribute_dict = proxy:/run/chatmail-metadata/metadata.socket:metadata
|
||||
# `imap_zlib` enables IMAP COMPRESS (RFC 4978).
|
||||
# <https://datatracker.ietf.org/doc/html/rfc4978.html>
|
||||
protocol imap {
|
||||
mail_plugins = $mail_plugins imap_zlib imap_quota last_login
|
||||
mail_plugins = $mail_plugins imap_quota last_login {% if config.imap_compress %}imap_zlib{% endif %}
|
||||
imap_metadata = yes
|
||||
}
|
||||
|
||||
@@ -119,13 +125,13 @@ plugin {
|
||||
|
||||
protocol lmtp {
|
||||
# notify plugin is a dependency of push_notification plugin:
|
||||
# <https://doc.dovecot.org/settings/plugin/notify-plugin/>
|
||||
# <https://doc.dovecot.org/2.3/settings/plugin/notify-plugin/>
|
||||
#
|
||||
# push_notification plugin documentation:
|
||||
# <https://doc.dovecot.org/configuration_manual/push_notification/>
|
||||
# <https://doc.dovecot.org/2.3/configuration_manual/push_notification/>
|
||||
#
|
||||
# mail_lua and push_notification_lua are needed for Lua push notification handler.
|
||||
# <https://doc.dovecot.org/configuration_manual/push_notification/#configuration>
|
||||
# <https://doc.dovecot.org/2.3/configuration_manual/push_notification/#configuration>
|
||||
mail_plugins = $mail_plugins mail_lua notify push_notification push_notification_lua
|
||||
}
|
||||
|
||||
@@ -148,7 +154,7 @@ plugin {
|
||||
|
||||
# push_notification configuration
|
||||
plugin {
|
||||
# <https://doc.dovecot.org/configuration_manual/push_notification/#lua-lua>
|
||||
# <https://doc.dovecot.org/2.3/configuration_manual/push_notification/#lua-lua>
|
||||
push_notification_driver = lua:file=/etc/dovecot/push_notification.lua
|
||||
}
|
||||
|
||||
@@ -162,6 +168,8 @@ service lmtp {
|
||||
}
|
||||
}
|
||||
|
||||
lmtp_add_received_header = no
|
||||
|
||||
service auth {
|
||||
unix_listener /var/spool/postfix/private/auth {
|
||||
mode = 0660
|
||||
@@ -177,20 +185,34 @@ service auth-worker {
|
||||
}
|
||||
|
||||
service imap-login {
|
||||
# High-security mode.
|
||||
# Each process serves a single connection and exits afterwards.
|
||||
# This is the default, but we set it explicitly to be sure.
|
||||
# See <https://doc.dovecot.org/admin_manual/login_processes/#high-security-mode> for details.
|
||||
service_count = 1
|
||||
|
||||
# Increase the number of simultaneous connections.
|
||||
# High-performance mode as described in
|
||||
# <https://doc.dovecot.org/2.3/admin_manual/login_processes/#high-performance-mode>
|
||||
#
|
||||
# As of Dovecot 2.3.19.1 the default is 100 processes.
|
||||
# Combined with `service_count = 1` it means only 100 connections
|
||||
# can be handled simultaneously.
|
||||
process_limit = 10000
|
||||
# So-called high-security mode described in
|
||||
# <https://doc.dovecot.org/2.3/admin_manual/login_processes/#high-security-mode>
|
||||
# and enabled by default with `service_count = 1` starts one process per connection
|
||||
# and has problems logging in thousands of users after Dovecot restart.
|
||||
service_count = 0
|
||||
|
||||
# Increase virtual memory size limit.
|
||||
# Since imap-login processes handle TLS connections
|
||||
# even after logging users in
|
||||
# and many connections are handled by each process,
|
||||
# memory size limit should be increased.
|
||||
#
|
||||
# Otherwise the whole process eventually dies
|
||||
# with an error similar to
|
||||
# imap-login: Fatal: master: service(imap-login):
|
||||
# child 1422951 returned error 83
|
||||
# (Out of memory (service imap-login { vsz_limit=256 MB },
|
||||
# you may need to increase it)
|
||||
# and takes down all its TLS connections at once.
|
||||
vsz_limit = 1G
|
||||
|
||||
# Avoid startup latency for new connections.
|
||||
#
|
||||
# Should be set to at least the number of CPU cores
|
||||
# according to the documentation.
|
||||
process_min_avail = 10
|
||||
}
|
||||
|
||||
@@ -206,10 +228,10 @@ service anvil {
|
||||
}
|
||||
|
||||
ssl = required
|
||||
ssl_cert = </var/lib/acme/live/{{ config.mail_domain }}/fullchain
|
||||
ssl_key = </var/lib/acme/live/{{ config.mail_domain }}/privkey
|
||||
ssl_cert = <{{ config.tls_cert_path }}
|
||||
ssl_key = <{{ config.tls_key_path }}
|
||||
ssl_dh = </usr/share/dovecot/dh.pem
|
||||
ssl_min_protocol = TLSv1.2
|
||||
ssl_min_protocol = TLSv1.3
|
||||
ssl_prefer_server_ciphers = yes
|
||||
|
||||
|
||||
@@ -232,3 +254,181 @@ protocol imap {
|
||||
rawlog_dir = %h
|
||||
}
|
||||
{% endif %}
|
||||
|
||||
{% if not config.imap_compress %}
|
||||
# Hibernate IDLE users to save memory and CPU resources
|
||||
# NOTE: this will have no effect if imap_zlib plugin is used
|
||||
imap_hibernate_timeout = 30s
|
||||
service imap {
|
||||
# Note that this change will allow any process running as
|
||||
# $default_internal_user (dovecot) to access mails as any other user.
|
||||
# This may be insecure in some installations, which is why this isn't
|
||||
# done by default.
|
||||
unix_listener imap-master {
|
||||
user = $default_internal_user
|
||||
}
|
||||
}
|
||||
# The following is the default already in v2.3.1+:
|
||||
service imap {
|
||||
extra_groups = $default_internal_group
|
||||
}
|
||||
service imap-hibernate {
|
||||
unix_listener imap-hibernate {
|
||||
mode = 0660
|
||||
group = $default_internal_group
|
||||
}
|
||||
}
|
||||
{% endif %}
|
||||
|
||||
{% if config.mtail_address %}
|
||||
#
|
||||
# Dovecot Statistics
|
||||
#
|
||||
# OpenMetrics endpoint at http://{{- config.mtail_address}}:3904/metrics
|
||||
service stats {
|
||||
inet_listener http {
|
||||
port = 3904
|
||||
address = {{- config.mtail_address}}
|
||||
}
|
||||
}
|
||||
|
||||
# IMAP Command Metrics
|
||||
# - Bytes in/out for compression efficiency analysis
|
||||
# - Lock wait time for contention debugging
|
||||
# - Grouped by command name and reply state
|
||||
metric imap_command {
|
||||
filter = event=imap_command_finished
|
||||
fields = bytes_in bytes_out lock_wait_usecs running_usecs
|
||||
group_by = cmd_name tagged_reply_state
|
||||
}
|
||||
|
||||
# Duration buckets for latency histograms (base 10: 10us, 100us, 1ms, 10ms, 100ms, 1s, 10s, 100s)
|
||||
metric imap_command_duration {
|
||||
filter = event=imap_command_finished
|
||||
group_by = cmd_name duration:exponential:1:8:10
|
||||
}
|
||||
|
||||
# Slow command outliers (>1 second = 1000000 usecs)
|
||||
# Useful for alerting without high cardinality
|
||||
metric imap_command_slow {
|
||||
filter = event=imap_command_finished AND duration>1000000 AND NOT cmd_name=IDLE
|
||||
group_by = cmd_name
|
||||
}
|
||||
|
||||
# IDLE-specific Metrics
|
||||
|
||||
metric imap_idle {
|
||||
filter = event=imap_command_finished AND cmd_name=IDLE
|
||||
fields = bytes_in bytes_out running_usecs
|
||||
group_by = tagged_reply_state
|
||||
}
|
||||
|
||||
metric imap_idle_duration {
|
||||
filter = event=imap_command_finished AND cmd_name=IDLE
|
||||
# Base 10: 100ms to 27h (covers short wakeups to long idle sessions)
|
||||
group_by = duration:exponential:5:11:10
|
||||
}
|
||||
|
||||
metric imap_idle_commands {
|
||||
filter = event=imap_command_finished AND cmd_name=IDLE
|
||||
group_by = tagged_reply_state
|
||||
}
|
||||
|
||||
metric imap_idle_failed {
|
||||
filter = event=imap_command_finished AND cmd_name=IDLE AND NOT tagged_reply_state=OK
|
||||
}
|
||||
|
||||
# Hibernation Metrics (requires imap_hibernate_timeout)
|
||||
|
||||
metric imap_hibernated {
|
||||
filter = event=imap_client_hibernated
|
||||
}
|
||||
|
||||
metric imap_hibernated_failed {
|
||||
filter = event=imap_client_hibernated AND error=*
|
||||
}
|
||||
|
||||
metric imap_unhibernated {
|
||||
filter = event=imap_client_unhibernated
|
||||
fields = hibernation_usecs
|
||||
}
|
||||
|
||||
metric imap_unhibernated_reason {
|
||||
filter = event=imap_client_unhibernated
|
||||
group_by = reason
|
||||
fields = hibernation_usecs
|
||||
}
|
||||
|
||||
metric imap_unhibernated_reason_sleep {
|
||||
filter = event=imap_client_unhibernated
|
||||
group_by = reason hibernation_usecs:exponential:4:8:10
|
||||
}
|
||||
|
||||
metric imap_unhibernated_failed {
|
||||
filter = event=imap_client_unhibernated AND error=*
|
||||
}
|
||||
|
||||
# Hibernation duration buckets (how long clients stayed hibernated)
|
||||
# Base 10: 100ms to 27h
|
||||
metric imap_hibernation_duration {
|
||||
filter = event=imap_client_unhibernated
|
||||
group_by = reason duration:exponential:5:11:10
|
||||
}
|
||||
|
||||
# Authentication / Login Metrics
|
||||
|
||||
metric auth_request {
|
||||
filter = event=auth_request_finished
|
||||
group_by = success
|
||||
}
|
||||
|
||||
metric auth_request_duration {
|
||||
filter = event=auth_request_finished
|
||||
group_by = success duration:exponential:2:6:10
|
||||
}
|
||||
|
||||
metric auth_failed {
|
||||
filter = event=auth_request_finished AND success=no
|
||||
}
|
||||
|
||||
# Passdb cache effectiveness
|
||||
metric auth_passdb {
|
||||
filter = event=auth_passdb_request_finished
|
||||
group_by = result cache
|
||||
}
|
||||
|
||||
# Master login (post-auth userdb lookup)
|
||||
metric auth_master_login {
|
||||
filter = event=auth_master_client_login_finished
|
||||
}
|
||||
|
||||
metric auth_master_login_failed {
|
||||
filter = event=auth_master_client_login_finished AND error=*
|
||||
}
|
||||
|
||||
# Mail Delivery (LMTP) - affects IDLE wakeup latency
|
||||
|
||||
metric mail_delivery {
|
||||
filter = event=mail_delivery_finished
|
||||
}
|
||||
|
||||
metric mail_delivery_duration {
|
||||
filter = event=mail_delivery_finished
|
||||
group_by = duration:exponential:3:7:10
|
||||
}
|
||||
|
||||
metric mail_delivery_failed {
|
||||
filter = event=mail_delivery_finished AND error=*
|
||||
}
|
||||
|
||||
# Connection Events
|
||||
|
||||
metric client_connected {
|
||||
filter = event=client_connection_connected AND category="service:imap"
|
||||
}
|
||||
|
||||
metric client_disconnected {
|
||||
filter = event=client_connection_disconnected AND category="service:imap"
|
||||
fields = bytes_in bytes_out
|
||||
}
|
||||
{% endif %}
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
# delete all mails after {{ config.delete_mails_after }} days, in the Inbox
|
||||
2 0 * * * vmail find {{ config.mailboxes_dir }} -path '*/cur/*' -mtime +{{ config.delete_mails_after }} -type f -delete
|
||||
# or in any IMAP subfolder
|
||||
2 0 * * * vmail find {{ config.mailboxes_dir }} -path '*/.*/cur/*' -mtime +{{ config.delete_mails_after }} -type f -delete
|
||||
# even if they are unseen
|
||||
2 0 * * * vmail find {{ config.mailboxes_dir }} -path '*/new/*' -mtime +{{ config.delete_mails_after }} -type f -delete
|
||||
2 0 * * * vmail find {{ config.mailboxes_dir }} -path '*/.*/new/*' -mtime +{{ config.delete_mails_after }} -type f -delete
|
||||
# or only temporary (but then they shouldn't be around after {{ config.delete_mails_after }} days anyway).
|
||||
2 0 * * * vmail find {{ config.mailboxes_dir }} -path '*/tmp/*' -mtime +{{ config.delete_mails_after }} -type f -delete
|
||||
2 0 * * * vmail find {{ config.mailboxes_dir }} -path '*/.*/tmp/*' -mtime +{{ config.delete_mails_after }} -type f -delete
|
||||
3 0 * * * vmail find {{ config.mailboxes_dir }} -name 'maildirsize' -type f -delete
|
||||
4 0 * * * vmail /usr/local/lib/chatmaild/venv/bin/delete_inactive_users /usr/local/lib/chatmaild/chatmail.ini
|
||||
@@ -2,15 +2,6 @@ function dovecot_lua_notify_begin_txn(user)
|
||||
return user
|
||||
end
|
||||
|
||||
function contains(v, needle)
|
||||
for _, keyword in ipairs(v) do
|
||||
if keyword == needle then
|
||||
return true
|
||||
end
|
||||
end
|
||||
return false
|
||||
end
|
||||
|
||||
function dovecot_lua_notify_event_message_new(user, event)
|
||||
local mbox = user:mailbox(event.mailbox)
|
||||
mbox:sync()
|
||||
|
||||
67
cmdeploy/src/cmdeploy/external/deployer.py
vendored
Normal file
67
cmdeploy/src/cmdeploy/external/deployer.py
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
import io
|
||||
|
||||
from pyinfra import host
|
||||
from pyinfra.facts.files import File
|
||||
from pyinfra.operations import files, systemd
|
||||
|
||||
from cmdeploy.basedeploy import Deployer, get_resource
|
||||
|
||||
|
||||
class ExternalTlsDeployer(Deployer):
    """Expects TLS certificates to be managed on the server.

    Validates that the configured certificate and key files
    exist on the remote host. Installs a systemd path unit
    that watches the certificate file and automatically
    restarts/reloads affected services when it changes.
    """

    def __init__(self, cert_path, key_path):
        # Remote paths of the externally managed certificate and key.
        self.cert_path = cert_path
        self.key_path = key_path
        # Fix: initialize the flag here so activate() never reads an
        # unset attribute when configure() found nothing changed.
        # This matches the pattern used by FiltermailDeployer.
        self.need_restart = False

    def configure(self):
        """Validate cert/key presence and install the watcher units.

        Raises if either TLS file is missing on the remote host.
        """
        # Verify cert and key exist on the remote host using pyinfra facts.
        for path in (self.cert_path, self.key_path):
            info = host.get_fact(File, path=path)
            if info is None:
                raise Exception(f"External TLS file not found on server: {path}")

        # Deploy the .path unit (templated with the cert path).
        # pkg=__package__ is required here because the resource files
        # live in cmdeploy.external, not the default cmdeploy package.
        source = get_resource("tls-cert-reload.path.f", pkg=__package__)
        content = source.read_text().format(cert_path=self.cert_path).encode()

        path_unit = files.put(
            name="Upload tls-cert-reload.path",
            src=io.BytesIO(content),
            dest="/etc/systemd/system/tls-cert-reload.path",
            user="root",
            group="root",
            mode="644",
        )

        service_unit = files.put(
            name="Upload tls-cert-reload.service",
            src=get_resource("tls-cert-reload.service", pkg=__package__),
            dest="/etc/systemd/system/tls-cert-reload.service",
            user="root",
            group="root",
            mode="644",
        )

        if path_unit.changed or service_unit.changed:
            self.need_restart = True

    def activate(self):
        """Enable and start the path watcher, restarting it after unit changes."""
        systemd.service(
            name="Enable tls-cert-reload path watcher",
            service="tls-cert-reload.path",
            running=True,
            enabled=True,
            restarted=self.need_restart,
            daemon_reload=self.need_restart,
        )
        # No explicit reload needed here: dovecot/nginx read the cert
        # on startup, and the .path watcher handles live changes.
|
||||
15
cmdeploy/src/cmdeploy/external/tls-cert-reload.path.f
vendored
Normal file
15
cmdeploy/src/cmdeploy/external/tls-cert-reload.path.f
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
# Watch the TLS certificate file for changes.
|
||||
# When the cert is updated (e.g. renewed by an external process),
|
||||
# this triggers tls-cert-reload.service to reload the affected services.
|
||||
#
|
||||
# NOTE: changes to the certificates are not detected if they cross bind-mount boundaries.
|
||||
# After cert renewal, you must then trigger the reload explicitly:
|
||||
# systemctl start tls-cert-reload.service
|
||||
[Unit]
|
||||
Description=Watch TLS certificate for changes
|
||||
|
||||
[Path]
|
||||
PathChanged={cert_path}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
15
cmdeploy/src/cmdeploy/external/tls-cert-reload.service
vendored
Normal file
15
cmdeploy/src/cmdeploy/external/tls-cert-reload.service
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
# Reload services that cache the TLS certificate.
|
||||
#
|
||||
# dovecot: caches the cert at startup; reload re-reads SSL certs
|
||||
# without dropping existing connections.
|
||||
# nginx: caches the cert at startup; reload gracefully picks up
|
||||
# the new cert for new connections.
|
||||
# postfix: reads the cert fresh on each TLS handshake,
|
||||
# does NOT need a reload/restart.
|
||||
[Unit]
|
||||
Description=Reload TLS services after certificate change
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=/bin/systemctl try-reload-or-restart dovecot
|
||||
ExecStart=/bin/systemctl try-reload-or-restart nginx
|
||||
52
cmdeploy/src/cmdeploy/filtermail/deployer.py
Normal file
52
cmdeploy/src/cmdeploy/filtermail/deployer.py
Normal file
@@ -0,0 +1,52 @@
|
||||
from pyinfra import facts, host
|
||||
from pyinfra.operations import files, systemd
|
||||
|
||||
from cmdeploy.basedeploy import Deployer, get_resource
|
||||
|
||||
|
||||
class FiltermailDeployer(Deployer):
    """Installs and manages the filtermail before-queue mail filters."""

    services = ["filtermail", "filtermail-incoming"]
    bin_path = "/usr/local/bin/filtermail"
    config_path = "/usr/local/lib/chatmaild/chatmail.ini"

    def __init__(self):
        # Set whenever an installed or configured artifact changed.
        self.need_restart = False

    def install(self):
        """Download the pinned filtermail release binary for this host."""
        checksums = {
            "x86_64": "ce24ca0075aa445510291d775fb3aea8f4411818c7b885ae51a0fe18c5f789ce",
            "aarch64": "c5d783eefa5332db3d97a0e6a23917d72849e3eb45da3d16ce908a9b4e5a797d",
        }
        machine = host.get_fact(facts.server.Arch)
        release_url = (
            "https://github.com/chatmail/filtermail/releases/download/"
            f"v0.5.2/filtermail-{machine}"
        )
        download = files.download(
            name="Download filtermail",
            src=release_url,
            sha256sum=checksums[machine],
            dest=self.bin_path,
            mode="755",
        )
        if download.changed:
            self.need_restart = True

    def configure(self):
        """Render one systemd unit file per filtermail service."""
        for svc in self.services:
            unit = files.template(
                src=get_resource(f"filtermail/{svc}.service.j2"),
                dest=f"/etc/systemd/system/{svc}.service",
                user="root",
                group="root",
                mode="644",
                bin_path=self.bin_path,
                config_path=self.config_path,
            )
            if unit.changed:
                self.need_restart = True

    def activate(self):
        """Enable and start both services, restarting them if anything changed."""
        restart = self.need_restart
        for svc in self.services:
            systemd.service(
                name=f"Start and enable {svc}",
                service=f"{svc}.service",
                running=True,
                enabled=True,
                restarted=restart,
                daemon_reload=True,
            )
        self.need_restart = False
|
||||
@@ -0,0 +1,11 @@
|
||||
[Unit]
|
||||
Description=Incoming Chatmail Postfix before queue filter
|
||||
|
||||
[Service]
|
||||
ExecStart={{ bin_path }} {{ config_path }} incoming
|
||||
Restart=always
|
||||
RestartSec=30
|
||||
User=vmail
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
11
cmdeploy/src/cmdeploy/filtermail/filtermail.service.j2
Normal file
11
cmdeploy/src/cmdeploy/filtermail/filtermail.service.j2
Normal file
@@ -0,0 +1,11 @@
|
||||
[Unit]
|
||||
Description=Outgoing Chatmail Postfix before queue filter
|
||||
|
||||
[Service]
|
||||
ExecStart={{ bin_path }} {{ config_path }} outgoing
|
||||
Restart=always
|
||||
RestartSec=30
|
||||
User=vmail
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,5 +1,11 @@
|
||||
enable_relay = true
|
||||
http_bind_addr = "[::]:3340"
|
||||
enable_stun = true
|
||||
|
||||
# Disable built-in STUN server in iroh-relay 0.35
|
||||
# as we deploy our own TURN server instead.
|
||||
# STUN server is going to be removed in iroh-relay 1.0
|
||||
# and this line can be removed after upgrade.
|
||||
enable_stun = false
|
||||
|
||||
enable_metrics = false
|
||||
metrics_bind_addr = "127.0.0.1:9092"
|
||||
|
||||
475
cmdeploy/src/cmdeploy/lxc/cli.py
Normal file
475
cmdeploy/src/cmdeploy/lxc/cli.py
Normal file
@@ -0,0 +1,475 @@
|
||||
"""lxc-start/stop/status/test subcommands for testing with local containers."""
|
||||
|
||||
import os
|
||||
import time
|
||||
|
||||
from ..util import get_git_hash, get_version_string, shell
|
||||
from .incus import RELAY_IMAGE_ALIAS, Incus, RelayContainer
|
||||
|
||||
RELAY_NAMES = ("test0", "test1")
|
||||
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# lxc-start
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
|
||||
def lxc_start_cmd_options(parser):
    """Register command line options for the ``lxc-start`` subcommand."""
    _add_name_args(
        parser,
        help_text="User relay name(s) to create (default: test0).",
    )
    option_specs = [
        (
            ("--ipv4-only",),
            dict(
                dest="ipv4_only",
                action="store_true",
                help="Create an IPv4-only container.",
            ),
        ),
        (
            ("--run",),
            dict(
                action="store_true",
                help="Run 'cmdeploy run' on each container after starting it.",
            ),
        ),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
|
||||
|
||||
|
||||
def lxc_start_cmd(args, out):
    """Create/Ensure and start LXC relay and DNS containers."""
    section = out.section("Preparing container setup")
    with section:
        _lxc_start_cmd(args, out)
|
||||
|
||||
|
||||
def _lxc_start_cmd(args, out):
    """Ensure the base image, DNS container and requested relay containers.

    Worker for lxc_start_cmd: creates/starts containers, wires up DNS and
    SSH config, and optionally runs ``cmdeploy run`` + ``cmdeploy dns``
    against each relay when ``--run`` was given.
    Returns a non-zero exit code on deploy/DNS failure, ``None`` otherwise.
    """
    ix = Incus(out)
    sub = out.new_prefixed_out()
    out.green("Ensuring base image ...")
    ix.ensure_base_image()
    out.green("Ensuring DNS container (ns-localchat) ...")
    dns_ct = ix.get_dns_container()
    dns_ct.ensure()
    sub.print(f"DNS container IP: {dns_ct.ipv4}")

    # Fall back to the default relay names when none were given on the CLI.
    names = args.names if args.names else RELAY_NAMES
    relays = list(ix.get_container(n) for n in names)
    for ct in relays:
        out.green(f"Ensuring container {ct.name!r} ({ct.domain}) ...")
        ct.ensure()
        ip = ct.ipv4

        sub.print("Configuring container hostname ...")
        ct.configure_hosts(ip)

        sub.print(f"Writing {ct.ini.name} ...")
        ct.write_ini(disable_ipv6=args.ipv4_only)
        sub.print(f"Config: {ct.ini}")
        if args.ipv4_only:
            ct.disable_ipv6()
            ipv6 = None
        else:
            # Query the container's global (non-deprecated) IPv6 address.
            output = ct.bash(
                "ip -6 addr show scope global -deprecated"
                " | grep -oP '(?<=inet6 )[^/]+'",
                check=False,
            )
            ipv6 = output.strip() if output else None
        # NOTE(review): placement relative to the if/else reconstructed from a
        # mangled source — assumed to print for both branches; confirm.
        sub.print(f"{_format_addrs(ip, ipv6)}")

        sub.green(f"Container {ct.name!r} ready: {ct.domain} -> {ip}")
        out.print()

    # Reset DNS zones only for the containers we just started
    started_cnames = {ct.name for ct in relays}
    managed = ix.list_managed()
    started = [c for c in managed if c["name"] in started_cnames]

    if started:
        out.print(
            f"Resetting DNS zones for {len(started)} domain(s) (A + AAAA records) ..."
        )
        dns_ct.reset_dns_records(dns_ct.ipv4, started)

    for ct in relays:
        if ct.name in started_cnames:
            sub.print(f"Configuring DNS in {ct.name} ...")
            ct.configure_dns(dns_ct.ipv4)

    # Generate the unified SSH config
    out.green("Writing ssh-config ...")
    ssh_cfg = ix.write_ssh_config()
    sub.print(f"{ssh_cfg}")

    # Verify SSH via the generated config
    for ct in relays:
        sub.print(f"Verifying SSH to {ct.name} via ssh-config ...")
        if ct.verify_ssh(ssh_cfg):
            sub.print(f"SSH OK: ssh -F lxconfigs/ssh-config {ct.domain}")
        else:
            sub.red(f"WARNING: SSH verification failed for {ct.name}")

    # Print integration suggestions
    ssh_cfg = ix.ssh_config_path
    if not ix.check_ssh_include():
        sub.green(
            "\n(Optional) To use containers from any SSH client, add to ~/.ssh/config:"
        )
        sub.green(f" Include {ssh_cfg}")

    # Optionally run cmdeploy run + dns on each relay
    if args.run:
        for ct in relays:
            with out.section(f"cmdeploy run: {ct.sname} ({ct.domain})"):
                ret = _run_cmdeploy("run", ct, ix, out, extra=["--skip-dns-check"])
                if ret:
                    out.red(f"Deploy to {ct.sname} failed (exit {ret})")
                    return ret

        with out.section("loading DNS zones"):
            for ct in relays:
                ret = _run_cmdeploy(
                    "dns", ct, ix, out,
                    extra=["--zonefile", str(ct.zone)],
                )
                if ret:
                    out.red(f"DNS for {ct.sname} failed (exit {ret})")
                    return ret
                # Push the freshly generated zone into the DNS container.
                if ct.zone.exists():
                    dns_ct.set_dns_records(ct.zone.read_text())
                out.print(f"Restarting filtermail-incoming on {ct.name}")
                ct.bash("systemctl restart filtermail-incoming")
|
||||
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# lxc-stop
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
|
||||
def lxc_stop_cmd_options(parser):
    """Register command line options for the ``lxc-stop`` subcommand."""
    option_specs = [
        (
            ("--destroy",),
            dict(
                action="store_true",
                help="Delete containers and their config files after stopping.",
            ),
        ),
        (
            ("--destroy-all",),
            dict(
                dest="destroy_all",
                action="store_true",
                help="Like --destroy, but also remove the ns-localchat DNS container.",
            ),
        ),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
    _add_name_args(
        parser,
        help_text="Container name(s) to stop (default: test0 + test1).",
    )
|
||||
|
||||
|
||||
def lxc_stop_cmd(args, out):
    """Stop (and optionally destroy) local LXC relay containers."""
    incus = Incus(out)
    target_names = args.names or RELAY_NAMES
    # --destroy-all implies --destroy.
    wipe = args.destroy or args.destroy_all

    for container in (incus.get_container(n) for n in target_names):
        if wipe:
            out.green(f"Destroying container {container.name!r} ...")
            container.destroy()
        else:
            out.green(f"Stopping container {container.name!r} ...")
            container.stop(force=True)

    if args.destroy_all:
        # Also drop the shared DNS container and any cached images.
        dns = incus.get_dns_container()
        out.green(f"Destroying DNS container {dns.name!r} ...")
        dns.destroy()
        incus.delete_images()

    if wipe:
        # Regenerate ssh-config so stale host entries disappear.
        incus.write_ssh_config()
        out.green("LXC containers destroyed.")
    else:
        out.green("LXC containers stopped.")
|
||||
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# lxc-test
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
|
||||
def lxc_test_cmd_options(parser):
    """Register command line options for the ``lxc-test`` subcommand."""
    one_kwargs = dict(
        action="store_true",
        help="Only deploy and test against test0 (skip test1).",
    )
    parser.add_argument("--one", **one_kwargs)
|
||||
|
||||
|
||||
def lxc_test_cmd(args, out):
    """Run full LXC pipeline: start, deploy, DNS, zone files, and tests.

    All commands run directly on the host using
    ``--ssh-config lxconfigs/ssh-config`` for SSH access.
    Returns 0 on success or the first failing step's exit code.
    """
    ix = Incus(out)
    t_total = time.time()
    relay_names = list(RELAY_NAMES)
    if args.one:
        # --one: restrict the whole pipeline to the first relay.
        relay_names = relay_names[:1]

    local_hash = get_git_hash()

    # Per-relay: start, deploy, then snapshot the first relay as a
    # reusable image so the second relay launches pre-deployed.
    ipv4_only_flags = {RELAY_NAMES[0]: False, RELAY_NAMES[1]: True}

    for ct in map(ix.get_container, relay_names):
        name = ct.sname
        ipv4_only = ipv4_only_flags.get(name, False)
        # Forward our own verbosity level to child cmdeploy invocations.
        v_flag = " -" + "v" * out.verbosity if out.verbosity > 0 else ""
        start_cmd = f"cmdeploy lxc-start{v_flag} {name}"
        if ipv4_only:
            start_cmd += " --ipv4-only"
        with out.section(f"cmdeploy lxc-start: {name}"):
            ret = out.shell(start_cmd, cwd=str(ix.project_root))
            if ret:
                return ret

        status = _deploy_status(ct, local_hash, ix)
        with out.section(f"cmdeploy run: {name}"):
            if "IN-SYNC" in status:
                # Deployed version already matches local git state.
                out.print(f"{name} is {status}, skipping")
            else:
                ret = _run_cmdeploy("run", ct, ix, out, extra=["--skip-dns-check"])
                if ret:
                    out.red(f"Deploy to {name} failed (exit {ret})")
                    return ret

        # Snapshot the first relay so subsequent ones launch pre-deployed
        if not ix.find_image([RELAY_IMAGE_ALIAS]):
            with out.section("lxc-test: caching relay image"):
                ct.publish_as_relay_image()

    for ct in map(ix.get_container, relay_names):
        with out.section(f"cmdeploy dns: {ct.sname} ({ct.domain})"):
            ret = _run_cmdeploy("dns", ct, ix, out, extra=["--zonefile", str(ct.zone)])
            if ret:
                out.red(f"DNS for {ct.sname} failed (exit {ret})")
                return ret

    with out.section(f"lxc-test: loading DNS zones {' & '.join(relay_names)}"):
        dns_ct = ix.get_dns_container()
        for ct in map(ix.get_container, relay_names):
            if ct.zone.exists():
                zone_data = ct.zone.read_text()
                out.print(f"Loading {ct.zone} into PowerDNS ...")
                dns_ct.set_dns_records(zone_data)

        # Restart filtermail so its in-process DNS cache
        # does not hold stale negative DKIM responses
        # from before the zones were loaded.
        for ct in map(ix.get_container, relay_names):
            out.print(f"Restarting filtermail-incoming on {ct.name} ...")
            ct.bash("systemctl restart filtermail-incoming")

    with out.section("cmdeploy test"):
        first = ix.get_container(relay_names[0])
        env = None
        if len(relay_names) > 1:
            # Expose the second relay's domain so cross-relay tests run.
            env = os.environ.copy()
            env["CHATMAIL_DOMAIN2"] = ix.get_container(relay_names[1]).domain
        ret = _run_cmdeploy("test", first, ix, out, **({"env": env} if env else {}))
        if ret:
            out.red(f"Tests failed (exit {ret})")
            return ret

    elapsed = time.time() - t_total
    out.section_line(f"lxc-test complete ({elapsed:.1f}s)")
    if out.section_timings:
        out.print("Section timings:")
        for name, secs in out.section_timings:
            out.print(f"  {name:.<50s} {secs:5.1f}s")
        out.print(f"  {'total':.<50s} {elapsed:5.1f}s")
        out.section_timings.clear()
    return 0
|
||||
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# lxc-status
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
|
||||
def lxc_status_cmd_options(parser):
    """``cmdeploy lxc-status`` takes no extra command line options."""
    # Intentionally empty: the subcommand only inspects existing state.
|
||||
|
||||
|
||||
def lxc_status_cmd(args, out):
    """Show status of local LXC chatmail containers.

    Returns 1 when no managed containers exist, else 0.
    """
    ix = Incus(out)
    containers = ix.list_managed()
    if not containers:
        out.red("No LXC containers found. Run 'cmdeploy lxc-start' first.")
        return 1

    local_hash = get_git_hash()

    # Get storage pool path for display
    storage_path = None
    data = ix.run_json(["storage", "show", "default"], check=False)
    if data:
        storage_path = data.get("config", {}).get("source")
    msg = "Container status"
    if storage_path:
        msg += f": {storage_path}"
    out.section_line(msg)

    dns_ip = None
    for c in containers:
        _print_container_status(out, c, ix, local_hash)
        # Remember the DNS container's address for the forwarding check below.
        if c["name"] == ix.get_dns_container().name:
            dns_ip = c["ip"]

    out.section_line("Host ssh and DNS configuration")
    _print_ssh_status(out, ix)
    _print_dns_forwarding_status(out, dns_ip)
    return 0
|
||||
|
||||
|
||||
def _print_container_status(out, c, ix, local_hash):
    """Print name/status, domain/IPs, and RAM for one container.

    *c* is one dict from ``Incus.list_managed()``; *local_hash* is the
    local git hash used to compute the deploy-status tag.
    """
    cname = c["name"]
    is_running = c.get("status") == "Running"
    ct = ix.get_container(cname)

    # First line: name + running/STOPPED + deploy status
    if not is_running:
        tag = "STOPPED"
    elif not isinstance(ct, RelayContainer):
        # Non-relay (DNS) container: no deploy status to compute.
        tag = "running"
    else:
        tag = f"running {_deploy_status(ct, local_hash, ix)}"
    out.print(f"{cname:20s} {tag}")

    # Second line: domain, IPv4, IPv6
    domain = c.get("domain", "")
    ip = c.get("ip") or "?"
    ipv6 = c.get("ipv6")
    out.print(f"{domain:20s} {_format_addrs(ip, ipv6)}")

    # Third line: RAM (RSS), config
    detail_out = out.new_prefixed_out(" " * 21)
    try:
        used, total = ct.rss_mib()
    except Exception:
        # rss_mib may yield None (unpack fails) or the probe may error out.
        ram_str = "RSS ?"
    else:
        ram_str = f"RSS {used}/{total} MiB ({used * 100 // total}%)"

    if isinstance(ct, RelayContainer):
        detail = f"{ram_str}, config: {os.path.relpath(ct.ini)}"
    else:
        detail = ram_str

    detail_out.print(detail)
    out.print()
|
||||
|
||||
|
||||
def _print_ssh_status(out, ix):
    """Report whether ~/.ssh/config pulls in the generated lxc ssh-config."""
    ssh_cfg = ix.ssh_config_path
    if ix.check_ssh_include():
        out.green("SSH: ~/.ssh/config includes lxconfigs/ssh-config ✓")
        return
    out.red("SSH: ~/.ssh/config does NOT include lxconfigs/ssh-config")
    hint = out.new_prefixed_out()
    hint.print("Add to ~/.ssh/config:")
    hint.print(f"  Include {ssh_cfg}")
|
||||
|
||||
|
||||
def _print_dns_forwarding_status(out, dns_ip):
    """Report whether the host forwards .localchat queries to *dns_ip*."""
    sub = out.new_prefixed_out()
    if not dns_ip:
        out.red("DNS: ns-localchat container not found")
        return
    # Probe systemd-resolved; None means "could not determine".
    try:
        rv = shell("resolvectl status incusbr0")
        dns_ok = dns_ip in rv.stdout and "localchat" in rv.stdout
    except Exception:
        dns_ok = None
    if dns_ok is True:
        out.green(f"DNS: .localchat forwarding to {dns_ip} ✓")
    elif dns_ok is False:
        out.red("DNS: .localchat forwarding NOT configured")
        sub.print("Run:")
        sub.print(f"  sudo resolvectl dns incusbr0 {dns_ip}")
        sub.print("  sudo resolvectl domain incusbr0 ~localchat")
    else:
        sub.print("DNS: .localchat forwarding status UNKNOWN")
|
||||
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Internal helpers
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
|
||||
def _format_addrs(ip, ipv6=None):
|
||||
parts = [f"IPv4 {ip}"]
|
||||
if ipv6:
|
||||
parts.append(f"IPv6 {ipv6}")
|
||||
return ", ".join(parts)
|
||||
|
||||
|
||||
def _deploy_status(ct, local_hash, ix):
|
||||
"""Return a human-readable deploy status string.
|
||||
|
||||
Compares the full deployed version (hash + diff) against
|
||||
the local state built by :func:`~cmdeploy.util.get_version_string`.
|
||||
"""
|
||||
deployed = ct.deployed_version()
|
||||
if deployed is None:
|
||||
return "NOT DEPLOYED"
|
||||
|
||||
# A container launched from the relay image has the same
|
||||
# git hash but a different domain — always redeploy.
|
||||
deployed_domain = ct.deployed_domain()
|
||||
if deployed_domain and deployed_domain != ct.domain:
|
||||
return f"DOMAIN-MISMATCH (deployed: {deployed_domain})"
|
||||
|
||||
deployed_lines = deployed.splitlines()
|
||||
deployed_hash = deployed_lines[0] if deployed_lines else ""
|
||||
short = deployed_hash[:12]
|
||||
|
||||
if not local_hash:
|
||||
return f"UNKNOWN (deployed: {short})"
|
||||
|
||||
local_short = local_hash[:12]
|
||||
if deployed_hash != local_hash:
|
||||
return f"STALE (deployed: {short}, local: {local_short})"
|
||||
|
||||
# Hash matches — check for uncommitted diffs
|
||||
local_version = get_version_string()
|
||||
if deployed != local_version:
|
||||
return f"DIRTY ({local_short}, undeployed changes)"
|
||||
|
||||
return f"IN-SYNC ({short})"
|
||||
|
||||
|
||||
def _add_name_args(parser, help_text):
|
||||
parser.add_argument("names", nargs="*", metavar="NAME", help=help_text)
|
||||
|
||||
|
||||
def _run_cmdeploy(subcmd, ct, ix, out, extra=None, **kwargs):
    """Run ``cmdeploy <subcmd>`` with standard --config/--ssh flags.

    *ct* is a Container (uses ``ct.ini`` and ``ct.domain``).
    *extra* is an optional list of additional CLI arguments; remaining
    *kwargs* are forwarded to ``out.shell`` (e.g. ``env``).
    Returns the subprocess exit code.
    """
    extra_str = " ".join(extra) if extra else ""
    # Propagate our own verbosity to the child cmdeploy process.
    v_flag = " -" + "v" * out.verbosity if out.verbosity > 0 else ""
    cmd = f"""
        cmdeploy {subcmd}{v_flag}
        --config {ct.ini}
        --ssh-config {ix.ssh_config_path}
        --ssh-host {ct.domain}
        {extra_str}
    """
    # Default to the project root so relative paths in cmdeploy resolve.
    if "cwd" not in kwargs:
        kwargs["cwd"] = str(ix.project_root)
    return out.shell(cmd, **kwargs)
|
||||
768
cmdeploy/src/cmdeploy/lxc/incus.py
Normal file
768
cmdeploy/src/cmdeploy/lxc/incus.py
Normal file
@@ -0,0 +1,768 @@
|
||||
"""Core Incus operations for local chatmail LXC containers."""
|
||||
|
||||
import json
|
||||
import subprocess
|
||||
import textwrap
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
from ..util import shell
|
||||
|
||||
# Incus config key marking containers managed by this tooling.
LABEL_KEY = "user.localchat-managed"
# Filename of the shared SSH keypair under lxconfigs/.
SSH_KEY_NAME = "id_localchat"
# All local test domains live under this pseudo-TLD.
DOMAIN_SUFFIX = ".localchat"
# Upstream image the one-time base image is built from.
UPSTREAM_IMAGE = "images:debian/12"
# Alias of the cached base image (openssh + SSH key preinstalled).
BASE_IMAGE_ALIAS = "localchat-base"
# Throwaway container name used while building the base image.
BASE_SETUP_NAME = "localchat-base-setup"
# Alias of the cached, fully deployed relay image.
RELAY_IMAGE_ALIAS = "localchat-relay"

# Name and domain of the shared PowerDNS container.
DNS_CONTAINER_NAME = "ns-localchat"
DNS_DOMAIN = "ns.localchat"
|
||||
|
||||
|
||||
class DNSConfigurationError(Exception):
    """Raised when the DNS container is not reachable or not answering."""
|
||||
|
||||
|
||||
def _extract_ip(net_data, family="inet"):
|
||||
"""Extract the first global-scope IP of *family* from network state data.
|
||||
|
||||
*net_data* is the ``state.network`` dict from ``incus list --format=json``.
|
||||
*family* is ``"inet"`` for IPv4 or ``"inet6"`` for IPv6.
|
||||
Returns the address string, or None.
|
||||
"""
|
||||
for iface_name, iface in net_data.items():
|
||||
if iface_name == "lo":
|
||||
continue
|
||||
for addr in iface.get("addresses", []):
|
||||
if addr["family"] == family and addr["scope"] == "global":
|
||||
return addr["address"]
|
||||
return None
|
||||
|
||||
|
||||
class Incus:
|
||||
"""Gateway for all Incus container operations.
|
||||
|
||||
Instantiated once per CLI command and passed around so that
|
||||
all modules share a single entry point for Incus interactions.
|
||||
"""
|
||||
|
||||
    def __init__(self, out):
        """Bind to *out* for reporting and prepare lxconfigs/ and the SSH key."""
        self.out = out
        # Repository root: five levels up from .../cmdeploy/src/cmdeploy/lxc/incus.py.
        self.project_root = Path(__file__).resolve().parent.parent.parent.parent.parent
        self.lxconfigs_dir = self.project_root / "lxconfigs"
        self.lxconfigs_dir.mkdir(exist_ok=True)
        self.ssh_key_path = self.lxconfigs_dir / SSH_KEY_NAME
        # Generate the shared keypair once; all containers reuse it.
        if not self.ssh_key_path.exists():
            shell(
                f"ssh-keygen -t ed25519 -f {self.ssh_key_path} -N '' -C localchat",
                check=True,
            )
        self.ssh_config_path = self.lxconfigs_dir / "ssh-config"
|
||||
|
||||
def write_ssh_config(self):
|
||||
"""Write ``lxconfigs/ssh-config`` mapping all containers to their IPs.
|
||||
|
||||
Each Host block maps the container name, the domain name, and the
|
||||
short relay name (e.g. ``_test0``) to the container's IP, using the
|
||||
shared localchat SSH key. Returns the path to the file.
|
||||
"""
|
||||
containers = self.list_managed()
|
||||
key_path = self.ssh_key_path
|
||||
lines = ["# Auto-generated by cmdeploy lxc-start — do not edit\n"]
|
||||
for c in containers:
|
||||
hosts = [c["name"]]
|
||||
domain = c.get("domain", "")
|
||||
if domain and domain != c["name"]:
|
||||
hosts.append(domain)
|
||||
short = domain.split(".")[0]
|
||||
if short and short not in hosts:
|
||||
hosts.append(short)
|
||||
lines.append(f"\nHost {' '.join(hosts)}\n")
|
||||
lines.append(f" Hostname {c['ip']}\n")
|
||||
lines.append(" User root\n")
|
||||
lines.append(f" IdentityFile {key_path}\n")
|
||||
lines.append(" IdentitiesOnly yes\n")
|
||||
lines.append(" StrictHostKeyChecking accept-new\n")
|
||||
lines.append(" UserKnownHostsFile /dev/null\n")
|
||||
lines.append(" LogLevel ERROR\n")
|
||||
path = self.ssh_config_path
|
||||
path.write_text("".join(lines))
|
||||
return path
|
||||
|
||||
def check_ssh_include(self):
|
||||
"""Check if the user's ~/.ssh/config already includes our ssh-config."""
|
||||
user_ssh_config = Path.home() / ".ssh" / "config"
|
||||
if not user_ssh_config.exists():
|
||||
return False
|
||||
lines = user_ssh_config.read_text().splitlines()
|
||||
target = f"include {self.ssh_config_path}".lower()
|
||||
return any(line.strip().lower() == target for line in lines)
|
||||
|
||||
    def get_host_nameservers(self):
        """Return upstream nameservers found on the host.

        Prefers systemd-resolved's upstream list over /etc/resolv.conf and
        skips loopback resolvers, which would be unreachable from inside
        a container.
        """
        ns = []
        for path in ["/run/systemd/resolve/resolv.conf", "/etc/resolv.conf"]:
            p = Path(path)
            if p.exists():
                for line in p.read_text().splitlines():
                    if line.strip().startswith("nameserver "):
                        addr = line.split()[1]
                        if addr not in ("127.0.0.1", "127.0.0.53", "::1"):
                            # Preserve order, drop duplicates.
                            if addr not in ns:
                                ns.append(addr)
                # Stop at the first file that yielded usable entries.
                if ns:
                    break
        return ns
|
||||
|
||||
    def run(self, args, check=True, capture=True, input=None):
        """Run an incus command.

        When *capture* is True and *verbosity* >= 1, output is streamed
        to the terminal line-by-line while also being captured for
        later return via result.stdout.  With ``check=True`` a non-zero
        exit raises ``subprocess.CalledProcessError``.
        """
        cmd = ["incus", "--quiet"] + list(args)
        sub = self.out.new_prefixed_out("  ")

        if not capture:
            # Simple case: let subprocess handle streams (no capture)
            if self.out.verbosity >= 1:
                sub.print(f"$ {' '.join(cmd)}")
            return subprocess.run(
                cmd, text=True, input=input, check=check, stdout=None, stderr=None
            )

        # Capture case: we may need to stream while capturing
        if sub.verbosity >= 1:
            # Echo the command; continuation lines only at -vv.
            cmd_lines = " ".join(cmd).splitlines()
            sub.print(f"$ {cmd_lines.pop(0)}")
            if sub.verbosity >= 2:
                for line in cmd_lines:
                    sub.print(f"  {line}")

        proc = subprocess.Popen(
            cmd,
            text=True,
            stdin=subprocess.PIPE if input else subprocess.DEVNULL,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        stdout_lines = []
        if input:
            # Feed stdin up-front and close it so the child sees EOF.
            proc.stdin.write(input)
            proc.stdin.close()

        # Stream stdout line-by-line while collecting it for the caller.
        for line in proc.stdout:
            stdout_lines.append(line)
            if sub.verbosity >= 2:
                sub.print(f"  > {line.rstrip()}")

        # NOTE(review): stderr is drained only after stdout finishes; a child
        # filling the stderr pipe first could stall — confirm acceptable for
        # incus's modest output volumes.
        stderr = proc.stderr.read()
        ret = proc.wait()
        stdout = "".join(stdout_lines)
        if check and ret != 0:
            full_output = stdout + stderr
            for line in full_output.splitlines():
                if sub.verbosity < 1:  # and we haven't printed it yet
                    sub.red(line)
            raise subprocess.CalledProcessError(ret, cmd, output=stdout, stderr=stderr)

        return subprocess.CompletedProcess(cmd, ret, stdout=stdout, stderr=stderr)
|
||||
|
||||
def run_json(self, args, check=True):
|
||||
"""Run an incus command with ``--format=json``.
|
||||
|
||||
Returns the parsed JSON on success.
|
||||
When *check* is True raises ``subprocess.CalledProcessError``
|
||||
on non-zero exit; when False returns *None* instead.
|
||||
"""
|
||||
result = self.run(
|
||||
list(args) + ["--format=json"],
|
||||
check=check,
|
||||
)
|
||||
if result.returncode != 0:
|
||||
return None
|
||||
return json.loads(result.stdout)
|
||||
|
||||
def run_output(self, args, check=True):
|
||||
"""Run an incus command and return its stripped stdout.
|
||||
|
||||
When *check* is False, returns *None* on non-zero exit
|
||||
instead of raising.
|
||||
"""
|
||||
result = self.run(args, check=check)
|
||||
if result.returncode != 0:
|
||||
return None
|
||||
return result.stdout.strip()
|
||||
|
||||
def find_image(self, aliases):
|
||||
"""Return the first alias from *aliases* that exists, else None."""
|
||||
images = self.run_json(["image", "list"], check=False) or []
|
||||
existing = {a.get("name") for img in images for a in img.get("aliases", [])}
|
||||
for alias in aliases:
|
||||
if alias in existing:
|
||||
return alias
|
||||
return None
|
||||
|
||||
    def delete_images(self):
        """Delete the cached base and relay images (missing aliases are fine)."""
        for alias in (RELAY_IMAGE_ALIAS, BASE_IMAGE_ALIAS):
            self.run(["image", "delete", alias], check=False)  # ok if absent
|
||||
|
||||
    def list_managed(self):
        """Return list of dicts with name, ip, ipv6, domain, status, memory_usage.

        Only containers carrying our management label are included.
        """
        containers = []
        for ct in self.run_json(["list"]):
            config = ct.get("config", {})
            # Skip containers we did not create.
            if config.get(LABEL_KEY) != "true":
                continue
            name = ct["name"]
            state = ct.get("state", {})
            net = state.get("network") or {}
            containers.append(
                {
                    "name": name,
                    "ip": _extract_ip(net, "inet"),
                    "ipv6": _extract_ip(net, "inet6"),
                    # Fall back to the conventional domain if the label is absent.
                    "domain": config.get(
                        "user.localchat-domain", f"{name}{DOMAIN_SUFFIX}"
                    ),
                    "status": ct.get("status", "Unknown"),
                    "memory_usage": state.get("memory", {}).get("usage", 0),
                }
            )
        return containers
|
||||
|
||||
    def ensure_base_image(self):
        """Build and cache a base image with openssh and the SSH key.

        The image is published as a local incus image with alias
        'localchat-base'. Subsequent container launches use this
        image instead of the upstream Debian 12, skipping the
        slow apt-get install step.
        Returns the image alias.
        """
        # Fast path: a previous run already published the image.
        if self.find_image([BASE_IMAGE_ALIAS]):
            self.out.print(f"  Base image '{BASE_IMAGE_ALIAS}' already cached.")
            return BASE_IMAGE_ALIAS

        self.out.print("  Building base image (one-time setup) ...")

        # Clean up leftovers from an earlier aborted build.
        self.run(["delete", BASE_SETUP_NAME, "--force"], check=False)
        self.run(["image", "delete", BASE_IMAGE_ALIAS], check=False)
        self.run(["launch", UPSTREAM_IMAGE, BASE_SETUP_NAME])

        ct = Container(self, BASE_SETUP_NAME)
        ct.wait_ready()

        key_path = self.ssh_key_path
        pub_key = key_path.with_suffix(".pub").read_text().strip()
        # The container needs working upstream DNS for apt-get.
        host_ns = self.get_host_nameservers()
        ns_lines = "\n".join(f"nameserver {n}" for n in host_ns)
        ct.bash(f"""
            printf '{ns_lines}\n' > /etc/resolv.conf
            apt-get -o DPkg::Lock::Timeout=60 update
            DEBIAN_FRONTEND=noninteractive apt-get install -y openssh-server python3
            systemctl enable ssh
            apt-get clean
            mkdir -p /root/.ssh
            chmod 700 /root/.ssh
            echo '{pub_key}' > /root/.ssh/authorized_keys
            chmod 600 /root/.ssh/authorized_keys
        """)

        # Publish the prepared container as the cached image, then discard it.
        self.run(["stop", BASE_SETUP_NAME])
        self.run(["publish", BASE_SETUP_NAME, f"--alias={BASE_IMAGE_ALIAS}"])
        self.run(["delete", BASE_SETUP_NAME, "--force"])
        self.out.print(f"  Base image '{BASE_IMAGE_ALIAS}' ready.")
        return BASE_IMAGE_ALIAS
|
||||
|
||||
    def get_container(self, name):
        """Return a container handle for the given name.

        Accepts both short relay names (``test0``) and full Incus
        container names (``test0-localchat``). Returns
        ``DNSContainer`` for the DNS container and
        ``RelayContainer`` for everything else.
        """
        if name == DNS_CONTAINER_NAME:
            return DNSContainer(self)
        # Normalize full container names back to the short relay name.
        return RelayContainer(self, name.removesuffix("-localchat"))
|
||||
|
||||
    def get_dns_container(self):
        """Return a DNSContainer handle."""
        return DNSContainer(self)
|
||||
|
||||
|
||||
class Container:
|
||||
"""The base container handle wraps all interactions with incus."""
|
||||
|
||||
def __init__(self, incus, name, domain=None):
|
||||
self.incus = incus
|
||||
self.out = incus.out
|
||||
self.name = name
|
||||
self.domain = domain or f"{name}{DOMAIN_SUFFIX}"
|
||||
self.ipv4 = None
|
||||
self.ipv6 = None
|
||||
|
||||
def bash(self, script, check=True):
|
||||
"""Returns stdout from executing ``bash -ec <script>`` inside this container.
|
||||
|
||||
*script* is dedented and stripped so callers can use triple-quoted strings.
|
||||
When *check* is False, returns *None* on non-zero exit instead of raising.
|
||||
"""
|
||||
script = textwrap.dedent(script).strip()
|
||||
cmd = ["exec", self.name, "--", "bash", "-ec", script]
|
||||
return self.incus.run_output(cmd, check=check)
|
||||
|
||||
def run_cmd(self, *args, check=True):
|
||||
"""Return stdout from running a command directly in the container (no shell).
|
||||
|
||||
When *check* is False, returns *None* on non-zero exit instead of raising.
|
||||
"""
|
||||
return self.incus.run_output(
|
||||
["exec", self.name, "--", *args],
|
||||
check=check,
|
||||
)
|
||||
|
||||
def start(self):
|
||||
self.incus.run(["start", self.name])
|
||||
|
||||
def stop(self, force=False):
|
||||
cmd = ["stop", self.name]
|
||||
if force:
|
||||
cmd.append("--force")
|
||||
self.incus.run(cmd, check=False)
|
||||
|
||||
    def launch(self):
        """Launch from the best available image, return the alias used."""
        # Prefer the fully deployed relay image; fall back to the base image.
        image = self.incus.find_image([RELAY_IMAGE_ALIAS, BASE_IMAGE_ALIAS])
        if not image:
            raise RuntimeError(
                f"No base image '{BASE_IMAGE_ALIAS}' found. "
                "Call ensure_base_image() before launching containers."
            )
        self.out.print(f"  Launching from '{image}' image ...")
        # Tag the container so list_managed() finds it, and record
        # the mail domain it was created for.
        cfg = []
        cfg += ("-c", f"{LABEL_KEY}=true")
        cfg += ("-c", f"user.localchat-domain={self.domain}")
        self.incus.run(["launch", image, self.name, *cfg])
        return image
|
||||
|
||||
def ensure(self):
|
||||
"""Create/start this container from the cached base image.
|
||||
|
||||
On first call, builds the base image (~30s).
|
||||
Subsequent containers launch in ~2s from the cached image.
|
||||
Returns ``self`` for chaining.
|
||||
"""
|
||||
data = self.incus.run_json(["list", self.name], check=False) or []
|
||||
|
||||
existing = [c for c in data if c["name"] == self.name]
|
||||
if existing:
|
||||
if existing[0]["status"] != "Running":
|
||||
self.start()
|
||||
else:
|
||||
self.launch()
|
||||
self.wait_ready()
|
||||
return self
|
||||
|
||||
def destroy(self):
|
||||
"""Stop, delete, and clean up config files."""
|
||||
self.stop(force=True)
|
||||
self.incus.run(["delete", self.name, "--force"], check=False)
|
||||
|
||||
def push_file_content(self, dest_path, content):
|
||||
"""Write *content* to *dest_path* inside the container.
|
||||
|
||||
*content* is dedented and stripped so callers can use
|
||||
indented triple-quoted strings.
|
||||
"""
|
||||
content = textwrap.dedent(content).strip() + "\n"
|
||||
self.incus.run(
|
||||
["file", "push", "-", f"{self.name}{dest_path}"],
|
||||
input=content,
|
||||
)
|
||||
self.bash(f"chmod 644 {dest_path}")
|
||||
|
||||
    def wait_ready(self, timeout=60):
        """Wait until the container is running with an IPv4 address.

        Sets ``self.ipv4`` and ``self.ipv6`` (may be *None*),
        or raises ``TimeoutError``.
        """
        deadline = time.time() + timeout
        while time.time() < deadline:
            data = self.incus.run_json(
                ["list", self.name],
                check=False,
            )
            if data and data[0].get("status") == "Running":
                net = data[0].get("state", {}).get("network", {})
                self.ipv4 = _extract_ip(net, "inet")
                self.ipv6 = _extract_ip(net, "inet6")
                # IPv6 is optional; only an IPv4 address gates readiness.
                if self.ipv4:
                    return
            time.sleep(1)
        raise TimeoutError(
            f"Container {self.name!r} did not become ready within {timeout}s"
        )
|
||||
|
||||
def rss_mib(self):
|
||||
"""Return ``(used, total)`` memory from container (or None if unobtainable)."""
|
||||
output = self.bash("free -m", check=False)
|
||||
if output:
|
||||
for line in output.splitlines():
|
||||
if line.startswith("Mem:"):
|
||||
parts = line.split()
|
||||
return int(parts[2]), int(parts[1])
|
||||
|
||||
|
||||
class RelayContainer(Container):
|
||||
"""Container handle for a chatmail relay.
|
||||
|
||||
Accepts the short relay name (e.g. ``test0``) and derives
|
||||
the Incus container name and mail domain automatically.
|
||||
"""
|
||||
|
||||
    def __init__(self, incus, name):
        """Wrap short relay *name* (e.g. ``test0``) as a container handle."""
        # Full Incus name and mail domain are derived from the short name.
        super().__init__(
            incus,
            f"{name}-localchat",
            domain=f"_{name}{DOMAIN_SUFFIX}",
        )
        # Short relay name, used in output and config file names.
        self.sname = name
        # Generated chatmail config and DNS zone file under lxconfigs/.
        self.ini = incus.lxconfigs_dir / f"chatmail-{name}.ini"
        self.zone = incus.lxconfigs_dir / f"{name}.zone"
|
||||
|
||||
    def launch(self):
        """Launch (from a potentially cached image) and clear inherited chatmail-version."""
        image = super().launch()
        # The cached relay image carries its snapshot's /etc/chatmail-version;
        # remove it so deploy-status does not report a stale match.
        self.bash("rm -f /etc/chatmail-version")
        return image
|
||||
|
||||
def destroy(self):
|
||||
"""Stop, delete, and clean up config files."""
|
||||
super().destroy()
|
||||
if self.ini.exists():
|
||||
self.ini.unlink()
|
||||
|
||||
    def disable_ipv6(self):
        """Disable IPv6 inside the container via sysctl.

        Applies the setting immediately and persists it across reboots
        via /etc/sysctl.d.
        """
        # incus provides net.* virtualization for LXC containers so that
        # these sysctls only affect the container's network namespace.
        self.bash("""
            sysctl -w net.ipv6.conf.all.disable_ipv6=1
            sysctl -w net.ipv6.conf.default.disable_ipv6=1
        """)
        self.push_file_content(
            "/etc/sysctl.d/99-disable-ipv6.conf",
            """
            net.ipv6.conf.all.disable_ipv6=1
            net.ipv6.conf.default.disable_ipv6=1
            """,
        )
|
||||
|
||||
    def configure_hosts(self, ip):
        """Set hostname and /etc/hosts inside the container.

        Removes any earlier /etc/hosts entry for this domain before
        appending the fresh *ip* mapping, so repeated calls stay idempotent.
        """
        self.bash(f"""
            echo '{self.name}' > /etc/hostname
            hostname {self.name}
            sed -i '/ {self.domain}$/d' /etc/hosts
            echo '{ip} {self.name} {self.domain}' >> /etc/hosts
        """)
|
||||
|
||||
    def publish_as_relay_image(self):
        """Publish this container as a reusable relay image.

        Stops the container, 'publishes' it as 'localchat-relay', then restarts it.
        No-op when the cached image already exists.
        """
        if self.incus.find_image([RELAY_IMAGE_ALIAS]):
            return
        self.out.print(
            f"  Locally caching {self.name!r} as '{RELAY_IMAGE_ALIAS}' image ..."
        )
        # --force lets incus stop the running container for the snapshot.
        self.incus.run(
            ["publish", self.name, f"--alias={RELAY_IMAGE_ALIAS}", "--force"]
        )
        # Wait for the container to be back up with an address.
        self.wait_ready()
        self.out.print(f"  Relay image '{RELAY_IMAGE_ALIAS}' ready.")
|
||||
|
||||
    def deployed_version(self):
        """Read /etc/chatmail-version, or None if absent."""
        # check=False turns a missing file into a None return.
        return self.bash("cat /etc/chatmail-version", check=False)
|
||||
|
||||
    def deployed_domain(self):
        """Read the domain deployed on the container (postfix myhostname).

        Returns None when postfix is not installed/configured yet.
        """
        return self.bash(
            "postconf -h myhostname 2>/dev/null",
            check=False,
        )
|
||||
|
||||
    def verify_ssh(self, ssh_config):
        """Verify SSH connectivity to this container.

        Returns True when ``ssh ... hostname`` succeeds within 60 seconds.
        """
        cmd = f"ssh -F {ssh_config} -o ConnectTimeout=60 root@{self.domain} hostname"
        return shell(cmd, timeout=60).returncode == 0
|
||||
|
||||
    def configure_dns(self, dns_ip):
        """Point this container's resolver at *dns_ip* and verify DNS is reachable."""
        # Replace systemd-resolved with a static resolv.conf entry.
        self.bash(f"""
            systemctl disable --now systemd-resolved 2>/dev/null || true
            rm -f /etc/resolv.conf
            printf 'nameserver {dns_ip}\\n' >/etc/resolv.conf
            mkdir -p /etc/unbound/unbound.conf.d
        """)
        # Forward the .localchat zone to the DNS container; mark it
        # insecure so unbound does not require DNSSEC for it.
        self.push_file_content(
            "/etc/unbound/unbound.conf.d/localchat-forward.conf",
            f"""
            server:
                domain-insecure: "localchat"

            forward-zone:
                name: "localchat"
                forward-addr: {dns_ip}
            """,
        )
        # unbound may not be installed yet at this point — best effort.
        self.bash("systemctl restart unbound 2>/dev/null || true")
        self._wait_dns_reachable(dns_ip)
|
||||
|
||||
    def _wait_dns_reachable(self, dns_ip, timeout=10):
        """Poll until *dns_ip* answers a DNS query from this container.

        Raises ``DNSConfigurationError`` when no answer arrives in time.
        """
        # Install dig on demand; best effort, failures are tolerated.
        if self.bash("which dig", check=False) is None:
            self.bash(
                "DEBIAN_FRONTEND=noninteractive "
                "apt-get install -y dnsutils 2>/dev/null || true"
            )
        deadline = time.time() + timeout
        while time.time() < deadline:
            result = self.bash(
                f"dig @{dns_ip} . SOA +short +time=1 +tries=1",
                check=False,
            )
            # Any non-empty answer means the server is reachable.
            if result and result.strip():
                return
            time.sleep(0.5)
        raise DNSConfigurationError(
            f"DNS at {dns_ip} not reachable from {self.name} after {timeout}s"
        )
|
||||
|
||||
    def write_ini(self, disable_ipv6=False):
        """Generate a chatmail.ini config file in lxconfigs/.

        Returns the path to the written ini file.
        """
        # Imported lazily to keep module import light.
        from chatmaild.config import write_initial_config

        # Relaxed rate limits suit local test traffic; mtail stays local.
        overrides = {
            "max_user_send_per_minute": 600,
            "max_user_send_burst_size": 100,
            "mtail_address": "127.0.0.1",
        }
        if disable_ipv6:
            overrides["disable_ipv6"] = "True"
        write_initial_config(self.ini, self.domain, overrides)
        return self.ini
|
||||
|
||||
|
||||
class DNSContainer(Container):
|
||||
"""Container handle for the PowerDNS name server.
|
||||
|
||||
Manages the authoritative and recursive DNS services required for
|
||||
name resolution in the local testing environment.
|
||||
"""
|
||||
|
||||
    def __init__(self, incus):
        """Bind to the fixed ns-localchat container name and domain."""
        super().__init__(incus, DNS_CONTAINER_NAME, domain=DNS_DOMAIN)
|
||||
|
||||
    def pdnsutil(self, *args, check=True):
        """Run ``pdnsutil <args>`` inside the DNS container."""
        return self.run_cmd("pdnsutil", *args, check=check)
|
||||
|
||||
    def replace_rrset(self, zone, name, rtype, ttl, rdata):
        """Shortcut for ``pdnsutil replace-rrset``."""
        self.pdnsutil("replace-rrset", zone, name, rtype, ttl, rdata)
|
||||
|
||||
    def restart_services(self):
        """Restart pdns and pdns-recursor, then wait until DNS is answering."""
        # The recursor restart is best effort (|| true): it may not be
        # installed in every configuration.
        self.bash("""
            systemctl restart pdns
            systemctl restart pdns-recursor || true
        """)
        self._wait_dns_ready()
|
||||
|
||||
    def _wait_dns_ready(self, timeout=60):
        """Poll until the recursor answers a query on port 53.

        Raises ``DNSConfigurationError`` after *timeout* seconds.
        """
        deadline = time.time() + timeout
        while time.time() < deadline:
            result = self.bash(
                "dig @127.0.0.1 . SOA +short +time=1 +tries=1",
                check=False,
            )
            # Any non-empty answer means the recursor is serving.
            if result and result.strip():
                return
            time.sleep(0.5)
        raise DNSConfigurationError(f"DNS recursor not answering after {timeout}s")
|
||||
|
||||
    def ensure(self):
        """Create the DNS container with PowerDNS if needed.

        Calls ``super().ensure()`` to create/start the container
        and set up SSH, then installs PowerDNS and configures
        the Incus bridge to use this container as DNS.
        """
        super().ensure()
        self._install_powerdns()
        # Hand DNS duty on the bridge to this container: disable the
        # built-in resolver and advertise our IP via DHCP option 6.
        self.incus.run(
            ["network", "set", "incusbr0", "dns.mode=none"],
            check=False,
        )
        self.incus.run(
            ["network", "set", "incusbr0", f"raw.dnsmasq=dhcp-option=6,{self.ipv4}"],
            check=False,
        )
|
||||
|
||||
def destroy(self):
|
||||
"""Stop, delete, and reset bridge DNS config."""
|
||||
super().destroy()
|
||||
self.incus.run(["network", "unset", "incusbr0", "dns.mode"], check=False)
|
||||
self.incus.run(["network", "unset", "incusbr0", "raw.dnsmasq"], check=False)
|
||||
|
||||
def _install_powerdns(self):
|
||||
"""Install and configure PowerDNS if not already present."""
|
||||
if self.run_cmd("which", "pdns_server", check=False) is not None:
|
||||
return
|
||||
|
||||
host_ns = self.incus.get_host_nameservers()
|
||||
ns_lines = "\n".join(f"nameserver {n}" for n in host_ns)
|
||||
|
||||
self.bash(f"""
|
||||
systemctl disable --now systemd-resolved 2>/dev/null || true
|
||||
rm -f /etc/resolv.conf
|
||||
printf '{ns_lines}\n' > /etc/resolv.conf
|
||||
|
||||
# Block automatic service startup during package installation
|
||||
printf '#!/bin/sh\\nexit 101\\n' > /usr/sbin/policy-rc.d
|
||||
chmod +x /usr/sbin/policy-rc.d
|
||||
|
||||
apt-get -o DPkg::Lock::Timeout=60 update
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||
pdns-server pdns-backend-sqlite3 sqlite3 pdns-recursor dnsutils
|
||||
|
||||
# Remove the startup block
|
||||
rm /usr/sbin/policy-rc.d
|
||||
|
||||
systemctl stop pdns pdns-recursor || true
|
||||
mkdir -p /var/lib/powerdns
|
||||
sqlite3 /var/lib/powerdns/pdns.sqlite3 \
|
||||
</usr/share/doc/pdns-backend-sqlite3/schema.sqlite3.sql
|
||||
chown -R pdns:pdns /var/lib/powerdns
|
||||
""")
|
||||
|
||||
self.push_file_content(
|
||||
"/etc/powerdns/pdns.conf",
|
||||
"""
|
||||
launch=gsqlite3
|
||||
gsqlite3-database=/var/lib/powerdns/pdns.sqlite3
|
||||
local-address=127.0.0.1
|
||||
local-port=5353
|
||||
""",
|
||||
)
|
||||
|
||||
self.push_file_content(
|
||||
"/etc/powerdns/recursor.conf",
|
||||
"""
|
||||
local-address=0.0.0.0
|
||||
local-port=53
|
||||
forward-zones=localchat=127.0.0.1:5353
|
||||
allow-from=0.0.0.0/0
|
||||
dont-query=
|
||||
dnssec=off
|
||||
""",
|
||||
)
|
||||
|
||||
self.bash("""
|
||||
systemctl start pdns
|
||||
systemctl start pdns-recursor
|
||||
echo 'nameserver 127.0.0.1' > /etc/resolv.conf
|
||||
""")
|
||||
self._wait_dns_ready()
|
||||
|
||||
def reset_dns_records(self, dns_ip, domains):
|
||||
"""Create DNS zones with initial A records via pdnsutil.
|
||||
|
||||
Only sets SOA, NS, and A records as the minimal set
|
||||
needed for SSH connectivity. Full records (MX, TXT, SRV,
|
||||
CNAME, DKIM) are added later by ``cmdeploy dns``.
|
||||
|
||||
Args:
|
||||
dns_ip: IP of the DNS container
|
||||
domains: list of dicts with 'name', 'domain', 'ip'
|
||||
"""
|
||||
for d in domains:
|
||||
domain = d["domain"]
|
||||
ip = d["ip"]
|
||||
self.out.print(f" {domain} -> {ip}")
|
||||
|
||||
# Delete and recreate zone fresh (removes stale records)
|
||||
self.pdnsutil("delete-zone", domain, check=False)
|
||||
self.pdnsutil("create-zone", domain, f"ns.{domain}")
|
||||
|
||||
serial = str(int(time.time()))
|
||||
soa = f"ns.{domain} hostmaster.{domain} {serial} 3600 900 604800 300"
|
||||
self.replace_rrset(domain, ".", "SOA", "3600", soa)
|
||||
self.replace_rrset(domain, ".", "NS", "3600", f"ns.{domain}.")
|
||||
self.replace_rrset(domain, ".", "A", "3600", ip)
|
||||
self.replace_rrset(domain, "ns", "A", "3600", dns_ip)
|
||||
|
||||
# AAAA (domain -> container IPv6, if available)
|
||||
ipv6 = d.get("ipv6")
|
||||
if ipv6:
|
||||
self.replace_rrset(domain, ".", "AAAA", "3600", ipv6)
|
||||
self.out.print(f" zone reset: SOA, NS, A, AAAA ({ip}, {ipv6})")
|
||||
else:
|
||||
# Remove any stale AAAA record
|
||||
self.pdnsutil("delete-rrset", domain, ".", "AAAA", check=False)
|
||||
self.out.print(f" zone reset: SOA, NS, A ({ip}, IPv4-only)")
|
||||
|
||||
self.restart_services()
|
||||
|
||||
def set_dns_records(self, text):
|
||||
"""Add or overwrite DNS records from standard BIND format.
|
||||
|
||||
Uses ``cmdeploy.dns.parse_zone_records`` to parse.
|
||||
Zones are created automatically from the record names.
|
||||
"""
|
||||
from ..dns import parse_zone_records
|
||||
|
||||
zones_seen = set()
|
||||
|
||||
for name, ttl, rtype, rdata in parse_zone_records(text):
|
||||
# Derive zone from name: find top-level .localchat domain
|
||||
name_parts = name.split(".")
|
||||
zone = name # fallback
|
||||
for i in range(len(name_parts) - 1):
|
||||
if name_parts[i + 1 :] == ["localchat"]:
|
||||
zone = ".".join(name_parts[i:])
|
||||
break
|
||||
|
||||
# Create zone if first time seeing it
|
||||
if zone not in zones_seen:
|
||||
self.pdnsutil(
|
||||
"create-zone",
|
||||
zone,
|
||||
f"ns.{zone}",
|
||||
check=False,
|
||||
)
|
||||
zones_seen.add(zone)
|
||||
|
||||
# Figure out the record name relative to zone
|
||||
if name == zone:
|
||||
relative = "."
|
||||
elif name.endswith(f".{zone}"):
|
||||
relative = name[: -(len(zone) + 1)]
|
||||
else:
|
||||
relative = name
|
||||
|
||||
self.replace_rrset(zone, relative, rtype, ttl, rdata)
|
||||
|
||||
if zones_seen:
|
||||
self.restart_services()
|
||||
@@ -1 +0,0 @@
|
||||
*/5 * * * * root {{ config.execpath }} {{ config.mailboxes_dir }} >/var/www/html/metrics
|
||||
@@ -44,21 +44,37 @@ counter warning_count
|
||||
}
|
||||
|
||||
|
||||
counter filtered_mail_count
|
||||
counter filtered_outgoing_mail_count
|
||||
|
||||
counter encrypted_mail_count
|
||||
/Filtering encrypted mail\./ {
|
||||
encrypted_mail_count++
|
||||
filtered_mail_count++
|
||||
counter outgoing_encrypted_mail_count
|
||||
/Outgoing: Filtering encrypted mail\./ {
|
||||
outgoing_encrypted_mail_count++
|
||||
filtered_outgoing_mail_count++
|
||||
}
|
||||
|
||||
counter unencrypted_mail_count
|
||||
/Filtering unencrypted mail\./ {
|
||||
unencrypted_mail_count++
|
||||
filtered_mail_count++
|
||||
counter outgoing_unencrypted_mail_count
|
||||
/Outgoing: Filtering unencrypted mail\./ {
|
||||
outgoing_unencrypted_mail_count++
|
||||
filtered_outgoing_mail_count++
|
||||
}
|
||||
|
||||
|
||||
counter filtered_incoming_mail_count
|
||||
|
||||
counter incoming_encrypted_mail_count
|
||||
/Incoming: Filtering encrypted mail\./ {
|
||||
incoming_encrypted_mail_count++
|
||||
filtered_incoming_mail_count++
|
||||
}
|
||||
|
||||
counter incoming_unencrypted_mail_count
|
||||
/Incoming: Filtering unencrypted mail\./ {
|
||||
incoming_unencrypted_mail_count++
|
||||
filtered_incoming_mail_count++
|
||||
}
|
||||
|
||||
|
||||
counter rejected_unencrypted_mail_count
|
||||
/Rejected unencrypted mail\./ {
|
||||
/Rejected unencrypted mail/ {
|
||||
rejected_unencrypted_mail_count++
|
||||
}
|
||||
|
||||
68
cmdeploy/src/cmdeploy/mtail/deployer.py
Normal file
68
cmdeploy/src/cmdeploy/mtail/deployer.py
Normal file
@@ -0,0 +1,68 @@
|
||||
from pyinfra import facts, host
|
||||
from pyinfra.operations import apt, files, server, systemd
|
||||
|
||||
from cmdeploy.basedeploy import (
|
||||
Deployer,
|
||||
get_resource,
|
||||
)
|
||||
|
||||
|
||||
class MtailDeployer(Deployer):
|
||||
def __init__(self, mtail_address):
|
||||
self.mtail_address = mtail_address
|
||||
|
||||
def install(self):
|
||||
# Uninstall mtail package to install a static binary.
|
||||
apt.packages(name="Uninstall mtail", packages=["mtail"], present=False)
|
||||
|
||||
(url, sha256sum) = {
|
||||
"x86_64": (
|
||||
"https://github.com/google/mtail/releases/download/v3.0.8/mtail_3.0.8_linux_amd64.tar.gz",
|
||||
"123c2ee5f48c3eff12ebccee38befd2233d715da736000ccde49e3d5607724e4",
|
||||
),
|
||||
"aarch64": (
|
||||
"https://github.com/google/mtail/releases/download/v3.0.8/mtail_3.0.8_linux_arm64.tar.gz",
|
||||
"aa04811c0929b6754408676de520e050c45dddeb3401881888a092c9aea89cae",
|
||||
),
|
||||
}[host.get_fact(facts.server.Arch)]
|
||||
|
||||
server.shell(
|
||||
name="Download mtail",
|
||||
commands=[
|
||||
f"(echo '{sha256sum} /usr/local/bin/mtail' | sha256sum -c) || (curl -L {url} | gunzip | tar -x -f - mtail -O >/usr/local/bin/mtail.new && mv /usr/local/bin/mtail.new /usr/local/bin/mtail)",
|
||||
"chmod 755 /usr/local/bin/mtail",
|
||||
],
|
||||
)
|
||||
|
||||
def configure(self):
|
||||
# Using our own systemd unit instead of `/usr/lib/systemd/system/mtail.service`.
|
||||
# This allows to read from journalctl instead of log files.
|
||||
files.template(
|
||||
src=get_resource("mtail/mtail.service.j2"),
|
||||
dest="/etc/systemd/system/mtail.service",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
address=self.mtail_address or "127.0.0.1",
|
||||
port=3903,
|
||||
)
|
||||
|
||||
mtail_conf = files.put(
|
||||
name="Mtail configuration",
|
||||
src=get_resource("mtail/delivered_mail.mtail"),
|
||||
dest="/etc/mtail/delivered_mail.mtail",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
self.need_restart = mtail_conf.changed
|
||||
|
||||
def activate(self):
|
||||
systemd.service(
|
||||
name="Start and enable mtail",
|
||||
service="mtail.service",
|
||||
running=bool(self.mtail_address),
|
||||
enabled=bool(self.mtail_address),
|
||||
restarted=self.need_restart,
|
||||
)
|
||||
self.need_restart = False
|
||||
@@ -3,7 +3,7 @@ Description=mtail
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart=/bin/sh -c "journalctl -f -o short-iso -n 0 | /usr/bin/mtail --address={{ address }} --port={{ port }} --progs /etc/mtail --logtostderr --logs -"
|
||||
ExecStart=/bin/sh -c "journalctl -f -o short-iso -n 0 | /usr/local/bin/mtail --address={{ address }} --port={{ port }} --progs /etc/mtail --logtostderr --logs -"
|
||||
Restart=on-failure
|
||||
|
||||
[Install]
|
||||
|
||||
@@ -1,47 +1,47 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<clientConfig version="1.1">
|
||||
<emailProvider id="{{ config.domain_name }}">
|
||||
<domain>{{ config.domain_name }}</domain>
|
||||
<displayName>{{ config.domain_name }} chatmail</displayName>
|
||||
<displayShortName>{{ config.domain_name }}</displayShortName>
|
||||
<emailProvider id="{{ config.mail_domain }}">
|
||||
<domain>{{ config.mail_domain }}</domain>
|
||||
<displayName>{{ config.mail_domain }} chatmail</displayName>
|
||||
<displayShortName>{{ config.mail_domain }}</displayShortName>
|
||||
<incomingServer type="imap">
|
||||
<hostname>{{ config.domain_name }}</hostname>
|
||||
<hostname>{{ config.mail_domain }}</hostname>
|
||||
<port>993</port>
|
||||
<socketType>SSL</socketType>
|
||||
<authentication>password-cleartext</authentication>
|
||||
<username>%EMAILADDRESS%</username>
|
||||
</incomingServer>
|
||||
<incomingServer type="imap">
|
||||
<hostname>{{ config.domain_name }}</hostname>
|
||||
<hostname>{{ config.mail_domain }}</hostname>
|
||||
<port>143</port>
|
||||
<socketType>STARTTLS</socketType>
|
||||
<authentication>password-cleartext</authentication>
|
||||
<username>%EMAILADDRESS%</username>
|
||||
</incomingServer>
|
||||
<incomingServer type="imap">
|
||||
<hostname>{{ config.domain_name }}</hostname>
|
||||
<hostname>{{ config.mail_domain }}</hostname>
|
||||
<port>443</port>
|
||||
<socketType>SSL</socketType>
|
||||
<authentication>password-cleartext</authentication>
|
||||
<username>%EMAILADDRESS%</username>
|
||||
</incomingServer>
|
||||
<outgoingServer type="smtp">
|
||||
<hostname>{{ config.domain_name }}</hostname>
|
||||
<hostname>{{ config.mail_domain }}</hostname>
|
||||
<port>465</port>
|
||||
<socketType>SSL</socketType>
|
||||
<authentication>password-cleartext</authentication>
|
||||
<username>%EMAILADDRESS%</username>
|
||||
</outgoingServer>
|
||||
<outgoingServer type="smtp">
|
||||
<hostname>{{ config.domain_name }}</hostname>
|
||||
<hostname>{{ config.mail_domain }}</hostname>
|
||||
<port>587</port>
|
||||
<socketType>STARTTLS</socketType>
|
||||
<authentication>password-cleartext</authentication>
|
||||
<username>%EMAILADDRESS%</username>
|
||||
</outgoingServer>
|
||||
<outgoingServer type="smtp">
|
||||
<hostname>{{ config.domain_name }}</hostname>
|
||||
<hostname>{{ config.mail_domain }}</hostname>
|
||||
<port>443</port>
|
||||
<socketType>SSL</socketType>
|
||||
<authentication>password-cleartext</authentication>
|
||||
|
||||
117
cmdeploy/src/cmdeploy/nginx/deployer.py
Normal file
117
cmdeploy/src/cmdeploy/nginx/deployer.py
Normal file
@@ -0,0 +1,117 @@
|
||||
from chatmaild.config import Config
|
||||
from pyinfra.operations import apt, files, systemd
|
||||
|
||||
from cmdeploy.basedeploy import (
|
||||
Deployer,
|
||||
get_resource,
|
||||
)
|
||||
|
||||
|
||||
class NginxDeployer(Deployer):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
|
||||
def install(self):
|
||||
#
|
||||
# If we allow nginx to start up on install, it will grab port
|
||||
# 80, which then will block acmetool from listening on the port.
|
||||
# That in turn prevents getting certificates, which then causes
|
||||
# an error when we try to start nginx on the custom config
|
||||
# that leaves port 80 open but also requires certificates to
|
||||
# be present. To avoid getting into that interlocking mess,
|
||||
# we use policy-rc.d to prevent nginx from starting up when it
|
||||
# is installed.
|
||||
#
|
||||
# This approach allows us to avoid performing any explicit
|
||||
# systemd operations during the install stage (as opposed to
|
||||
# allowing it to start and then forcing it to stop), which allows
|
||||
# the install stage to run in non-systemd environments like a
|
||||
# container image build.
|
||||
#
|
||||
# For documentation about policy-rc.d, see:
|
||||
# https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
|
||||
#
|
||||
files.put(
|
||||
src=get_resource("policy-rc.d"),
|
||||
dest="/usr/sbin/policy-rc.d",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="755",
|
||||
)
|
||||
|
||||
apt.packages(
|
||||
name="Install nginx",
|
||||
packages=["nginx", "libnginx-mod-stream"],
|
||||
)
|
||||
|
||||
files.file("/usr/sbin/policy-rc.d", present=False)
|
||||
|
||||
def configure(self):
|
||||
self.need_restart = _configure_nginx(self.config)
|
||||
|
||||
def activate(self):
|
||||
systemd.service(
|
||||
name="Start and enable nginx",
|
||||
service="nginx.service",
|
||||
running=True,
|
||||
enabled=True,
|
||||
restarted=self.need_restart,
|
||||
)
|
||||
self.need_restart = False
|
||||
|
||||
|
||||
def _configure_nginx(config: Config, debug: bool = False) -> bool:
|
||||
"""Configures nginx HTTP server."""
|
||||
need_restart = False
|
||||
|
||||
main_config = files.template(
|
||||
src=get_resource("nginx/nginx.conf.j2"),
|
||||
dest="/etc/nginx/nginx.conf",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
config=config,
|
||||
disable_ipv6=config.disable_ipv6,
|
||||
)
|
||||
need_restart |= main_config.changed
|
||||
|
||||
autoconfig = files.template(
|
||||
src=get_resource("nginx/autoconfig.xml.j2"),
|
||||
dest="/var/www/html/.well-known/autoconfig/mail/config-v1.1.xml",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
config=config,
|
||||
)
|
||||
need_restart |= autoconfig.changed
|
||||
|
||||
mta_sts_config = files.template(
|
||||
src=get_resource("nginx/mta-sts.txt.j2"),
|
||||
dest="/var/www/html/.well-known/mta-sts.txt",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
config=config,
|
||||
)
|
||||
need_restart |= mta_sts_config.changed
|
||||
|
||||
# install CGI newemail script
|
||||
#
|
||||
cgi_dir = "/usr/lib/cgi-bin"
|
||||
files.directory(
|
||||
name=f"Ensure {cgi_dir} exists",
|
||||
path=cgi_dir,
|
||||
user="root",
|
||||
group="root",
|
||||
)
|
||||
|
||||
files.put(
|
||||
name="Upload cgi newemail.py script",
|
||||
src=get_resource("newemail.py", pkg="chatmaild").open("rb"),
|
||||
dest=f"{cgi_dir}/newemail.py",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="755",
|
||||
)
|
||||
|
||||
return need_restart
|
||||
@@ -1,4 +1,4 @@
|
||||
version: STSv1
|
||||
mode: enforce
|
||||
mx: {{ config.domain_name }}
|
||||
mx: {{ config.mail_domain }}
|
||||
max_age: 2419200
|
||||
|
||||
@@ -2,11 +2,25 @@ load_module modules/ngx_stream_module.so;
|
||||
|
||||
user www-data;
|
||||
worker_processes auto;
|
||||
|
||||
# Increase the number of connections
|
||||
# that a worker process can open
|
||||
# to avoid errors such as
|
||||
# accept4() failed (24: Too many open files)
|
||||
# and
|
||||
# socket() failed (24: Too many open files) while connecting to upstream
|
||||
# in the logs.
|
||||
# <https://nginx.org/en/docs/ngx_core_module.html#worker_rlimit_nofile>
|
||||
worker_rlimit_nofile 2048;
|
||||
pid /run/nginx.pid;
|
||||
error_log syslog:server=unix:/dev/log,facility=local3;
|
||||
|
||||
events {
|
||||
worker_connections 768;
|
||||
# Increase to avoid errors such as
|
||||
# 768 worker_connections are not enough while connecting to upstream
|
||||
# in the logs.
|
||||
# <https://nginx.org/en/docs/ngx_core_module.html#worker_connections>
|
||||
worker_connections 2048;
|
||||
# multi_accept on;
|
||||
}
|
||||
|
||||
@@ -28,6 +42,9 @@ stream {
|
||||
}
|
||||
|
||||
http {
|
||||
{% if config.tls_cert_mode == "self" %}
|
||||
limit_req_zone $binary_remote_addr zone=newaccount:10m rate=2r/s;
|
||||
{% endif %}
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
|
||||
@@ -37,25 +54,22 @@ http {
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
ssl_prefer_server_ciphers on;
|
||||
ssl_certificate /var/lib/acme/live/{{ config.domain_name }}/fullchain;
|
||||
ssl_certificate_key /var/lib/acme/live/{{ config.domain_name }}/privkey;
|
||||
ssl_certificate {{ config.tls_cert_path }};
|
||||
ssl_certificate_key {{ config.tls_key_path }};
|
||||
|
||||
gzip on;
|
||||
|
||||
server {
|
||||
|
||||
listen 8443 ssl default_server;
|
||||
{% if not disable_ipv6 %}
|
||||
listen [::]:8443 ssl default_server;
|
||||
{% endif %}
|
||||
listen 127.0.0.1:8443 ssl default_server;
|
||||
|
||||
root /var/www/html;
|
||||
|
||||
index index.html index.htm;
|
||||
|
||||
server_name _;
|
||||
server_name {{ config.mail_domain }} www.{{ config.mail_domain }} mta-sts.{{ config.mail_domain }};
|
||||
|
||||
access_log syslog:server=unix:/dev/log,facility=local7;
|
||||
|
||||
@@ -65,16 +79,16 @@ http {
|
||||
try_files $uri $uri/ =404;
|
||||
}
|
||||
|
||||
location /metrics {
|
||||
default_type text/plain;
|
||||
}
|
||||
|
||||
location /new {
|
||||
{% if config.tls_cert_mode != "self" %}
|
||||
if ($request_method = GET) {
|
||||
# Redirect to Delta Chat,
|
||||
# which will in turn do a POST request.
|
||||
return 301 dcaccount:https://{{ config.domain_name }}/new;
|
||||
return 301 dcaccount:https://{{ config.mail_domain }}/new;
|
||||
}
|
||||
{% else %}
|
||||
limit_req zone=newaccount burst=5 nodelay;
|
||||
{% endif %}
|
||||
|
||||
fastcgi_pass unix:/run/fcgiwrap.socket;
|
||||
include /etc/nginx/fastcgi_params;
|
||||
@@ -88,9 +102,11 @@ http {
|
||||
#
|
||||
# Redirects are only for browsers.
|
||||
location /cgi-bin/newemail.py {
|
||||
{% if config.tls_cert_mode != "self" %}
|
||||
if ($request_method = GET) {
|
||||
return 301 dcaccount:https://{{ config.domain_name }}/new;
|
||||
return 301 dcaccount:https://{{ config.mail_domain }}/new;
|
||||
}
|
||||
{% endif %}
|
||||
|
||||
fastcgi_pass unix:/run/fcgiwrap.socket;
|
||||
include /etc/nginx/fastcgi_params;
|
||||
@@ -120,12 +136,30 @@ http {
|
||||
|
||||
# Redirect www. to non-www
|
||||
server {
|
||||
listen 8443 ssl;
|
||||
{% if not disable_ipv6 %}
|
||||
listen [::]:8443 ssl;
|
||||
{% endif %}
|
||||
server_name www.{{ config.domain_name }};
|
||||
return 301 $scheme://{{ config.domain_name }}$request_uri;
|
||||
listen 127.0.0.1:8443 ssl;
|
||||
server_name www.{{ config.mail_domain }};
|
||||
return 301 $scheme://{{ config.mail_domain }}$request_uri;
|
||||
access_log syslog:server=unix:/dev/log,facility=local7;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
{% if not disable_ipv6 %}
|
||||
listen [::]:80;
|
||||
{% endif %}
|
||||
|
||||
{% if config.tls_cert_mode == "acme" %}
|
||||
location /.well-known/acme-challenge/ {
|
||||
proxy_pass http://acmetool;
|
||||
}
|
||||
{% endif %}
|
||||
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
{% if config.tls_cert_mode == "acme" %}
|
||||
upstream acmetool {
|
||||
server 127.0.0.1:402;
|
||||
}
|
||||
{% endif %}
|
||||
}
|
||||
|
||||
Binary file not shown.
124
cmdeploy/src/cmdeploy/opendkim/deployer.py
Normal file
124
cmdeploy/src/cmdeploy/opendkim/deployer.py
Normal file
@@ -0,0 +1,124 @@
|
||||
"""
|
||||
Installs OpenDKIM
|
||||
"""
|
||||
|
||||
from pyinfra import host
|
||||
from pyinfra.facts.files import File
|
||||
from pyinfra.operations import apt, files, server, systemd
|
||||
|
||||
from cmdeploy.basedeploy import Deployer, get_resource
|
||||
|
||||
|
||||
class OpendkimDeployer(Deployer):
|
||||
required_users = [("opendkim", None, ["opendkim"])]
|
||||
|
||||
def __init__(self, mail_domain):
|
||||
self.mail_domain = mail_domain
|
||||
|
||||
def install(self):
|
||||
apt.packages(
|
||||
name="apt install opendkim opendkim-tools",
|
||||
packages=["opendkim", "opendkim-tools"],
|
||||
)
|
||||
|
||||
def configure(self):
|
||||
domain = self.mail_domain
|
||||
dkim_selector = "opendkim"
|
||||
"""Configures OpenDKIM"""
|
||||
need_restart = False
|
||||
|
||||
main_config = files.template(
|
||||
src=get_resource("opendkim/opendkim.conf"),
|
||||
dest="/etc/opendkim.conf",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
config={"domain_name": domain, "opendkim_selector": dkim_selector},
|
||||
)
|
||||
need_restart |= main_config.changed
|
||||
|
||||
screen_script = files.file(
|
||||
path="/etc/opendkim/screen.lua",
|
||||
present=False,
|
||||
)
|
||||
need_restart |= screen_script.changed
|
||||
|
||||
final_script = files.file(
|
||||
path="/etc/opendkim/final.lua",
|
||||
present=False,
|
||||
)
|
||||
need_restart |= final_script.changed
|
||||
|
||||
files.directory(
|
||||
name="Add opendkim directory to /etc",
|
||||
path="/etc/opendkim",
|
||||
user="opendkim",
|
||||
group="opendkim",
|
||||
mode="750",
|
||||
present=True,
|
||||
)
|
||||
|
||||
keytable = files.template(
|
||||
src=get_resource("opendkim/KeyTable"),
|
||||
dest="/etc/dkimkeys/KeyTable",
|
||||
user="opendkim",
|
||||
group="opendkim",
|
||||
mode="644",
|
||||
config={"domain_name": domain, "opendkim_selector": dkim_selector},
|
||||
)
|
||||
need_restart |= keytable.changed
|
||||
|
||||
signing_table = files.template(
|
||||
src=get_resource("opendkim/SigningTable"),
|
||||
dest="/etc/dkimkeys/SigningTable",
|
||||
user="opendkim",
|
||||
group="opendkim",
|
||||
mode="644",
|
||||
config={"domain_name": domain, "opendkim_selector": dkim_selector},
|
||||
)
|
||||
need_restart |= signing_table.changed
|
||||
files.directory(
|
||||
name="Add opendkim socket directory to /var/spool/postfix",
|
||||
path="/var/spool/postfix/opendkim",
|
||||
user="opendkim",
|
||||
group="opendkim",
|
||||
mode="750",
|
||||
present=True,
|
||||
)
|
||||
|
||||
if not host.get_fact(File, f"/etc/dkimkeys/{dkim_selector}.private"):
|
||||
server.shell(
|
||||
name="Generate OpenDKIM domain keys",
|
||||
commands=[
|
||||
f"/usr/sbin/opendkim-genkey -D /etc/dkimkeys -d {domain} -s {dkim_selector}"
|
||||
],
|
||||
_use_su_login=True,
|
||||
_su_user="opendkim",
|
||||
)
|
||||
|
||||
service_file = files.put(
|
||||
name="Configure opendkim to restart once a day",
|
||||
src=get_resource("opendkim/systemd.conf"),
|
||||
dest="/etc/systemd/system/opendkim.service.d/10-prevent-memory-leak.conf",
|
||||
)
|
||||
need_restart |= service_file.changed
|
||||
|
||||
files.file(
|
||||
name="chown opendkim: /etc/dkimkeys/opendkim.private",
|
||||
path="/etc/dkimkeys/opendkim.private",
|
||||
user="opendkim",
|
||||
group="opendkim",
|
||||
)
|
||||
|
||||
self.need_restart = need_restart
|
||||
|
||||
def activate(self):
|
||||
systemd.service(
|
||||
name="Start and enable OpenDKIM",
|
||||
service="opendkim.service",
|
||||
running=True,
|
||||
enabled=True,
|
||||
daemon_reload=self.need_restart,
|
||||
restarted=self.need_restart,
|
||||
)
|
||||
self.need_restart = False
|
||||
@@ -1,28 +0,0 @@
|
||||
if odkim.internal_ip(ctx) == 1 then
|
||||
-- Outgoing message will be signed,
|
||||
-- no need to look for signatures.
|
||||
return nil
|
||||
end
|
||||
|
||||
nsigs = odkim.get_sigcount(ctx)
|
||||
if nsigs == nil then
|
||||
return nil
|
||||
end
|
||||
|
||||
for i = 1, nsigs do
|
||||
sig = odkim.get_sighandle(ctx, i - 1)
|
||||
sigres = odkim.sig_result(sig)
|
||||
|
||||
-- All signatures that do not correspond to From:
|
||||
-- were ignored in screen.lua and return sigres -1.
|
||||
--
|
||||
-- Any valid signature that was not ignored like this
|
||||
-- means the message is acceptable.
|
||||
if sigres == 0 then
|
||||
return nil
|
||||
end
|
||||
end
|
||||
|
||||
odkim.set_reply(ctx, "554", "5.7.1", "No valid DKIM signature found")
|
||||
odkim.set_result(ctx, SMFIS_REJECT)
|
||||
return nil
|
||||
@@ -13,6 +13,7 @@ OversignHeaders From
|
||||
On-BadSignature reject
|
||||
On-KeyNotFound reject
|
||||
On-NoSignature reject
|
||||
DNSTimeout 60
|
||||
|
||||
# Signing domain, selector, and key (required). For example, perform signing
|
||||
# for domain "example.com" with selector "2020" (2020._domainkey.example.com),
|
||||
@@ -44,12 +45,6 @@ SignHeaders *,+autocrypt,+content-type
|
||||
# Default is empty.
|
||||
OversignHeaders from,reply-to,subject,date,to,cc,resent-date,resent-from,resent-sender,resent-to,resent-cc,in-reply-to,references,list-id,list-help,list-unsubscribe,list-subscribe,list-post,list-owner,list-archive,autocrypt
|
||||
|
||||
# Script to ignore signatures that do not correspond to the From: domain.
|
||||
ScreenPolicyScript /etc/opendkim/screen.lua
|
||||
|
||||
# Script to reject mails without a valid DKIM signature.
|
||||
FinalPolicyScript /etc/opendkim/final.lua
|
||||
|
||||
# In Debian, opendkim runs as user "opendkim". A umask of 007 is required when
|
||||
# using a local socket with MTAs that access the socket as a non-privileged
|
||||
# user (for example, Postfix). You may need to add user "postfix" to group
|
||||
@@ -64,3 +59,9 @@ PidFile /run/opendkim/opendkim.pid
|
||||
# The trust anchor enables DNSSEC. In Debian, the trust anchor file is provided
|
||||
# by the package dns-root-data.
|
||||
TrustAnchorFile /usr/share/dns/root.key
|
||||
|
||||
# Sign messages when `-o milter_macro_daemon_name=ORIGINATING` is set.
|
||||
MTA ORIGINATING
|
||||
|
||||
# No hosts are treated as internal, ORIGINATING daemon name should be set explicitly.
|
||||
InternalHosts -
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
-- Ignore signatures that do not correspond to the From: domain.
|
||||
|
||||
from_domain = odkim.get_fromdomain(ctx)
|
||||
if from_domain == nil then
|
||||
return nil
|
||||
end
|
||||
|
||||
n = odkim.get_sigcount(ctx)
|
||||
if n == nil then
|
||||
return nil
|
||||
end
|
||||
|
||||
for i = 1, n do
|
||||
sig = odkim.get_sighandle(ctx, i - 1)
|
||||
sig_domain = odkim.sig_getdomain(sig)
|
||||
if from_domain ~= sig_domain then
|
||||
odkim.sig_ignore(sig)
|
||||
end
|
||||
end
|
||||
|
||||
return nil
|
||||
3
cmdeploy/src/cmdeploy/opendkim/systemd.conf
Normal file
3
cmdeploy/src/cmdeploy/opendkim/systemd.conf
Normal file
@@ -0,0 +1,3 @@
|
||||
[Service]
|
||||
Restart=always
|
||||
RuntimeMaxSec=1d
|
||||
3
cmdeploy/src/cmdeploy/policy-rc.d
Executable file
3
cmdeploy/src/cmdeploy/policy-rc.d
Executable file
@@ -0,0 +1,3 @@
|
||||
#!/bin/sh
|
||||
echo "All runlevel operations denied by policy" >&2
|
||||
exit 101
|
||||
119
cmdeploy/src/cmdeploy/postfix/deployer.py
Normal file
119
cmdeploy/src/cmdeploy/postfix/deployer.py
Normal file
@@ -0,0 +1,119 @@
|
||||
from pyinfra.operations import apt, files, server, systemd
|
||||
|
||||
from cmdeploy.basedeploy import Deployer, get_resource
|
||||
|
||||
|
||||
class PostfixDeployer(Deployer):
|
||||
required_users = [("postfix", None, ["opendkim"])]
|
||||
daemon_reload = False
|
||||
|
||||
def __init__(self, config, disable_mail):
|
||||
self.config = config
|
||||
self.disable_mail = disable_mail
|
||||
|
||||
def install(self):
|
||||
apt.packages(
|
||||
name="Install Postfix",
|
||||
packages="postfix",
|
||||
)
|
||||
|
||||
def configure(self):
|
||||
config = self.config
|
||||
need_restart = False
|
||||
|
||||
main_config = files.template(
|
||||
src=get_resource("postfix/main.cf.j2"),
|
||||
dest="/etc/postfix/main.cf",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
config=config,
|
||||
disable_ipv6=config.disable_ipv6,
|
||||
)
|
||||
need_restart |= main_config.changed
|
||||
|
||||
master_config = files.template(
|
||||
src=get_resource("postfix/master.cf.j2"),
|
||||
dest="/etc/postfix/master.cf",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
debug=False,
|
||||
config=config,
|
||||
)
|
||||
need_restart |= master_config.changed
|
||||
|
||||
header_cleanup = files.put(
|
||||
src=get_resource("postfix/submission_header_cleanup"),
|
||||
dest="/etc/postfix/submission_header_cleanup",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
need_restart |= header_cleanup.changed
|
||||
|
||||
lmtp_header_cleanup = files.put(
|
||||
src=get_resource("postfix/lmtp_header_cleanup"),
|
||||
dest="/etc/postfix/lmtp_header_cleanup",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
need_restart |= lmtp_header_cleanup.changed
|
||||
|
||||
tls_policy_map = files.put(
|
||||
name="Upload SMTP TLS Policy that accepts self-signed certificates for IP-only hosts",
|
||||
src=get_resource("postfix/smtp_tls_policy_map"),
|
||||
dest="/etc/postfix/smtp_tls_policy_map",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
need_restart |= tls_policy_map.changed
|
||||
if tls_policy_map.changed:
|
||||
server.shell(
|
||||
commands=["postmap /etc/postfix/smtp_tls_policy_map"],
|
||||
)
|
||||
|
||||
# Login map that 1:1 maps email address to login.
|
||||
login_map = files.put(
|
||||
src=get_resource("postfix/login_map"),
|
||||
dest="/etc/postfix/login_map",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
need_restart |= login_map.changed
|
||||
|
||||
restart_conf = files.put(
|
||||
name="postfix: restart automatically on failure",
|
||||
src=get_resource("service/10_restart.conf"),
|
||||
dest="/etc/systemd/system/postfix@.service.d/10_restart.conf",
|
||||
)
|
||||
self.daemon_reload = restart_conf.changed
|
||||
|
||||
# Validate postfix configuration before restart
|
||||
if need_restart:
|
||||
server.shell(
|
||||
name="Validate postfix configuration",
|
||||
# Extract stderr and quit with error if non-zero
|
||||
commands=[
|
||||
"""bash -c 'w=$(postconf 2>&1 >/dev/null); [[ -z "$w" ]] || { echo "$w"; false; }'"""
|
||||
],
|
||||
)
|
||||
self.need_restart = need_restart
|
||||
|
||||
def activate(self):
|
||||
restart = False if self.disable_mail else self.need_restart
|
||||
|
||||
systemd.service(
|
||||
name="disable postfix for now"
|
||||
if self.disable_mail
|
||||
else "Start and enable Postfix",
|
||||
service="postfix.service",
|
||||
running=False if self.disable_mail else True,
|
||||
enabled=False if self.disable_mail else True,
|
||||
restarted=restart,
|
||||
daemon_reload=self.daemon_reload,
|
||||
)
|
||||
self.need_restart = False
|
||||
3
cmdeploy/src/cmdeploy/postfix/lmtp_header_cleanup
Normal file
3
cmdeploy/src/cmdeploy/postfix/lmtp_header_cleanup
Normal file
@@ -0,0 +1,3 @@
|
||||
/^DKIM-Signature:/ IGNORE
|
||||
/^Authentication-Results:/ IGNORE
|
||||
/^Received:/ IGNORE
|
||||
@@ -15,15 +15,19 @@ readme_directory = no
|
||||
compatibility_level = 3.6
|
||||
|
||||
# TLS parameters
|
||||
smtpd_tls_cert_file=/var/lib/acme/live/{{ config.mail_domain }}/fullchain
|
||||
smtpd_tls_key_file=/var/lib/acme/live/{{ config.mail_domain }}/privkey
|
||||
smtpd_tls_cert_file={{ config.tls_cert_path }}
|
||||
smtpd_tls_key_file={{ config.tls_key_path }}
|
||||
smtpd_tls_security_level=may
|
||||
|
||||
smtp_tls_CApath=/etc/ssl/certs
|
||||
smtp_tls_security_level=may
|
||||
smtp_tls_security_level={{ "verify" if config.tls_cert_mode == "acme" else "encrypt" }}
|
||||
# Send SNI extension when connecting to other servers.
|
||||
# <https://www.postfix.org/postconf.5.html#smtp_tls_servername>
|
||||
smtp_tls_servername = hostname
|
||||
smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache
|
||||
smtp_tls_policy_maps = socketmap:inet:127.0.0.1:8461:postfix
|
||||
smtpd_tls_protocols = >=TLSv1.2
|
||||
smtp_tls_policy_maps = regexp:/etc/postfix/smtp_tls_policy_map
|
||||
smtp_tls_protocols = >=TLSv1.2
|
||||
smtp_tls_mandatory_protocols = >=TLSv1.2
|
||||
|
||||
# Disable anonymous cipher suites
|
||||
# and known insecure algorithms.
|
||||
@@ -60,7 +64,20 @@ alias_database = hash:/etc/aliases
|
||||
mydestination =
|
||||
|
||||
relayhost =
|
||||
{% if disable_ipv6 %}
|
||||
mynetworks = 127.0.0.0/8
|
||||
{% else %}
|
||||
mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
|
||||
{% endif %}
|
||||
{% if config.addr_v4 %}
|
||||
smtp_bind_address = {{ config.addr_v4 }}
|
||||
{% endif %}
|
||||
{% if config.addr_v6 %}
|
||||
smtp_bind_address6 = {{ config.addr_v6 }}
|
||||
{% endif %}
|
||||
{% if config.addr_v4 or config.addr_v6 %}
|
||||
smtp_bind_address_enforce = yes
|
||||
{% endif %}
|
||||
mailbox_size_limit = 0
|
||||
message_size_limit = {{config.max_message_size}}
|
||||
recipient_delimiter = +
|
||||
@@ -73,6 +90,7 @@ inet_protocols = all
|
||||
|
||||
virtual_transport = lmtp:unix:private/dovecot-lmtp
|
||||
virtual_mailbox_domains = {{ config.mail_domain }}
|
||||
lmtp_header_checks = regexp:/etc/postfix/lmtp_header_cleanup
|
||||
|
||||
mua_client_restrictions = permit_sasl_authenticated, reject
|
||||
mua_sender_restrictions = reject_sender_login_mismatch, permit_sasl_authenticated, reject
|
||||
|
||||
@@ -14,10 +14,13 @@ smtp inet n - y - - smtpd -v
|
||||
{%- else %}
|
||||
smtp inet n - y - - smtpd
|
||||
{%- endif %}
|
||||
-o smtpd_milters=unix:opendkim/opendkim.sock
|
||||
-o smtpd_tls_security_level=encrypt
|
||||
-o smtpd_tls_mandatory_protocols=>=TLSv1.2
|
||||
-o smtpd_proxy_filter=127.0.0.1:{{ config.filtermail_smtp_port_incoming }}
|
||||
submission inet n - y - 5000 smtpd
|
||||
-o syslog_name=postfix/submission
|
||||
-o smtpd_tls_security_level=encrypt
|
||||
-o smtpd_tls_mandatory_protocols=>=TLSv1.3
|
||||
-o smtpd_sasl_auth_enable=yes
|
||||
-o smtpd_sasl_type=dovecot
|
||||
-o smtpd_sasl_path=private/auth
|
||||
@@ -28,14 +31,13 @@ submission inet n - y - 5000 smtpd
|
||||
-o smtpd_sender_restrictions=$mua_sender_restrictions
|
||||
-o smtpd_recipient_restrictions=
|
||||
-o smtpd_relay_restrictions=permit_sasl_authenticated,reject
|
||||
-o milter_macro_daemon_name=ORIGINATING
|
||||
-o smtpd_client_connection_count_limit=1000
|
||||
-o smtpd_proxy_filter=127.0.0.1:{{ config.filtermail_smtp_port }}
|
||||
-o cleanup_service_name=authclean
|
||||
smtps inet n - y - 5000 smtpd
|
||||
-o syslog_name=postfix/smtps
|
||||
-o smtpd_tls_wrappermode=yes
|
||||
-o smtpd_tls_security_level=encrypt
|
||||
-o smtpd_tls_mandatory_protocols=>=TLSv1.3
|
||||
-o smtpd_sasl_auth_enable=yes
|
||||
-o smtpd_sasl_type=dovecot
|
||||
-o smtpd_sasl_path=private/auth
|
||||
@@ -46,9 +48,7 @@ smtps inet n - y - 5000 smtpd
|
||||
-o smtpd_recipient_restrictions=
|
||||
-o smtpd_relay_restrictions=permit_sasl_authenticated,reject
|
||||
-o smtpd_client_connection_count_limit=1000
|
||||
-o milter_macro_daemon_name=ORIGINATING
|
||||
-o smtpd_proxy_filter=127.0.0.1:{{ config.filtermail_smtp_port }}
|
||||
-o cleanup_service_name=authclean
|
||||
#628 inet n - y - - qmqpd
|
||||
pickup unix n - y 60 1 pickup
|
||||
cleanup unix n - y - 0 cleanup
|
||||
@@ -76,17 +76,27 @@ anvil unix - - y - 1 anvil
|
||||
scache unix - - y - 1 scache
|
||||
postlog unix-dgram n - n - 1 postlogd
|
||||
filter unix - n n - - lmtp
|
||||
# Local SMTP server for reinjecting filered mail.
|
||||
localhost:{{ config.postfix_reinject_port }} inet n - n - 10 smtpd
|
||||
# Local SMTP server for reinjecting outgoing filtered mail.
|
||||
127.0.0.1:{{ config.postfix_reinject_port }} inet n - n - 100 smtpd
|
||||
-o syslog_name=postfix/reinject
|
||||
-o milter_macro_daemon_name=ORIGINATING
|
||||
-o smtpd_milters=unix:opendkim/opendkim.sock
|
||||
-o cleanup_service_name=authclean
|
||||
|
||||
# Local SMTP server for reinjecting incoming filtered mail
|
||||
127.0.0.1:{{ config.postfix_reinject_port_incoming }} inet n - n - 100 smtpd
|
||||
-o syslog_name=postfix/reinject_incoming
|
||||
|
||||
# Cleanup `Received` headers for authenticated mail
|
||||
# to avoid leaking client IP.
|
||||
#
|
||||
# We do not do this for received mails
|
||||
# as this will break DKIM signatures
|
||||
# if `Received` header is signed.
|
||||
#
|
||||
# This service also rewrites
|
||||
# Subject with `[...]`
|
||||
# to make sure the users
|
||||
# cannot send unprotected Subject.
|
||||
authclean unix n - - - 0 cleanup
|
||||
-o header_checks=regexp:/etc/postfix/submission_header_cleanup
|
||||
|
||||
3
cmdeploy/src/cmdeploy/postfix/smtp_tls_policy_map
Normal file
3
cmdeploy/src/cmdeploy/postfix/smtp_tls_policy_map
Normal file
@@ -0,0 +1,3 @@
|
||||
/^\[[^]]+\]$/ encrypt
|
||||
/^_/ encrypt
|
||||
/^nauta\.cu$/ may
|
||||
@@ -2,3 +2,4 @@
|
||||
/^X-Originating-IP:/ IGNORE
|
||||
/^X-Mailer:/ IGNORE
|
||||
/^User-Agent:/ IGNORE
|
||||
/^Subject:/ REPLACE Subject: [...]
|
||||
|
||||
@@ -12,48 +12,54 @@ All functions of this module
|
||||
|
||||
import re
|
||||
|
||||
from .rshell import CalledProcessError, shell
|
||||
from .rshell import CalledProcessError, log_progress, shell
|
||||
|
||||
|
||||
def perform_initial_checks(mail_domain):
|
||||
def perform_initial_checks(mail_domain, pre_command=""):
|
||||
"""Collecting initial DNS settings."""
|
||||
assert mail_domain
|
||||
if not shell("dig", fail_ok=True):
|
||||
shell("apt-get install -y dnsutils")
|
||||
if not shell("dig", fail_ok=True, print=log_progress):
|
||||
shell("apt-get update && apt-get install -y dnsutils", print=log_progress)
|
||||
A = query_dns("A", mail_domain)
|
||||
AAAA = query_dns("AAAA", mail_domain)
|
||||
MTA_STS = query_dns("CNAME", f"mta-sts.{mail_domain}")
|
||||
WWW = query_dns("CNAME", f"www.{mail_domain}")
|
||||
|
||||
res = dict(mail_domain=mail_domain, A=A, AAAA=AAAA, MTA_STS=MTA_STS, WWW=WWW)
|
||||
res["acme_account_url"] = shell("acmetool account-url", fail_ok=True)
|
||||
res["acme_account_url"] = shell(
|
||||
pre_command + "acmetool account-url", fail_ok=True, print=log_progress
|
||||
)
|
||||
res["dkim_entry"], res["web_dkim_entry"] = get_dkim_entry(
|
||||
mail_domain, dkim_selector="opendkim"
|
||||
mail_domain, pre_command, dkim_selector="opendkim"
|
||||
)
|
||||
|
||||
if not MTA_STS or not WWW or (not A and not AAAA):
|
||||
return res
|
||||
|
||||
# parse out sts-id if exists, example: "v=STSv1; id=2090123"
|
||||
parts = query_dns("TXT", f"_mta-sts.{mail_domain}").split("id=")
|
||||
mta_sts_txt = query_dns("TXT", f"_mta-sts.{mail_domain}")
|
||||
if not mta_sts_txt:
|
||||
return res
|
||||
parts = mta_sts_txt.split("id=")
|
||||
res["sts_id"] = parts[1].rstrip('"') if len(parts) == 2 else ""
|
||||
return res
|
||||
|
||||
|
||||
def get_dkim_entry(mail_domain, dkim_selector):
|
||||
def get_dkim_entry(mail_domain, pre_command, dkim_selector):
|
||||
try:
|
||||
dkim_pubkey = shell(
|
||||
f"openssl rsa -in /etc/dkimkeys/{dkim_selector}.private "
|
||||
"-pubout 2>/dev/null | awk '/-/{next}{printf(\"%s\",$0)}'"
|
||||
f"{pre_command}openssl rsa -in /etc/dkimkeys/{dkim_selector}.private "
|
||||
"-pubout 2>/dev/null | awk '/-/{next}{printf(\"%s\",$0)}'",
|
||||
print=log_progress,
|
||||
)
|
||||
except CalledProcessError:
|
||||
return
|
||||
return None, None
|
||||
dkim_value_raw = f"v=DKIM1;k=rsa;p={dkim_pubkey};s=email;t=s"
|
||||
dkim_value = '" "'.join(re.findall(".{1,255}", dkim_value_raw))
|
||||
web_dkim_value = "".join(re.findall(".{1,255}", dkim_value_raw))
|
||||
return (
|
||||
f'{dkim_selector}._domainkey.{mail_domain}. TXT "{dkim_value}"',
|
||||
f'{dkim_selector}._domainkey.{mail_domain}. TXT "{web_dkim_value}"',
|
||||
f'{dkim_selector}._domainkey.{mail_domain}. 3600 IN TXT "{dkim_value}"',
|
||||
f'{dkim_selector}._domainkey.{mail_domain}. 3600 IN TXT "{web_dkim_value}"',
|
||||
)
|
||||
|
||||
|
||||
@@ -61,9 +67,9 @@ def query_dns(typ, domain):
|
||||
# Get autoritative nameserver from the SOA record.
|
||||
soa_answers = [
|
||||
x.split()
|
||||
for x in shell(f"dig -r -q {domain} -t SOA +noall +authority +answer").split(
|
||||
"\n"
|
||||
)
|
||||
for x in shell(
|
||||
f"dig -r -q {domain} -t SOA +noall +authority +answer", print=log_progress
|
||||
).split("\n")
|
||||
]
|
||||
soa = [a for a in soa_answers if len(a) >= 3 and a[3] == "SOA"]
|
||||
if not soa:
|
||||
@@ -71,13 +77,11 @@ def query_dns(typ, domain):
|
||||
ns = soa[0][4]
|
||||
|
||||
# Query authoritative nameserver directly to bypass DNS cache.
|
||||
res = shell(f"dig @{ns} -r -q {domain} -t {typ} +short")
|
||||
if res:
|
||||
return res.split("\n")[0]
|
||||
return ""
|
||||
res = shell(f"dig @{ns} -r -q {domain} -t {typ} +short", print=log_progress)
|
||||
return next((line for line in res.split("\n") if not line.startswith(";")), "")
|
||||
|
||||
|
||||
def check_zonefile(zonefile, mail_domain):
|
||||
def check_zonefile(zonefile, verbose=True):
|
||||
"""Check expected zone file entries."""
|
||||
required = True
|
||||
required_diff = []
|
||||
@@ -89,10 +93,12 @@ def check_zonefile(zonefile, mail_domain):
|
||||
continue
|
||||
if not zf_line.strip() or zf_line.startswith(";"):
|
||||
continue
|
||||
print(f"dns-checking {zf_line!r}")
|
||||
zf_domain, zf_typ, zf_value = zf_line.split(maxsplit=2)
|
||||
zf_domain = zf_domain.rstrip(".")
|
||||
zf_value = zf_value.strip()
|
||||
print(f"dns-checking {zf_line!r}") if verbose else log_progress("")
|
||||
parts = zf_line.split(None, 4)
|
||||
zf_domain = parts[0].rstrip(".")
|
||||
# parts[1]=TTL, parts[2]=IN, parts[3]=type, parts[4]=rdata
|
||||
zf_typ = parts[3]
|
||||
zf_value = parts[4].strip()
|
||||
query_value = query_dns(zf_typ, zf_domain)
|
||||
if zf_value != query_value:
|
||||
assert zf_typ in ("A", "AAAA", "CNAME", "CAA", "SRV", "MX", "TXT"), zf_line
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user