mirror of
https://github.com/chatmail/relay.git
synced 2026-05-11 16:34:39 +00:00
Compare commits
153 Commits
docs-inter
...
hpk/test-c
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6573ccc05f | ||
|
|
25285005c3 | ||
|
|
482194437d | ||
|
|
735e9d3e7f | ||
|
|
4b79606d49 | ||
|
|
6e52bfe8c4 | ||
|
|
95c76aa2b0 | ||
|
|
4f109e8c31 | ||
|
|
8c30714279 | ||
|
|
23f21d36b1 | ||
|
|
4ed3f5dd91 | ||
|
|
972b46be74 | ||
|
|
7edb4e860a | ||
|
|
ed9b4092a8 | ||
|
|
1b8ad3ca12 | ||
|
|
f85d304e65 | ||
|
|
4d1856d8f1 | ||
|
|
ae2ab52aa9 | ||
|
|
d0c396538b | ||
|
|
78a4e28408 | ||
|
|
2432d4f498 | ||
|
|
31301abb42 | ||
|
|
6b4edd8502 | ||
|
|
9c467ab3e8 | ||
|
|
774350778b | ||
|
|
06d53503e5 | ||
|
|
b128935940 | ||
|
|
2e38c61ca2 | ||
|
|
9dd8ce8ce1 | ||
|
|
0ae3f94ecc | ||
|
|
4481a12369 | ||
|
|
a47016e9f2 | ||
|
|
4e6ba7378d | ||
|
|
e428c646d1 | ||
|
|
dbd5cd16f5 | ||
|
|
e21f2a0fa2 | ||
|
|
8ca0909fa5 | ||
|
|
2c99cc84aa | ||
|
|
73309778c2 | ||
|
|
50ecc2b315 | ||
|
|
7b5b180b4b | ||
|
|
193624e522 | ||
|
|
437287fadc | ||
|
|
0ad679997a | ||
|
|
38cc1c7cd6 | ||
|
|
7a6ed8340e | ||
|
|
2ce9e5fe78 | ||
|
|
cf96be2cbb | ||
|
|
36eb63faa1 | ||
|
|
91df11015e | ||
|
|
d4f8a29243 | ||
|
|
0144fc3ea8 | ||
|
|
e7ce6679b9 | ||
|
|
d1adf52f89 | ||
|
|
56d0e2ca27 | ||
|
|
2613558db6 | ||
|
|
6843fcb1a0 | ||
|
|
ff54ad88d8 | ||
|
|
cce2b27ae7 | ||
|
|
87022e3681 | ||
|
|
06560dd071 | ||
|
|
1b0337a5f7 | ||
|
|
dfcaf415b1 | ||
|
|
c0718325ef | ||
|
|
7d72b0e592 | ||
|
|
8f1e23d98e | ||
|
|
56aaf2649b | ||
|
|
2660b4d24c | ||
|
|
ea60ecfb57 | ||
|
|
2a3a224cc2 | ||
|
|
e42139e97b | ||
|
|
65b660c413 | ||
|
|
dd2beb226a | ||
|
|
9c7508cc33 | ||
|
|
ab3492d9a1 | ||
|
|
032faf0a94 | ||
|
|
c45fe03652 | ||
|
|
08bf4c234b | ||
|
|
2d0ccdb4a3 | ||
|
|
3abba6f2fa | ||
|
|
f9aaeb0f42 | ||
|
|
e0c44bf04f | ||
|
|
8ff53d12cb | ||
|
|
0aa0324c81 | ||
|
|
bfcfc9b090 | ||
|
|
e101c36ab4 | ||
|
|
be7aa21039 | ||
|
|
4906b82e44 | ||
|
|
5d49b4c0fd | ||
|
|
56c8f9faae | ||
|
|
203a7da3f4 | ||
|
|
a1667ca54d | ||
|
|
6401bbb32c | ||
|
|
325cc7a7b4 | ||
|
|
c2acbad802 | ||
|
|
0e7ab96dc8 | ||
|
|
d1f9523836 | ||
|
|
bcf2fdb5d0 | ||
|
|
77a6f49c9b | ||
|
|
99630e4d1b | ||
|
|
2f8199a7c6 | ||
|
|
4eeead2826 | ||
|
|
0d890274fd | ||
|
|
7191329a9f | ||
|
|
1ae4c8451a | ||
|
|
f04a624e19 | ||
|
|
24e3f33acd | ||
|
|
610843a44a | ||
|
|
966754a346 | ||
|
|
87153667ed | ||
|
|
abe0cb5d08 | ||
|
|
8c8c37c822 | ||
|
|
e7bed4d2a1 | ||
|
|
df21076e9b | ||
|
|
70da217442 | ||
|
|
40fd62c562 | ||
|
|
d76b33def1 | ||
|
|
bab3de9768 | ||
|
|
49c66116bf | ||
|
|
9bf99cc8a9 | ||
|
|
1188aed061 | ||
|
|
e15b8ebf11 | ||
|
|
c84ddf69e8 | ||
|
|
96fc3d9ff6 | ||
|
|
4b5e8feb96 | ||
|
|
c98853570b | ||
|
|
bad356503e | ||
|
|
dba48e88d1 | ||
|
|
3ae8834cbe | ||
|
|
81391f4066 | ||
|
|
55cfd00505 | ||
|
|
b000213c68 | ||
|
|
51d16b6bb8 | ||
|
|
2beba8c455 | ||
|
|
33c67d22fa | ||
|
|
166bf68915 | ||
|
|
abb70a6b14 | ||
|
|
96108bbaba | ||
|
|
8f68672e31 | ||
|
|
9e6e3af534 | ||
|
|
fa5a6a64b3 | ||
|
|
6b7c002e24 | ||
|
|
4b2f98788d | ||
|
|
13faa42abd | ||
|
|
7c12136991 | ||
|
|
3637bba5dc | ||
|
|
e2b157bd96 | ||
|
|
83abb3a3e1 | ||
|
|
2e3e3101b6 | ||
|
|
213d68ed02 | ||
|
|
68cc6676ef | ||
|
|
14ca95d25a | ||
|
|
3524b055db |
95
.github/workflows/ci.yaml
vendored
95
.github/workflows/ci.yaml
vendored
@@ -14,28 +14,103 @@ jobs:
|
|||||||
# Otherwise `test_deployed_state` will be unhappy.
|
# Otherwise `test_deployed_state` will be unhappy.
|
||||||
with:
|
with:
|
||||||
ref: ${{ github.event.pull_request.head.sha }}
|
ref: ${{ github.event.pull_request.head.sha }}
|
||||||
|
- name: download filtermail
|
||||||
- name: run chatmaild tests
|
run: curl -L https://github.com/chatmail/filtermail/releases/download/v0.5.2/filtermail-x86_64 -o /usr/local/bin/filtermail && chmod +x /usr/local/bin/filtermail
|
||||||
|
- name: run chatmaild tests
|
||||||
working-directory: chatmaild
|
working-directory: chatmaild
|
||||||
run: pipx run tox
|
run: pipx run tox
|
||||||
|
|
||||||
scripts:
|
scripts:
|
||||||
name: deploy-chatmail tests
|
name: deploy-chatmail tests
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: initenv
|
- name: initenv
|
||||||
run: scripts/initenv.sh
|
run: scripts/initenv.sh
|
||||||
|
|
||||||
- name: append venv/bin to PATH
|
- name: append venv/bin to PATH
|
||||||
run: echo venv/bin >>$GITHUB_PATH
|
run: echo venv/bin >>$GITHUB_PATH
|
||||||
|
|
||||||
- name: run formatting checks
|
- name: run formatting checks
|
||||||
run: cmdeploy fmt -v
|
run: cmdeploy fmt -v
|
||||||
|
|
||||||
- name: run deploy-chatmail offline tests
|
- name: run deploy-chatmail offline tests
|
||||||
run: pytest --pyargs cmdeploy
|
run: pytest --pyargs cmdeploy
|
||||||
|
|
||||||
# all other cmdeploy commands require a staging server
|
lxc-test:
|
||||||
# see https://github.com/deltachat/chatmail/issues/100
|
name: LXC deploy and test
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
timeout-minutes: 30
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
ref: ${{ github.event.pull_request.head.sha }}
|
||||||
|
|
||||||
|
- name: install incus
|
||||||
|
run: |
|
||||||
|
# zabbly is the official incus community packages source
|
||||||
|
curl -fsSL https://pkgs.zabbly.com/key.asc \
|
||||||
|
| sudo gpg --dearmor -o /etc/apt/keyrings/zabbly.gpg
|
||||||
|
sudo sh -c 'cat <<EOF > /etc/apt/sources.list.d/zabbly-incus-stable.sources
|
||||||
|
Enabled: yes
|
||||||
|
Types: deb
|
||||||
|
URIs: https://pkgs.zabbly.com/incus/stable
|
||||||
|
Suites: $(. /etc/os-release && echo ${VERSION_CODENAME})
|
||||||
|
Components: main
|
||||||
|
Architectures: $(dpkg --print-architecture)
|
||||||
|
Signed-By: /etc/apt/keyrings/zabbly.gpg
|
||||||
|
EOF'
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install -y incus
|
||||||
|
|
||||||
|
- name: initialise incus
|
||||||
|
run: |
|
||||||
|
sudo systemctl stop docker.socket docker || true
|
||||||
|
sudo iptables -P FORWARD ACCEPT
|
||||||
|
sudo sysctl -w fs.inotify.max_user_instances=65535
|
||||||
|
sudo sysctl -w fs.inotify.max_user_watches=65535
|
||||||
|
sudo incus admin init --minimal
|
||||||
|
sudo usermod -aG incus-admin "$USER"
|
||||||
|
|
||||||
|
- name: initenv
|
||||||
|
run: scripts/initenv.sh
|
||||||
|
|
||||||
|
- name: append venv/bin to PATH
|
||||||
|
run: echo venv/bin >>$GITHUB_PATH
|
||||||
|
|
||||||
|
- name: restore cached images
|
||||||
|
id: cache-images
|
||||||
|
uses: actions/cache@v4
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
/tmp/localchat-base.tar.gz
|
||||||
|
/tmp/localchat-ns.tar.gz
|
||||||
|
/tmp/localchat-test0.tar.gz
|
||||||
|
/tmp/localchat-test1.tar.gz
|
||||||
|
lxconfigs/id_localchat*
|
||||||
|
key: incus-images-${{ runner.os }}-${{ github.ref_name }}
|
||||||
|
restore-keys: |
|
||||||
|
incus-images-${{ runner.os }}-${{ github.ref_name }}-
|
||||||
|
incus-images-${{ runner.os }}-main-
|
||||||
|
incus-images-${{ runner.os }}-
|
||||||
|
|
||||||
|
- name: import cached images
|
||||||
|
run: |
|
||||||
|
for alias in localchat-base localchat-ns localchat-test0 localchat-test1; do
|
||||||
|
if [ -f /tmp/$alias.tar.gz ]; then
|
||||||
|
sg incus-admin -c "incus image import /tmp/$alias.tar.gz --alias $alias" || true
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
- name: lxc-test
|
||||||
|
run: sg incus-admin -c 'cmdeploy lxc-test'
|
||||||
|
|
||||||
|
- name: export images for cache
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
for alias in localchat-base localchat-ns localchat-test0 localchat-test1; do
|
||||||
|
if ! [ -f /tmp/$alias.tar.gz ]; then
|
||||||
|
sg incus-admin -c "incus image export $alias /tmp/$alias" || true
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|||||||
33
.github/workflows/docs-preview.yaml
vendored
33
.github/workflows/docs-preview.yaml
vendored
@@ -11,6 +11,9 @@ jobs:
|
|||||||
scripts:
|
scripts:
|
||||||
name: build
|
name: build
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
environment:
|
||||||
|
name: 'staging.chatmail.at/doc/relay/'
|
||||||
|
url: https://staging.chatmail.at/doc/relay/${{ steps.prepare.outputs.prid }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
@@ -44,36 +47,6 @@ jobs:
|
|||||||
chmod 600 "$HOME/.ssh/key"
|
chmod 600 "$HOME/.ssh/key"
|
||||||
rsync -rILvh -e "ssh -i $HOME/.ssh/key -o StrictHostKeyChecking=no" $GITHUB_WORKSPACE/doc/build/ "${{ secrets.USERNAME }}@chatmail.at:/var/www/html/staging.chatmail.at/doc/relay/${{ steps.prepare.outputs.prid }}/"
|
rsync -rILvh -e "ssh -i $HOME/.ssh/key -o StrictHostKeyChecking=no" $GITHUB_WORKSPACE/doc/build/ "${{ secrets.USERNAME }}@chatmail.at:/var/www/html/staging.chatmail.at/doc/relay/${{ steps.prepare.outputs.prid }}/"
|
||||||
|
|
||||||
- name: "Post links to details"
|
|
||||||
id: details
|
|
||||||
if: steps.prepare.outputs.uploadtoserver
|
|
||||||
run: |
|
|
||||||
# URLs for API connection and uploads
|
|
||||||
export GITHUB_API_URL="https://api.github.com/repos/chatmail/relay/statuses/${{ github.event.after }}"
|
|
||||||
export PREVIEW_LINK="https://staging.chatmail.at/doc/relay/${{ steps.prepare.outputs.prid }}/"
|
|
||||||
export STATUS_DATA="{\"state\": \"success\", \
|
|
||||||
\"description\": \"Preview the changed documentation here:\", \
|
|
||||||
\"context\": \"Documentation Preview\", \
|
|
||||||
\"target_url\": \"${PREVIEW_LINK}\"}"
|
|
||||||
curl -X POST --header "Accept: application/vnd.github+json" --header "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" --url "$GITHUB_API_URL" --header "content-type: application/json" --data "$STATUS_DATA"
|
|
||||||
|
|
||||||
#check if comment already exists, if not post it
|
|
||||||
export GITHUB_API_URL="https://api.github.com/repos/chatmail/relay/issues/${{ steps.prepare.outputs.prid }}/comments"
|
|
||||||
export RESPONSE=$(curl -L --header "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" --url "$GITHUB_API_URL" --header "content-type: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28")
|
|
||||||
echo $RESPONSE > response
|
|
||||||
grep -v '"Check out the page preview at https://staging.chatmail.at/doc/relay' response && echo "comment=true" >> $GITHUB_OUTPUT || true
|
|
||||||
- name: "Post link to comments"
|
|
||||||
if: steps.details.outputs.comment
|
|
||||||
uses: actions/github-script@v7
|
|
||||||
with:
|
|
||||||
script: |
|
|
||||||
github.rest.issues.createComment({
|
|
||||||
issue_number: context.issue.number,
|
|
||||||
owner: context.repo.owner,
|
|
||||||
repo: context.repo.repo,
|
|
||||||
body: "Check out the page preview at https://staging.chatmail.at/doc/relay/${{ steps.prepare.outputs.prid }}/"
|
|
||||||
})
|
|
||||||
|
|
||||||
- name: check links
|
- name: check links
|
||||||
working-directory: doc
|
working-directory: doc
|
||||||
run: sphinx-build --builder linkcheck source build
|
run: sphinx-build --builder linkcheck source build
|
||||||
|
|||||||
3
.github/workflows/docs.yaml
vendored
3
.github/workflows/docs.yaml
vendored
@@ -14,6 +14,9 @@ jobs:
|
|||||||
scripts:
|
scripts:
|
||||||
name: build
|
name: build
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
environment:
|
||||||
|
name: 'chatmail.at/doc/relay/'
|
||||||
|
url: https://chatmail.at/doc/relay/
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
|||||||
@@ -1,20 +0,0 @@
|
|||||||
;; Zone file for staging-ipv4.testrun.org
|
|
||||||
|
|
||||||
$ORIGIN staging-ipv4.testrun.org.
|
|
||||||
$TTL 300
|
|
||||||
|
|
||||||
@ IN SOA ns.testrun.org. root.nine.testrun.org (
|
|
||||||
2023010101 ; Serial
|
|
||||||
7200 ; Refresh
|
|
||||||
3600 ; Retry
|
|
||||||
1209600 ; Expire
|
|
||||||
3600 ; Negative response caching TTL
|
|
||||||
)
|
|
||||||
|
|
||||||
;; Nameservers.
|
|
||||||
@ IN NS ns.testrun.org.
|
|
||||||
|
|
||||||
;; DNS records.
|
|
||||||
@ IN A 37.27.95.249
|
|
||||||
mta-sts.staging-ipv4.testrun.org. CNAME staging-ipv4.testrun.org.
|
|
||||||
www.staging-ipv4.testrun.org. CNAME staging-ipv4.testrun.org.
|
|
||||||
@@ -1,21 +0,0 @@
|
|||||||
;; Zone file for staging2.testrun.org
|
|
||||||
|
|
||||||
$ORIGIN staging2.testrun.org.
|
|
||||||
$TTL 300
|
|
||||||
|
|
||||||
@ IN SOA ns.testrun.org. root.nine.testrun.org (
|
|
||||||
2023010101 ; Serial
|
|
||||||
7200 ; Refresh
|
|
||||||
3600 ; Retry
|
|
||||||
1209600 ; Expire
|
|
||||||
3600 ; Negative response caching TTL
|
|
||||||
)
|
|
||||||
|
|
||||||
;; Nameservers.
|
|
||||||
@ IN NS ns.testrun.org.
|
|
||||||
|
|
||||||
;; DNS records.
|
|
||||||
@ IN A 37.27.24.139
|
|
||||||
mta-sts.staging2.testrun.org. CNAME staging2.testrun.org.
|
|
||||||
www.staging2.testrun.org. CNAME staging2.testrun.org.
|
|
||||||
|
|
||||||
97
.github/workflows/test-and-deploy-ipv4only.yaml
vendored
97
.github/workflows/test-and-deploy-ipv4only.yaml
vendored
@@ -1,97 +0,0 @@
|
|||||||
name: deploy on staging-ipv4.testrun.org, and run tests
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
pull_request:
|
|
||||||
paths-ignore:
|
|
||||||
- 'scripts/**'
|
|
||||||
- '**/README.md'
|
|
||||||
- 'CHANGELOG.md'
|
|
||||||
- 'LICENSE'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
deploy:
|
|
||||||
name: deploy on staging-ipv4.testrun.org, and run tests
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
timeout-minutes: 30
|
|
||||||
concurrency:
|
|
||||||
group: ci-ipv4-${{ github.workflow }}-${{ github.ref }}
|
|
||||||
cancel-in-progress: ${{ !contains(github.ref, '$GITHUB_REF') }}
|
|
||||||
steps:
|
|
||||||
- uses: jsok/serialize-workflow-action@515cd04c46d7ea7435c4a22a3b4419127afdefe9
|
|
||||||
with:
|
|
||||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: prepare SSH
|
|
||||||
run: |
|
|
||||||
mkdir ~/.ssh
|
|
||||||
echo "${{ secrets.STAGING_SSH_KEY }}" >> ~/.ssh/id_ed25519
|
|
||||||
chmod 600 ~/.ssh/id_ed25519
|
|
||||||
ssh-keyscan staging-ipv4.testrun.org > ~/.ssh/known_hosts
|
|
||||||
# save previous acme & dkim state
|
|
||||||
rsync -avz root@staging-ipv4.testrun.org:/var/lib/acme acme-ipv4 || true
|
|
||||||
rsync -avz root@staging-ipv4.testrun.org:/etc/dkimkeys dkimkeys-ipv4 || true
|
|
||||||
# store previous acme & dkim state on ns.testrun.org, if it contains useful certs
|
|
||||||
if [ -f dkimkeys-ipv4/dkimkeys/opendkim.private ]; then rsync -avz -e "ssh -o StrictHostKeyChecking=accept-new" dkimkeys-ipv4 root@ns.testrun.org:/tmp/ || true; fi
|
|
||||||
if [ "$(ls -A acme-ipv4/acme/certs)" ]; then rsync -avz -e "ssh -o StrictHostKeyChecking=accept-new" acme-ipv4 root@ns.testrun.org:/tmp/ || true; fi
|
|
||||||
# make sure CAA record isn't set
|
|
||||||
scp -o StrictHostKeyChecking=accept-new .github/workflows/staging-ipv4.testrun.org-default.zone root@ns.testrun.org:/etc/nsd/staging-ipv4.testrun.org.zone
|
|
||||||
ssh root@ns.testrun.org sed -i '/CAA/d' /etc/nsd/staging-ipv4.testrun.org.zone
|
|
||||||
ssh root@ns.testrun.org nsd-checkzone staging-ipv4.testrun.org /etc/nsd/staging-ipv4.testrun.org.zone
|
|
||||||
ssh root@ns.testrun.org systemctl reload nsd
|
|
||||||
|
|
||||||
- name: rebuild staging-ipv4.testrun.org to have a clean VPS
|
|
||||||
run: |
|
|
||||||
curl -X POST \
|
|
||||||
-H "Authorization: Bearer ${{ secrets.HETZNER_API_TOKEN }}" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
-d '{"image":"debian-12"}' \
|
|
||||||
"https://api.hetzner.cloud/v1/servers/${{ secrets.STAGING_IPV4_SERVER_ID }}/actions/rebuild"
|
|
||||||
|
|
||||||
- run: scripts/initenv.sh
|
|
||||||
|
|
||||||
- name: append venv/bin to PATH
|
|
||||||
run: echo venv/bin >>$GITHUB_PATH
|
|
||||||
|
|
||||||
- name: upload TLS cert after rebuilding
|
|
||||||
run: |
|
|
||||||
echo " --- wait until staging-ipv4.testrun.org VPS is rebuilt --- "
|
|
||||||
rm ~/.ssh/known_hosts
|
|
||||||
while ! ssh -o ConnectTimeout=180 -o StrictHostKeyChecking=accept-new -v root@staging-ipv4.testrun.org id -u ; do sleep 1 ; done
|
|
||||||
ssh -o StrictHostKeyChecking=accept-new -v root@staging-ipv4.testrun.org id -u
|
|
||||||
# download acme & dkim state from ns.testrun.org
|
|
||||||
rsync -e "ssh -o StrictHostKeyChecking=accept-new" -avz root@ns.testrun.org:/tmp/acme-ipv4/acme acme-restore || true
|
|
||||||
rsync -avz root@ns.testrun.org:/tmp/dkimkeys-ipv4/dkimkeys dkimkeys-restore || true
|
|
||||||
# restore acme & dkim state to staging2.testrun.org
|
|
||||||
rsync -avz acme-restore/acme root@staging-ipv4.testrun.org:/var/lib/ || true
|
|
||||||
rsync -avz dkimkeys-restore/dkimkeys root@staging-ipv4.testrun.org:/etc/ || true
|
|
||||||
ssh -o StrictHostKeyChecking=accept-new -v root@staging-ipv4.testrun.org chown root:root -R /var/lib/acme || true
|
|
||||||
|
|
||||||
- name: run deploy-chatmail offline tests
|
|
||||||
run: pytest --pyargs cmdeploy
|
|
||||||
|
|
||||||
- run: |
|
|
||||||
cmdeploy init staging-ipv4.testrun.org
|
|
||||||
sed -i 's#disable_ipv6 = False#disable_ipv6 = True#' chatmail.ini
|
|
||||||
|
|
||||||
- run: cmdeploy run --verbose --skip-dns-check
|
|
||||||
|
|
||||||
- name: set DNS entries
|
|
||||||
run: |
|
|
||||||
ssh -o StrictHostKeyChecking=accept-new -v root@staging-ipv4.testrun.org chown opendkim:opendkim -R /etc/dkimkeys
|
|
||||||
cmdeploy dns --zonefile staging-generated.zone
|
|
||||||
cat staging-generated.zone >> .github/workflows/staging-ipv4.testrun.org-default.zone
|
|
||||||
cat .github/workflows/staging-ipv4.testrun.org-default.zone
|
|
||||||
scp .github/workflows/staging-ipv4.testrun.org-default.zone root@ns.testrun.org:/etc/nsd/staging-ipv4.testrun.org.zone
|
|
||||||
ssh root@ns.testrun.org nsd-checkzone staging-ipv4.testrun.org /etc/nsd/staging-ipv4.testrun.org.zone
|
|
||||||
ssh root@ns.testrun.org systemctl reload nsd
|
|
||||||
|
|
||||||
- name: cmdeploy test
|
|
||||||
run: CHATMAIL_DOMAIN2=nine.testrun.org cmdeploy test --slow
|
|
||||||
|
|
||||||
- name: cmdeploy dns
|
|
||||||
run: cmdeploy dns -v
|
|
||||||
|
|
||||||
95
.github/workflows/test-and-deploy.yaml
vendored
95
.github/workflows/test-and-deploy.yaml
vendored
@@ -1,95 +0,0 @@
|
|||||||
name: deploy on staging2.testrun.org, and run tests
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
pull_request:
|
|
||||||
paths-ignore:
|
|
||||||
- 'scripts/**'
|
|
||||||
- '**/README.md'
|
|
||||||
- 'CHANGELOG.md'
|
|
||||||
- 'LICENSE'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
deploy:
|
|
||||||
name: deploy on staging2.testrun.org, and run tests
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
timeout-minutes: 30
|
|
||||||
concurrency:
|
|
||||||
group: ci-${{ github.workflow }}-${{ github.ref }}
|
|
||||||
cancel-in-progress: ${{ !contains(github.ref, '$GITHUB_REF') }}
|
|
||||||
steps:
|
|
||||||
- uses: jsok/serialize-workflow-action@515cd04c46d7ea7435c4a22a3b4419127afdefe9
|
|
||||||
with:
|
|
||||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: prepare SSH
|
|
||||||
run: |
|
|
||||||
mkdir ~/.ssh
|
|
||||||
echo "${{ secrets.STAGING_SSH_KEY }}" >> ~/.ssh/id_ed25519
|
|
||||||
chmod 600 ~/.ssh/id_ed25519
|
|
||||||
ssh-keyscan staging2.testrun.org > ~/.ssh/known_hosts
|
|
||||||
# save previous acme & dkim state
|
|
||||||
rsync -avz root@staging2.testrun.org:/var/lib/acme . || true
|
|
||||||
rsync -avz root@staging2.testrun.org:/etc/dkimkeys . || true
|
|
||||||
# store previous acme & dkim state on ns.testrun.org, if it contains useful certs
|
|
||||||
if [ -f dkimkeys/opendkim.private ]; then rsync -avz -e "ssh -o StrictHostKeyChecking=accept-new" dkimkeys root@ns.testrun.org:/tmp/ || true; fi
|
|
||||||
if [ "$(ls -A acme/certs)" ]; then rsync -avz -e "ssh -o StrictHostKeyChecking=accept-new" acme root@ns.testrun.org:/tmp/ || true; fi
|
|
||||||
# make sure CAA record isn't set
|
|
||||||
scp -o StrictHostKeyChecking=accept-new .github/workflows/staging.testrun.org-default.zone root@ns.testrun.org:/etc/nsd/staging2.testrun.org.zone
|
|
||||||
ssh root@ns.testrun.org sed -i '/CAA/d' /etc/nsd/staging2.testrun.org.zone
|
|
||||||
ssh root@ns.testrun.org nsd-checkzone staging2.testrun.org /etc/nsd/staging2.testrun.org.zone
|
|
||||||
ssh root@ns.testrun.org systemctl reload nsd
|
|
||||||
|
|
||||||
- name: rebuild staging2.testrun.org to have a clean VPS
|
|
||||||
run: |
|
|
||||||
curl -X POST \
|
|
||||||
-H "Authorization: Bearer ${{ secrets.HETZNER_API_TOKEN }}" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
-d '{"image":"debian-12"}' \
|
|
||||||
"https://api.hetzner.cloud/v1/servers/${{ secrets.STAGING_SERVER_ID }}/actions/rebuild"
|
|
||||||
|
|
||||||
- run: scripts/initenv.sh
|
|
||||||
|
|
||||||
- name: append venv/bin to PATH
|
|
||||||
run: echo venv/bin >>$GITHUB_PATH
|
|
||||||
|
|
||||||
- name: upload TLS cert after rebuilding
|
|
||||||
run: |
|
|
||||||
echo " --- wait until staging2.testrun.org VPS is rebuilt --- "
|
|
||||||
rm ~/.ssh/known_hosts
|
|
||||||
while ! ssh -o ConnectTimeout=180 -o StrictHostKeyChecking=accept-new -v root@staging2.testrun.org id -u ; do sleep 1 ; done
|
|
||||||
ssh -o StrictHostKeyChecking=accept-new -v root@staging2.testrun.org id -u
|
|
||||||
# download acme & dkim state from ns.testrun.org
|
|
||||||
rsync -e "ssh -o StrictHostKeyChecking=accept-new" -avz root@ns.testrun.org:/tmp/acme acme-restore || true
|
|
||||||
rsync -avz root@ns.testrun.org:/tmp/dkimkeys dkimkeys-restore || true
|
|
||||||
# restore acme & dkim state to staging2.testrun.org
|
|
||||||
rsync -avz acme-restore/acme root@staging2.testrun.org:/var/lib/ || true
|
|
||||||
rsync -avz dkimkeys-restore/dkimkeys root@staging2.testrun.org:/etc/ || true
|
|
||||||
ssh -o StrictHostKeyChecking=accept-new -v root@staging2.testrun.org chown root:root -R /var/lib/acme || true
|
|
||||||
|
|
||||||
- name: run deploy-chatmail offline tests
|
|
||||||
run: pytest --pyargs cmdeploy
|
|
||||||
|
|
||||||
- run: cmdeploy init staging2.testrun.org
|
|
||||||
|
|
||||||
- run: cmdeploy run --verbose --skip-dns-check
|
|
||||||
|
|
||||||
- name: set DNS entries
|
|
||||||
run: |
|
|
||||||
ssh -o StrictHostKeyChecking=accept-new root@staging2.testrun.org chown opendkim:opendkim -R /etc/dkimkeys
|
|
||||||
cmdeploy dns --zonefile staging-generated.zone --verbose
|
|
||||||
cat staging-generated.zone >> .github/workflows/staging.testrun.org-default.zone
|
|
||||||
cat .github/workflows/staging.testrun.org-default.zone
|
|
||||||
scp .github/workflows/staging.testrun.org-default.zone root@ns.testrun.org:/etc/nsd/staging2.testrun.org.zone
|
|
||||||
ssh root@ns.testrun.org nsd-checkzone staging2.testrun.org /etc/nsd/staging2.testrun.org.zone
|
|
||||||
ssh root@ns.testrun.org systemctl reload nsd
|
|
||||||
|
|
||||||
- name: cmdeploy test
|
|
||||||
run: CHATMAIL_DOMAIN2=nine.testrun.org cmdeploy test --slow
|
|
||||||
|
|
||||||
- name: cmdeploy dns
|
|
||||||
run: cmdeploy dns -v
|
|
||||||
|
|
||||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -4,7 +4,8 @@ __pycache__/
|
|||||||
*$py.class
|
*$py.class
|
||||||
*.swp
|
*.swp
|
||||||
*qr-*.png
|
*qr-*.png
|
||||||
chatmail.ini
|
chatmail*.ini
|
||||||
|
lxconfigs/
|
||||||
|
|
||||||
|
|
||||||
# C extensions
|
# C extensions
|
||||||
|
|||||||
47
CHANGELOG.md
47
CHANGELOG.md
@@ -1,6 +1,47 @@
|
|||||||
# Changelog for chatmail deployment
|
# Changelog for chatmail deployment
|
||||||
|
|
||||||
## untagged
|
## 1.9.0 2025-12-18
|
||||||
|
|
||||||
|
### Documentation
|
||||||
|
|
||||||
|
- Add RELEASE.md and CONTRIBUTING.md
|
||||||
|
- README update, mention Chatmail Cookbook project
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- Expire messages also from IMAP subfolders
|
||||||
|
- Use absolute path instead of relative path in message expiration script
|
||||||
|
- Restart Postfix and Dovecot automatically on failure
|
||||||
|
- acmetool: Use a fixed name and `reconcile` instead of `want`
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- Report DKIM error code in SMTP response
|
||||||
|
- Remove development notice from the web pages
|
||||||
|
|
||||||
|
### Miscellaneous Tasks
|
||||||
|
|
||||||
|
- Update the heading in the CHANGELOG.md
|
||||||
|
- Setup git-cliff
|
||||||
|
- Run tests against ci-chatmail.testrun.org instead of nine.testrun.org
|
||||||
|
- Cleanup remaining echobot code, remove echobot user from deployment and passthrough recipients
|
||||||
|
|
||||||
|
## 1.8.0 2025-12-12
|
||||||
|
|
||||||
|
- Add imap_compress option to chatmail.ini
|
||||||
|
([#760](https://github.com/chatmail/relay/pull/760))
|
||||||
|
|
||||||
|
- Remove echobot from relays
|
||||||
|
([#753](https://github.com/chatmail/relay/pull/753))
|
||||||
|
|
||||||
|
- Fix `cmdeploy webdev`
|
||||||
|
([#743](https://github.com/chatmail/relay/pull/743))
|
||||||
|
|
||||||
|
- Add robots.txt to exclude all web crawlers
|
||||||
|
([#732](https://github.com/chatmail/relay/pull/732))
|
||||||
|
|
||||||
|
- acmetool: accept new Let's Encrypt ToS: https://letsencrypt.org/documents/LE-SA-v1.6-August-18-2025.pdf
|
||||||
|
([#729](https://github.com/chatmail/relay/pull/729))
|
||||||
|
|
||||||
- Organized cmdeploy into install, configure, and activate stages
|
- Organized cmdeploy into install, configure, and activate stages
|
||||||
([#695](https://github.com/chatmail/relay/pull/695))
|
([#695](https://github.com/chatmail/relay/pull/695))
|
||||||
@@ -21,10 +62,10 @@
|
|||||||
([#689](https://github.com/chatmail/relay/pull/689))
|
([#689](https://github.com/chatmail/relay/pull/689))
|
||||||
|
|
||||||
- Require TLS 1.2 for outgoing SMTP connections
|
- Require TLS 1.2 for outgoing SMTP connections
|
||||||
([#685](https://github.com/chatmail/relay/pull/685))
|
([#685](https://github.com/chatmail/relay/pull/685), [#730](https://github.com/chatmail/relay/pull/730))
|
||||||
|
|
||||||
- require STARTTLS for incoming port 25 connections
|
- require STARTTLS for incoming port 25 connections
|
||||||
([#684](https://github.com/chatmail/relay/pull/684))
|
([#684](https://github.com/chatmail/relay/pull/684), [#730](https://github.com/chatmail/relay/pull/730))
|
||||||
|
|
||||||
- filtermail: run CPU-intensive handle_DATA in a thread pool executor
|
- filtermail: run CPU-intensive handle_DATA in a thread pool executor
|
||||||
([#676](https://github.com/chatmail/relay/pull/676))
|
([#676](https://github.com/chatmail/relay/pull/676))
|
||||||
|
|||||||
7
CONTRIBUTING.md
Normal file
7
CONTRIBUTING.md
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
# Contributing to the chatmail relay
|
||||||
|
|
||||||
|
Commit messages follow the [Conventional Commits] notation.
|
||||||
|
We use [git-cliff] to generate the changelog from commit messages before the release.
|
||||||
|
|
||||||
|
[Conventional Commits]: https://www.conventionalcommits.org/
|
||||||
|
[git-cliff]: https://git-cliff.org/
|
||||||
15
RELEASE.md
Normal file
15
RELEASE.md
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
# Releasing a new version of chatmail relay
|
||||||
|
|
||||||
|
For example, to release version 1.9.0 of chatmail relay, do the following steps.
|
||||||
|
|
||||||
|
1. Update the changelog: `git cliff --unreleased --tag 1.9.0 --prepend CHANGELOG.md` or `git cliff -u -t 1.9.0 -p CHANGELOG.md`.
|
||||||
|
|
||||||
|
2. Open the changelog in the editor, edit it if required.
|
||||||
|
|
||||||
|
3. Commit the changes to the changelog with a commit message `chore(release): prepare for 1.9.0`.
|
||||||
|
|
||||||
|
3. Tag the release: `git tag --annotate 1.9.0`.
|
||||||
|
|
||||||
|
4. Push the release tag: `git push origin 1.9.0`.
|
||||||
|
|
||||||
|
5. Create a GitHub release: `gh release create 1.9.0`.
|
||||||
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
|
|||||||
|
|
||||||
[project]
|
[project]
|
||||||
name = "chatmaild"
|
name = "chatmaild"
|
||||||
version = "0.2"
|
version = "0.3"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"aiosmtpd",
|
"aiosmtpd",
|
||||||
"iniconfig",
|
"iniconfig",
|
||||||
@@ -24,9 +24,6 @@ where = ['src']
|
|||||||
[project.scripts]
|
[project.scripts]
|
||||||
doveauth = "chatmaild.doveauth:main"
|
doveauth = "chatmaild.doveauth:main"
|
||||||
chatmail-metadata = "chatmaild.metadata:main"
|
chatmail-metadata = "chatmaild.metadata:main"
|
||||||
filtermail = "chatmaild.filtermail:main"
|
|
||||||
echobot = "chatmaild.echo:main"
|
|
||||||
chatmail-metrics = "chatmaild.metrics:main"
|
|
||||||
chatmail-expire = "chatmaild.expire:main"
|
chatmail-expire = "chatmaild.expire:main"
|
||||||
chatmail-fsreport = "chatmaild.fsreport:main"
|
chatmail-fsreport = "chatmaild.fsreport:main"
|
||||||
lastlogin = "chatmaild.lastlogin:main"
|
lastlogin = "chatmaild.lastlogin:main"
|
||||||
@@ -73,5 +70,6 @@ commands =
|
|||||||
deps = pytest
|
deps = pytest
|
||||||
pdbpp
|
pdbpp
|
||||||
pytest-localserver
|
pytest-localserver
|
||||||
|
execnet
|
||||||
commands = pytest -v -rsXx {posargs}
|
commands = pytest -v -rsXx {posargs}
|
||||||
"""
|
"""
|
||||||
|
|||||||
@@ -1,11 +1,10 @@
|
|||||||
|
import os
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
|
||||||
import iniconfig
|
import iniconfig
|
||||||
|
|
||||||
from chatmaild.user import User
|
from chatmaild.user import User
|
||||||
|
|
||||||
echobot_password_path = Path("/run/echobot/password")
|
|
||||||
|
|
||||||
|
|
||||||
def read_config(inipath):
|
def read_config(inipath):
|
||||||
assert Path(inipath).exists(), inipath
|
assert Path(inipath).exists(), inipath
|
||||||
@@ -22,7 +21,8 @@ class Config:
|
|||||||
def __init__(self, inipath, params):
|
def __init__(self, inipath, params):
|
||||||
self._inipath = inipath
|
self._inipath = inipath
|
||||||
self.mail_domain = params["mail_domain"]
|
self.mail_domain = params["mail_domain"]
|
||||||
self.max_user_send_per_minute = int(params["max_user_send_per_minute"])
|
self.max_user_send_per_minute = int(params.get("max_user_send_per_minute", 60))
|
||||||
|
self.max_user_send_burst_size = int(params.get("max_user_send_burst_size", 10))
|
||||||
self.max_mailbox_size = params["max_mailbox_size"]
|
self.max_mailbox_size = params["max_mailbox_size"]
|
||||||
self.max_message_size = int(params.get("max_message_size", "31457280"))
|
self.max_message_size = int(params.get("max_message_size", "31457280"))
|
||||||
self.delete_mails_after = params["delete_mails_after"]
|
self.delete_mails_after = params["delete_mails_after"]
|
||||||
@@ -34,18 +34,21 @@ class Config:
|
|||||||
self.passthrough_senders = params["passthrough_senders"].split()
|
self.passthrough_senders = params["passthrough_senders"].split()
|
||||||
self.passthrough_recipients = params["passthrough_recipients"].split()
|
self.passthrough_recipients = params["passthrough_recipients"].split()
|
||||||
self.www_folder = params.get("www_folder", "")
|
self.www_folder = params.get("www_folder", "")
|
||||||
self.filtermail_smtp_port = int(params["filtermail_smtp_port"])
|
self.filtermail_smtp_port = int(params.get("filtermail_smtp_port", "10080"))
|
||||||
self.filtermail_smtp_port_incoming = int(
|
self.filtermail_smtp_port_incoming = int(
|
||||||
params["filtermail_smtp_port_incoming"]
|
params.get("filtermail_smtp_port_incoming", "10081")
|
||||||
)
|
)
|
||||||
self.postfix_reinject_port = int(params["postfix_reinject_port"])
|
self.postfix_reinject_port = int(params.get("postfix_reinject_port", "10025"))
|
||||||
self.postfix_reinject_port_incoming = int(
|
self.postfix_reinject_port_incoming = int(
|
||||||
params["postfix_reinject_port_incoming"]
|
params.get("postfix_reinject_port_incoming", "10026")
|
||||||
)
|
)
|
||||||
self.mtail_address = params.get("mtail_address")
|
self.mtail_address = params.get("mtail_address")
|
||||||
self.disable_ipv6 = params.get("disable_ipv6", "false").lower() == "true"
|
self.disable_ipv6 = params.get("disable_ipv6", "false").lower() == "true"
|
||||||
|
self.addr_v4 = os.environ.get("CHATMAIL_ADDR_V4", "")
|
||||||
|
self.addr_v6 = os.environ.get("CHATMAIL_ADDR_V6", "")
|
||||||
self.acme_email = params.get("acme_email", "")
|
self.acme_email = params.get("acme_email", "")
|
||||||
self.imap_rawlog = params.get("imap_rawlog", "false").lower() == "true"
|
self.imap_rawlog = params.get("imap_rawlog", "false").lower() == "true"
|
||||||
|
self.imap_compress = params.get("imap_compress", "false").lower() == "true"
|
||||||
if "iroh_relay" not in params:
|
if "iroh_relay" not in params:
|
||||||
self.iroh_relay = "https://" + params["mail_domain"]
|
self.iroh_relay = "https://" + params["mail_domain"]
|
||||||
self.enable_iroh_relay = True
|
self.enable_iroh_relay = True
|
||||||
@@ -57,6 +60,31 @@ class Config:
|
|||||||
self.privacy_pdo = params.get("privacy_pdo")
|
self.privacy_pdo = params.get("privacy_pdo")
|
||||||
self.privacy_supervisor = params.get("privacy_supervisor")
|
self.privacy_supervisor = params.get("privacy_supervisor")
|
||||||
|
|
||||||
|
# TLS certificate management.
|
||||||
|
# If tls_external_cert_and_key is set, use externally managed certs.
|
||||||
|
# Otherwise derived from the domain name:
|
||||||
|
# - Domains starting with "_" use self-signed certificates
|
||||||
|
# - All other domains use ACME.
|
||||||
|
external = params.get("tls_external_cert_and_key", "").strip()
|
||||||
|
|
||||||
|
if external:
|
||||||
|
parts = external.split()
|
||||||
|
if len(parts) != 2:
|
||||||
|
raise ValueError(
|
||||||
|
"tls_external_cert_and_key must have two space-separated"
|
||||||
|
" paths: CERT_PATH KEY_PATH"
|
||||||
|
)
|
||||||
|
self.tls_cert_mode = "external"
|
||||||
|
self.tls_cert_path, self.tls_key_path = parts
|
||||||
|
elif self.mail_domain.startswith("_"):
|
||||||
|
self.tls_cert_mode = "self"
|
||||||
|
self.tls_cert_path = "/etc/ssl/certs/mailserver.pem"
|
||||||
|
self.tls_key_path = "/etc/ssl/private/mailserver.key"
|
||||||
|
else:
|
||||||
|
self.tls_cert_mode = "acme"
|
||||||
|
self.tls_cert_path = f"/var/lib/acme/live/{self.mail_domain}/fullchain"
|
||||||
|
self.tls_key_path = f"/var/lib/acme/live/{self.mail_domain}/privkey"
|
||||||
|
|
||||||
# deprecated option
|
# deprecated option
|
||||||
mbdir = params.get("mailboxes_dir", f"/home/vmail/mail/{self.mail_domain}")
|
mbdir = params.get("mailboxes_dir", f"/home/vmail/mail/{self.mail_domain}")
|
||||||
self.mailboxes_dir = Path(mbdir.strip())
|
self.mailboxes_dir = Path(mbdir.strip())
|
||||||
@@ -72,10 +100,7 @@ class Config:
|
|||||||
raise ValueError(f"invalid address {addr!r}")
|
raise ValueError(f"invalid address {addr!r}")
|
||||||
|
|
||||||
maildir = self.mailboxes_dir.joinpath(addr)
|
maildir = self.mailboxes_dir.joinpath(addr)
|
||||||
if addr.startswith("echo@"):
|
password_path = maildir.joinpath("password")
|
||||||
password_path = echobot_password_path
|
|
||||||
else:
|
|
||||||
password_path = maildir.joinpath("password")
|
|
||||||
|
|
||||||
return User(maildir, addr, password_path, uid="vmail", gid="vmail")
|
return User(maildir, addr, password_path, uid="vmail", gid="vmail")
|
||||||
|
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ class DictProxy:
|
|||||||
wfile.flush()
|
wfile.flush()
|
||||||
|
|
||||||
def handle_dovecot_request(self, msg, transactions):
|
def handle_dovecot_request(self, msg, transactions):
|
||||||
# see https://doc.dovecot.org/developer_manual/design/dict_protocol/#dovecot-dict-protocol
|
# see https://doc.dovecot.org/2.3/developer_manual/design/dict_protocol/#dovecot-dict-protocol
|
||||||
short_command = msg[0]
|
short_command = msg[0]
|
||||||
parts = msg[1:].split("\t")
|
parts = msg[1:].split("\t")
|
||||||
|
|
||||||
|
|||||||
@@ -1,8 +1,11 @@
|
|||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
|
import re
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
|
import filelock
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import crypt_r
|
import crypt_r
|
||||||
except ImportError:
|
except ImportError:
|
||||||
@@ -13,10 +16,11 @@ from .dictproxy import DictProxy
|
|||||||
from .migrate_db import migrate_from_db_to_maildir
|
from .migrate_db import migrate_from_db_to_maildir
|
||||||
|
|
||||||
NOCREATE_FILE = "/etc/chatmail-nocreate"
|
NOCREATE_FILE = "/etc/chatmail-nocreate"
|
||||||
|
VALID_LOCALPART_RE = re.compile(r"^[a-z0-9._-]+$")
|
||||||
|
|
||||||
|
|
||||||
def encrypt_password(password: str):
|
def encrypt_password(password: str):
|
||||||
# https://doc.dovecot.org/configuration_manual/authentication/password_schemes/
|
# https://doc.dovecot.org/2.3/configuration_manual/authentication/password_schemes/
|
||||||
passhash = crypt_r.crypt(password, crypt_r.METHOD_SHA512)
|
passhash = crypt_r.crypt(password, crypt_r.METHOD_SHA512)
|
||||||
return "{SHA512-CRYPT}" + passhash
|
return "{SHA512-CRYPT}" + passhash
|
||||||
|
|
||||||
@@ -40,10 +44,6 @@ def is_allowed_to_create(config: Config, user, cleartext_password) -> bool:
|
|||||||
return False
|
return False
|
||||||
localpart, domain = parts
|
localpart, domain = parts
|
||||||
|
|
||||||
if localpart == "echo":
|
|
||||||
# echobot account should not be created in the database
|
|
||||||
return False
|
|
||||||
|
|
||||||
if (
|
if (
|
||||||
len(localpart) > config.username_max_length
|
len(localpart) > config.username_max_length
|
||||||
or len(localpart) < config.username_min_length
|
or len(localpart) < config.username_min_length
|
||||||
@@ -56,6 +56,10 @@ def is_allowed_to_create(config: Config, user, cleartext_password) -> bool:
|
|||||||
)
|
)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
if not VALID_LOCALPART_RE.match(localpart):
|
||||||
|
logging.warning("localpart %r contains invalid characters", localpart)
|
||||||
|
return False
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
@@ -144,8 +148,13 @@ class AuthDictProxy(DictProxy):
|
|||||||
if not is_allowed_to_create(self.config, addr, cleartext_password):
|
if not is_allowed_to_create(self.config, addr, cleartext_password):
|
||||||
return
|
return
|
||||||
|
|
||||||
user.set_password(encrypt_password(cleartext_password))
|
lock = filelock.FileLock(str(user.password_path) + ".lock", timeout=5)
|
||||||
print(f"Created address: {addr}", file=sys.stderr)
|
with lock:
|
||||||
|
userdata = user.get_userdb_dict()
|
||||||
|
if userdata:
|
||||||
|
return userdata
|
||||||
|
user.set_password(encrypt_password(cleartext_password))
|
||||||
|
print(f"Created address: {addr}", file=sys.stderr)
|
||||||
return user.get_userdb_dict()
|
return user.get_userdb_dict()
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,109 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
"""Advanced echo bot example.
|
|
||||||
|
|
||||||
it will echo back any message that has non-empty text and also supports the /help command.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import os
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
from deltachat_rpc_client import Bot, DeltaChat, EventType, Rpc, events
|
|
||||||
|
|
||||||
from chatmaild.config import echobot_password_path, read_config
|
|
||||||
from chatmaild.doveauth import encrypt_password
|
|
||||||
from chatmaild.newemail import create_newemail_dict
|
|
||||||
|
|
||||||
hooks = events.HookCollection()
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.on(events.RawEvent)
|
|
||||||
def log_event(event):
|
|
||||||
if event.kind == EventType.INFO:
|
|
||||||
logging.info(event.msg)
|
|
||||||
elif event.kind == EventType.WARNING:
|
|
||||||
logging.warning(event.msg)
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.on(events.RawEvent(EventType.ERROR))
|
|
||||||
def log_error(event):
|
|
||||||
logging.error("%s", event.msg)
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.on(events.MemberListChanged)
|
|
||||||
def on_memberlist_changed(event):
|
|
||||||
logging.info(
|
|
||||||
"member %s was %s", event.member, "added" if event.member_added else "removed"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.on(events.GroupImageChanged)
|
|
||||||
def on_group_image_changed(event):
|
|
||||||
logging.info("group image %s", "deleted" if event.image_deleted else "changed")
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.on(events.GroupNameChanged)
|
|
||||||
def on_group_name_changed(event):
|
|
||||||
logging.info(f"group name changed, old name: {event.old_name}")
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.on(events.NewMessage(func=lambda e: not e.command))
|
|
||||||
def echo(event):
|
|
||||||
snapshot = event.message_snapshot
|
|
||||||
if snapshot.is_info:
|
|
||||||
# Ignore info messages
|
|
||||||
return
|
|
||||||
if snapshot.text or snapshot.file:
|
|
||||||
snapshot.chat.send_message(text=snapshot.text, file=snapshot.file)
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.on(events.NewMessage(command="/help"))
|
|
||||||
def help_command(event):
|
|
||||||
snapshot = event.message_snapshot
|
|
||||||
snapshot.chat.send_text("Send me any message and I will echo it back")
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
logging.basicConfig(level=logging.INFO)
|
|
||||||
path = os.environ.get("PATH")
|
|
||||||
venv_path = sys.argv[0].strip("echobot")
|
|
||||||
os.environ["PATH"] = path + ":" + venv_path
|
|
||||||
with Rpc() as rpc:
|
|
||||||
deltachat = DeltaChat(rpc)
|
|
||||||
system_info = deltachat.get_system_info()
|
|
||||||
logging.info(f"Running deltachat core {system_info.deltachat_core_version}")
|
|
||||||
|
|
||||||
accounts = deltachat.get_all_accounts()
|
|
||||||
account = accounts[0] if accounts else deltachat.add_account()
|
|
||||||
|
|
||||||
bot = Bot(account, hooks)
|
|
||||||
|
|
||||||
config = read_config(sys.argv[1])
|
|
||||||
addr = "echo@" + config.mail_domain
|
|
||||||
|
|
||||||
# Create password file
|
|
||||||
if bot.is_configured():
|
|
||||||
password = bot.account.get_config("mail_pw")
|
|
||||||
else:
|
|
||||||
password = create_newemail_dict(config)["password"]
|
|
||||||
|
|
||||||
echobot_password_path.write_text(encrypt_password(password))
|
|
||||||
# Give the user which doveauth runs as access to the password file.
|
|
||||||
subprocess.check_call(
|
|
||||||
["/usr/bin/setfacl", "-m", "user:vmail:r", echobot_password_path],
|
|
||||||
)
|
|
||||||
|
|
||||||
if not bot.is_configured():
|
|
||||||
bot.configure(addr, password)
|
|
||||||
|
|
||||||
# write invite link to working directory
|
|
||||||
invitelink = bot.account.get_qr_code()
|
|
||||||
Path("invite-link.txt").write_text(invitelink)
|
|
||||||
|
|
||||||
bot.run_forever()
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
main()
|
|
||||||
@@ -14,7 +14,7 @@ from stat import S_ISREG
|
|||||||
|
|
||||||
from chatmaild.config import read_config
|
from chatmaild.config import read_config
|
||||||
|
|
||||||
FileEntry = namedtuple("FileEntry", ("relpath", "mtime", "size"))
|
FileEntry = namedtuple("FileEntry", ("path", "mtime", "size"))
|
||||||
|
|
||||||
|
|
||||||
def iter_mailboxes(basedir, maxnum):
|
def iter_mailboxes(basedir, maxnum):
|
||||||
@@ -51,33 +51,27 @@ class MailboxStat:
|
|||||||
|
|
||||||
def __init__(self, basedir):
|
def __init__(self, basedir):
|
||||||
self.basedir = str(basedir)
|
self.basedir = str(basedir)
|
||||||
# all detected messages in cur/new/tmp folders
|
|
||||||
self.messages = []
|
self.messages = []
|
||||||
|
|
||||||
# all detected files in mailbox top dir
|
|
||||||
self.extrafiles = []
|
self.extrafiles = []
|
||||||
|
self.scandir(self.basedir)
|
||||||
|
|
||||||
# scan all relevant files (without recursion)
|
def scandir(self, folderdir):
|
||||||
old_cwd = os.getcwd()
|
for name in os_listdir_if_exists(folderdir):
|
||||||
try:
|
path = f"{folderdir}/{name}"
|
||||||
os.chdir(self.basedir)
|
|
||||||
except FileNotFoundError:
|
|
||||||
return
|
|
||||||
for name in os_listdir_if_exists("."):
|
|
||||||
if name in ("cur", "new", "tmp"):
|
if name in ("cur", "new", "tmp"):
|
||||||
for msg_name in os_listdir_if_exists(name):
|
for msg_name in os_listdir_if_exists(path):
|
||||||
entry = get_file_entry(f"{name}/{msg_name}")
|
entry = get_file_entry(f"{path}/{msg_name}")
|
||||||
if entry is not None:
|
if entry is not None:
|
||||||
self.messages.append(entry)
|
self.messages.append(entry)
|
||||||
|
elif os.path.isdir(path):
|
||||||
|
self.scandir(path)
|
||||||
else:
|
else:
|
||||||
entry = get_file_entry(name)
|
entry = get_file_entry(path)
|
||||||
if entry is not None:
|
if entry is not None:
|
||||||
self.extrafiles.append(entry)
|
self.extrafiles.append(entry)
|
||||||
if name == "password":
|
if name == "password":
|
||||||
self.last_login = entry.mtime
|
self.last_login = entry.mtime
|
||||||
self.extrafiles.sort(key=lambda x: -x.size)
|
self.extrafiles.sort(key=lambda x: -x.size)
|
||||||
os.chdir(old_cwd)
|
|
||||||
|
|
||||||
|
|
||||||
def print_info(msg):
|
def print_info(msg):
|
||||||
@@ -130,13 +124,6 @@ class Expiry:
|
|||||||
self.remove_mailbox(mbox.basedir)
|
self.remove_mailbox(mbox.basedir)
|
||||||
return
|
return
|
||||||
|
|
||||||
# all to-be-removed files are relative to the mailbox basedir
|
|
||||||
try:
|
|
||||||
os.chdir(mbox.basedir)
|
|
||||||
except FileNotFoundError:
|
|
||||||
print_info(f"mailbox not found/vanished {mbox.basedir}")
|
|
||||||
return
|
|
||||||
|
|
||||||
mboxname = os.path.basename(mbox.basedir)
|
mboxname = os.path.basename(mbox.basedir)
|
||||||
if self.verbose:
|
if self.verbose:
|
||||||
date = datetime.fromtimestamp(mbox.last_login) if mbox.last_login else None
|
date = datetime.fromtimestamp(mbox.last_login) if mbox.last_login else None
|
||||||
@@ -147,16 +134,17 @@ class Expiry:
|
|||||||
self.all_files += len(mbox.messages)
|
self.all_files += len(mbox.messages)
|
||||||
for message in mbox.messages:
|
for message in mbox.messages:
|
||||||
if message.mtime < cutoff_mails:
|
if message.mtime < cutoff_mails:
|
||||||
self.remove_file(message.relpath, mtime=message.mtime)
|
self.remove_file(message.path, mtime=message.mtime)
|
||||||
elif message.size > 200000 and message.mtime < cutoff_large_mails:
|
elif message.size > 200000 and message.mtime < cutoff_large_mails:
|
||||||
# we only remove noticed large files (not unnoticed ones in new/)
|
# we only remove noticed large files (not unnoticed ones in new/)
|
||||||
if message.relpath.startswith("cur/"):
|
parts = message.path.split("/")
|
||||||
self.remove_file(message.relpath, mtime=message.mtime)
|
if len(parts) >= 2 and parts[-2] == "cur":
|
||||||
|
self.remove_file(message.path, mtime=message.mtime)
|
||||||
else:
|
else:
|
||||||
continue
|
continue
|
||||||
changed = True
|
changed = True
|
||||||
if changed:
|
if changed:
|
||||||
self.remove_file("maildirsize")
|
self.remove_file(f"{mbox.basedir}/maildirsize")
|
||||||
|
|
||||||
def get_summary(self):
|
def get_summary(self):
|
||||||
return (
|
return (
|
||||||
|
|||||||
@@ -1,381 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
import asyncio
|
|
||||||
import base64
|
|
||||||
import binascii
|
|
||||||
import sys
|
|
||||||
import time
|
|
||||||
from email import policy
|
|
||||||
from email.parser import BytesParser
|
|
||||||
from email.utils import parseaddr
|
|
||||||
from smtplib import SMTP as SMTPClient
|
|
||||||
|
|
||||||
from aiosmtpd.controller import Controller
|
|
||||||
from aiosmtpd.smtp import SMTP
|
|
||||||
|
|
||||||
from .config import read_config
|
|
||||||
|
|
||||||
ENCRYPTION_NEEDED_523 = "523 Encryption Needed: Invalid Unencrypted Mail"
|
|
||||||
|
|
||||||
|
|
||||||
def check_openpgp_payload(payload: bytes):
|
|
||||||
"""Checks the OpenPGP payload.
|
|
||||||
|
|
||||||
OpenPGP payload must consist only of PKESK and SKESK packets
|
|
||||||
terminated by a single SEIPD packet.
|
|
||||||
|
|
||||||
Returns True if OpenPGP payload is correct,
|
|
||||||
False otherwise.
|
|
||||||
|
|
||||||
May raise IndexError while trying to read OpenPGP packet header
|
|
||||||
if it is truncated.
|
|
||||||
"""
|
|
||||||
i = 0
|
|
||||||
while i < len(payload):
|
|
||||||
# Only OpenPGP format is allowed.
|
|
||||||
if payload[i] & 0xC0 != 0xC0:
|
|
||||||
return False
|
|
||||||
|
|
||||||
packet_type_id = payload[i] & 0x3F
|
|
||||||
i += 1
|
|
||||||
|
|
||||||
while payload[i] >= 224 and payload[i] < 255:
|
|
||||||
# Partial body length.
|
|
||||||
partial_length = 1 << (payload[i] & 0x1F)
|
|
||||||
i += 1 + partial_length
|
|
||||||
|
|
||||||
if payload[i] < 192:
|
|
||||||
# One-octet length.
|
|
||||||
body_len = payload[i]
|
|
||||||
i += 1
|
|
||||||
elif payload[i] < 224:
|
|
||||||
# Two-octet length.
|
|
||||||
body_len = ((payload[i] - 192) << 8) + payload[i + 1] + 192
|
|
||||||
i += 2
|
|
||||||
elif payload[i] == 255:
|
|
||||||
# Five-octet length.
|
|
||||||
body_len = (
|
|
||||||
(payload[i + 1] << 24)
|
|
||||||
| (payload[i + 2] << 16)
|
|
||||||
| (payload[i + 3] << 8)
|
|
||||||
| payload[i + 4]
|
|
||||||
)
|
|
||||||
i += 5
|
|
||||||
else:
|
|
||||||
# Impossible, partial body length was processed above.
|
|
||||||
return False
|
|
||||||
|
|
||||||
i += body_len
|
|
||||||
|
|
||||||
if i == len(payload):
|
|
||||||
# Last packet should be
|
|
||||||
# Symmetrically Encrypted and Integrity Protected Data Packet (SEIPD)
|
|
||||||
#
|
|
||||||
# This is the only place where this function may return `True`.
|
|
||||||
return packet_type_id == 18
|
|
||||||
elif packet_type_id not in [1, 3]:
|
|
||||||
# All packets except the last one must be either
|
|
||||||
# Public-Key Encrypted Session Key Packet (PKESK)
|
|
||||||
# or
|
|
||||||
# Symmetric-Key Encrypted Session Key Packet (SKESK)
|
|
||||||
return False
|
|
||||||
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def check_armored_payload(payload: str, outgoing: bool):
|
|
||||||
"""Check the armored PGP message for invalid content.
|
|
||||||
|
|
||||||
:param payload: the armored PGP message
|
|
||||||
:param outgoing: whether the message is outgoing or incoming
|
|
||||||
:return: whether the message is a valid PGP message
|
|
||||||
"""
|
|
||||||
prefix = "-----BEGIN PGP MESSAGE-----\r\n"
|
|
||||||
if not payload.startswith(prefix):
|
|
||||||
return False
|
|
||||||
payload = payload.removeprefix(prefix)
|
|
||||||
|
|
||||||
while payload.endswith("\r\n"):
|
|
||||||
payload = payload.removesuffix("\r\n")
|
|
||||||
suffix = "-----END PGP MESSAGE-----"
|
|
||||||
if not payload.endswith(suffix):
|
|
||||||
return False
|
|
||||||
payload = payload.removesuffix(suffix)
|
|
||||||
|
|
||||||
version_comment = "Version: "
|
|
||||||
if payload.startswith(version_comment):
|
|
||||||
if outgoing: # Disallow comments in outgoing messages
|
|
||||||
return False
|
|
||||||
# Remove comments from incoming messages
|
|
||||||
payload = payload.partition("\r\n")[2]
|
|
||||||
|
|
||||||
while payload.startswith("\r\n"):
|
|
||||||
payload = payload.removeprefix("\r\n")
|
|
||||||
|
|
||||||
# Remove CRC24.
|
|
||||||
payload = payload.rpartition("=")[0]
|
|
||||||
|
|
||||||
try:
|
|
||||||
payload = base64.b64decode(payload)
|
|
||||||
except binascii.Error:
|
|
||||||
return False
|
|
||||||
|
|
||||||
try:
|
|
||||||
return check_openpgp_payload(payload)
|
|
||||||
except IndexError:
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def is_securejoin(message):
|
|
||||||
if message.get("secure-join") not in ["vc-request", "vg-request"]:
|
|
||||||
return False
|
|
||||||
if not message.is_multipart():
|
|
||||||
return False
|
|
||||||
parts_count = 0
|
|
||||||
for part in message.iter_parts():
|
|
||||||
parts_count += 1
|
|
||||||
if parts_count > 1:
|
|
||||||
return False
|
|
||||||
if part.is_multipart():
|
|
||||||
return False
|
|
||||||
if part.get_content_type() != "text/plain":
|
|
||||||
return False
|
|
||||||
|
|
||||||
payload = part.get_payload().strip().lower()
|
|
||||||
if payload not in ("secure-join: vc-request", "secure-join: vg-request"):
|
|
||||||
return False
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
def check_encrypted(message, outgoing=True):
|
|
||||||
"""Check that the message is an OpenPGP-encrypted message.
|
|
||||||
|
|
||||||
MIME structure of the message must correspond to <https://www.rfc-editor.org/rfc/rfc3156>.
|
|
||||||
"""
|
|
||||||
if not message.is_multipart():
|
|
||||||
return False
|
|
||||||
if message.get_content_type() != "multipart/encrypted":
|
|
||||||
return False
|
|
||||||
parts_count = 0
|
|
||||||
for part in message.iter_parts():
|
|
||||||
# We explicitly check Content-Type of each part later,
|
|
||||||
# but this is to be absolutely sure `get_payload()` returns string and not list.
|
|
||||||
if part.is_multipart():
|
|
||||||
return False
|
|
||||||
|
|
||||||
if parts_count == 0:
|
|
||||||
if part.get_content_type() != "application/pgp-encrypted":
|
|
||||||
return False
|
|
||||||
|
|
||||||
payload = part.get_payload()
|
|
||||||
if payload.strip() != "Version: 1":
|
|
||||||
return False
|
|
||||||
elif parts_count == 1:
|
|
||||||
if part.get_content_type() != "application/octet-stream":
|
|
||||||
return False
|
|
||||||
|
|
||||||
if not check_armored_payload(part.get_payload(), outgoing=outgoing):
|
|
||||||
return False
|
|
||||||
else:
|
|
||||||
return False
|
|
||||||
parts_count += 1
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
async def asyncmain_beforequeue(config, mode):
|
|
||||||
if mode == "outgoing":
|
|
||||||
port = config.filtermail_smtp_port
|
|
||||||
handler = OutgoingBeforeQueueHandler(config)
|
|
||||||
else:
|
|
||||||
port = config.filtermail_smtp_port_incoming
|
|
||||||
handler = IncomingBeforeQueueHandler(config)
|
|
||||||
HackedController(
|
|
||||||
handler,
|
|
||||||
hostname="127.0.0.1",
|
|
||||||
port=port,
|
|
||||||
data_size_limit=config.max_message_size,
|
|
||||||
).start()
|
|
||||||
|
|
||||||
|
|
||||||
def recipient_matches_passthrough(recipient, passthrough_recipients):
|
|
||||||
for addr in passthrough_recipients:
|
|
||||||
if recipient == addr:
|
|
||||||
return True
|
|
||||||
if addr[0] == "@" and recipient.endswith(addr):
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
class HackedController(Controller):
|
|
||||||
def factory(self):
|
|
||||||
return SMTPDiscardRCPTO_options(self.handler, **self.SMTP_kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
class SMTPDiscardRCPTO_options(SMTP):
|
|
||||||
def _getparams(self, params):
|
|
||||||
# Ignore RCPT TO parameters.
|
|
||||||
#
|
|
||||||
# Otherwise parameters such as `ORCPT=...`
|
|
||||||
# or `NOTIFY=DELAY,FAILURE` (generated by Stalwart)
|
|
||||||
# make aiosmtpd reject the message here:
|
|
||||||
# <https://github.com/aio-libs/aiosmtpd/blob/98f578389ae86e5345cc343fa4e5a17b21d9c96d/aiosmtpd/smtp.py#L1379-L1384>
|
|
||||||
return {}
|
|
||||||
|
|
||||||
|
|
||||||
class OutgoingBeforeQueueHandler:
|
|
||||||
def __init__(self, config):
|
|
||||||
self.config = config
|
|
||||||
self.send_rate_limiter = SendRateLimiter()
|
|
||||||
|
|
||||||
async def handle_MAIL(self, server, session, envelope, address, mail_options):
|
|
||||||
log_info(f"handle_MAIL from {address}")
|
|
||||||
envelope.mail_from = address
|
|
||||||
max_sent = self.config.max_user_send_per_minute
|
|
||||||
if not self.send_rate_limiter.is_sending_allowed(address, max_sent):
|
|
||||||
return f"450 4.7.1: Too much mail from {address}"
|
|
||||||
|
|
||||||
parts = envelope.mail_from.split("@")
|
|
||||||
if len(parts) != 2:
|
|
||||||
return f"500 Invalid from address <{envelope.mail_from!r}>"
|
|
||||||
|
|
||||||
return "250 OK"
|
|
||||||
|
|
||||||
async def handle_DATA(self, server, session, envelope):
|
|
||||||
loop = asyncio.get_running_loop()
|
|
||||||
return await loop.run_in_executor(None, self.sync_handle_DATA, envelope)
|
|
||||||
|
|
||||||
def sync_handle_DATA(self, envelope):
|
|
||||||
log_info("handle_DATA before-queue")
|
|
||||||
error = self.check_DATA(envelope)
|
|
||||||
if error:
|
|
||||||
return error
|
|
||||||
log_info("re-injecting the mail that passed checks")
|
|
||||||
client = SMTPClient("localhost", self.config.postfix_reinject_port)
|
|
||||||
client.sendmail(
|
|
||||||
envelope.mail_from, envelope.rcpt_tos, envelope.original_content
|
|
||||||
)
|
|
||||||
return "250 OK"
|
|
||||||
|
|
||||||
def check_DATA(self, envelope):
|
|
||||||
"""the central filtering function for e-mails."""
|
|
||||||
log_info(f"Processing DATA message from {envelope.mail_from}")
|
|
||||||
|
|
||||||
message = BytesParser(policy=policy.default).parsebytes(envelope.content)
|
|
||||||
mail_encrypted = check_encrypted(message, outgoing=True)
|
|
||||||
|
|
||||||
_, from_addr = parseaddr(message.get("from").strip())
|
|
||||||
|
|
||||||
if envelope.mail_from.lower() != from_addr.lower():
|
|
||||||
return f"500 Invalid FROM <{from_addr!r}> for <{envelope.mail_from!r}>"
|
|
||||||
|
|
||||||
if mail_encrypted or is_securejoin(message):
|
|
||||||
print("Outgoing: Filtering encrypted mail.", file=sys.stderr)
|
|
||||||
return
|
|
||||||
|
|
||||||
print("Outgoing: Filtering unencrypted mail.", file=sys.stderr)
|
|
||||||
|
|
||||||
if envelope.mail_from in self.config.passthrough_senders:
|
|
||||||
return
|
|
||||||
|
|
||||||
# allow self-sent Autocrypt Setup Message
|
|
||||||
if envelope.rcpt_tos == [from_addr]:
|
|
||||||
if message.get("subject") == "Autocrypt Setup Message":
|
|
||||||
if message.get_content_type() == "multipart/mixed":
|
|
||||||
return
|
|
||||||
|
|
||||||
passthrough_recipients = self.config.passthrough_recipients
|
|
||||||
|
|
||||||
for recipient in envelope.rcpt_tos:
|
|
||||||
if recipient_matches_passthrough(recipient, passthrough_recipients):
|
|
||||||
continue
|
|
||||||
|
|
||||||
print("Rejected unencrypted mail.", file=sys.stderr)
|
|
||||||
return ENCRYPTION_NEEDED_523
|
|
||||||
|
|
||||||
|
|
||||||
class IncomingBeforeQueueHandler:
|
|
||||||
def __init__(self, config):
|
|
||||||
self.config = config
|
|
||||||
|
|
||||||
async def handle_DATA(self, server, session, envelope):
|
|
||||||
loop = asyncio.get_running_loop()
|
|
||||||
return await loop.run_in_executor(None, self.sync_handle_DATA, envelope)
|
|
||||||
|
|
||||||
def sync_handle_DATA(self, envelope):
|
|
||||||
log_info("handle_DATA before-queue")
|
|
||||||
error = self.check_DATA(envelope)
|
|
||||||
if error:
|
|
||||||
return error
|
|
||||||
log_info("re-injecting the mail that passed checks")
|
|
||||||
|
|
||||||
# the smtp daemon on reinject_port_incoming gives it to dkim milter
|
|
||||||
# which looks at source address to determine whether to verify or sign
|
|
||||||
client = SMTPClient(
|
|
||||||
"localhost",
|
|
||||||
self.config.postfix_reinject_port_incoming,
|
|
||||||
source_address=("127.0.0.2", 0),
|
|
||||||
)
|
|
||||||
client.sendmail(
|
|
||||||
envelope.mail_from, envelope.rcpt_tos, envelope.original_content
|
|
||||||
)
|
|
||||||
return "250 OK"
|
|
||||||
|
|
||||||
def check_DATA(self, envelope):
|
|
||||||
"""the central filtering function for e-mails."""
|
|
||||||
log_info(f"Processing DATA message from {envelope.mail_from}")
|
|
||||||
|
|
||||||
message = BytesParser(policy=policy.default).parsebytes(envelope.content)
|
|
||||||
mail_encrypted = check_encrypted(message, outgoing=False)
|
|
||||||
|
|
||||||
if mail_encrypted or is_securejoin(message):
|
|
||||||
print("Incoming: Filtering encrypted mail.", file=sys.stderr)
|
|
||||||
return
|
|
||||||
|
|
||||||
print("Incoming: Filtering unencrypted mail.", file=sys.stderr)
|
|
||||||
|
|
||||||
# we want cleartext mailer-daemon messages to pass through
|
|
||||||
# chatmail core will typically not display them as normal messages
|
|
||||||
if message.get("auto-submitted"):
|
|
||||||
_, from_addr = parseaddr(message.get("from").strip())
|
|
||||||
if from_addr.lower().startswith("mailer-daemon@"):
|
|
||||||
if message.get_content_type() == "multipart/report":
|
|
||||||
return
|
|
||||||
|
|
||||||
for recipient in envelope.rcpt_tos:
|
|
||||||
user = self.config.get_user(recipient)
|
|
||||||
if user is None or user.is_incoming_cleartext_ok():
|
|
||||||
continue
|
|
||||||
|
|
||||||
print("Rejected unencrypted mail.", file=sys.stderr)
|
|
||||||
return ENCRYPTION_NEEDED_523
|
|
||||||
|
|
||||||
|
|
||||||
class SendRateLimiter:
|
|
||||||
def __init__(self):
|
|
||||||
self.addr2timestamps = {}
|
|
||||||
|
|
||||||
def is_sending_allowed(self, mail_from, max_send_per_minute):
|
|
||||||
last = self.addr2timestamps.setdefault(mail_from, [])
|
|
||||||
now = time.time()
|
|
||||||
last[:] = [ts for ts in last if ts >= (now - 60)]
|
|
||||||
if len(last) <= max_send_per_minute:
|
|
||||||
last.append(now)
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def log_info(msg):
|
|
||||||
print(msg, file=sys.stderr)
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
args = sys.argv[1:]
|
|
||||||
assert len(args) == 2
|
|
||||||
config = read_config(args[0])
|
|
||||||
mode = args[1]
|
|
||||||
loop = asyncio.new_event_loop()
|
|
||||||
asyncio.set_event_loop(loop)
|
|
||||||
assert mode in ["incoming", "outgoing"]
|
|
||||||
task = asyncmain_beforequeue(config, mode)
|
|
||||||
loop.create_task(task)
|
|
||||||
log_info("entering serving loop")
|
|
||||||
loop.run_forever()
|
|
||||||
@@ -13,9 +13,20 @@ to show storage summaries only for first 1000 mailboxes
|
|||||||
|
|
||||||
python -m chatmaild.fsreport /path/to/chatmail.ini --maxnum 1000
|
python -m chatmaild.fsreport /path/to/chatmail.ini --maxnum 1000
|
||||||
|
|
||||||
|
to write Prometheus textfile for node_exporter
|
||||||
|
|
||||||
|
python -m chatmaild.fsreport --textfile /var/lib/prometheus/node-exporter/
|
||||||
|
|
||||||
|
writes to /var/lib/prometheus/node-exporter/fsreport.prom
|
||||||
|
|
||||||
|
to also write legacy metrics.py style output (default: /var/www/html/metrics):
|
||||||
|
|
||||||
|
python -m chatmaild.fsreport --textfile /var/lib/prometheus/node-exporter/ --legacy-metrics
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import os
|
import os
|
||||||
|
import tempfile
|
||||||
from argparse import ArgumentParser
|
from argparse import ArgumentParser
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
|
|
||||||
@@ -48,7 +59,19 @@ class Report:
|
|||||||
self.num_ci_logins = self.num_all_logins = 0
|
self.num_ci_logins = self.num_all_logins = 0
|
||||||
self.login_buckets = {x: 0 for x in (1, 10, 30, 40, 80, 100, 150)}
|
self.login_buckets = {x: 0 for x in (1, 10, 30, 40, 80, 100, 150)}
|
||||||
|
|
||||||
self.message_buckets = {x: 0 for x in (0, 160000, 500000, 2000000)}
|
KiB = 1024
|
||||||
|
MiB = 1024 * KiB
|
||||||
|
self.message_size_thresholds = (
|
||||||
|
0,
|
||||||
|
100 * KiB,
|
||||||
|
MiB // 2,
|
||||||
|
1 * MiB,
|
||||||
|
2 * MiB,
|
||||||
|
5 * MiB,
|
||||||
|
10 * MiB,
|
||||||
|
)
|
||||||
|
self.message_buckets = {x: 0 for x in self.message_size_thresholds}
|
||||||
|
self.message_count_buckets = {x: 0 for x in self.message_size_thresholds}
|
||||||
|
|
||||||
def process_mailbox_stat(self, mailbox):
|
def process_mailbox_stat(self, mailbox):
|
||||||
# categorize login times
|
# categorize login times
|
||||||
@@ -68,9 +91,10 @@ class Report:
|
|||||||
for size in self.message_buckets:
|
for size in self.message_buckets:
|
||||||
for msg in mailbox.messages:
|
for msg in mailbox.messages:
|
||||||
if msg.size >= size:
|
if msg.size >= size:
|
||||||
if self.mdir and not msg.relpath.startswith(self.mdir):
|
if self.mdir and f"/{self.mdir}/" not in msg.path:
|
||||||
continue
|
continue
|
||||||
self.message_buckets[size] += msg.size
|
self.message_buckets[size] += msg.size
|
||||||
|
self.message_count_buckets[size] += 1
|
||||||
|
|
||||||
self.size_messages += sum(entry.size for entry in mailbox.messages)
|
self.size_messages += sum(entry.size for entry in mailbox.messages)
|
||||||
self.size_extra += sum(entry.size for entry in mailbox.extrafiles)
|
self.size_extra += sum(entry.size for entry in mailbox.extrafiles)
|
||||||
@@ -93,9 +117,10 @@ class Report:
|
|||||||
|
|
||||||
pref = f"[{self.mdir}] " if self.mdir else ""
|
pref = f"[{self.mdir}] " if self.mdir else ""
|
||||||
for minsize, sumsize in self.message_buckets.items():
|
for minsize, sumsize in self.message_buckets.items():
|
||||||
|
count = self.message_count_buckets[minsize]
|
||||||
percent = (sumsize / all_messages * 100) if all_messages else 0
|
percent = (sumsize / all_messages * 100) if all_messages else 0
|
||||||
print(
|
print(
|
||||||
f"{pref}larger than {HSize(minsize)}: {HSize(sumsize)} ({percent:.2f}%)"
|
f"{pref}larger than {HSize(minsize)}: {HSize(sumsize)} ({percent:.2f}%), {count} msgs"
|
||||||
)
|
)
|
||||||
|
|
||||||
user_logins = self.num_all_logins - self.num_ci_logins
|
user_logins = self.num_all_logins - self.num_ci_logins
|
||||||
@@ -111,6 +136,75 @@ class Report:
|
|||||||
for days, active in self.login_buckets.items():
|
for days, active in self.login_buckets.items():
|
||||||
print(f"last {days:3} days: {HSize(active)} {p(active)}")
|
print(f"last {days:3} days: {HSize(active)} {p(active)}")
|
||||||
|
|
||||||
|
def _write_atomic(self, filepath, content):
|
||||||
|
"""Atomically write content to filepath via tmp+rename."""
|
||||||
|
dirpath = os.path.dirname(os.path.abspath(filepath))
|
||||||
|
fd, tmppath = tempfile.mkstemp(dir=dirpath, suffix=".tmp")
|
||||||
|
try:
|
||||||
|
with os.fdopen(fd, "w") as f:
|
||||||
|
f.write(content)
|
||||||
|
os.chmod(tmppath, 0o644)
|
||||||
|
os.rename(tmppath, filepath)
|
||||||
|
except BaseException:
|
||||||
|
try:
|
||||||
|
os.unlink(tmppath)
|
||||||
|
except OSError:
|
||||||
|
pass
|
||||||
|
raise
|
||||||
|
|
||||||
|
def dump_textfile(self, filepath):
|
||||||
|
"""Dump metrics in Prometheus exposition format."""
|
||||||
|
lines = []
|
||||||
|
|
||||||
|
lines.append("# HELP chatmail_storage_bytes Mailbox storage in bytes.")
|
||||||
|
lines.append("# TYPE chatmail_storage_bytes gauge")
|
||||||
|
lines.append(f'chatmail_storage_bytes{{kind="messages"}} {self.size_messages}')
|
||||||
|
lines.append(f'chatmail_storage_bytes{{kind="extra"}} {self.size_extra}')
|
||||||
|
total = self.size_extra + self.size_messages
|
||||||
|
lines.append(f'chatmail_storage_bytes{{kind="total"}} {total}')
|
||||||
|
|
||||||
|
lines.append("# HELP chatmail_messages_bytes Sum of msg bytes >= threshold.")
|
||||||
|
lines.append("# TYPE chatmail_messages_bytes gauge")
|
||||||
|
for minsize, sumsize in self.message_buckets.items():
|
||||||
|
lines.append(f'chatmail_messages_bytes{{min_size="{minsize}"}} {sumsize}')
|
||||||
|
|
||||||
|
lines.append("# HELP chatmail_messages_count Number of msgs >= size threshold.")
|
||||||
|
lines.append("# TYPE chatmail_messages_count gauge")
|
||||||
|
for minsize, count in self.message_count_buckets.items():
|
||||||
|
lines.append(f'chatmail_messages_count{{min_size="{minsize}"}} {count}')
|
||||||
|
|
||||||
|
lines.append("# HELP chatmail_accounts Number of accounts.")
|
||||||
|
lines.append("# TYPE chatmail_accounts gauge")
|
||||||
|
user_logins = self.num_all_logins - self.num_ci_logins
|
||||||
|
lines.append(f'chatmail_accounts{{kind="all"}} {self.num_all_logins}')
|
||||||
|
lines.append(f'chatmail_accounts{{kind="ci"}} {self.num_ci_logins}')
|
||||||
|
lines.append(f'chatmail_accounts{{kind="user"}} {user_logins}')
|
||||||
|
|
||||||
|
lines.append(
|
||||||
|
"# HELP chatmail_accounts_active Non-CI accounts active within N days."
|
||||||
|
)
|
||||||
|
lines.append("# TYPE chatmail_accounts_active gauge")
|
||||||
|
for days, active in self.login_buckets.items():
|
||||||
|
lines.append(f'chatmail_accounts_active{{days="{days}"}} {active}')
|
||||||
|
|
||||||
|
self._write_atomic(filepath, "\n".join(lines) + "\n")
|
||||||
|
|
||||||
|
def dump_compat_textfile(self, filepath):
|
||||||
|
"""Dump legacy metrics.py style metrics."""
|
||||||
|
user_logins = self.num_all_logins - self.num_ci_logins
|
||||||
|
lines = [
|
||||||
|
"# HELP total number of accounts",
|
||||||
|
"# TYPE accounts gauge",
|
||||||
|
f"accounts {self.num_all_logins}",
|
||||||
|
"# HELP number of CI accounts",
|
||||||
|
"# TYPE ci_accounts gauge",
|
||||||
|
f"ci_accounts {self.num_ci_logins}",
|
||||||
|
"# HELP number of non-CI accounts",
|
||||||
|
"# TYPE nonci_accounts gauge",
|
||||||
|
f"nonci_accounts {user_logins}",
|
||||||
|
]
|
||||||
|
self._write_atomic(filepath, "\n".join(lines) + "\n")
|
||||||
|
|
||||||
|
|
||||||
def main(args=None):
|
def main(args=None):
|
||||||
"""Report about filesystem storage usage of all mailboxes and messages"""
|
"""Report about filesystem storage usage of all mailboxes and messages"""
|
||||||
@@ -127,19 +221,21 @@ def main(args=None):
|
|||||||
"--days",
|
"--days",
|
||||||
default=0,
|
default=0,
|
||||||
action="store",
|
action="store",
|
||||||
help="assume date to be days older than now",
|
help="assume date to be DAYS older than now",
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--min-login-age",
|
"--min-login-age",
|
||||||
default=0,
|
default=0,
|
||||||
|
metavar="DAYS",
|
||||||
dest="min_login_age",
|
dest="min_login_age",
|
||||||
action="store",
|
action="store",
|
||||||
help="only sum up message size if last login is at least min-login-age days old",
|
help="only sum up message size if last login is at least DAYS days old",
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--mdir",
|
"--mdir",
|
||||||
|
metavar="{cur,new,tmp}",
|
||||||
action="store",
|
action="store",
|
||||||
help="only consider 'cur' or 'new' or 'tmp' messages for summary",
|
help="only consider messages in specified Maildir subdirectory for summary",
|
||||||
)
|
)
|
||||||
|
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
@@ -148,6 +244,21 @@ def main(args=None):
|
|||||||
action="store",
|
action="store",
|
||||||
help="maximum number of mailboxes to iterate on",
|
help="maximum number of mailboxes to iterate on",
|
||||||
)
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--textfile",
|
||||||
|
metavar="PATH",
|
||||||
|
default=None,
|
||||||
|
help="write Prometheus textfile to PATH (directory or file); "
|
||||||
|
"if PATH is a directory, writes 'fsreport.prom' inside it",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--legacy-metrics",
|
||||||
|
metavar="FILENAME",
|
||||||
|
nargs="?",
|
||||||
|
const="/var/www/html/metrics",
|
||||||
|
default=None,
|
||||||
|
help="write legacy metrics.py textfile (default: /var/www/html/metrics)",
|
||||||
|
)
|
||||||
|
|
||||||
args = parser.parse_args(args)
|
args = parser.parse_args(args)
|
||||||
|
|
||||||
@@ -161,7 +272,15 @@ def main(args=None):
|
|||||||
rep = Report(now=now, min_login_age=int(args.min_login_age), mdir=args.mdir)
|
rep = Report(now=now, min_login_age=int(args.min_login_age), mdir=args.mdir)
|
||||||
for mbox in iter_mailboxes(str(config.mailboxes_dir), maxnum=maxnum):
|
for mbox in iter_mailboxes(str(config.mailboxes_dir), maxnum=maxnum):
|
||||||
rep.process_mailbox_stat(mbox)
|
rep.process_mailbox_stat(mbox)
|
||||||
rep.dump_summary()
|
if args.textfile:
|
||||||
|
path = args.textfile
|
||||||
|
if os.path.isdir(path):
|
||||||
|
path = os.path.join(path, "fsreport.prom")
|
||||||
|
rep.dump_textfile(path)
|
||||||
|
if args.legacy_metrics:
|
||||||
|
rep.dump_compat_textfile(args.legacy_metrics)
|
||||||
|
if not args.textfile and not args.legacy_metrics:
|
||||||
|
rep.dump_summary()
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
|||||||
@@ -11,11 +11,14 @@ mail_domain = {mail_domain}
|
|||||||
# Restrictions on user addresses
|
# Restrictions on user addresses
|
||||||
#
|
#
|
||||||
|
|
||||||
# how many mails a user can send out per minute
|
# email sending rate per user and minute
|
||||||
max_user_send_per_minute = 60
|
max_user_send_per_minute = 60
|
||||||
|
|
||||||
|
# per-user max burst size for sending rate limiting (GCRA bucket capacity)
|
||||||
|
max_user_send_burst_size = 10
|
||||||
|
|
||||||
# maximum mailbox size of a chatmail address
|
# maximum mailbox size of a chatmail address
|
||||||
max_mailbox_size = 100M
|
max_mailbox_size = 500M
|
||||||
|
|
||||||
# maximum message size for an e-mail in bytes
|
# maximum message size for an e-mail in bytes
|
||||||
max_message_size = 31457280
|
max_message_size = 31457280
|
||||||
@@ -43,9 +46,16 @@ passthrough_senders =
|
|||||||
|
|
||||||
# list of e-mail recipients for which to accept outbound un-encrypted mails
|
# list of e-mail recipients for which to accept outbound un-encrypted mails
|
||||||
# (space-separated, item may start with "@" to whitelist whole recipient domains)
|
# (space-separated, item may start with "@" to whitelist whole recipient domains)
|
||||||
passthrough_recipients = echo@{mail_domain}
|
passthrough_recipients =
|
||||||
|
|
||||||
# path to www directory - documented here: https://github.com/chatmail/relay/#custom-web-pages
|
# Use externally managed TLS certificates instead of built-in acmetool.
|
||||||
|
# Paths refer to files on the deployment server (not the build machine).
|
||||||
|
# Both files must already exist before running cmdeploy.
|
||||||
|
# Certificate renewal is your responsibility; changed files are
|
||||||
|
# picked up automatically by all relay services.
|
||||||
|
# tls_external_cert_and_key = /path/to/fullchain.pem /path/to/privkey.pem
|
||||||
|
|
||||||
|
# path to www directory - documented here: https://chatmail.at/doc/relay/getting_started.html#custom-web-pages
|
||||||
#www_folder = www
|
#www_folder = www
|
||||||
|
|
||||||
#
|
#
|
||||||
@@ -99,6 +109,12 @@ acme_email =
|
|||||||
# so use this option with caution on production servers.
|
# so use this option with caution on production servers.
|
||||||
imap_rawlog = false
|
imap_rawlog = false
|
||||||
|
|
||||||
|
# set to true if you want to enable the IMAP COMPRESS Extension,
|
||||||
|
# which allows IMAP connections to be efficiently compressed.
|
||||||
|
# WARNING: Enabling this makes it impossible to hibernate IMAP
|
||||||
|
# processes which will result in much higher memory/RAM usage.
|
||||||
|
imap_compress = false
|
||||||
|
|
||||||
|
|
||||||
#
|
#
|
||||||
# Privacy Policy
|
# Privacy Policy
|
||||||
|
|||||||
@@ -13,8 +13,6 @@ class LastLoginDictProxy(DictProxy):
|
|||||||
keyname = parts[1].split("/")
|
keyname = parts[1].split("/")
|
||||||
value = parts[2] if len(parts) > 2 else ""
|
value = parts[2] if len(parts) > 2 else ""
|
||||||
if keyname[0] == "shared" and keyname[1] == "last-login":
|
if keyname[0] == "shared" and keyname[1] == "last-login":
|
||||||
if addr.startswith("echo@"):
|
|
||||||
return True
|
|
||||||
addr = keyname[2]
|
addr = keyname[2]
|
||||||
timestamp = int(value)
|
timestamp = int(value)
|
||||||
user = self.config.get_user(addr)
|
user = self.config.get_user(addr)
|
||||||
|
|||||||
@@ -101,7 +101,11 @@ class MetadataDictProxy(DictProxy):
|
|||||||
# Handle `GETMETADATA "" /shared/vendor/deltachat/irohrelay`
|
# Handle `GETMETADATA "" /shared/vendor/deltachat/irohrelay`
|
||||||
return f"O{self.iroh_relay}\n"
|
return f"O{self.iroh_relay}\n"
|
||||||
elif keyname == "vendor/vendor.dovecot/pvt/server/vendor/deltachat/turn":
|
elif keyname == "vendor/vendor.dovecot/pvt/server/vendor/deltachat/turn":
|
||||||
res = turn_credentials()
|
try:
|
||||||
|
res = turn_credentials()
|
||||||
|
except Exception:
|
||||||
|
logging.exception("failed to get TURN credentials")
|
||||||
|
return "N\n"
|
||||||
port = 3478
|
port = 3478
|
||||||
return f"O{self.turn_hostname}:{port}:{res}\n"
|
return f"O{self.turn_hostname}:{port}:{res}\n"
|
||||||
|
|
||||||
|
|||||||
@@ -1,32 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
import sys
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
|
|
||||||
def main(vmail_dir=None):
|
|
||||||
if vmail_dir is None:
|
|
||||||
vmail_dir = sys.argv[1]
|
|
||||||
|
|
||||||
accounts = 0
|
|
||||||
ci_accounts = 0
|
|
||||||
|
|
||||||
for path in Path(vmail_dir).iterdir():
|
|
||||||
if not path.joinpath("cur").is_dir():
|
|
||||||
continue
|
|
||||||
accounts += 1
|
|
||||||
if path.name[:3] in ("ci-", "ac_"):
|
|
||||||
ci_accounts += 1
|
|
||||||
|
|
||||||
print("# HELP total number of accounts")
|
|
||||||
print("# TYPE accounts gauge")
|
|
||||||
print(f"accounts {accounts}")
|
|
||||||
print("# HELP number of CI accounts")
|
|
||||||
print("# TYPE ci_accounts gauge")
|
|
||||||
print(f"ci_accounts {ci_accounts}")
|
|
||||||
print("# HELP number of non-CI accounts")
|
|
||||||
print("# TYPE nonci_accounts gauge")
|
|
||||||
print(f"nonci_accounts {accounts - ci_accounts}")
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
main()
|
|
||||||
@@ -3,9 +3,9 @@
|
|||||||
"""CGI script for creating new accounts."""
|
"""CGI script for creating new accounts."""
|
||||||
|
|
||||||
import json
|
import json
|
||||||
import random
|
|
||||||
import secrets
|
import secrets
|
||||||
import string
|
import string
|
||||||
|
from urllib.parse import quote
|
||||||
|
|
||||||
from chatmaild.config import Config, read_config
|
from chatmaild.config import Config, read_config
|
||||||
|
|
||||||
@@ -15,7 +15,9 @@ ALPHANUMERIC_PUNCT = string.ascii_letters + string.digits + string.punctuation
|
|||||||
|
|
||||||
|
|
||||||
def create_newemail_dict(config: Config):
|
def create_newemail_dict(config: Config):
|
||||||
user = "".join(random.choices(ALPHANUMERIC, k=config.username_max_length))
|
user = "".join(
|
||||||
|
secrets.choice(ALPHANUMERIC) for _ in range(config.username_max_length)
|
||||||
|
)
|
||||||
password = "".join(
|
password = "".join(
|
||||||
secrets.choice(ALPHANUMERIC_PUNCT)
|
secrets.choice(ALPHANUMERIC_PUNCT)
|
||||||
for _ in range(config.password_min_length + 3)
|
for _ in range(config.password_min_length + 3)
|
||||||
@@ -23,13 +25,26 @@ def create_newemail_dict(config: Config):
|
|||||||
return dict(email=f"{user}@{config.mail_domain}", password=f"{password}")
|
return dict(email=f"{user}@{config.mail_domain}", password=f"{password}")
|
||||||
|
|
||||||
|
|
||||||
|
def create_dclogin_url(email, password):
|
||||||
|
"""Build a dclogin: URL with credentials and self-signed cert acceptance.
|
||||||
|
|
||||||
|
Uses ic=3 (AcceptInvalidCertificates) so chatmail clients
|
||||||
|
can connect to servers with self-signed TLS certificates.
|
||||||
|
"""
|
||||||
|
return f"dclogin:{quote(email, safe='@')}?p={quote(password, safe='')}&v=1&ic=3"
|
||||||
|
|
||||||
|
|
||||||
def print_new_account():
|
def print_new_account():
|
||||||
config = read_config(CONFIG_PATH)
|
config = read_config(CONFIG_PATH)
|
||||||
creds = create_newemail_dict(config)
|
creds = create_newemail_dict(config)
|
||||||
|
|
||||||
|
result = dict(email=creds["email"], password=creds["password"])
|
||||||
|
if config.tls_cert_mode == "self":
|
||||||
|
result["dclogin_url"] = create_dclogin_url(creds["email"], creds["password"])
|
||||||
|
|
||||||
print("Content-Type: application/json")
|
print("Content-Type: application/json")
|
||||||
print("")
|
print("")
|
||||||
print(json.dumps(creds))
|
print(json.dumps(result))
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
|||||||
@@ -33,7 +33,7 @@ def test_read_config_testrun(make_config):
|
|||||||
assert config.filtermail_smtp_port == 10080
|
assert config.filtermail_smtp_port == 10080
|
||||||
assert config.postfix_reinject_port == 10025
|
assert config.postfix_reinject_port == 10025
|
||||||
assert config.max_user_send_per_minute == 60
|
assert config.max_user_send_per_minute == 60
|
||||||
assert config.max_mailbox_size == "100M"
|
assert config.max_mailbox_size == "500M"
|
||||||
assert config.delete_mails_after == "20"
|
assert config.delete_mails_after == "20"
|
||||||
assert config.delete_large_after == "7"
|
assert config.delete_large_after == "7"
|
||||||
assert config.username_min_length == 9
|
assert config.username_min_length == 9
|
||||||
@@ -73,3 +73,51 @@ def test_config_userstate_paths(make_config, tmp_path):
|
|||||||
def test_config_max_message_size(make_config, tmp_path):
|
def test_config_max_message_size(make_config, tmp_path):
|
||||||
config = make_config("something.testrun.org", dict(max_message_size="10000"))
|
config = make_config("something.testrun.org", dict(max_message_size="10000"))
|
||||||
assert config.max_message_size == 10000
|
assert config.max_message_size == 10000
|
||||||
|
|
||||||
|
|
||||||
|
def test_config_tls_default_acme(make_config):
|
||||||
|
config = make_config("chat.example.org")
|
||||||
|
assert config.tls_cert_mode == "acme"
|
||||||
|
assert config.tls_cert_path == "/var/lib/acme/live/chat.example.org/fullchain"
|
||||||
|
assert config.tls_key_path == "/var/lib/acme/live/chat.example.org/privkey"
|
||||||
|
|
||||||
|
|
||||||
|
def test_config_tls_self(make_config):
|
||||||
|
config = make_config("_test.example.org")
|
||||||
|
assert config.tls_cert_mode == "self"
|
||||||
|
assert config.tls_cert_path == "/etc/ssl/certs/mailserver.pem"
|
||||||
|
assert config.tls_key_path == "/etc/ssl/private/mailserver.key"
|
||||||
|
|
||||||
|
|
||||||
|
def test_config_tls_external(make_config):
|
||||||
|
config = make_config(
|
||||||
|
"chat.example.org",
|
||||||
|
{
|
||||||
|
"tls_external_cert_and_key": "/custom/fullchain.pem /custom/privkey.pem",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
assert config.tls_cert_mode == "external"
|
||||||
|
assert config.tls_cert_path == "/custom/fullchain.pem"
|
||||||
|
assert config.tls_key_path == "/custom/privkey.pem"
|
||||||
|
|
||||||
|
|
||||||
|
def test_config_tls_external_overrides_underscore(make_config):
|
||||||
|
config = make_config(
|
||||||
|
"_test.example.org",
|
||||||
|
{
|
||||||
|
"tls_external_cert_and_key": "/certs/fullchain.pem /certs/privkey.pem",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
assert config.tls_cert_mode == "external"
|
||||||
|
assert config.tls_cert_path == "/certs/fullchain.pem"
|
||||||
|
assert config.tls_key_path == "/certs/privkey.pem"
|
||||||
|
|
||||||
|
|
||||||
|
def test_config_tls_external_bad_format(make_config):
|
||||||
|
with pytest.raises(ValueError, match="two space-separated"):
|
||||||
|
make_config(
|
||||||
|
"chat.example.org",
|
||||||
|
{
|
||||||
|
"tls_external_cert_and_key": "/only/one/path.pem",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|||||||
@@ -120,6 +120,60 @@ def test_handle_dovecot_protocol_iterate(gencreds, example_config):
|
|||||||
assert not lines[2]
|
assert not lines[2]
|
||||||
|
|
||||||
|
|
||||||
|
def test_invalid_localpart_characters(make_config):
|
||||||
|
"""Test that is_allowed_to_create rejects localparts with invalid characters."""
|
||||||
|
config = make_config("chat.example.org", {"username_min_length": "3"})
|
||||||
|
password = "zequ0Aimuchoodaechik"
|
||||||
|
domain = config.mail_domain
|
||||||
|
|
||||||
|
# valid localparts
|
||||||
|
assert is_allowed_to_create(config, f"abc123@{domain}", password)
|
||||||
|
assert is_allowed_to_create(config, f"a.b-c_d@{domain}", password)
|
||||||
|
|
||||||
|
# uppercase rejected
|
||||||
|
assert not is_allowed_to_create(config, f"Abc123@{domain}", password)
|
||||||
|
assert not is_allowed_to_create(config, f"ABCDEFG@{domain}", password)
|
||||||
|
|
||||||
|
# spaces and special chars rejected
|
||||||
|
assert not is_allowed_to_create(config, f"a b cde@{domain}", password)
|
||||||
|
assert not is_allowed_to_create(config, f"abc+def@{domain}", password)
|
||||||
|
assert not is_allowed_to_create(config, f"abc!def@{domain}", password)
|
||||||
|
assert not is_allowed_to_create(config, f"ab@cdef@{domain}", password)
|
||||||
|
assert not is_allowed_to_create(config, f"abc/def@{domain}", password)
|
||||||
|
assert not is_allowed_to_create(config, f"abc\\def@{domain}", password)
|
||||||
|
|
||||||
|
|
||||||
|
def test_concurrent_creation_same_account(dictproxy):
|
||||||
|
"""Test that concurrent creation of the same account doesn't corrupt password."""
|
||||||
|
addr = "racetest1@chat.example.org"
|
||||||
|
password = "zequ0Aimuchoodaechik"
|
||||||
|
num_threads = 10
|
||||||
|
results = queue.Queue()
|
||||||
|
|
||||||
|
def create():
|
||||||
|
try:
|
||||||
|
res = dictproxy.lookup_passdb(addr, password)
|
||||||
|
results.put(("ok", res))
|
||||||
|
except Exception:
|
||||||
|
results.put(("err", traceback.format_exc()))
|
||||||
|
|
||||||
|
threads = [threading.Thread(target=create, daemon=True) for _ in range(num_threads)]
|
||||||
|
for t in threads:
|
||||||
|
t.start()
|
||||||
|
for t in threads:
|
||||||
|
t.join(timeout=10)
|
||||||
|
|
||||||
|
passwords_seen = set()
|
||||||
|
for _ in range(num_threads):
|
||||||
|
status, res = results.get()
|
||||||
|
if status == "err":
|
||||||
|
pytest.fail(f"concurrent creation failed\n{res}")
|
||||||
|
passwords_seen.add(res["password"])
|
||||||
|
|
||||||
|
# all threads must see the same password hash
|
||||||
|
assert len(passwords_seen) == 1
|
||||||
|
|
||||||
|
|
||||||
def test_50_concurrent_lookups_different_accounts(gencreds, dictproxy):
|
def test_50_concurrent_lookups_different_accounts(gencreds, dictproxy):
|
||||||
num_threads = 50
|
num_threads = 50
|
||||||
req_per_thread = 5
|
req_per_thread = 5
|
||||||
|
|||||||
@@ -17,19 +17,17 @@ from chatmaild.expire import main as expiry_main
|
|||||||
from chatmaild.fsreport import main as report_main
|
from chatmaild.fsreport import main as report_main
|
||||||
|
|
||||||
|
|
||||||
def fill_mbox(basedir):
|
def fill_mbox(folderdir):
|
||||||
basedir1 = basedir.joinpath("mailbox1@example.org")
|
password = folderdir.joinpath("password")
|
||||||
basedir1.mkdir()
|
|
||||||
password = basedir1.joinpath("password")
|
|
||||||
password.write_text("xxx")
|
password.write_text("xxx")
|
||||||
basedir1.joinpath("maildirsize").write_text("xxx")
|
folderdir.joinpath("maildirsize").write_text("xxx")
|
||||||
|
|
||||||
garbagedir = basedir1.joinpath("garbagedir")
|
garbagedir = folderdir.joinpath("garbagedir")
|
||||||
garbagedir.mkdir()
|
garbagedir.mkdir()
|
||||||
|
garbagedir.joinpath("bimbum").write_text("hello")
|
||||||
|
|
||||||
create_new_messages(basedir1, ["cur/msg1"], size=500)
|
create_new_messages(folderdir, ["cur/msg1"], size=500)
|
||||||
create_new_messages(basedir1, ["new/msg2"], size=600)
|
create_new_messages(folderdir, ["new/msg2"], size=600)
|
||||||
return basedir1
|
|
||||||
|
|
||||||
|
|
||||||
def create_new_messages(basedir, relpaths, size=1000, days=0):
|
def create_new_messages(basedir, relpaths, size=1000, days=0):
|
||||||
@@ -45,8 +43,21 @@ def create_new_messages(basedir, relpaths, size=1000, days=0):
|
|||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
def mbox1(example_config):
|
def mbox1(example_config):
|
||||||
basedir1 = fill_mbox(example_config.mailboxes_dir)
|
mboxdir = example_config.mailboxes_dir.joinpath("mailbox1@example.org")
|
||||||
return MailboxStat(basedir1)
|
mboxdir.mkdir()
|
||||||
|
fill_mbox(mboxdir)
|
||||||
|
return MailboxStat(mboxdir)
|
||||||
|
|
||||||
|
|
||||||
|
def test_deltachat_folder(example_config):
|
||||||
|
"""Test old setups that might have a .DeltaChat folder where messages also need to get removed."""
|
||||||
|
mboxdir = example_config.mailboxes_dir.joinpath("mailbox1@example.org")
|
||||||
|
mboxdir.mkdir()
|
||||||
|
mbox2dir = mboxdir.joinpath(".DeltaChat")
|
||||||
|
mbox2dir.mkdir()
|
||||||
|
fill_mbox(mbox2dir)
|
||||||
|
mb = MailboxStat(mboxdir)
|
||||||
|
assert len(mb.messages) == 2
|
||||||
|
|
||||||
|
|
||||||
def test_filentry_ordering(tmp_path):
|
def test_filentry_ordering(tmp_path):
|
||||||
@@ -76,7 +87,7 @@ def test_stats_mailbox(mbox1):
|
|||||||
create_new_messages(mbox1.basedir, ["large-extra"], size=1000)
|
create_new_messages(mbox1.basedir, ["large-extra"], size=1000)
|
||||||
create_new_messages(mbox1.basedir, ["index-something"], size=3)
|
create_new_messages(mbox1.basedir, ["index-something"], size=3)
|
||||||
mbox2 = MailboxStat(mbox1.basedir)
|
mbox2 = MailboxStat(mbox1.basedir)
|
||||||
assert len(mbox2.extrafiles) == 4
|
assert len(mbox2.extrafiles) == 5
|
||||||
assert mbox2.extrafiles[0].size == 1000
|
assert mbox2.extrafiles[0].size == 1000
|
||||||
|
|
||||||
# cope well with mailbox dirs that have no password (for whatever reason)
|
# cope well with mailbox dirs that have no password (for whatever reason)
|
||||||
@@ -101,6 +112,43 @@ def test_report(mbox1, example_config):
|
|||||||
report_main(args)
|
report_main(args)
|
||||||
|
|
||||||
|
|
||||||
|
def test_report_mdir_filters_by_path(mbox1, example_config):
|
||||||
|
"""Test that Report with mdir='cur' only counts messages in cur/ subdirectory."""
|
||||||
|
from chatmaild.fsreport import Report
|
||||||
|
|
||||||
|
now = datetime.utcnow().timestamp()
|
||||||
|
|
||||||
|
# Set password mtime to old enough so min_login_age check passes
|
||||||
|
password = Path(mbox1.basedir).joinpath("password")
|
||||||
|
old_time = now - 86400 * 10 # 10 days ago
|
||||||
|
os.utime(password, (old_time, old_time))
|
||||||
|
|
||||||
|
# Reload mailbox with updated mtime
|
||||||
|
from chatmaild.expire import MailboxStat
|
||||||
|
|
||||||
|
mbox = MailboxStat(mbox1.basedir)
|
||||||
|
|
||||||
|
# Report without mdir — should count all messages
|
||||||
|
rep_all = Report(now=now, min_login_age=1, mdir=None)
|
||||||
|
rep_all.process_mailbox_stat(mbox)
|
||||||
|
total_all = rep_all.message_buckets[0]
|
||||||
|
|
||||||
|
# Report with mdir='cur' — should only count cur/ messages
|
||||||
|
rep_cur = Report(now=now, min_login_age=1, mdir="cur")
|
||||||
|
rep_cur.process_mailbox_stat(mbox)
|
||||||
|
total_cur = rep_cur.message_buckets[0]
|
||||||
|
|
||||||
|
# Report with mdir='new' — should only count new/ messages
|
||||||
|
rep_new = Report(now=now, min_login_age=1, mdir="new")
|
||||||
|
rep_new.process_mailbox_stat(mbox)
|
||||||
|
total_new = rep_new.message_buckets[0]
|
||||||
|
|
||||||
|
# cur has 500-byte msg, new has 600-byte msg (from fill_mbox)
|
||||||
|
assert total_cur == 500
|
||||||
|
assert total_new == 600
|
||||||
|
assert total_all == 500 + 600
|
||||||
|
|
||||||
|
|
||||||
def test_expiry_cli_basic(example_config, mbox1):
|
def test_expiry_cli_basic(example_config, mbox1):
|
||||||
args = (str(example_config._inipath),)
|
args = (str(example_config._inipath),)
|
||||||
expiry_main(args)
|
expiry_main(args)
|
||||||
|
|||||||
@@ -1,361 +0,0 @@
|
|||||||
import pytest
|
|
||||||
|
|
||||||
from chatmaild.filtermail import (
|
|
||||||
IncomingBeforeQueueHandler,
|
|
||||||
OutgoingBeforeQueueHandler,
|
|
||||||
SendRateLimiter,
|
|
||||||
check_armored_payload,
|
|
||||||
check_encrypted,
|
|
||||||
is_securejoin,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
|
||||||
def maildomain():
|
|
||||||
# let's not depend on a real chatmail instance for the offline tests below
|
|
||||||
return "chatmail.example.org"
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
|
||||||
def handler(make_config, maildomain):
|
|
||||||
config = make_config(maildomain)
|
|
||||||
return OutgoingBeforeQueueHandler(config)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
|
||||||
def inhandler(make_config, maildomain):
|
|
||||||
config = make_config(maildomain)
|
|
||||||
return IncomingBeforeQueueHandler(config)
|
|
||||||
|
|
||||||
|
|
||||||
def test_reject_forged_from(maildata, gencreds, handler):
|
|
||||||
class env:
|
|
||||||
mail_from = gencreds()[0]
|
|
||||||
rcpt_tos = [gencreds()[0]]
|
|
||||||
|
|
||||||
# test that the filter lets good mail through
|
|
||||||
to_addr = gencreds()[0]
|
|
||||||
env.content = maildata(
|
|
||||||
"encrypted.eml", from_addr=env.mail_from, to_addr=to_addr
|
|
||||||
).as_bytes()
|
|
||||||
|
|
||||||
assert not handler.check_DATA(envelope=env)
|
|
||||||
|
|
||||||
# test that the filter rejects forged mail
|
|
||||||
env.content = maildata(
|
|
||||||
"encrypted.eml", from_addr="forged@c3.testrun.org", to_addr=to_addr
|
|
||||||
).as_bytes()
|
|
||||||
error = handler.check_DATA(envelope=env)
|
|
||||||
assert "500" in error
|
|
||||||
|
|
||||||
|
|
||||||
def test_filtermail_no_encryption_detection(maildata):
|
|
||||||
msg = maildata(
|
|
||||||
"plain.eml", from_addr="some@example.org", to_addr="other@example.org"
|
|
||||||
)
|
|
||||||
assert not check_encrypted(msg)
|
|
||||||
|
|
||||||
# https://xkcd.com/1181/
|
|
||||||
msg = maildata(
|
|
||||||
"fake-encrypted.eml", from_addr="some@example.org", to_addr="other@example.org"
|
|
||||||
)
|
|
||||||
assert not check_encrypted(msg)
|
|
||||||
|
|
||||||
|
|
||||||
def test_filtermail_securejoin_detection(maildata):
|
|
||||||
msg = maildata(
|
|
||||||
"securejoin-vc.eml", from_addr="some@example.org", to_addr="other@example.org"
|
|
||||||
)
|
|
||||||
assert is_securejoin(msg)
|
|
||||||
|
|
||||||
msg = maildata(
|
|
||||||
"securejoin-vc-fake.eml",
|
|
||||||
from_addr="some@example.org",
|
|
||||||
to_addr="other@example.org",
|
|
||||||
)
|
|
||||||
assert not is_securejoin(msg)
|
|
||||||
|
|
||||||
|
|
||||||
def test_filtermail_encryption_detection(maildata):
|
|
||||||
msg = maildata(
|
|
||||||
"encrypted.eml",
|
|
||||||
from_addr="1@example.org",
|
|
||||||
to_addr="2@example.org",
|
|
||||||
subject="Subject does not matter, will be replaced anyway",
|
|
||||||
)
|
|
||||||
assert check_encrypted(msg)
|
|
||||||
|
|
||||||
|
|
||||||
def test_filtermail_no_literal_packets(maildata):
|
|
||||||
"""Test that literal OpenPGP packet is not considered an encrypted mail."""
|
|
||||||
msg = maildata("literal.eml", from_addr="1@example.org", to_addr="2@example.org")
|
|
||||||
assert not check_encrypted(msg)
|
|
||||||
|
|
||||||
|
|
||||||
def test_filtermail_unencrypted_mdn(maildata, gencreds):
|
|
||||||
"""Unencrypted MDNs should not pass."""
|
|
||||||
from_addr = gencreds()[0]
|
|
||||||
to_addr = gencreds()[0] + ".other"
|
|
||||||
msg = maildata("mdn.eml", from_addr=from_addr, to_addr=to_addr)
|
|
||||||
|
|
||||||
assert not check_encrypted(msg)
|
|
||||||
|
|
||||||
|
|
||||||
def test_send_rate_limiter():
|
|
||||||
limiter = SendRateLimiter()
|
|
||||||
for i in range(100):
|
|
||||||
if limiter.is_sending_allowed("some@example.org", 10):
|
|
||||||
if i <= 10:
|
|
||||||
continue
|
|
||||||
pytest.fail("limiter didn't work")
|
|
||||||
else:
|
|
||||||
assert i == 11
|
|
||||||
break
|
|
||||||
|
|
||||||
|
|
||||||
def test_cleartext_excempt_privacy(maildata, gencreds, handler):
|
|
||||||
from_addr = gencreds()[0]
|
|
||||||
to_addr = "privacy@testrun.org"
|
|
||||||
handler.config.passthrough_recipients = [to_addr]
|
|
||||||
false_to = "privacy@something.org"
|
|
||||||
|
|
||||||
msg = maildata("plain.eml", from_addr=from_addr, to_addr=to_addr)
|
|
||||||
|
|
||||||
class env:
|
|
||||||
mail_from = from_addr
|
|
||||||
rcpt_tos = [to_addr]
|
|
||||||
content = msg.as_bytes()
|
|
||||||
|
|
||||||
# assert that None/no error is returned
|
|
||||||
assert not handler.check_DATA(envelope=env)
|
|
||||||
|
|
||||||
class env2:
|
|
||||||
mail_from = from_addr
|
|
||||||
rcpt_tos = [to_addr, false_to]
|
|
||||||
content = msg.as_bytes()
|
|
||||||
|
|
||||||
assert "523" in handler.check_DATA(envelope=env2)
|
|
||||||
|
|
||||||
|
|
||||||
def test_cleartext_self_send_autocrypt_setup_message(maildata, gencreds, handler):
|
|
||||||
from_addr = gencreds()[0]
|
|
||||||
to_addr = from_addr
|
|
||||||
|
|
||||||
msg = maildata("asm.eml", from_addr=from_addr, to_addr=to_addr)
|
|
||||||
|
|
||||||
class env:
|
|
||||||
mail_from = from_addr
|
|
||||||
rcpt_tos = [to_addr]
|
|
||||||
content = msg.as_bytes()
|
|
||||||
|
|
||||||
assert not handler.check_DATA(envelope=env)
|
|
||||||
|
|
||||||
|
|
||||||
def test_cleartext_send_fails(maildata, gencreds, handler):
|
|
||||||
from_addr = gencreds()[0]
|
|
||||||
to_addr = gencreds()[0]
|
|
||||||
|
|
||||||
msg = maildata("plain.eml", from_addr=from_addr, to_addr=to_addr)
|
|
||||||
|
|
||||||
class env:
|
|
||||||
mail_from = from_addr
|
|
||||||
rcpt_tos = [to_addr]
|
|
||||||
content = msg.as_bytes()
|
|
||||||
|
|
||||||
res = handler.check_DATA(envelope=env)
|
|
||||||
assert "523 Encryption Needed" in res
|
|
||||||
|
|
||||||
|
|
||||||
def test_cleartext_incoming_fails(maildata, gencreds, inhandler):
|
|
||||||
from_addr = gencreds()[0]
|
|
||||||
to_addr, password = gencreds()
|
|
||||||
|
|
||||||
msg = maildata("plain.eml", from_addr=from_addr, to_addr=to_addr)
|
|
||||||
|
|
||||||
class env:
|
|
||||||
mail_from = from_addr
|
|
||||||
rcpt_tos = [to_addr]
|
|
||||||
content = msg.as_bytes()
|
|
||||||
|
|
||||||
user = inhandler.config.get_user(to_addr)
|
|
||||||
user.set_password(password)
|
|
||||||
res = inhandler.check_DATA(envelope=env)
|
|
||||||
assert "523 Encryption Needed" in res
|
|
||||||
|
|
||||||
user.allow_incoming_cleartext()
|
|
||||||
assert not inhandler.check_DATA(envelope=env)
|
|
||||||
|
|
||||||
|
|
||||||
def test_cleartext_incoming_mailer_daemon(maildata, gencreds, inhandler):
|
|
||||||
from_addr = "mailer-daemon@example.org"
|
|
||||||
to_addr = gencreds()[0]
|
|
||||||
|
|
||||||
msg = maildata("mailer-daemon.eml", from_addr=from_addr, to_addr=to_addr)
|
|
||||||
|
|
||||||
class env:
|
|
||||||
mail_from = from_addr
|
|
||||||
rcpt_tos = [to_addr]
|
|
||||||
content = msg.as_bytes()
|
|
||||||
|
|
||||||
assert not inhandler.check_DATA(envelope=env)
|
|
||||||
|
|
||||||
|
|
||||||
def test_cleartext_passthrough_domains(maildata, gencreds, handler):
|
|
||||||
from_addr = gencreds()[0]
|
|
||||||
to_addr = "privacy@x.y.z"
|
|
||||||
handler.config.passthrough_recipients = ["@x.y.z"]
|
|
||||||
false_to = "something@x.y"
|
|
||||||
|
|
||||||
msg = maildata("plain.eml", from_addr=from_addr, to_addr=to_addr)
|
|
||||||
|
|
||||||
class env:
|
|
||||||
mail_from = from_addr
|
|
||||||
rcpt_tos = [to_addr]
|
|
||||||
content = msg.as_bytes()
|
|
||||||
|
|
||||||
# assert that None/no error is returned
|
|
||||||
assert not handler.check_DATA(envelope=env)
|
|
||||||
|
|
||||||
class env2:
|
|
||||||
mail_from = from_addr
|
|
||||||
rcpt_tos = [to_addr, false_to]
|
|
||||||
content = msg.as_bytes()
|
|
||||||
|
|
||||||
assert "523" in handler.check_DATA(envelope=env2)
|
|
||||||
|
|
||||||
|
|
||||||
def test_cleartext_passthrough_senders(gencreds, handler, maildata):
|
|
||||||
acc1 = gencreds()[0]
|
|
||||||
to_addr = "recipient@something.org"
|
|
||||||
handler.config.passthrough_senders = [acc1]
|
|
||||||
|
|
||||||
msg = maildata("plain.eml", from_addr=acc1, to_addr=to_addr)
|
|
||||||
|
|
||||||
class env:
|
|
||||||
mail_from = acc1
|
|
||||||
rcpt_tos = to_addr
|
|
||||||
content = msg.as_bytes()
|
|
||||||
|
|
||||||
# assert that None/no error is returned
|
|
||||||
assert not handler.check_DATA(envelope=env)
|
|
||||||
|
|
||||||
|
|
||||||
def test_check_armored_payload():
|
|
||||||
prefix = "-----BEGIN PGP MESSAGE-----\r\n"
|
|
||||||
comment = "Version: ProtonMail\r\n"
|
|
||||||
payload = """\r
|
|
||||||
wU4DSqFx0d1yqAoSAQdAYkX/ZN/Az4B0k7X47zKyWrXxlDEdS3WOy0Yf2+GJTFgg\r
|
|
||||||
Zk5ql0mLG8Ze+ZifCS0XMO4otlemSyJ0K1ZPdFMGzUDBTgNqzkFabxXoXRIBB0AM\r
|
|
||||||
755wlX41X6Ay3KhnwBq7yEqSykVH6F3x11iHPKraLCAGZoaS8bKKNy/zg5slda1X\r
|
|
||||||
pt14b4aC1VwtSnYhcRRELNLD/wE2TFif+g7poMmFY50VyMPLYjVP96Z5QCT4+z4H\r
|
|
||||||
Ikh/pRRN8S3JNMrRJHc6prooSJmLcx47Y5un7VFy390MsJ+LiUJuQMDdYWRAinfs\r
|
|
||||||
Ebm89Ezjm7F03qbFPXE0X4ZNzVXS/eKO0uhJQdiov/vmbn41rNtHmNpqjaO0vi5+\r
|
|
||||||
sS9tR7yDUrIXiCUCN78eBLVioxtktsPZm5cDORbQWzv+7nmCEz9/JowCUcBVdCGn\r
|
|
||||||
1ofOaH82JCAX/cRx08pLaDNj6iolVBsi56Dd+2bGxJOZOG2AMcEyz0pXY0dOAJCD\r
|
|
||||||
iUThcQeGIdRnU3j8UBcnIEsjLu2+C+rrwMZQESMWKnJ0rnqTk0pK5kXScr6F/L0L\r
|
|
||||||
UE49ccIexNm3xZvYr5drszr6wz3Tv5fdue87P4etBt90gF/Vzknck+g1LLlkzZkp\r
|
|
||||||
d8dI0k2tOSPjUbDPnSy1x+X73WGpPZmj0kWT+RGvq0nH6UkJj3AQTG2qf1T8jK+3\r
|
|
||||||
rTp3LR9vDkMwDjX4R8SA9c0wdnUzzr79OYQC9lTnzcx+fM6BBmgQ2GrS33jaFLp7\r
|
|
||||||
L6/DFpCl5zhnPjM/2dKvMkw/Kd6XS/vjwsO405FQdjSDiQEEAZA+ZvAfcjdccbbU\r
|
|
||||||
yCO+x0QNdeBsufDVnh3xvzuWy4CICdTQT4s1AWRPCzjOj+SGmx5WqCLWfsd8Ma0+\r
|
|
||||||
w/C7SfTYu1FDQILLM+llpq1M/9GPley4QZ8JQjo262AyPXsPF/OW48uuZz0Db1xT\r
|
|
||||||
Yh4iHBztj4VSdy7l2+IyaIf7cnL4EEBFxv/MwmVDXvDlxyvfAfIsd3D9SvJESzKZ\r
|
|
||||||
VWDYwaocgeCN+ojKu1p885lu1EfRbX3fr3YO02K5/c2JYDkc0Py0W3wUP/J1XUax\r
|
|
||||||
pbKpzwlkxEgtmzsGqsOfMJqBV3TNDrOA2uBsa+uBqP5MGYLZ49S/4v/bW9I01Cr1\r
|
|
||||||
D2ZkV510Y1Vgo66WlP8mRqOTyt/5WRhPD+MxXdk67BNN/PmO6tMlVoJDuk+XwWPR\r
|
|
||||||
t2TvNaND/yabT9eYI55Og4fzKD6RIjouUX8DvKLkm+7aXxVs2uuLQ3Jco3O82z55\r
|
|
||||||
dbShU1jYsrw9oouXUz06MHPbkdhNbF/2hfhZ2qA31sNeovJw65iUv7sDKX3LVWgJ\r
|
|
||||||
10jlywcDwqlU8CO7WC9lGixYTbnOkYZpXCGEl8e6Jbs79l42YFo4ogYpFK1NXFhV\r
|
|
||||||
kOXRmDf/wmfj+c/ld3L2PkvwlgofhCudOQknZbo3ub1gjiTn7L+lMGHIj/3suMIl\r
|
|
||||||
ID4EUxAXScIM1ZEz2fjtW5jATlqYcLjLTbf/olw6HFyPNH+9IssqXeZNKnGwPUB9\r
|
|
||||||
3lTXsg0tpzl+x7F/2WjEw1DSNhjC0KnHt1vEYNMkUGDGFdN9y3ERLqX/FIgiASUb\r
|
|
||||||
bTvAVupnAK3raBezGmhrs6LsQtLS9P0VvQiLU3uDhMqw8Z4SISLpcD+NnVBHzQqm\r
|
|
||||||
6W5Qn/8xsCL6av18yUVTi2G3igt3QCNoYx9evt2ZcIkNoyyagUVjfZe5GHXh8Dnz\r
|
|
||||||
GaBXW/hg3HlXLRGaQu4RYCzBMJILcO25OhZOg6jbkCLiEexQlm2e9krB5cXR49Al\r
|
|
||||||
UN4fiB0KR9JyG2ayUdNJVkXZSZLnHyRgiaadlpUo16LVvw==\r
|
|
||||||
=b5Kp\r
|
|
||||||
-----END PGP MESSAGE-----\r
|
|
||||||
\r
|
|
||||||
\r
|
|
||||||
"""
|
|
||||||
|
|
||||||
commented_payload = prefix + comment + payload
|
|
||||||
assert check_armored_payload(commented_payload, outgoing=False) == True
|
|
||||||
assert check_armored_payload(commented_payload, outgoing=True) == False
|
|
||||||
|
|
||||||
payload = prefix + payload
|
|
||||||
assert check_armored_payload(payload, outgoing=False) == True
|
|
||||||
assert check_armored_payload(payload, outgoing=True) == True
|
|
||||||
|
|
||||||
payload = payload.removesuffix("\r\n")
|
|
||||||
assert check_armored_payload(payload, outgoing=False) == True
|
|
||||||
assert check_armored_payload(payload, outgoing=True) == True
|
|
||||||
|
|
||||||
payload = payload.removesuffix("\r\n")
|
|
||||||
assert check_armored_payload(payload, outgoing=False) == True
|
|
||||||
assert check_armored_payload(payload, outgoing=True) == True
|
|
||||||
|
|
||||||
payload = payload.removesuffix("\r\n")
|
|
||||||
assert check_armored_payload(payload, outgoing=False) == True
|
|
||||||
assert check_armored_payload(payload, outgoing=True) == True
|
|
||||||
|
|
||||||
payload = """-----BEGIN PGP MESSAGE-----\r
|
|
||||||
\r
|
|
||||||
HELLOWORLD
|
|
||||||
-----END PGP MESSAGE-----\r
|
|
||||||
\r
|
|
||||||
"""
|
|
||||||
assert check_armored_payload(payload, outgoing=False) == False
|
|
||||||
assert check_armored_payload(payload, outgoing=True) == False
|
|
||||||
|
|
||||||
payload = """-----BEGIN PGP MESSAGE-----\r
|
|
||||||
\r
|
|
||||||
=njUN
|
|
||||||
-----END PGP MESSAGE-----\r
|
|
||||||
\r
|
|
||||||
"""
|
|
||||||
assert check_armored_payload(payload, outgoing=False) == False
|
|
||||||
assert check_armored_payload(payload, outgoing=True) == False
|
|
||||||
|
|
||||||
# Test payload using partial body length
|
|
||||||
# as generated by GopenPGP.
|
|
||||||
payload = """-----BEGIN PGP MESSAGE-----\r
|
|
||||||
\r
|
|
||||||
wV4DdCVjRfOT3TQSAQdAY5+pjT6mlCxPGdR3be4w7oJJRUGIPI/Vnh+mJxGSm34w\r
|
|
||||||
LNlVc89S1g22uQYFif2sUJsQWbpoHpNkuWpkSgOaHmNvrZiY/YU5iv+cZ3LbmtUG\r
|
|
||||||
0uoBisSHh9O1c+5sYZSbrvYZ1NOwlD7Fv/U5/Mw4E5+CjxfdgNGp5o3DDddzPK78\r
|
|
||||||
jseDhdSXxnaiIJC93hxNX6R1RPt3G2gukyzx69wciPQShcF8zf3W3o75Ed7B8etV\r
|
|
||||||
QEeB16xzdFhKa9JxdjTu3osgCs21IO7wpcFkjc7nZzlW6jPnELJJaNmv4yOOCjMp\r
|
|
||||||
6YAkaN/BkL+jHTznHDuDsT5ilnTXpwHDU1Cm9PIx/KFcNCQnIB+2DcdIHPHUH1ci\r
|
|
||||||
jvqoeXAVWjKXEjS7PqPFuP/xGbrWG2ugs+toXJOKbgRkExvKs1dwPFKrgghvCVbW\r
|
|
||||||
AcKejQKAPArLwpkA7aD875TZQShvGt74fNs45XBlGOYOnNOAJ1KAmzrXLIDViyyB\r
|
|
||||||
kDsmTBk785xofuCkjBpXSe6vsMprPzCteDfaUibh8FHeJjucxPerwuOPEmnogNaf\r
|
|
||||||
YyL4+iy8H8I9/p7pmUqILprxTG0jTOtlk0bTVzeiF56W1xbtSEMuOo4oFbQTyOM2\r
|
|
||||||
bKXaYo774Jm+rRtKAnnI2dtf9RpK19cog6YNzfYjesLKbXDsPZbN5rmwyFiCvvxC\r
|
|
||||||
kQ6JLob+B2fPdY2gzy7LypxktS8Zi1HJcWDHJGVmQodaDLqKUObb4M26bXDe6oxI\r
|
|
||||||
NS8PJz5exVbM3KhZnUOEn6PJRBBf5a/ZqxlhZPcQo/oBuhKpBRpO5kSDwPIUByu3\r
|
|
||||||
UlXLSkpMqe9pUarAOEuQjfl2RVY7U+RrQYp4YP5keMO+i8NCefAFbowTTufO1JIq\r
|
|
||||||
2nVgCi/QVnxZyEc9OYt/8AE3g4cdojE+vsSDifZLSWYIetpfrohHv3dT3StD1QRG\r
|
|
||||||
0QE6qq6oKpg/IL0cjvuX4c7a7bslv2fXp8t75y37RU6253qdIebhxc/cRhPbc/yu\r
|
|
||||||
p0YLyD4SrvKTLP2ZV95jT4IPEpqm4AN3QmiOzdtqR2gLyb62L8QfqI/FdwsIiRiM\r
|
|
||||||
hqydwoqt/lfSqG1WKPh+6EkMkH+TDiCC1BQdbN1MNcyUtcjb35PR2c8Ld2TF3guA\r
|
|
||||||
jLIqMt/Vb7hBoMb2FcsOYY25ka9oV62OwgKWLXnFzk+modMR5fzb4kxVVAYEqP+D\r
|
|
||||||
T5KO1Vs76v1fyPGOq6BbBCvLwTqe/e6IZInJles4v5jrhnLcGKmNGivCUDe6X6NY\r
|
|
||||||
UKNt5RsZllwDQpaAb5dMNhyrk8SgIE7TBI7rvqIdUCE52Vy+0JDxFg5olRpFUfO6\r
|
|
||||||
/MyTW3Yo/ekk/npHr7iYYqJTCc21bDGLWQcIo/XO7WPxrKNWGBNPFnkRdw0MaKr4\r
|
|
||||||
+cEM3V8NFnSEpC12xA+RX/CezuJtwXZK5MpG76eYqMO6qyC+c25YcFecEufDZDxx\r
|
|
||||||
ZLqRszVRyxyWPtk/oIeQK2v9wOqY6N9/ff01gHz69vqYqN5bUw/QKZsmx1zW+gPw\r
|
|
||||||
6x2tDK2BHeYl182gCbhlKISRFwCtbjqZSkiKWao/VtygHkw0fK34avJuyQ/X9YaN\r
|
|
||||||
BRy+7Lf3VA53pnB5WJ1xwRXN8VDvmZeXzv2krHveCMemj0OjnRoCLu117xN0A5m9\r
|
|
||||||
Fm/RoDix5PolDHtWTtr2m1n2hp2LHnj8at9lFEd0SKhAYHVL9KjzycwWODZRXt+x\r
|
|
||||||
zGDDuooEeTvdY5NLyKcl4gETz1ZP4Ez5jGGjhPSwSpq1mU7UaJ9ZXXdr4KHyifW6\r
|
|
||||||
ggNzNsGhXTap7IWZpTtqXABydfiBshmH2NjqtNDwBweJVSgP10+r0WhMWlaZs6xl\r
|
|
||||||
V3o5yskJt6GlkwpJxZrTvN6Tiww/eW7HFV6NGf7IRSWY5tJc/iA7/92tOmkdvJ1q\r
|
|
||||||
myLbG7cJB787QjplEyVe2P/JBO6xYvbkJLf9Q+HaviTO25rugRSrYsoKMDfO8VlQ\r
|
|
||||||
1CcnTPVtApPZJEQzAWJEgVAM8uIlkqWJJMgyWT34sTkdBeCUFGloXQFs9Yxd0AGf\r
|
|
||||||
/zHEkYZSTKpVSvAIGu4=\r
|
|
||||||
=6iHb\r
|
|
||||||
-----END PGP MESSAGE-----\r
|
|
||||||
"""
|
|
||||||
assert check_armored_payload(payload, outgoing=False) == True
|
|
||||||
assert check_armored_payload(payload, outgoing=True) == True
|
|
||||||
@@ -1,9 +1,15 @@
|
|||||||
|
import shutil
|
||||||
import smtplib
|
import smtplib
|
||||||
import subprocess
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
|
pytestmark = pytest.mark.skipif(
|
||||||
|
shutil.which("filtermail") is None,
|
||||||
|
reason="filtermail binary not found",
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
def smtpserver():
|
def smtpserver():
|
||||||
@@ -41,6 +47,8 @@ def test_one_mail(
|
|||||||
make_config, make_popen, smtpserver, maildata, filtermail_mode, monkeypatch
|
make_config, make_popen, smtpserver, maildata, filtermail_mode, monkeypatch
|
||||||
):
|
):
|
||||||
monkeypatch.setenv("PYTHONUNBUFFERED", "1")
|
monkeypatch.setenv("PYTHONUNBUFFERED", "1")
|
||||||
|
# DKIM is tested by cmdeploy tests.
|
||||||
|
monkeypatch.setenv("FILTERMAIL_SKIP_DKIM", "1")
|
||||||
smtp_inject_port = 20025
|
smtp_inject_port = 20025
|
||||||
if filtermail_mode == "outgoing":
|
if filtermail_mode == "outgoing":
|
||||||
settings = dict(
|
settings = dict(
|
||||||
@@ -58,6 +66,10 @@ def test_one_mail(
|
|||||||
|
|
||||||
popen = make_popen(["filtermail", path, filtermail_mode])
|
popen = make_popen(["filtermail", path, filtermail_mode])
|
||||||
line = popen.stderr.readline().strip()
|
line = popen.stderr.readline().strip()
|
||||||
|
|
||||||
|
# skip a warning that FILTERMAIL_SKIP_DKIM shouldn't be used in prod
|
||||||
|
if b"DKIM verification DISABLED!" in line:
|
||||||
|
line = popen.stderr.readline().strip()
|
||||||
if b"loop" not in line:
|
if b"loop" not in line:
|
||||||
print(line.decode("ascii"), file=sys.stderr)
|
print(line.decode("ascii"), file=sys.stderr)
|
||||||
pytest.fail("starting filtermail failed")
|
pytest.fail("starting filtermail failed")
|
||||||
|
|||||||
@@ -36,29 +36,3 @@ def test_handle_dovecot_request_last_login(testaddr, example_config):
|
|||||||
res = dictproxy.handle_dovecot_request(msg, dictproxy_transactions)
|
res = dictproxy.handle_dovecot_request(msg, dictproxy_transactions)
|
||||||
assert res == "O\n"
|
assert res == "O\n"
|
||||||
assert len(dictproxy_transactions) == 0
|
assert len(dictproxy_transactions) == 0
|
||||||
|
|
||||||
|
|
||||||
def test_handle_dovecot_request_last_login_echobot(example_config):
|
|
||||||
dictproxy = LastLoginDictProxy(config=example_config)
|
|
||||||
|
|
||||||
authproxy = AuthDictProxy(config=example_config)
|
|
||||||
testaddr = f"echo@{example_config.mail_domain}"
|
|
||||||
authproxy.lookup_passdb(testaddr, "ignore")
|
|
||||||
user = dictproxy.config.get_user(testaddr)
|
|
||||||
|
|
||||||
transactions = {}
|
|
||||||
|
|
||||||
# set last-login info for user
|
|
||||||
tx = "1111"
|
|
||||||
msg = f"B{tx}\t{testaddr}"
|
|
||||||
res = dictproxy.handle_dovecot_request(msg, transactions)
|
|
||||||
assert not res
|
|
||||||
assert transactions == {tx: dict(addr=testaddr, res="O\n")}
|
|
||||||
|
|
||||||
timestamp = int(time.time())
|
|
||||||
msg = f"S{tx}\tshared/last-login/{testaddr}\t{timestamp}"
|
|
||||||
res = dictproxy.handle_dovecot_request(msg, transactions)
|
|
||||||
assert not res
|
|
||||||
assert len(transactions) == 1
|
|
||||||
read_timestamp = user.get_last_login_timestamp()
|
|
||||||
assert read_timestamp is None
|
|
||||||
|
|||||||
@@ -314,6 +314,51 @@ def test_persistent_queue_items(tmp_path, testaddr, token):
|
|||||||
assert not queue_item < item2 and not item2 < queue_item
|
assert not queue_item < item2 and not item2 < queue_item
|
||||||
|
|
||||||
|
|
||||||
|
def test_turn_credentials_exception_returns_N(notifier, metadata, monkeypatch):
|
||||||
|
"""Test that turn_credentials() failure returns N\\n instead of crashing."""
|
||||||
|
import chatmaild.metadata
|
||||||
|
|
||||||
|
dictproxy = MetadataDictProxy(
|
||||||
|
notifier=notifier,
|
||||||
|
metadata=metadata,
|
||||||
|
turn_hostname="turn.example.org",
|
||||||
|
)
|
||||||
|
|
||||||
|
def mock_turn_credentials():
|
||||||
|
raise ConnectionRefusedError("socket not available")
|
||||||
|
|
||||||
|
monkeypatch.setattr(chatmaild.metadata, "turn_credentials", mock_turn_credentials)
|
||||||
|
|
||||||
|
transactions = {}
|
||||||
|
res = dictproxy.handle_dovecot_request(
|
||||||
|
"Lshared/0123/vendor/vendor.dovecot/pvt/server/vendor/deltachat/turn"
|
||||||
|
"\tuser@example.org",
|
||||||
|
transactions,
|
||||||
|
)
|
||||||
|
assert res == "N\n"
|
||||||
|
|
||||||
|
|
||||||
|
def test_turn_credentials_success(notifier, metadata, monkeypatch):
|
||||||
|
"""Test that valid turn_credentials() returns TURN URI."""
|
||||||
|
import chatmaild.metadata
|
||||||
|
|
||||||
|
dictproxy = MetadataDictProxy(
|
||||||
|
notifier=notifier,
|
||||||
|
metadata=metadata,
|
||||||
|
turn_hostname="turn.example.org",
|
||||||
|
)
|
||||||
|
|
||||||
|
monkeypatch.setattr(chatmaild.metadata, "turn_credentials", lambda: "user:pass")
|
||||||
|
|
||||||
|
transactions = {}
|
||||||
|
res = dictproxy.handle_dovecot_request(
|
||||||
|
"Lshared/0123/vendor/vendor.dovecot/pvt/server/vendor/deltachat/turn"
|
||||||
|
"\tuser@example.org",
|
||||||
|
transactions,
|
||||||
|
)
|
||||||
|
assert res == "Oturn.example.org:3478:user:pass\n"
|
||||||
|
|
||||||
|
|
||||||
def test_iroh_relay(dictproxy):
|
def test_iroh_relay(dictproxy):
|
||||||
rfile = io.BytesIO(
|
rfile = io.BytesIO(
|
||||||
b"\n".join(
|
b"\n".join(
|
||||||
|
|||||||
@@ -1,24 +0,0 @@
|
|||||||
from chatmaild.metrics import main
|
|
||||||
|
|
||||||
|
|
||||||
def test_main(tmp_path, capsys):
|
|
||||||
paths = []
|
|
||||||
for x in ("ci-asllkj", "ac_12l3kj", "qweqwe", "ci-l1k2j31l2k3"):
|
|
||||||
p = tmp_path.joinpath(x)
|
|
||||||
p.mkdir()
|
|
||||||
p.joinpath("cur").mkdir()
|
|
||||||
paths.append(p)
|
|
||||||
|
|
||||||
tmp_path.joinpath("nomailbox").mkdir()
|
|
||||||
|
|
||||||
main(tmp_path)
|
|
||||||
out, _ = capsys.readouterr()
|
|
||||||
d = {}
|
|
||||||
for line in out.split("\n"):
|
|
||||||
if line.strip() and not line.startswith("#"):
|
|
||||||
name, num = line.split()
|
|
||||||
d[name] = int(num)
|
|
||||||
|
|
||||||
assert d["accounts"] == 4
|
|
||||||
assert d["ci_accounts"] == 3
|
|
||||||
assert d["nonci_accounts"] == 1
|
|
||||||
@@ -1,7 +1,11 @@
|
|||||||
import json
|
import json
|
||||||
|
|
||||||
import chatmaild
|
import chatmaild
|
||||||
from chatmaild.newemail import create_newemail_dict, print_new_account
|
from chatmaild.newemail import (
|
||||||
|
create_dclogin_url,
|
||||||
|
create_newemail_dict,
|
||||||
|
print_new_account,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def test_create_newemail_dict(example_config):
|
def test_create_newemail_dict(example_config):
|
||||||
@@ -15,6 +19,18 @@ def test_create_newemail_dict(example_config):
|
|||||||
assert ac1["password"] != ac2["password"]
|
assert ac1["password"] != ac2["password"]
|
||||||
|
|
||||||
|
|
||||||
|
def test_create_dclogin_url():
|
||||||
|
url = create_dclogin_url("user@example.org", "p@ss w+rd")
|
||||||
|
assert url.startswith("dclogin:")
|
||||||
|
assert "v=1" in url
|
||||||
|
assert "ic=3" in url
|
||||||
|
|
||||||
|
assert "user@example.org" in url
|
||||||
|
# password special chars must be encoded
|
||||||
|
assert "p%40ss" in url
|
||||||
|
assert "w%2Brd" in url
|
||||||
|
|
||||||
|
|
||||||
def test_print_new_account(capsys, monkeypatch, maildomain, tmpdir, example_config):
|
def test_print_new_account(capsys, monkeypatch, maildomain, tmpdir, example_config):
|
||||||
monkeypatch.setattr(chatmaild.newemail, "CONFIG_PATH", str(example_config._inipath))
|
monkeypatch.setattr(chatmaild.newemail, "CONFIG_PATH", str(example_config._inipath))
|
||||||
print_new_account()
|
print_new_account()
|
||||||
@@ -25,3 +41,20 @@ def test_print_new_account(capsys, monkeypatch, maildomain, tmpdir, example_conf
|
|||||||
dic = json.loads(lines[2])
|
dic = json.loads(lines[2])
|
||||||
assert dic["email"].endswith(f"@{example_config.mail_domain}")
|
assert dic["email"].endswith(f"@{example_config.mail_domain}")
|
||||||
assert len(dic["password"]) >= 10
|
assert len(dic["password"]) >= 10
|
||||||
|
# default tls_cert=acme should not include dclogin_url
|
||||||
|
assert "dclogin_url" not in dic
|
||||||
|
|
||||||
|
|
||||||
|
def test_print_new_account_self_signed(capsys, monkeypatch, make_config):
|
||||||
|
config = make_config("_test.example.org")
|
||||||
|
monkeypatch.setattr(chatmaild.newemail, "CONFIG_PATH", str(config._inipath))
|
||||||
|
print_new_account()
|
||||||
|
out, err = capsys.readouterr()
|
||||||
|
lines = out.split("\n")
|
||||||
|
dic = json.loads(lines[2])
|
||||||
|
assert "dclogin_url" in dic
|
||||||
|
url = dic["dclogin_url"]
|
||||||
|
assert url.startswith("dclogin:")
|
||||||
|
assert "ic=3" in url
|
||||||
|
|
||||||
|
assert dic["email"].split("@")[0] in url
|
||||||
|
|||||||
73
chatmaild/src/chatmaild/tests/test_turnserver.py
Normal file
73
chatmaild/src/chatmaild/tests/test_turnserver.py
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
import socket
|
||||||
|
import threading
|
||||||
|
import time
|
||||||
|
from unittest.mock import patch
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from chatmaild.turnserver import turn_credentials
|
||||||
|
|
||||||
|
SOCKET_PATH = "/run/chatmail-turn/turn.socket"
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def turn_socket(tmp_path):
|
||||||
|
"""Create a real Unix socket server at a temp path."""
|
||||||
|
sock_path = str(tmp_path / "turn.socket")
|
||||||
|
server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
|
||||||
|
server.bind(sock_path)
|
||||||
|
server.listen(1)
|
||||||
|
yield sock_path, server
|
||||||
|
server.close()
|
||||||
|
|
||||||
|
|
||||||
|
def _call_turn_credentials(sock_path):
|
||||||
|
"""Call turn_credentials but connect to sock_path instead of hardcoded path."""
|
||||||
|
original_connect = socket.socket.connect
|
||||||
|
|
||||||
|
def patched_connect(self, address):
|
||||||
|
if address == SOCKET_PATH:
|
||||||
|
address = sock_path
|
||||||
|
return original_connect(self, address)
|
||||||
|
|
||||||
|
with patch.object(socket.socket, "connect", patched_connect):
|
||||||
|
return turn_credentials()
|
||||||
|
|
||||||
|
|
||||||
|
def test_turn_credentials_timeout(turn_socket):
|
||||||
|
"""Server accepts but never responds — must raise socket.timeout."""
|
||||||
|
sock_path, server = turn_socket
|
||||||
|
|
||||||
|
def accept_and_hang():
|
||||||
|
conn, _ = server.accept()
|
||||||
|
time.sleep(30)
|
||||||
|
conn.close()
|
||||||
|
|
||||||
|
t = threading.Thread(target=accept_and_hang, daemon=True)
|
||||||
|
t.start()
|
||||||
|
|
||||||
|
with pytest.raises(socket.timeout):
|
||||||
|
_call_turn_credentials(sock_path)
|
||||||
|
|
||||||
|
|
||||||
|
def test_turn_credentials_connection_refused(tmp_path):
|
||||||
|
"""Socket file doesn't exist — must raise ConnectionRefusedError or FileNotFoundError."""
|
||||||
|
missing = str(tmp_path / "nonexistent.socket")
|
||||||
|
with pytest.raises((ConnectionRefusedError, FileNotFoundError)):
|
||||||
|
_call_turn_credentials(missing)
|
||||||
|
|
||||||
|
|
||||||
|
def test_turn_credentials_success(turn_socket):
|
||||||
|
"""Server responds with credentials — must return stripped string."""
|
||||||
|
sock_path, server = turn_socket
|
||||||
|
|
||||||
|
def respond():
|
||||||
|
conn, _ = server.accept()
|
||||||
|
conn.sendall(b"testuser:testpass\n")
|
||||||
|
conn.close()
|
||||||
|
|
||||||
|
t = threading.Thread(target=respond, daemon=True)
|
||||||
|
t.start()
|
||||||
|
|
||||||
|
result = _call_turn_credentials(sock_path)
|
||||||
|
assert result == "testuser:testpass"
|
||||||
@@ -4,6 +4,7 @@ import socket
|
|||||||
|
|
||||||
def turn_credentials() -> str:
|
def turn_credentials() -> str:
|
||||||
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as client_socket:
|
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as client_socket:
|
||||||
|
client_socket.settimeout(5)
|
||||||
client_socket.connect("/run/chatmail-turn/turn.socket")
|
client_socket.connect("/run/chatmail-turn/turn.socket")
|
||||||
with client_socket.makefile("rb") as file:
|
with client_socket.makefile("rb") as file:
|
||||||
return file.readline().decode("utf-8").strip()
|
return file.readline().decode("utf-8").strip()
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ class User:
|
|||||||
|
|
||||||
@property
|
@property
|
||||||
def can_track(self):
|
def can_track(self):
|
||||||
return "@" in self.addr and not self.addr.startswith("echo@")
|
return "@" in self.addr
|
||||||
|
|
||||||
def get_userdb_dict(self):
|
def get_userdb_dict(self):
|
||||||
"""Return a non-empty dovecot 'userdb' style dict
|
"""Return a non-empty dovecot 'userdb' style dict
|
||||||
@@ -55,11 +55,9 @@ class User:
|
|||||||
try:
|
try:
|
||||||
write_bytes_atomic(self.password_path, password)
|
write_bytes_atomic(self.password_path, password)
|
||||||
except PermissionError:
|
except PermissionError:
|
||||||
if not self.addr.startswith("echo@"):
|
logging.error(f"could not write password for: {self.addr}")
|
||||||
logging.error(f"could not write password for: {self.addr}")
|
raise
|
||||||
raise
|
self.enforce_E2EE_path.touch()
|
||||||
if not self.addr.startswith("echo@"):
|
|
||||||
self.enforce_E2EE_path.touch()
|
|
||||||
|
|
||||||
def set_last_login_timestamp(self, timestamp):
|
def set_last_login_timestamp(self, timestamp):
|
||||||
"""Track login time with daily granularity
|
"""Track login time with daily granularity
|
||||||
|
|||||||
94
cliff.toml
Normal file
94
cliff.toml
Normal file
@@ -0,0 +1,94 @@
|
|||||||
|
# git-cliff ~ configuration file
|
||||||
|
# https://git-cliff.org/docs/configuration
|
||||||
|
|
||||||
|
|
||||||
|
[changelog]
|
||||||
|
# A Tera template to be rendered for each release in the changelog.
|
||||||
|
# See https://keats.github.io/tera/docs/#introduction
|
||||||
|
body = """
|
||||||
|
{% if version %}\
|
||||||
|
## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
|
||||||
|
{% else %}\
|
||||||
|
## [unreleased]
|
||||||
|
{% endif %}\
|
||||||
|
{% for group, commits in commits | group_by(attribute="group") %}
|
||||||
|
### {{ group | striptags | trim | upper_first }}
|
||||||
|
{% for commit in commits %}
|
||||||
|
- {% if commit.scope %}*({{ commit.scope }})* {% endif %}\
|
||||||
|
{% if commit.breaking %}[**breaking**] {% endif %}\
|
||||||
|
{{ commit.message | upper_first }}\
|
||||||
|
{% endfor %}
|
||||||
|
{% endfor %}
|
||||||
|
"""
|
||||||
|
# Remove leading and trailing whitespaces from the changelog's body.
|
||||||
|
trim = true
|
||||||
|
# Render body even when there are no releases to process.
|
||||||
|
render_always = true
|
||||||
|
# An array of regex based postprocessors to modify the changelog.
|
||||||
|
postprocessors = [
|
||||||
|
# Replace the placeholder <REPO> with a URL.
|
||||||
|
#{ pattern = '<REPO>', replace = "https://github.com/orhun/git-cliff" },
|
||||||
|
]
|
||||||
|
# render body even when there are no releases to process
|
||||||
|
# render_always = true
|
||||||
|
# output file path
|
||||||
|
# output = "test.md"
|
||||||
|
|
||||||
|
[git]
|
||||||
|
# Parse commits according to the conventional commits specification.
|
||||||
|
# See https://www.conventionalcommits.org
|
||||||
|
conventional_commits = true
|
||||||
|
# Exclude commits that do not match the conventional commits specification.
|
||||||
|
filter_unconventional = true
|
||||||
|
# Require all commits to be conventional.
|
||||||
|
# Takes precedence over filter_unconventional.
|
||||||
|
require_conventional = false
|
||||||
|
# Split commits on newlines, treating each line as an individual commit.
|
||||||
|
split_commits = false
|
||||||
|
# An array of regex based parsers to modify commit messages prior to further processing.
|
||||||
|
commit_preprocessors = [
|
||||||
|
# Replace issue numbers with link templates to be updated in `changelog.postprocessors`.
|
||||||
|
#{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](<REPO>/issues/${2}))"},
|
||||||
|
# Check spelling of the commit message using https://github.com/crate-ci/typos.
|
||||||
|
# If the spelling is incorrect, it will be fixed automatically.
|
||||||
|
#{ pattern = '.*', replace_command = 'typos --write-changes -' },
|
||||||
|
]
|
||||||
|
# Prevent commits that are breaking from being excluded by commit parsers.
|
||||||
|
protect_breaking_commits = false
|
||||||
|
# An array of regex based parsers for extracting data from the commit message.
|
||||||
|
# Assigns commits to groups.
|
||||||
|
# Optionally sets the commit's scope and can decide to exclude commits from further processing.
|
||||||
|
commit_parsers = [
|
||||||
|
{ message = "^feat", group = "Features" },
|
||||||
|
{ message = "^fix", group = "Bug Fixes" },
|
||||||
|
{ message = "^docs", group = "Documentation" },
|
||||||
|
{ message = "^perf", group = "Performance" },
|
||||||
|
{ message = "^refactor", group = "Refactor" },
|
||||||
|
{ message = "^style", group = "Styling" },
|
||||||
|
{ message = "^test", group = "Testing" },
|
||||||
|
{ message = "^chore\\(release\\): prepare for", skip = true },
|
||||||
|
{ message = "^chore\\(deps.*\\)", skip = true },
|
||||||
|
{ message = "^chore\\(pr\\)", skip = true },
|
||||||
|
{ message = "^chore\\(pull\\)", skip = true },
|
||||||
|
{ message = "^chore|^ci", group = "Miscellaneous Tasks" },
|
||||||
|
{ body = ".*security", group = "Security" },
|
||||||
|
{ message = "^revert", group = "Revert" },
|
||||||
|
{ message = ".*", group = "Other" },
|
||||||
|
]
|
||||||
|
# Exclude commits that are not matched by any commit parser.
|
||||||
|
filter_commits = false
|
||||||
|
# Fail on a commit that is not matched by any commit parser.
|
||||||
|
fail_on_unmatched_commit = false
|
||||||
|
# An array of link parsers for extracting external references, and turning them into URLs, using regex.
|
||||||
|
link_parsers = []
|
||||||
|
# Include only the tags that belong to the current branch.
|
||||||
|
use_branch_tags = false
|
||||||
|
# Order releases topologically instead of chronologically.
|
||||||
|
topo_order = false
|
||||||
|
# Order commits topologically instead of chronologically.
|
||||||
|
topo_order_commits = true
|
||||||
|
# Order of commits in each group/release within the changelog.
|
||||||
|
# Allowed values: newest, oldest
|
||||||
|
sort_commits = "oldest"
|
||||||
|
# Process submodules commits
|
||||||
|
recurse_submodules = false
|
||||||
@@ -20,6 +20,7 @@ dependencies = [
|
|||||||
"pytest-xdist",
|
"pytest-xdist",
|
||||||
"execnet",
|
"execnet",
|
||||||
"imap_tools",
|
"imap_tools",
|
||||||
|
"deltachat-rpc-client",
|
||||||
]
|
]
|
||||||
|
|
||||||
[project.scripts]
|
[project.scripts]
|
||||||
|
|||||||
@@ -61,6 +61,19 @@ class AcmetoolDeployer(Deployer):
|
|||||||
mode="644",
|
mode="644",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
server.shell(
|
||||||
|
name=f"Remove old acmetool desired files for {self.domains[0]}",
|
||||||
|
commands=[f"rm -f /var/lib/acme/desired/{self.domains[0]}-*"],
|
||||||
|
)
|
||||||
|
files.template(
|
||||||
|
src=importlib.resources.files(__package__).joinpath("desired.yaml.j2"),
|
||||||
|
dest=f"/var/lib/acme/desired/{self.domains[0]}", # 0 is mailhost TLD
|
||||||
|
user="root",
|
||||||
|
group="root",
|
||||||
|
mode="644",
|
||||||
|
domains=self.domains,
|
||||||
|
)
|
||||||
|
|
||||||
service_file = files.put(
|
service_file = files.put(
|
||||||
src=importlib.resources.files(__package__).joinpath(
|
src=importlib.resources.files(__package__).joinpath(
|
||||||
"acmetool-redirector.service"
|
"acmetool-redirector.service"
|
||||||
@@ -123,6 +136,6 @@ class AcmetoolDeployer(Deployer):
|
|||||||
self.need_restart_reconcile_timer = False
|
self.need_restart_reconcile_timer = False
|
||||||
|
|
||||||
server.shell(
|
server.shell(
|
||||||
name=f"Request certificate for: {', '.join(self.domains)}",
|
name=f"Reconcile certificates for: {', '.join(self.domains)}",
|
||||||
commands=[f"acmetool want --xlog.severity=debug {' '.join(self.domains)}"],
|
commands=["acmetool --batch --xlog.severity=debug reconcile"],
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ Description=acmetool HTTP redirector
|
|||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
Type=notify
|
Type=notify
|
||||||
ExecStart=/usr/bin/acmetool redirector --service.uid=daemon
|
ExecStart=/usr/bin/acmetool redirector --service.uid=daemon --bind=127.0.0.1:402
|
||||||
Restart=always
|
Restart=always
|
||||||
RestartSec=30
|
RestartSec=30
|
||||||
|
|
||||||
|
|||||||
6
cmdeploy/src/cmdeploy/acmetool/desired.yaml.j2
Normal file
6
cmdeploy/src/cmdeploy/acmetool/desired.yaml.j2
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
satisfy:
|
||||||
|
names:
|
||||||
|
{%- for domain in domains %}
|
||||||
|
- {{ domain }}
|
||||||
|
{%- endfor %}
|
||||||
|
|
||||||
@@ -1,2 +1,2 @@
|
|||||||
"acme-enter-email": "{{ email }}"
|
"acme-enter-email": "{{ email }}"
|
||||||
"acme-agreement:https://letsencrypt.org/documents/LE-SA-v1.5-February-24-2025.pdf": true
|
"acme-agreement:https://letsencrypt.org/documents/LE-SA-v1.6-August-18-2025.pdf": true
|
||||||
|
|||||||
@@ -1,6 +1,65 @@
|
|||||||
|
import importlib.resources
|
||||||
|
import io
|
||||||
import os
|
import os
|
||||||
|
|
||||||
from pyinfra.operations import server
|
from pyinfra.operations import files, server, systemd
|
||||||
|
|
||||||
|
|
||||||
|
def has_systemd():
|
||||||
|
"""Returns False during Docker image builds or any other non-systemd environment."""
|
||||||
|
return os.path.isdir("/run/systemd/system")
|
||||||
|
|
||||||
|
|
||||||
|
def get_resource(arg, pkg=__package__):
|
||||||
|
return importlib.resources.files(pkg).joinpath(arg)
|
||||||
|
|
||||||
|
|
||||||
|
def configure_remote_units(mail_domain, units) -> None:
|
||||||
|
remote_base_dir = "/usr/local/lib/chatmaild"
|
||||||
|
remote_venv_dir = f"{remote_base_dir}/venv"
|
||||||
|
remote_chatmail_inipath = f"{remote_base_dir}/chatmail.ini"
|
||||||
|
root_owned = dict(user="root", group="root", mode="644")
|
||||||
|
|
||||||
|
# install systemd units
|
||||||
|
for fn in units:
|
||||||
|
params = dict(
|
||||||
|
execpath=f"{remote_venv_dir}/bin/{fn}",
|
||||||
|
config_path=remote_chatmail_inipath,
|
||||||
|
remote_venv_dir=remote_venv_dir,
|
||||||
|
mail_domain=mail_domain,
|
||||||
|
)
|
||||||
|
|
||||||
|
basename = fn if "." in fn else f"{fn}.service"
|
||||||
|
|
||||||
|
source_path = get_resource(f"service/{basename}.f")
|
||||||
|
content = source_path.read_text().format(**params).encode()
|
||||||
|
|
||||||
|
files.put(
|
||||||
|
name=f"Upload {basename}",
|
||||||
|
src=io.BytesIO(content),
|
||||||
|
dest=f"/etc/systemd/system/{basename}",
|
||||||
|
**root_owned,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def activate_remote_units(units) -> None:
|
||||||
|
# activate systemd units
|
||||||
|
for fn in units:
|
||||||
|
basename = fn if "." in fn else f"{fn}.service"
|
||||||
|
|
||||||
|
if fn == "chatmail-expire" or fn == "chatmail-fsreport":
|
||||||
|
# don't auto-start but let the corresponding timer trigger execution
|
||||||
|
enabled = False
|
||||||
|
else:
|
||||||
|
enabled = True
|
||||||
|
systemd.service(
|
||||||
|
name=f"Setup {basename}",
|
||||||
|
service=basename,
|
||||||
|
running=enabled,
|
||||||
|
enabled=enabled,
|
||||||
|
restarted=enabled,
|
||||||
|
daemon_reload=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class Deployment:
|
class Deployment:
|
||||||
|
|||||||
@@ -1,30 +0,0 @@
|
|||||||
;
|
|
||||||
; Required DNS entries for chatmail servers
|
|
||||||
;
|
|
||||||
{% if A %}
|
|
||||||
{{ mail_domain }}. A {{ A }}
|
|
||||||
{% endif %}
|
|
||||||
{% if AAAA %}
|
|
||||||
{{ mail_domain }}. AAAA {{ AAAA }}
|
|
||||||
{% endif %}
|
|
||||||
{{ mail_domain }}. MX 10 {{ mail_domain }}.
|
|
||||||
_mta-sts.{{ mail_domain }}. TXT "v=STSv1; id={{ sts_id }}"
|
|
||||||
mta-sts.{{ mail_domain }}. CNAME {{ mail_domain }}.
|
|
||||||
www.{{ mail_domain }}. CNAME {{ mail_domain }}.
|
|
||||||
{{ dkim_entry }}
|
|
||||||
|
|
||||||
;
|
|
||||||
; Recommended DNS entries for interoperability and security-hardening
|
|
||||||
;
|
|
||||||
{{ mail_domain }}. TXT "v=spf1 a ~all"
|
|
||||||
_dmarc.{{ mail_domain }}. TXT "v=DMARC1;p=reject;adkim=s;aspf=s"
|
|
||||||
|
|
||||||
{% if acme_account_url %}
|
|
||||||
{{ mail_domain }}. CAA 0 issue "letsencrypt.org;accounturi={{ acme_account_url }}"
|
|
||||||
{% endif %}
|
|
||||||
_adsp._domainkey.{{ mail_domain }}. TXT "dkim=discardable"
|
|
||||||
|
|
||||||
_submission._tcp.{{ mail_domain }}. SRV 0 1 587 {{ mail_domain }}.
|
|
||||||
_submissions._tcp.{{ mail_domain }}. SRV 0 1 465 {{ mail_domain }}.
|
|
||||||
_imap._tcp.{{ mail_domain }}. SRV 0 1 143 {{ mail_domain }}.
|
|
||||||
_imaps._tcp.{{ mail_domain }}. SRV 0 1 993 {{ mail_domain }}.
|
|
||||||
@@ -5,12 +5,13 @@ along with command line option and subcommand parsing.
|
|||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
import importlib.resources
|
import importlib.resources
|
||||||
import importlib.util
|
|
||||||
import os
|
import os
|
||||||
import pathlib
|
import pathlib
|
||||||
import shutil
|
import shutil
|
||||||
import subprocess
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
|
import time
|
||||||
|
from contextlib import contextmanager
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
|
||||||
import pyinfra
|
import pyinfra
|
||||||
@@ -19,7 +20,24 @@ from packaging import version
|
|||||||
from termcolor import colored
|
from termcolor import colored
|
||||||
|
|
||||||
from . import dns, remote
|
from . import dns, remote
|
||||||
from .sshexec import LocalExec, SSHExec
|
from .lxc.cli import ( # noqa: F401
|
||||||
|
lxc_start_cmd,
|
||||||
|
lxc_start_cmd_options,
|
||||||
|
lxc_status_cmd,
|
||||||
|
lxc_status_cmd_options,
|
||||||
|
lxc_stop_cmd,
|
||||||
|
lxc_stop_cmd_options,
|
||||||
|
lxc_test_cmd,
|
||||||
|
lxc_test_cmd_options,
|
||||||
|
)
|
||||||
|
from .sshexec import (
|
||||||
|
LocalExec,
|
||||||
|
SSHExec,
|
||||||
|
resolve_host_from_ssh_config,
|
||||||
|
resolve_key_from_ssh_config,
|
||||||
|
)
|
||||||
|
from .util import build_chatmaild_sdist
|
||||||
|
from .www import main as webdev_main
|
||||||
|
|
||||||
#
|
#
|
||||||
# cmdeploy sub commands and options
|
# cmdeploy sub commands and options
|
||||||
@@ -71,6 +89,11 @@ def run_cmd_options(parser):
|
|||||||
action="store_true",
|
action="store_true",
|
||||||
help="install/upgrade the server, but disable postfix & dovecot for now",
|
help="install/upgrade the server, but disable postfix & dovecot for now",
|
||||||
)
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--website-only",
|
||||||
|
action="store_true",
|
||||||
|
help="only update/deploy the website, skipping full server upgrade/deployment, useful when you only changed/updated the web pages and don't need to re-run a full server upgrade",
|
||||||
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--skip-dns-check",
|
"--skip-dns-check",
|
||||||
dest="dns_check_disabled",
|
dest="dns_check_disabled",
|
||||||
@@ -78,28 +101,54 @@ def run_cmd_options(parser):
|
|||||||
help="disable checks nslookup for dns",
|
help="disable checks nslookup for dns",
|
||||||
)
|
)
|
||||||
add_ssh_host_option(parser)
|
add_ssh_host_option(parser)
|
||||||
|
add_ssh_config_option(parser)
|
||||||
|
|
||||||
|
|
||||||
def run_cmd(args, out):
|
def run_cmd(args, out):
|
||||||
"""Deploy chatmail services on the remote server."""
|
"""Deploy chatmail services on the remote server."""
|
||||||
|
|
||||||
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain
|
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain
|
||||||
sshexec = get_sshexec(ssh_host)
|
sshexec = get_sshexec(ssh_host, ssh_config=args.ssh_config)
|
||||||
require_iroh = args.config.enable_iroh_relay
|
require_iroh = args.config.enable_iroh_relay
|
||||||
|
strict_tls = args.config.tls_cert_mode == "acme"
|
||||||
if not args.dns_check_disabled:
|
if not args.dns_check_disabled:
|
||||||
remote_data = dns.get_initial_remote_data(sshexec, args.config.mail_domain)
|
remote_data = dns.get_initial_remote_data(sshexec, args.config.mail_domain)
|
||||||
if not dns.check_initial_remote_data(remote_data, print=out.red):
|
if not dns.check_initial_remote_data(
|
||||||
|
remote_data, strict_tls=strict_tls, print=out.red
|
||||||
|
):
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
env = os.environ.copy()
|
env = os.environ.copy()
|
||||||
env["CHATMAIL_INI"] = args.inipath
|
env["CHATMAIL_INI"] = args.inipath
|
||||||
|
env["CHATMAIL_WEBSITE_ONLY"] = "True" if args.website_only else ""
|
||||||
env["CHATMAIL_DISABLE_MAIL"] = "True" if args.disable_mail else ""
|
env["CHATMAIL_DISABLE_MAIL"] = "True" if args.disable_mail else ""
|
||||||
env["CHATMAIL_REQUIRE_IROH"] = "True" if require_iroh else ""
|
env["CHATMAIL_REQUIRE_IROH"] = "True" if require_iroh else ""
|
||||||
|
|
||||||
|
if not args.website_only:
|
||||||
|
build_chatmaild_sdist()
|
||||||
|
if not args.dns_check_disabled:
|
||||||
|
env["CHATMAIL_ADDR_V4"] = remote_data.get("A") or ""
|
||||||
|
env["CHATMAIL_ADDR_V6"] = remote_data.get("AAAA") or ""
|
||||||
deploy_path = importlib.resources.files(__package__).joinpath("run.py").resolve()
|
deploy_path = importlib.resources.files(__package__).joinpath("run.py").resolve()
|
||||||
pyinf = "pyinfra --dry" if args.dry_run else "pyinfra"
|
pyinf = "pyinfra --dry" if args.dry_run else "pyinfra"
|
||||||
|
|
||||||
cmd = f"{pyinf} --ssh-user root {ssh_host} {deploy_path} -y"
|
cmd = f"{pyinf} --ssh-user root {ssh_host} {deploy_path} -y"
|
||||||
|
ssh_config = args.ssh_config
|
||||||
|
if ssh_config:
|
||||||
|
ssh_config = str(Path(ssh_config).resolve())
|
||||||
|
|
||||||
|
# Use pyinfra's native SSH data keys to configure the connection directly
|
||||||
|
# rather than relying on paramiko config parsing (see also sshexec.py)
|
||||||
|
ip = resolve_host_from_ssh_config(ssh_host, ssh_config)
|
||||||
|
key = resolve_key_from_ssh_config(ssh_host, ssh_config)
|
||||||
|
data_args = f"--data ssh_hostname={ip} --data ssh_known_hosts_file=/dev/null"
|
||||||
|
if key:
|
||||||
|
data_args += f" --data ssh_key={key}"
|
||||||
|
cmd = f"{pyinf} --ssh-user root {ssh_host} {deploy_path} -y {data_args}"
|
||||||
if ssh_host in ["localhost", "@docker"]:
|
if ssh_host in ["localhost", "@docker"]:
|
||||||
|
if ssh_host == "@docker":
|
||||||
|
env["CHATMAIL_NOPORTCHECK"] = "True"
|
||||||
|
env["CHATMAIL_NOSYSCTL"] = "True"
|
||||||
cmd = f"{pyinf} @local {deploy_path} -y"
|
cmd = f"{pyinf} @local {deploy_path} -y"
|
||||||
|
|
||||||
if version.parse(pyinfra.__version__) < version.parse("3"):
|
if version.parse(pyinfra.__version__) < version.parse("3"):
|
||||||
@@ -107,28 +156,22 @@ def run_cmd(args, out):
|
|||||||
return 1
|
return 1
|
||||||
|
|
||||||
try:
|
try:
|
||||||
retcode = out.check_call(cmd, env=env)
|
out.check_call(cmd, env=env)
|
||||||
if retcode == 0:
|
if args.website_only:
|
||||||
if not args.disable_mail:
|
out.green("Website deployment completed.")
|
||||||
print("\nYou can try out the relay by talking to this echo bot: ")
|
elif (
|
||||||
sshexec = SSHExec(args.config.mail_domain, verbose=args.verbose)
|
not args.dns_check_disabled
|
||||||
print(
|
and strict_tls
|
||||||
sshexec(
|
and not remote_data["acme_account_url"]
|
||||||
call=remote.rshell.shell,
|
):
|
||||||
kwargs=dict(command="cat /var/lib/echobot/invite-link.txt"),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
out.green("Deploy completed, call `cmdeploy dns` next.")
|
|
||||||
elif not remote_data["acme_account_url"]:
|
|
||||||
out.red("Deploy completed but letsencrypt not configured")
|
out.red("Deploy completed but letsencrypt not configured")
|
||||||
out.red("Run 'cmdeploy run' again")
|
out.red("Run 'cmdeploy run' again")
|
||||||
retcode = 0
|
|
||||||
else:
|
else:
|
||||||
out.red("Deploy failed")
|
out.green("Deploy completed, call `cmdeploy dns` next.")
|
||||||
|
return 0
|
||||||
except subprocess.CalledProcessError:
|
except subprocess.CalledProcessError:
|
||||||
out.red("Deploy failed")
|
out.red("Deploy failed")
|
||||||
retcode = 1
|
return 1
|
||||||
return retcode
|
|
||||||
|
|
||||||
|
|
||||||
def dns_cmd_options(parser):
|
def dns_cmd_options(parser):
|
||||||
@@ -137,20 +180,23 @@ def dns_cmd_options(parser):
|
|||||||
dest="zonefile",
|
dest="zonefile",
|
||||||
type=pathlib.Path,
|
type=pathlib.Path,
|
||||||
default=None,
|
default=None,
|
||||||
help="write out a zonefile",
|
help="write DNS records in standard BIND format to the given file",
|
||||||
)
|
)
|
||||||
add_ssh_host_option(parser)
|
add_ssh_host_option(parser)
|
||||||
|
add_ssh_config_option(parser)
|
||||||
|
|
||||||
|
|
||||||
def dns_cmd(args, out):
|
def dns_cmd(args, out):
|
||||||
"""Check DNS entries and optionally generate dns zone file."""
|
"""Check DNS entries and optionally generate dns zone file."""
|
||||||
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain
|
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain
|
||||||
sshexec = get_sshexec(ssh_host, verbose=args.verbose)
|
sshexec = get_sshexec(ssh_host, verbose=args.verbose, ssh_config=args.ssh_config)
|
||||||
|
tls_cert_mode = args.config.tls_cert_mode
|
||||||
|
strict_tls = tls_cert_mode == "acme"
|
||||||
remote_data = dns.get_initial_remote_data(sshexec, args.config.mail_domain)
|
remote_data = dns.get_initial_remote_data(sshexec, args.config.mail_domain)
|
||||||
if not remote_data:
|
if not dns.check_initial_remote_data(remote_data, strict_tls=strict_tls):
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
if not remote_data["acme_account_url"]:
|
if strict_tls and not remote_data["acme_account_url"]:
|
||||||
out.red("could not get letsencrypt account url, please run 'cmdeploy run'")
|
out.red("could not get letsencrypt account url, please run 'cmdeploy run'")
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
@@ -158,6 +204,7 @@ def dns_cmd(args, out):
|
|||||||
out.red("could not determine dkim_entry, please run 'cmdeploy run'")
|
out.red("could not determine dkim_entry, please run 'cmdeploy run'")
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
|
remote_data["strict_tls"] = strict_tls
|
||||||
zonefile = dns.get_filled_zone_file(remote_data)
|
zonefile = dns.get_filled_zone_file(remote_data)
|
||||||
|
|
||||||
if args.zonefile:
|
if args.zonefile:
|
||||||
@@ -173,13 +220,14 @@ def dns_cmd(args, out):
|
|||||||
|
|
||||||
def status_cmd_options(parser):
|
def status_cmd_options(parser):
|
||||||
add_ssh_host_option(parser)
|
add_ssh_host_option(parser)
|
||||||
|
add_ssh_config_option(parser)
|
||||||
|
|
||||||
|
|
||||||
def status_cmd(args, out):
|
def status_cmd(args, out):
|
||||||
"""Display status for online chatmail instance."""
|
"""Display status for online chatmail instance."""
|
||||||
|
|
||||||
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain
|
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain
|
||||||
sshexec = get_sshexec(ssh_host, verbose=args.verbose)
|
sshexec = get_sshexec(ssh_host, verbose=args.verbose, ssh_config=args.ssh_config)
|
||||||
|
|
||||||
out.green(f"chatmail domain: {args.config.mail_domain}")
|
out.green(f"chatmail domain: {args.config.mail_domain}")
|
||||||
if args.config.privacy_mail:
|
if args.config.privacy_mail:
|
||||||
@@ -198,17 +246,15 @@ def test_cmd_options(parser):
|
|||||||
action="store_true",
|
action="store_true",
|
||||||
help="also run slow tests",
|
help="also run slow tests",
|
||||||
)
|
)
|
||||||
|
add_ssh_host_option(parser)
|
||||||
|
add_ssh_config_option(parser)
|
||||||
|
|
||||||
|
|
||||||
def test_cmd(args, out):
|
def test_cmd(args, out):
|
||||||
"""Run local and online tests for chatmail deployment.
|
"""Run local and online tests for chatmail deployment."""
|
||||||
|
|
||||||
This will automatically pip-install 'deltachat' if it's not available.
|
env = os.environ.copy()
|
||||||
"""
|
env["CHATMAIL_INI"] = str(args.inipath.resolve())
|
||||||
|
|
||||||
x = importlib.util.find_spec("deltachat")
|
|
||||||
if x is None:
|
|
||||||
out.check_call(f"{sys.executable} -m pip install deltachat")
|
|
||||||
|
|
||||||
pytest_path = shutil.which("pytest")
|
pytest_path = shutil.which("pytest")
|
||||||
pytest_args = [
|
pytest_args = [
|
||||||
@@ -222,7 +268,11 @@ def test_cmd(args, out):
|
|||||||
]
|
]
|
||||||
if args.slow:
|
if args.slow:
|
||||||
pytest_args.append("--slow")
|
pytest_args.append("--slow")
|
||||||
ret = out.run_ret(pytest_args)
|
if args.ssh_host:
|
||||||
|
pytest_args.extend(["--ssh-host", args.ssh_host])
|
||||||
|
if args.ssh_config:
|
||||||
|
pytest_args.extend(["--ssh-config", str(Path(args.ssh_config).resolve())])
|
||||||
|
ret = out.run_ret(pytest_args, env=env)
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
|
|
||||||
@@ -273,9 +323,7 @@ def bench_cmd(args, out):
|
|||||||
|
|
||||||
def webdev_cmd(args, out):
|
def webdev_cmd(args, out):
|
||||||
"""Run local web development loop for static web pages."""
|
"""Run local web development loop for static web pages."""
|
||||||
from .www import main
|
webdev_main()
|
||||||
|
|
||||||
main()
|
|
||||||
|
|
||||||
|
|
||||||
#
|
#
|
||||||
@@ -284,17 +332,44 @@ def webdev_cmd(args, out):
|
|||||||
|
|
||||||
|
|
||||||
class Out:
|
class Out:
|
||||||
"""Convenience output printer providing coloring."""
|
"""Convenience output printer providing coloring and section formatting."""
|
||||||
|
|
||||||
|
SECTION_WIDTH = 72
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.section_timings = []
|
||||||
|
|
||||||
def red(self, msg, file=sys.stderr):
|
def red(self, msg, file=sys.stderr):
|
||||||
print(colored(msg, "red"), file=file)
|
print(colored(msg, "red"), file=file, flush=True)
|
||||||
|
|
||||||
def green(self, msg, file=sys.stderr):
|
def green(self, msg, file=sys.stderr):
|
||||||
print(colored(msg, "green"), file=file)
|
print(colored(msg, "green"), file=file, flush=True)
|
||||||
|
|
||||||
|
def print(self, msg="", **kwargs):
|
||||||
|
"""Print to stdout with automatic flush."""
|
||||||
|
print(msg, flush=True, **kwargs)
|
||||||
|
|
||||||
|
@contextmanager
|
||||||
|
def section(self, title):
|
||||||
|
"""Context manager that prints a section header and records elapsed time."""
|
||||||
|
bar = "\u2501" * (self.SECTION_WIDTH - len(title) - 5)
|
||||||
|
self.green(f"\u2501\u2501\u2501 {title} {bar}")
|
||||||
|
t0 = time.time()
|
||||||
|
yield
|
||||||
|
elapsed = time.time() - t0
|
||||||
|
self.section_timings.append((title, elapsed))
|
||||||
|
self.print(f"{'':>{self.SECTION_WIDTH - 10}}({elapsed:.1f}s)")
|
||||||
|
self.print()
|
||||||
|
|
||||||
|
def section_line(self, title):
|
||||||
|
"""Print a section header without timing."""
|
||||||
|
bar = "\u2501" * (self.SECTION_WIDTH - len(title) - 5)
|
||||||
|
self.green(f"\u2501\u2501\u2501 {title} {bar}")
|
||||||
|
self.print()
|
||||||
|
|
||||||
def __call__(self, msg, red=False, green=False, file=sys.stdout):
|
def __call__(self, msg, red=False, green=False, file=sys.stdout):
|
||||||
color = "red" if red else ("green" if green else None)
|
color = "red" if red else ("green" if green else None)
|
||||||
print(colored(msg, color), file=file)
|
print(colored(msg, color), file=file, flush=True)
|
||||||
|
|
||||||
def check_call(self, arg, env=None, quiet=False):
|
def check_call(self, arg, env=None, quiet=False):
|
||||||
if not quiet:
|
if not quiet:
|
||||||
@@ -318,15 +393,26 @@ def add_ssh_host_option(parser):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def add_ssh_config_option(parser):
|
||||||
|
parser.add_argument(
|
||||||
|
"--ssh-config",
|
||||||
|
dest="ssh_config",
|
||||||
|
type=Path,
|
||||||
|
default=None,
|
||||||
|
help="Path to an SSH config file (e.g. lxconfigs/ssh-config).",
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def add_config_option(parser):
|
def add_config_option(parser):
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--config",
|
"--config",
|
||||||
dest="inipath",
|
dest="inipath",
|
||||||
action="store",
|
action="store",
|
||||||
default=Path("chatmail.ini"),
|
default=Path(os.environ.get("CHATMAIL_INI", "chatmail.ini")),
|
||||||
type=Path,
|
type=Path,
|
||||||
help="path to the chatmail.ini file",
|
help="path to the chatmail.ini file",
|
||||||
)
|
)
|
||||||
|
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--verbose",
|
"--verbose",
|
||||||
"-v",
|
"-v",
|
||||||
@@ -337,15 +423,16 @@ def add_config_option(parser):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def add_subcommand(subparsers, func):
|
def add_subcommand(subparsers, func, add_config=True):
|
||||||
name = func.__name__
|
name = func.__name__
|
||||||
assert name.endswith("_cmd")
|
assert name.endswith("_cmd")
|
||||||
name = name[:-4]
|
name = name[:-4].replace("_", "-")
|
||||||
doc = func.__doc__.strip()
|
doc = func.__doc__.strip()
|
||||||
help = doc.split("\n")[0].strip(".")
|
help = doc.split("\n")[0].strip(".")
|
||||||
p = subparsers.add_parser(name, description=doc, help=help)
|
p = subparsers.add_parser(name, description=doc, help=help)
|
||||||
p.set_defaults(func=func)
|
p.set_defaults(func=func)
|
||||||
add_config_option(p)
|
if add_config:
|
||||||
|
add_config_option(p)
|
||||||
return p
|
return p
|
||||||
|
|
||||||
|
|
||||||
@@ -359,13 +446,15 @@ def get_parser():
|
|||||||
"""Return an ArgumentParser for the 'cmdeploy' CLI"""
|
"""Return an ArgumentParser for the 'cmdeploy' CLI"""
|
||||||
|
|
||||||
parser = argparse.ArgumentParser(description=description.strip())
|
parser = argparse.ArgumentParser(description=description.strip())
|
||||||
|
parser.set_defaults(func=None, inipath=None)
|
||||||
subparsers = parser.add_subparsers(title="subcommands")
|
subparsers = parser.add_subparsers(title="subcommands")
|
||||||
|
|
||||||
# find all subcommands in the module namespace
|
# find all subcommands in the module namespace
|
||||||
glob = globals()
|
glob = globals()
|
||||||
for name, func in glob.items():
|
for name, func in glob.items():
|
||||||
if name.endswith("_cmd"):
|
if name.endswith("_cmd"):
|
||||||
subparser = add_subcommand(subparsers, func)
|
needs_config = not name.startswith("lxc_")
|
||||||
|
subparser = add_subcommand(subparsers, func, add_config=needs_config)
|
||||||
addopts = glob.get(name + "_options")
|
addopts = glob.get(name + "_options")
|
||||||
if addopts is not None:
|
if addopts is not None:
|
||||||
addopts(subparser)
|
addopts(subparser)
|
||||||
@@ -373,26 +462,27 @@ def get_parser():
|
|||||||
return parser
|
return parser
|
||||||
|
|
||||||
|
|
||||||
def get_sshexec(ssh_host: str, verbose=True):
|
def get_sshexec(ssh_host: str, verbose=True, ssh_config=None):
|
||||||
if ssh_host in ["localhost", "@local"]:
|
if ssh_host in ["localhost", "@local"]:
|
||||||
return LocalExec(verbose, docker=False)
|
return LocalExec(verbose, docker=False)
|
||||||
elif ssh_host == "@docker":
|
elif ssh_host == "@docker":
|
||||||
return LocalExec(verbose, docker=True)
|
return LocalExec(verbose, docker=True)
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f"[ssh] login to {ssh_host}")
|
print(f"[ssh] login to {ssh_host}")
|
||||||
return SSHExec(ssh_host, verbose=verbose)
|
return SSHExec(ssh_host, verbose=verbose, ssh_config=ssh_config)
|
||||||
|
|
||||||
|
|
||||||
def main(args=None):
|
def main(args=None):
|
||||||
"""Provide main entry point for 'cmdeploy' CLI invocation."""
|
"""Provide main entry point for 'cmdeploy' CLI invocation."""
|
||||||
parser = get_parser()
|
parser = get_parser()
|
||||||
args = parser.parse_args(args=args)
|
args = parser.parse_args(args=args)
|
||||||
if not hasattr(args, "func"):
|
if args.func is None:
|
||||||
return parser.parse_args(["-h"])
|
return parser.parse_args(["-h"])
|
||||||
|
|
||||||
out = Out()
|
out = Out()
|
||||||
kwargs = {}
|
kwargs = {}
|
||||||
if args.func.__name__ not in ("init_cmd", "fmt_cmd"):
|
|
||||||
|
if args.inipath is not None and args.func.__name__ not in ("init_cmd", "fmt_cmd"):
|
||||||
if not args.inipath.exists():
|
if not args.inipath.exists():
|
||||||
out.red(f"expecting {args.inipath} to exist, run init first?")
|
out.red(f"expecting {args.inipath} to exist, run init first?")
|
||||||
raise SystemExit(1)
|
raise SystemExit(1)
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -1,25 +1,40 @@
|
|||||||
import datetime
|
import datetime
|
||||||
import importlib
|
|
||||||
|
|
||||||
from jinja2 import Template
|
|
||||||
|
|
||||||
from . import remote
|
from . import remote
|
||||||
|
|
||||||
|
|
||||||
|
def parse_zone_records(text):
|
||||||
|
"""Yield ``(name, ttl, rtype, rdata)`` from standard BIND-format text.
|
||||||
|
|
||||||
|
Skips comment lines (starting with ``;``) and blank lines.
|
||||||
|
Each record line must have the format ``name TTL IN type rdata``.
|
||||||
|
"""
|
||||||
|
for raw_line in text.splitlines():
|
||||||
|
line = raw_line.strip()
|
||||||
|
if not line or line.startswith(";"):
|
||||||
|
continue
|
||||||
|
try:
|
||||||
|
name, ttl, _in, rtype, rdata = line.split(None, 4)
|
||||||
|
except ValueError:
|
||||||
|
raise ValueError(f"Bad zone record line: {line!r}") from None
|
||||||
|
name = name.rstrip(".")
|
||||||
|
yield name, ttl, rtype.upper(), rdata
|
||||||
|
|
||||||
|
|
||||||
def get_initial_remote_data(sshexec, mail_domain):
|
def get_initial_remote_data(sshexec, mail_domain):
|
||||||
return sshexec.logged(
|
return sshexec.logged(
|
||||||
call=remote.rdns.perform_initial_checks, kwargs=dict(mail_domain=mail_domain)
|
call=remote.rdns.perform_initial_checks, kwargs=dict(mail_domain=mail_domain)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def check_initial_remote_data(remote_data, *, print=print):
|
def check_initial_remote_data(remote_data, *, strict_tls=True, print=print):
|
||||||
mail_domain = remote_data["mail_domain"]
|
mail_domain = remote_data["mail_domain"]
|
||||||
if not remote_data["A"] and not remote_data["AAAA"]:
|
if not remote_data["A"] and not remote_data["AAAA"]:
|
||||||
print(f"Missing A and/or AAAA DNS records for {mail_domain}!")
|
print(f"Missing A and/or AAAA DNS records for {mail_domain}!")
|
||||||
elif remote_data["MTA_STS"] != f"{mail_domain}.":
|
elif strict_tls and remote_data["MTA_STS"] != f"{mail_domain}.":
|
||||||
print("Missing MTA-STS CNAME record:")
|
print("Missing MTA-STS CNAME record:")
|
||||||
print(f"mta-sts.{mail_domain}. CNAME {mail_domain}.")
|
print(f"mta-sts.{mail_domain}. CNAME {mail_domain}.")
|
||||||
elif remote_data["WWW"] != f"{mail_domain}.":
|
elif strict_tls and remote_data["WWW"] != f"{mail_domain}.":
|
||||||
print("Missing www CNAME record:")
|
print("Missing www CNAME record:")
|
||||||
print(f"www.{mail_domain}. CNAME {mail_domain}.")
|
print(f"www.{mail_domain}. CNAME {mail_domain}.")
|
||||||
else:
|
else:
|
||||||
@@ -31,13 +46,36 @@ def get_filled_zone_file(remote_data):
|
|||||||
if not sts_id:
|
if not sts_id:
|
||||||
remote_data["sts_id"] = datetime.datetime.now().strftime("%Y%m%d%H%M")
|
remote_data["sts_id"] = datetime.datetime.now().strftime("%Y%m%d%H%M")
|
||||||
|
|
||||||
template = importlib.resources.files(__package__).joinpath("chatmail.zone.j2")
|
d = remote_data["mail_domain"]
|
||||||
content = template.read_text()
|
lines = ["; Required DNS entries"]
|
||||||
zonefile = Template(content).render(**remote_data)
|
if remote_data.get("A"):
|
||||||
lines = [x.strip() for x in zonefile.split("\n") if x.strip()]
|
lines.append(f"{d}. 3600 IN A {remote_data['A']}")
|
||||||
|
if remote_data.get("AAAA"):
|
||||||
|
lines.append(f"{d}. 3600 IN AAAA {remote_data['AAAA']}")
|
||||||
|
lines.append(f"{d}. 3600 IN MX 10 {d}.")
|
||||||
|
if remote_data.get("strict_tls"):
|
||||||
|
lines.append(
|
||||||
|
f'_mta-sts.{d}. 3600 IN TXT "v=STSv1; id={remote_data["sts_id"]}"'
|
||||||
|
)
|
||||||
|
lines.append(f"mta-sts.{d}. 3600 IN CNAME {d}.")
|
||||||
|
lines.append(f"www.{d}. 3600 IN CNAME {d}.")
|
||||||
|
lines.append(remote_data["dkim_entry"])
|
||||||
lines.append("")
|
lines.append("")
|
||||||
zonefile = "\n".join(lines)
|
lines.append("; Recommended DNS entries")
|
||||||
return zonefile
|
lines.append(f'{d}. 3600 IN TXT "v=spf1 a ~all"')
|
||||||
|
lines.append(f'_dmarc.{d}. 3600 IN TXT "v=DMARC1;p=reject;adkim=s;aspf=s"')
|
||||||
|
if remote_data.get("acme_account_url"):
|
||||||
|
lines.append(
|
||||||
|
f"{d}. 3600 IN CAA 0 issue"
|
||||||
|
f' "letsencrypt.org;accounturi={remote_data["acme_account_url"]}"'
|
||||||
|
)
|
||||||
|
lines.append(f'_adsp._domainkey.{d}. 3600 IN TXT "dkim=discardable"')
|
||||||
|
lines.append(f"_submission._tcp.{d}. 3600 IN SRV 0 1 587 {d}.")
|
||||||
|
lines.append(f"_submissions._tcp.{d}. 3600 IN SRV 0 1 465 {d}.")
|
||||||
|
lines.append(f"_imap._tcp.{d}. 3600 IN SRV 0 1 143 {d}.")
|
||||||
|
lines.append(f"_imaps._tcp.{d}. 3600 IN SRV 0 1 993 {d}.")
|
||||||
|
lines.append("")
|
||||||
|
return "\n".join(lines)
|
||||||
|
|
||||||
|
|
||||||
def check_full_zone(sshexec, remote_data, out, zonefile) -> int:
|
def check_full_zone(sshexec, remote_data, out, zonefile) -> int:
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ iterate_prefix = userdb/
|
|||||||
|
|
||||||
default_pass_scheme = plain
|
default_pass_scheme = plain
|
||||||
# %E escapes characters " (double quote), ' (single quote) and \ (backslash) with \ (backslash).
|
# %E escapes characters " (double quote), ' (single quote) and \ (backslash) with \ (backslash).
|
||||||
# See <https://doc.dovecot.org/configuration_manual/config_file/config_variables/#modifiers>
|
# See <https://doc.dovecot.org/2.3/configuration_manual/config_file/config_variables/#modifiers>
|
||||||
# for documentation.
|
# for documentation.
|
||||||
#
|
#
|
||||||
# We escape user-provided input and use double quote as a separator.
|
# We escape user-provided input and use double quote as a separator.
|
||||||
|
|||||||
172
cmdeploy/src/cmdeploy/dovecot/deployer.py
Normal file
172
cmdeploy/src/cmdeploy/dovecot/deployer.py
Normal file
@@ -0,0 +1,172 @@
|
|||||||
|
import os
|
||||||
|
import urllib.request
|
||||||
|
|
||||||
|
from chatmaild.config import Config
|
||||||
|
from pyinfra import host
|
||||||
|
from pyinfra.facts.server import Arch, Sysctl
|
||||||
|
from pyinfra.facts.systemd import SystemdEnabled
|
||||||
|
from pyinfra.operations import apt, files, server, systemd
|
||||||
|
|
||||||
|
from cmdeploy.basedeploy import (
|
||||||
|
Deployer,
|
||||||
|
activate_remote_units,
|
||||||
|
configure_remote_units,
|
||||||
|
get_resource,
|
||||||
|
has_systemd,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class DovecotDeployer(Deployer):
|
||||||
|
daemon_reload = False
|
||||||
|
|
||||||
|
def __init__(self, config, disable_mail):
|
||||||
|
self.config = config
|
||||||
|
self.disable_mail = disable_mail
|
||||||
|
self.units = ["doveauth"]
|
||||||
|
|
||||||
|
def install(self):
|
||||||
|
arch = host.get_fact(Arch)
|
||||||
|
if has_systemd() and "dovecot.service" in host.get_fact(SystemdEnabled):
|
||||||
|
return # already installed and running
|
||||||
|
_install_dovecot_package("core", arch)
|
||||||
|
_install_dovecot_package("imapd", arch)
|
||||||
|
_install_dovecot_package("lmtpd", arch)
|
||||||
|
|
||||||
|
def configure(self):
|
||||||
|
configure_remote_units(self.config.mail_domain, self.units)
|
||||||
|
self.need_restart, self.daemon_reload = _configure_dovecot(self.config)
|
||||||
|
|
||||||
|
def activate(self):
|
||||||
|
activate_remote_units(self.units)
|
||||||
|
|
||||||
|
restart = False if self.disable_mail else self.need_restart
|
||||||
|
|
||||||
|
systemd.service(
|
||||||
|
name="Disable dovecot for now"
|
||||||
|
if self.disable_mail
|
||||||
|
else "Start and enable Dovecot",
|
||||||
|
service="dovecot.service",
|
||||||
|
running=False if self.disable_mail else True,
|
||||||
|
enabled=False if self.disable_mail else True,
|
||||||
|
restarted=restart,
|
||||||
|
daemon_reload=self.daemon_reload,
|
||||||
|
)
|
||||||
|
self.need_restart = False
|
||||||
|
|
||||||
|
|
||||||
|
def _pick_url(primary, fallback):
|
||||||
|
try:
|
||||||
|
req = urllib.request.Request(primary, method="HEAD")
|
||||||
|
urllib.request.urlopen(req, timeout=10)
|
||||||
|
return primary
|
||||||
|
except Exception:
|
||||||
|
return fallback
|
||||||
|
|
||||||
|
|
||||||
|
def _install_dovecot_package(package: str, arch: str):
|
||||||
|
arch = "amd64" if arch == "x86_64" else arch
|
||||||
|
arch = "arm64" if arch == "aarch64" else arch
|
||||||
|
primary_url = f"https://download.delta.chat/dovecot/dovecot-{package}_2.3.21%2Bdfsg1-3_{arch}.deb"
|
||||||
|
fallback_url = f"https://github.com/chatmail/dovecot/releases/download/upstream%2F2.3.21%2Bdfsg1/dovecot-{package}_2.3.21%2Bdfsg1-3_{arch}.deb"
|
||||||
|
url = _pick_url(primary_url, fallback_url)
|
||||||
|
deb_filename = "/root/" + url.split("/")[-1]
|
||||||
|
|
||||||
|
match (package, arch):
|
||||||
|
case ("core", "amd64"):
|
||||||
|
sha256 = "dd060706f52a306fa863d874717210b9fe10536c824afe1790eec247ded5b27d"
|
||||||
|
case ("core", "arm64"):
|
||||||
|
sha256 = "e7548e8a82929722e973629ecc40fcfa886894cef3db88f23535149e7f730dc9"
|
||||||
|
case ("imapd", "amd64"):
|
||||||
|
sha256 = "8d8dc6fc00bbb6cdb25d345844f41ce2f1c53f764b79a838eb2a03103eebfa86"
|
||||||
|
case ("imapd", "arm64"):
|
||||||
|
sha256 = "178fa877ddd5df9930e8308b518f4b07df10e759050725f8217a0c1fb3fd707f"
|
||||||
|
case ("lmtpd", "amd64"):
|
||||||
|
sha256 = "2f69ba5e35363de50962d42cccbfe4ed8495265044e244007d7ccddad77513ab"
|
||||||
|
case ("lmtpd", "arm64"):
|
||||||
|
sha256 = "89f52fb36524f5877a177dff4a713ba771fd3f91f22ed0af7238d495e143b38f"
|
||||||
|
case _:
|
||||||
|
apt.packages(packages=[f"dovecot-{package}"])
|
||||||
|
return
|
||||||
|
|
||||||
|
files.download(
|
||||||
|
name=f"Download dovecot-{package}",
|
||||||
|
src=url,
|
||||||
|
dest=deb_filename,
|
||||||
|
sha256sum=sha256,
|
||||||
|
cache_time=60 * 60 * 24 * 365 * 10, # never redownload the package
|
||||||
|
)
|
||||||
|
|
||||||
|
apt.deb(name=f"Install dovecot-{package}", src=deb_filename)
|
||||||
|
|
||||||
|
|
||||||
|
def _configure_dovecot(config: Config, debug: bool = False) -> tuple[bool, bool]:
    """Configure the Dovecot IMAP server.

    Uploads the main config, auth config and the Lua push-notification
    script, raises inotify limits where needed, sets the TZ environment
    variable, and installs a systemd restart-on-failure drop-in.

    Returns a (need_restart, daemon_reload) tuple: need_restart is True
    when any dovecot config file changed; daemon_reload is True when the
    systemd drop-in changed.
    """
    need_restart = False
    daemon_reload = False

    # Main dovecot.conf rendered from a Jinja2 template.
    main_config = files.template(
        src=get_resource("dovecot/dovecot.conf.j2"),
        dest="/etc/dovecot/dovecot.conf",
        user="root",
        group="root",
        mode="644",
        config=config,
        debug=debug,
        disable_ipv6=config.disable_ipv6,
    )
    need_restart |= main_config.changed
    auth_config = files.put(
        src=get_resource("dovecot/auth.conf"),
        dest="/etc/dovecot/auth.conf",
        user="root",
        group="root",
        mode="644",
    )
    need_restart |= auth_config.changed
    # Lua handler referenced by the push_notification plugin config.
    lua_push_notification_script = files.put(
        src=get_resource("dovecot/push_notification.lua"),
        dest="/etc/dovecot/push_notification.lua",
        user="root",
        group="root",
        mode="644",
    )
    need_restart |= lua_push_notification_script.changed

    # as per https://doc.dovecot.org/2.3/configuration_manual/os/
    # it is recommended to set the following inotify limits
    if not os.environ.get("CHATMAIL_NOSYSCTL"):
        for name in ("max_user_instances", "max_user_watches"):
            key = f"fs.inotify.{name}"
            if host.get_fact(Sysctl)[key] > 65535:
                # Skip updating limits if already sufficient
                # (enables running in incus containers where sysctl readonly)
                continue
            server.sysctl(
                name=f"Change {key}",
                key=key,
                value=65535,
                persist=True,
            )

    # Point TZ at /etc/localtime system-wide; a changed line requires a
    # dovecot restart for the new environment to take effect.
    timezone_env = files.line(
        name="Set TZ environment variable",
        path="/etc/environment",
        line="TZ=:/etc/localtime",
    )
    need_restart |= timezone_env.changed

    # systemd drop-in: only needs a daemon-reload, not a dovecot restart.
    restart_conf = files.put(
        name="dovecot: restart automatically on failure",
        src=get_resource("service/10_restart.conf"),
        dest="/etc/systemd/system/dovecot.service.d/10_restart.conf",
    )
    daemon_reload |= restart_conf.changed

    # Validate dovecot configuration before restart
    if need_restart:
        server.shell(
            name="Validate dovecot configuration",
            commands=["doveconf -n >/dev/null"],
        )

    return need_restart, daemon_reload
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
## Dovecot configuration file
|
## Dovecot configuration file
|
||||||
|
|
||||||
{% if disable_ipv6 %}
|
{% if disable_ipv6 %}
|
||||||
listen = *
|
listen = 0.0.0.0
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
protocols = imap lmtp
|
protocols = imap lmtp
|
||||||
@@ -26,7 +26,7 @@ default_client_limit = 20000
|
|||||||
# Increase number of logged in IMAP connections.
|
# Increase number of logged in IMAP connections.
|
||||||
# Each connection is handled by a separate `imap` process.
|
# Each connection is handled by a separate `imap` process.
|
||||||
# `imap` process should have `client_limit=1` as described in
|
# `imap` process should have `client_limit=1` as described in
|
||||||
# <https://doc.dovecot.org/configuration_manual/service_configuration/#service-limits>
|
# <https://doc.dovecot.org/2.3/configuration_manual/service_configuration/#service-limits>
|
||||||
# so each logged in IMAP session will need its own `imap` process.
|
# so each logged in IMAP session will need its own `imap` process.
|
||||||
#
|
#
|
||||||
# If this limit is reached,
|
# If this limit is reached,
|
||||||
@@ -44,11 +44,11 @@ mail_server_comment = Chatmail server
|
|||||||
|
|
||||||
# `zlib` enables compressing messages stored in the maildir.
|
# `zlib` enables compressing messages stored in the maildir.
|
||||||
# See
|
# See
|
||||||
# <https://doc.dovecot.org/configuration_manual/zlib_plugin/>
|
# <https://doc.dovecot.org/2.3/configuration_manual/zlib_plugin/>
|
||||||
# for documentation.
|
# for documentation.
|
||||||
#
|
#
|
||||||
# quota plugin documentation:
|
# quota plugin documentation:
|
||||||
# <https://doc.dovecot.org/configuration_manual/quota_plugin/>
|
# <https://doc.dovecot.org/2.3/configuration_manual/quota_plugin/>
|
||||||
mail_plugins = zlib quota
|
mail_plugins = zlib quota
|
||||||
|
|
||||||
imap_capability = +XDELTAPUSH XCHATMAIL
|
imap_capability = +XDELTAPUSH XCHATMAIL
|
||||||
@@ -113,7 +113,7 @@ mail_attribute_dict = proxy:/run/chatmail-metadata/metadata.socket:metadata
|
|||||||
# `imap_zlib` enables IMAP COMPRESS (RFC 4978).
|
# `imap_zlib` enables IMAP COMPRESS (RFC 4978).
|
||||||
# <https://datatracker.ietf.org/doc/html/rfc4978.html>
|
# <https://datatracker.ietf.org/doc/html/rfc4978.html>
|
||||||
protocol imap {
|
protocol imap {
|
||||||
mail_plugins = $mail_plugins imap_zlib imap_quota last_login
|
mail_plugins = $mail_plugins imap_quota last_login {% if config.imap_compress %}imap_zlib{% endif %}
|
||||||
imap_metadata = yes
|
imap_metadata = yes
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -125,13 +125,13 @@ plugin {
|
|||||||
|
|
||||||
protocol lmtp {
|
protocol lmtp {
|
||||||
# notify plugin is a dependency of push_notification plugin:
|
# notify plugin is a dependency of push_notification plugin:
|
||||||
# <https://doc.dovecot.org/settings/plugin/notify-plugin/>
|
# <https://doc.dovecot.org/2.3/settings/plugin/notify-plugin/>
|
||||||
#
|
#
|
||||||
# push_notification plugin documentation:
|
# push_notification plugin documentation:
|
||||||
# <https://doc.dovecot.org/configuration_manual/push_notification/>
|
# <https://doc.dovecot.org/2.3/configuration_manual/push_notification/>
|
||||||
#
|
#
|
||||||
# mail_lua and push_notification_lua are needed for Lua push notification handler.
|
# mail_lua and push_notification_lua are needed for Lua push notification handler.
|
||||||
# <https://doc.dovecot.org/configuration_manual/push_notification/#configuration>
|
# <https://doc.dovecot.org/2.3/configuration_manual/push_notification/#configuration>
|
||||||
mail_plugins = $mail_plugins mail_lua notify push_notification push_notification_lua
|
mail_plugins = $mail_plugins mail_lua notify push_notification push_notification_lua
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -154,7 +154,7 @@ plugin {
|
|||||||
|
|
||||||
# push_notification configuration
|
# push_notification configuration
|
||||||
plugin {
|
plugin {
|
||||||
# <https://doc.dovecot.org/configuration_manual/push_notification/#lua-lua>
|
# <https://doc.dovecot.org/2.3/configuration_manual/push_notification/#lua-lua>
|
||||||
push_notification_driver = lua:file=/etc/dovecot/push_notification.lua
|
push_notification_driver = lua:file=/etc/dovecot/push_notification.lua
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -168,6 +168,8 @@ service lmtp {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
lmtp_add_received_header = no
|
||||||
|
|
||||||
service auth {
|
service auth {
|
||||||
unix_listener /var/spool/postfix/private/auth {
|
unix_listener /var/spool/postfix/private/auth {
|
||||||
mode = 0660
|
mode = 0660
|
||||||
@@ -226,8 +228,8 @@ service anvil {
|
|||||||
}
|
}
|
||||||
|
|
||||||
ssl = required
|
ssl = required
|
||||||
ssl_cert = </var/lib/acme/live/{{ config.mail_domain }}/fullchain
|
ssl_cert = <{{ config.tls_cert_path }}
|
||||||
ssl_key = </var/lib/acme/live/{{ config.mail_domain }}/privkey
|
ssl_key = <{{ config.tls_key_path }}
|
||||||
ssl_dh = </usr/share/dovecot/dh.pem
|
ssl_dh = </usr/share/dovecot/dh.pem
|
||||||
ssl_min_protocol = TLSv1.3
|
ssl_min_protocol = TLSv1.3
|
||||||
ssl_prefer_server_ciphers = yes
|
ssl_prefer_server_ciphers = yes
|
||||||
@@ -252,3 +254,181 @@ protocol imap {
|
|||||||
rawlog_dir = %h
|
rawlog_dir = %h
|
||||||
}
|
}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
|
{% if not config.imap_compress %}
|
||||||
|
# Hibernate IDLE users to save memory and CPU resources
|
||||||
|
# NOTE: this will have no effect if imap_zlib plugin is used
|
||||||
|
imap_hibernate_timeout = 30s
|
||||||
|
service imap {
|
||||||
|
# Note that this change will allow any process running as
|
||||||
|
# $default_internal_user (dovecot) to access mails as any other user.
|
||||||
|
# This may be insecure in some installations, which is why this isn't
|
||||||
|
# done by default.
|
||||||
|
unix_listener imap-master {
|
||||||
|
user = $default_internal_user
|
||||||
|
}
|
||||||
|
}
|
||||||
|
# The following is the default already in v2.3.1+:
|
||||||
|
service imap {
|
||||||
|
extra_groups = $default_internal_group
|
||||||
|
}
|
||||||
|
service imap-hibernate {
|
||||||
|
unix_listener imap-hibernate {
|
||||||
|
mode = 0660
|
||||||
|
group = $default_internal_group
|
||||||
|
}
|
||||||
|
}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if config.mtail_address %}
|
||||||
|
#
|
||||||
|
# Dovecot Statistics
|
||||||
|
#
|
||||||
|
# OpenMetrics endpoint at http://{{- config.mtail_address}}:3904/metrics
|
||||||
|
service stats {
|
||||||
|
inet_listener http {
|
||||||
|
port = 3904
|
||||||
|
address = {{- config.mtail_address}}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# IMAP Command Metrics
|
||||||
|
# - Bytes in/out for compression efficiency analysis
|
||||||
|
# - Lock wait time for contention debugging
|
||||||
|
# - Grouped by command name and reply state
|
||||||
|
metric imap_command {
|
||||||
|
filter = event=imap_command_finished
|
||||||
|
fields = bytes_in bytes_out lock_wait_usecs running_usecs
|
||||||
|
group_by = cmd_name tagged_reply_state
|
||||||
|
}
|
||||||
|
|
||||||
|
# Duration buckets for latency histograms (base 10: 10us, 100us, 1ms, 10ms, 100ms, 1s, 10s, 100s)
|
||||||
|
metric imap_command_duration {
|
||||||
|
filter = event=imap_command_finished
|
||||||
|
group_by = cmd_name duration:exponential:1:8:10
|
||||||
|
}
|
||||||
|
|
||||||
|
# Slow command outliers (>1 second = 1000000 usecs)
|
||||||
|
# Useful for alerting without high cardinality
|
||||||
|
metric imap_command_slow {
|
||||||
|
filter = event=imap_command_finished AND duration>1000000 AND NOT cmd_name=IDLE
|
||||||
|
group_by = cmd_name
|
||||||
|
}
|
||||||
|
|
||||||
|
# IDLE-specific Metrics
|
||||||
|
|
||||||
|
metric imap_idle {
|
||||||
|
filter = event=imap_command_finished AND cmd_name=IDLE
|
||||||
|
fields = bytes_in bytes_out running_usecs
|
||||||
|
group_by = tagged_reply_state
|
||||||
|
}
|
||||||
|
|
||||||
|
metric imap_idle_duration {
|
||||||
|
filter = event=imap_command_finished AND cmd_name=IDLE
|
||||||
|
# Base 10: 100ms to 27h (covers short wakeups to long idle sessions)
|
||||||
|
group_by = duration:exponential:5:11:10
|
||||||
|
}
|
||||||
|
|
||||||
|
metric imap_idle_commands {
|
||||||
|
filter = event=imap_command_finished AND cmd_name=IDLE
|
||||||
|
group_by = tagged_reply_state
|
||||||
|
}
|
||||||
|
|
||||||
|
metric imap_idle_failed {
|
||||||
|
filter = event=imap_command_finished AND cmd_name=IDLE AND NOT tagged_reply_state=OK
|
||||||
|
}
|
||||||
|
|
||||||
|
# Hibernation Metrics (requires imap_hibernate_timeout)
|
||||||
|
|
||||||
|
metric imap_hibernated {
|
||||||
|
filter = event=imap_client_hibernated
|
||||||
|
}
|
||||||
|
|
||||||
|
metric imap_hibernated_failed {
|
||||||
|
filter = event=imap_client_hibernated AND error=*
|
||||||
|
}
|
||||||
|
|
||||||
|
metric imap_unhibernated {
|
||||||
|
filter = event=imap_client_unhibernated
|
||||||
|
fields = hibernation_usecs
|
||||||
|
}
|
||||||
|
|
||||||
|
metric imap_unhibernated_reason {
|
||||||
|
filter = event=imap_client_unhibernated
|
||||||
|
group_by = reason
|
||||||
|
fields = hibernation_usecs
|
||||||
|
}
|
||||||
|
|
||||||
|
metric imap_unhibernated_reason_sleep {
|
||||||
|
filter = event=imap_client_unhibernated
|
||||||
|
group_by = reason hibernation_usecs:exponential:4:8:10
|
||||||
|
}
|
||||||
|
|
||||||
|
metric imap_unhibernated_failed {
|
||||||
|
filter = event=imap_client_unhibernated AND error=*
|
||||||
|
}
|
||||||
|
|
||||||
|
# Hibernation duration buckets (how long clients stayed hibernated)
|
||||||
|
# Base 10: 100ms to 27h
|
||||||
|
metric imap_hibernation_duration {
|
||||||
|
filter = event=imap_client_unhibernated
|
||||||
|
group_by = reason duration:exponential:5:11:10
|
||||||
|
}
|
||||||
|
|
||||||
|
# Authentication / Login Metrics
|
||||||
|
|
||||||
|
metric auth_request {
|
||||||
|
filter = event=auth_request_finished
|
||||||
|
group_by = success
|
||||||
|
}
|
||||||
|
|
||||||
|
metric auth_request_duration {
|
||||||
|
filter = event=auth_request_finished
|
||||||
|
group_by = success duration:exponential:2:6:10
|
||||||
|
}
|
||||||
|
|
||||||
|
metric auth_failed {
|
||||||
|
filter = event=auth_request_finished AND success=no
|
||||||
|
}
|
||||||
|
|
||||||
|
# Passdb cache effectiveness
|
||||||
|
metric auth_passdb {
|
||||||
|
filter = event=auth_passdb_request_finished
|
||||||
|
group_by = result cache
|
||||||
|
}
|
||||||
|
|
||||||
|
# Master login (post-auth userdb lookup)
|
||||||
|
metric auth_master_login {
|
||||||
|
filter = event=auth_master_client_login_finished
|
||||||
|
}
|
||||||
|
|
||||||
|
metric auth_master_login_failed {
|
||||||
|
filter = event=auth_master_client_login_finished AND error=*
|
||||||
|
}
|
||||||
|
|
||||||
|
# Mail Delivery (LMTP) - affects IDLE wakeup latency
|
||||||
|
|
||||||
|
metric mail_delivery {
|
||||||
|
filter = event=mail_delivery_finished
|
||||||
|
}
|
||||||
|
|
||||||
|
metric mail_delivery_duration {
|
||||||
|
filter = event=mail_delivery_finished
|
||||||
|
group_by = duration:exponential:3:7:10
|
||||||
|
}
|
||||||
|
|
||||||
|
metric mail_delivery_failed {
|
||||||
|
filter = event=mail_delivery_finished AND error=*
|
||||||
|
}
|
||||||
|
|
||||||
|
# Connection Events
|
||||||
|
|
||||||
|
metric client_connected {
|
||||||
|
filter = event=client_connection_connected AND category="service:imap"
|
||||||
|
}
|
||||||
|
|
||||||
|
metric client_disconnected {
|
||||||
|
filter = event=client_connection_disconnected AND category="service:imap"
|
||||||
|
fields = bytes_in bytes_out
|
||||||
|
}
|
||||||
|
{% endif %}
|
||||||
|
|||||||
67
cmdeploy/src/cmdeploy/external/deployer.py
vendored
Normal file
67
cmdeploy/src/cmdeploy/external/deployer.py
vendored
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
import io
|
||||||
|
|
||||||
|
from pyinfra import host
|
||||||
|
from pyinfra.facts.files import File
|
||||||
|
from pyinfra.operations import files, systemd
|
||||||
|
|
||||||
|
from cmdeploy.basedeploy import Deployer, get_resource
|
||||||
|
|
||||||
|
|
||||||
|
class ExternalTlsDeployer(Deployer):
    """Expects TLS certificates to be managed on the server.

    Validates that the configured certificate and key files
    exist on the remote host. Installs a systemd path unit
    that watches the certificate file and automatically
    restarts/reloads affected services when it changes.
    """

    def __init__(self, cert_path, key_path):
        self.cert_path = cert_path
        self.key_path = key_path
        # Fix: initialize eagerly (as FiltermailDeployer does) so that
        # activate() can always read need_restart, even if configure()
        # found nothing changed and therefore never assigned it.
        self.need_restart = False

    def configure(self):
        """Validate the TLS files on the host and install the watcher units."""
        # Verify cert and key exist on the remote host using pyinfra facts.
        for path in (self.cert_path, self.key_path):
            info = host.get_fact(File, path=path)
            if info is None:
                raise Exception(f"External TLS file not found on server: {path}")

        # Deploy the .path unit (templated with the cert path).
        # pkg=__package__ is required here because the resource files
        # live in cmdeploy.external, not the default cmdeploy package.
        source = get_resource("tls-cert-reload.path.f", pkg=__package__)
        content = source.read_text().format(cert_path=self.cert_path).encode()

        path_unit = files.put(
            name="Upload tls-cert-reload.path",
            src=io.BytesIO(content),
            dest="/etc/systemd/system/tls-cert-reload.path",
            user="root",
            group="root",
            mode="644",
        )

        service_unit = files.put(
            name="Upload tls-cert-reload.service",
            src=get_resource("tls-cert-reload.service", pkg=__package__),
            dest="/etc/systemd/system/tls-cert-reload.service",
            user="root",
            group="root",
            mode="644",
        )

        if path_unit.changed or service_unit.changed:
            self.need_restart = True

    def activate(self):
        """Enable the cert watcher; restart it only if the units changed."""
        systemd.service(
            name="Enable tls-cert-reload path watcher",
            service="tls-cert-reload.path",
            running=True,
            enabled=True,
            restarted=self.need_restart,
            daemon_reload=self.need_restart,
        )
        # No explicit reload needed here: dovecot/nginx read the cert
        # on startup, and the .path watcher handles live changes.
|
||||||
15
cmdeploy/src/cmdeploy/external/tls-cert-reload.path.f
vendored
Normal file
15
cmdeploy/src/cmdeploy/external/tls-cert-reload.path.f
vendored
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
# Watch the TLS certificate file for changes.
|
||||||
|
# When the cert is updated (e.g. renewed by an external process),
|
||||||
|
# this triggers tls-cert-reload.service to reload the affected services.
|
||||||
|
#
|
||||||
|
# NOTE: changes to the certificates are not detected if they cross bind-mount boundaries.
|
||||||
|
# In that case, trigger the reload explicitly after cert renewal:
|
||||||
|
# systemctl start tls-cert-reload.service
|
||||||
|
[Unit]
|
||||||
|
Description=Watch TLS certificate for changes
|
||||||
|
|
||||||
|
[Path]
|
||||||
|
PathChanged={cert_path}
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
15
cmdeploy/src/cmdeploy/external/tls-cert-reload.service
vendored
Normal file
15
cmdeploy/src/cmdeploy/external/tls-cert-reload.service
vendored
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
# Reload services that cache the TLS certificate.
|
||||||
|
#
|
||||||
|
# dovecot: caches the cert at startup; reload re-reads SSL certs
|
||||||
|
# without dropping existing connections.
|
||||||
|
# nginx: caches the cert at startup; reload gracefully picks up
|
||||||
|
# the new cert for new connections.
|
||||||
|
# postfix: reads the cert fresh on each TLS handshake,
|
||||||
|
# does NOT need a reload/restart.
|
||||||
|
[Unit]
|
||||||
|
Description=Reload TLS services after certificate change
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=oneshot
|
||||||
|
ExecStart=/bin/systemctl try-reload-or-restart dovecot
|
||||||
|
ExecStart=/bin/systemctl try-reload-or-restart nginx
|
||||||
52
cmdeploy/src/cmdeploy/filtermail/deployer.py
Normal file
52
cmdeploy/src/cmdeploy/filtermail/deployer.py
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
from pyinfra import facts, host
|
||||||
|
from pyinfra.operations import files, systemd
|
||||||
|
|
||||||
|
from cmdeploy.basedeploy import Deployer, get_resource
|
||||||
|
|
||||||
|
|
||||||
|
class FiltermailDeployer(Deployer):
    """Installs and runs the filtermail before-queue filters.

    Manages two systemd services: ``filtermail`` (outgoing) and
    ``filtermail-incoming``.
    """

    services = ["filtermail", "filtermail-incoming"]
    bin_path = "/usr/local/bin/filtermail"
    config_path = "/usr/local/lib/chatmaild/chatmail.ini"

    def __init__(self):
        self.need_restart = False

    def install(self):
        """Download the pinned filtermail v0.5.2 binary for this host's arch."""
        arch = host.get_fact(facts.server.Arch)
        # Pinned per-architecture checksums of the release binaries.
        checksums = {
            "x86_64": "ce24ca0075aa445510291d775fb3aea8f4411818c7b885ae51a0fe18c5f789ce",
            "aarch64": "c5d783eefa5332db3d97a0e6a23917d72849e3eb45da3d16ce908a9b4e5a797d",
        }
        downloaded = files.download(
            name="Download filtermail",
            src=f"https://github.com/chatmail/filtermail/releases/download/v0.5.2/filtermail-{arch}",
            sha256sum=checksums[arch],
            dest=self.bin_path,
            mode="755",
        )
        if downloaded.changed:
            self.need_restart = True

    def configure(self):
        """Render one systemd unit per service from its Jinja2 template."""
        for unit in self.services:
            rendered = files.template(
                src=get_resource(f"filtermail/{unit}.service.j2"),
                dest=f"/etc/systemd/system/{unit}.service",
                user="root",
                group="root",
                mode="644",
                bin_path=self.bin_path,
                config_path=self.config_path,
            )
            if rendered.changed:
                self.need_restart = True

    def activate(self):
        """Enable and start both services, restarting them if anything changed."""
        for unit in self.services:
            systemd.service(
                name=f"Start and enable {unit}",
                service=f"{unit}.service",
                running=True,
                enabled=True,
                restarted=self.need_restart,
                daemon_reload=True,
            )
        self.need_restart = False
|
||||||
@@ -2,11 +2,10 @@
|
|||||||
Description=Incoming Chatmail Postfix before queue filter
|
Description=Incoming Chatmail Postfix before queue filter
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
ExecStart={execpath} {config_path} incoming
|
ExecStart={{ bin_path }} {{ config_path }} incoming
|
||||||
Restart=always
|
Restart=always
|
||||||
RestartSec=30
|
RestartSec=30
|
||||||
User=vmail
|
User=vmail
|
||||||
|
|
||||||
[Install]
|
[Install]
|
||||||
WantedBy=multi-user.target
|
WantedBy=multi-user.target
|
||||||
|
|
||||||
@@ -2,7 +2,7 @@
|
|||||||
Description=Outgoing Chatmail Postfix before queue filter
|
Description=Outgoing Chatmail Postfix before queue filter
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
ExecStart={execpath} {config_path} outgoing
|
ExecStart={{ bin_path }} {{ config_path }} outgoing
|
||||||
Restart=always
|
Restart=always
|
||||||
RestartSec=30
|
RestartSec=30
|
||||||
User=vmail
|
User=vmail
|
||||||
558
cmdeploy/src/cmdeploy/lxc/cli.py
Normal file
558
cmdeploy/src/cmdeploy/lxc/cli.py
Normal file
@@ -0,0 +1,558 @@
|
|||||||
|
"""lxc-start/stop/status/test subcommands for testing with local containers."""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
import threading
|
||||||
|
import time
|
||||||
|
|
||||||
|
from ..util import (
|
||||||
|
collapse,
|
||||||
|
get_git_hash,
|
||||||
|
get_version_string,
|
||||||
|
shell,
|
||||||
|
)
|
||||||
|
from .incus import Incus, RelayContainer
|
||||||
|
|
||||||
|
RELAY_NAMES = ("test0", "test1")
|
||||||
|
|
||||||
|
|
||||||
|
# -------------------------------------------------------------------
|
||||||
|
# lxc-start
|
||||||
|
# -------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def lxc_start_cmd_options(parser):
    """Register CLI options for the ``lxc-start`` subcommand."""
    _add_name_args(
        parser,
        help_text="User relay name(s) to create (default: test0).",
    )
    # Both extra options are simple store_true flags.
    for flag, kwargs in (
        (
            "--ipv4-only",
            {"dest": "ipv4_only", "help": "Create an IPv4-only container."},
        ),
        (
            "--run",
            {"help": "Run 'cmdeploy run' on each container after starting it."},
        ),
    ):
        parser.add_argument(flag, action="store_true", **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def lxc_start_cmd(args, out):
    """Create/Ensure and start LXC relay and DNS containers.

    Ensures the shared DNS container, then each requested relay
    container, resets DNS zones for the started relays, writes the
    unified ssh-config and verifies SSH access.  Returns 1 on DNS
    failure, otherwise falls through (or returns a 'cmdeploy run'
    exit code when --run was given).
    """
    ix = Incus()
    out.green("Ensuring DNS container (ns-localchat) ...")
    dns_ct = ix.get_dns_container()
    dns_ct.ensure()
    # Publish the DNS image once so later runs can clone from it.
    if not ix.find_dns_image():
        with out.section("LXC: publishing DNS image"):
            dns_ct.publish_as_dns_image()
    out.print(f" DNS container IP: {dns_ct.ipv4}")

    names = args.names if args.names else RELAY_NAMES
    relays = list(ix.get_container(n) for n in names)
    for ct in relays:
        out.green(f"Ensuring container {ct.name!r} ({ct.domain}) ...")
        ct.ensure()
        ip = ct.ipv4

        out.print(" Configuring container hostname ...")
        ct.configure_hosts(ip)

        out.print(f" Writing {ct.ini.name} ...")
        ct.write_ini(disable_ipv6=args.ipv4_only)
        out.print(f" Config: {ct.ini}")
        if args.ipv4_only:
            ct.disable_ipv6()
            ipv6 = None
        else:
            # Query the container's global (non-deprecated) IPv6 address.
            output = ct.bash(
                "ip -6 addr show scope global -deprecated"
                " | grep -oP '(?<=inet6 )[^/]+'",
                check=False,
            )
            ipv6 = output.strip() if output else None
        out.print(f" {_format_addrs(ip, ipv6)}")

        out.green(f" Container {ct.name!r} ready: {ct.domain} -> {ip}")
        out.print()

    # Reset DNS zones only for the containers we just started
    started_cnames = {ct.name for ct in relays}
    managed = ix.list_managed()
    started = [c for c in managed if c["name"] in started_cnames]

    if started:
        out.print(
            f"Resetting DNS zones for {len(started)}"
            " domain(s) (A + AAAA records) ..."
        )
        dns_ct.reset_dns_records(dns_ct.ipv4, started)

    # Point each started relay at the DNS container and verify resolution.
    for ct in relays:
        if ct.name in started_cnames:
            out.print(f" Configuring and testing DNS in {ct.name} ...")
            ct.configure_dns(dns_ct.ipv4)
            if not ct.check_dns():
                out.red(
                    f" DNS check failed for {ct.name}"
                    ": cannot resolve external hosts"
                )
                return 1

    # Generate the unified SSH config
    out.green("Writing ssh-config ...")
    ssh_cfg = ix.write_ssh_config()
    out.print(f" {ssh_cfg}")

    # Verify SSH via the generated config
    for ct in relays:
        out.print(f" Verifying SSH to {ct.name} via ssh-config ...")
        if ct.verify_ssh(ssh_cfg):
            out.print(f" SSH OK: ssh -F lxconfigs/ssh-config {ct.domain}")
        else:
            # Non-fatal: warn and continue with the remaining relays.
            out.red(f" WARNING: SSH verification failed for {ct.name}")

    # Print integration suggestions
    ssh_cfg = ix.ssh_config_path
    if not ix.check_ssh_include():
        out.green(
            "\n (Optional) To use containers from any SSH client, add to ~/.ssh/config:"
        )
        out.green(f" Include {ssh_cfg}")

    # Optionally run cmdeploy run on each relay
    if args.run:
        for ct in relays:
            with out.section(f"cmdeploy run: {ct.sname} ({ct.domain})"):
                ret = _run_cmdeploy("run", ct, ix, out, extra=["--skip-dns-check"])
                if ret:
                    out.red(f"Deploy to {ct.sname} failed (exit {ret})")
                    return ret
|
||||||
|
|
||||||
|
|
||||||
|
# -------------------------------------------------------------------
|
||||||
|
# lxc-stop
|
||||||
|
# -------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def lxc_stop_cmd_options(parser):
    """Register CLI options for the ``lxc-stop`` subcommand."""
    # Both destroy variants are plain store_true flags.
    for flag, kwargs in (
        (
            "--destroy",
            {"help": "Delete containers and their config files after stopping."},
        ),
        (
            "--destroy-all",
            {
                "dest": "destroy_all",
                "help": "Like --destroy, but also remove the ns-localchat DNS container.",
            },
        ),
    ):
        parser.add_argument(flag, action="store_true", **kwargs)
    _add_name_args(
        parser,
        help_text="Container name(s) to stop (default: test0 + test1).",
    )
|
||||||
|
|
||||||
|
|
||||||
|
def lxc_stop_cmd(args, out):
    """Stop (and optionally destroy) local LXC relay containers."""
    incus = Incus()
    target_names = args.names or RELAY_NAMES
    destroy = args.destroy or args.destroy_all

    for target in target_names:
        container = incus.get_container(target)
        if not destroy:
            out.green(f"Stopping container {container.name!r} ...")
            container.stop(force=True)
            continue
        out.green(f"Destroying container {container.name!r} ...")
        container.destroy()
        if hasattr(container, "image_alias"):
            out.green(f" Deleting cached image {container.image_alias!r} ...")
            incus.run(["image", "delete", container.image_alias], check=False)

    if args.destroy_all:
        # Also remove the shared DNS container and any cached images.
        dns_container = incus.get_dns_container()
        out.green(f"Destroying DNS container {dns_container.name!r} ...")
        dns_container.destroy()
        incus.delete_images()

    if destroy:
        # Regenerate the ssh-config so it no longer lists removed hosts.
        incus.write_ssh_config()
        out.green("LXC containers destroyed.")
    else:
        out.green("LXC containers stopped.")
|
||||||
|
|
||||||
|
|
||||||
|
# -------------------------------------------------------------------
|
||||||
|
# lxc-test
|
||||||
|
# -------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def lxc_test_cmd_options(parser):
    """Register CLI options for the ``lxc-test`` subcommand."""
    one_help = "Only deploy and test against test0 (skip test1)."
    parser.add_argument("--one", action="store_true", help=one_help)
|
||||||
|
|
||||||
|
|
||||||
|
def lxc_test_cmd(args, out):
    """Run full LXC pipeline: start, deploy, DNS, zone files, and tests.

    All commands run directly on the host using
    ``--ssh-config lxconfigs/ssh-config`` for SSH access.

    Returns 0 on success, otherwise the first failing step's exit code.
    """
    ix = Incus()
    t_total = time.time()
    relay_names = list(RELAY_NAMES)
    if args.one:
        relay_names = relay_names[:1]

    local_hash = get_git_hash()

    # Per-relay: start containers, then deploy in parallel.
    # The second relay runs IPv4-only to cover both networking setups.
    ipv4_only_flags = {RELAY_NAMES[0]: False, RELAY_NAMES[1]: True}

    # Phase 1: start all containers (sequential, fast)
    for ct in map(ix.get_container, relay_names):
        name = ct.sname
        ipv4_only = ipv4_only_flags.get(name, False)
        label = "IPv4-only" if ipv4_only else "dual-stack"

        with out.section(f"LXC: lxc-start {name} ({label})"):
            # Reuse the args namespace to drive lxc_start_cmd per relay.
            args.names = [name]
            args.ipv4_only = ipv4_only
            args.run = False
            ret = lxc_start_cmd(args, out)
            if ret:
                return ret

    # Phase 2: deploy all relays in parallel
    to_deploy = []
    for ct in map(ix.get_container, relay_names):
        # Skip relays already in sync with the local git hash.
        status = _deploy_status(ct, local_hash, ix)
        if "IN-SYNC" in status:
            out.section_line(f"cmdeploy run: {ct.sname}: {status}, skipping")
        else:
            to_deploy.append(ct)

    if to_deploy:
        with out.section("cmdeploy run (parallel)"):
            ret = _run_cmdeploy_parallel(
                "run", to_deploy, ix, out, extra=["--skip-dns-check"]
            )
            if ret:
                return ret

    # Phase 3: publish images (sequential, fast)
    for ct in map(ix.get_container, relay_names):
        if ct.publish_image():
            out.section_line(f"LXC: published {ct.sname} image")
        else:
            out.section_line(
                f"LXC: publish {ct.sname} image: skipped, cached",
            )

    # Generate each relay's zone file via `cmdeploy dns`.
    for ct in map(ix.get_container, relay_names):
        with out.section(f"cmdeploy dns: {ct.sname} ({ct.domain})"):
            ret = _run_cmdeploy("dns", ct, ix, out, extra=["--zonefile", str(ct.zone)])
            if ret:
                out.red(f"DNS for {ct.sname} failed (exit {ret})")
                return ret

    # Load the generated zone files into the PowerDNS container.
    with out.section("LXC: PowerDNS zone update"):
        dns_ct = ix.get_dns_container()
        for ct in map(ix.get_container, relay_names):
            if ct.zone.exists():
                zone_data = ct.zone.read_text()
                out.print(f" Loading {ct.zone} into PowerDNS ...")
                dns_ct.set_dns_records(zone_data)

    # Run tests in both directions when two relays are available.
    test_pairs = [(0, 1), (1, 0)] if len(relay_names) > 1 else [(0,)]
    for pair in test_pairs:
        first = ix.get_container(relay_names[pair[0]])
        label = first.sname
        env = None
        if len(pair) > 1:
            # Cross-relay run: point tests at the second relay's domain.
            second = ix.get_container(relay_names[pair[1]])
            label = f"{first.sname} \u2194 {second.sname}"
            env = os.environ.copy()
            env["CHATMAIL_DOMAIN2"] = second.domain

        with out.section(f"cmdeploy test: {label}"):
            ret = _run_cmdeploy("test", first, ix, out, **({"env": env} if env else {}))
            if ret:
                out.red(f"Tests failed (exit {ret})")
                return ret

    # Final timing summary.
    elapsed = time.time() - t_total
    out.section_line(f"lxc-test complete ({elapsed:.1f}s)")
    if out.section_timings:
        out.print("Section timings:")
        for name, secs in out.section_timings:
            out.print(f" {name:.<50s} {secs:5.1f}s")
        out.print(f" {'total':.<50s} {elapsed:5.1f}s")
        out.section_timings.clear()
    return 0
|
||||||
|
|
||||||
|
|
||||||
|
# -------------------------------------------------------------------
|
||||||
|
# lxc-status
|
||||||
|
# -------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def lxc_status_cmd_options(parser):
    """lxc-status takes no extra options."""
|
|
||||||
|
|
||||||
|
def lxc_status_cmd(args, out):
    """Show status of local LXC chatmail containers.

    Prints one entry per managed container, then SSH-include and
    .localchat DNS-forwarding status. Returns 0 on success, or 1
    when no managed containers exist.
    """
    ix = Incus()
    containers = ix.list_managed()
    if not containers:
        out.red("No LXC containers found. Run 'cmdeploy lxc-start' first.")
        return 1

    # local git hash, compared against each relay's deployed hash
    local_hash = get_git_hash()

    # Get storage pool path for display
    storage_path = None
    data = ix.run_json(["storage", "show", "default"], check=False)
    if data:
        storage_path = data.get("config", {}).get("source")
    if storage_path:
        out.green(f"Containers: ({storage_path})")
    else:
        out.green("Containers:")

    dns_ip = None
    for c in containers:
        _print_container_status(out, c, ix, local_hash)
        # remember the DNS container's IP for the forwarding check below
        if c["name"] == ix.get_dns_container().name:
            dns_ip = c["ip"]

    _print_ssh_status(out, ix)
    _print_dns_forwarding_status(out, dns_ip)
    return 0
|
||||||
|
|
||||||
|
|
||||||
|
def _print_container_status(out, c, ix, local_hash):
    """Print name/status, domain/IPs, and RAM for one container.

    *c* is one dict from ``Incus.list_managed()`` (keys: name, ip,
    ipv6, domain, status, memory_usage); *local_hash* is the local
    git hash fed into ``_deploy_status``.
    """
    cname = c["name"]
    is_running = c.get("status") == "Running"
    ct = ix.get_container(cname)

    # First line: name + running/STOPPED + deploy status
    if not is_running:
        tag = "STOPPED"
    elif not isinstance(ct, RelayContainer):
        # non-relay (DNS) containers have no deploy status
        tag = "running"
    else:
        tag = f"running {_deploy_status(ct, local_hash, ix)}"
    out.print(f" {cname:20s} {tag}")

    # Second line: domain, IPv4, IPv6
    domain = c.get("domain", "")
    ip = c.get("ip") or "?"
    ipv6 = c.get("ipv6")
    out.print(f" {domain:20s} {_format_addrs(ip, ipv6)}")

    # Third line: RAM (RSS), config
    indent = " " * 21
    try:
        used, total = ct.rss_mib()
    except Exception:
        # rss_mib() may return None (unpack raises TypeError) or fail
        # when the container is stopped; show a placeholder instead
        ram_str = "RSS ?"
    else:
        ram_str = f"RSS {used}/{total} MiB ({used * 100 // total}%)"

    if isinstance(ct, RelayContainer):
        detail = f"{ram_str}, config: {os.path.relpath(ct.ini)}"
    else:
        detail = ram_str

    out.print(f" {indent}{detail}")
    out.print()
|
||||||
|
|
||||||
|
|
||||||
|
def _print_ssh_status(out, ix):
    """Print SSH integration status."""
    out.print()
    if not ix.check_ssh_include():
        out.red("SSH: ~/.ssh/config does NOT include lxconfigs/ssh-config")
        out.print(" Add to ~/.ssh/config:")
        out.print(f" Include {ix.ssh_config_path}")
    else:
        out.green("SSH: ~/.ssh/config includes lxconfigs/ssh-config ✓")
|
||||||
|
|
||||||
|
|
||||||
|
def _print_dns_forwarding_status(out, dns_ip):
    """Print host DNS forwarding status for .localchat."""
    if not dns_ip:
        out.red("DNS: ns-localchat container not found")
        return
    # dns_ok stays None when resolvectl is unavailable or times out
    dns_ok = None
    try:
        rv = shell("resolvectl status incusbr0", timeout=5)
    except (FileNotFoundError, subprocess.TimeoutExpired, OSError):
        pass
    else:
        dns_ok = dns_ip in rv.stdout and "localchat" in rv.stdout
    if dns_ok is True:
        out.green(f"DNS: .localchat forwarding to {dns_ip} ✓")
    elif dns_ok is False:
        out.red("DNS: .localchat forwarding NOT configured")
        out.print(" Run:")
        out.print(f" sudo resolvectl dns incusbr0 {dns_ip}")
        out.print(" sudo resolvectl domain incusbr0 ~localchat")
    else:
        out.print(" DNS: .localchat forwarding status UNKNOWN")
|
||||||
|
|
||||||
|
|
||||||
|
# -------------------------------------------------------------------
|
||||||
|
# Internal helpers
|
||||||
|
# -------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def _format_addrs(ip, ipv6=None):
|
||||||
|
parts = [f"IPv4 {ip}"]
|
||||||
|
if ipv6:
|
||||||
|
parts.append(f"IPv6 {ipv6}")
|
||||||
|
return ", ".join(parts)
|
||||||
|
|
||||||
|
|
||||||
|
def _deploy_status(ct, local_hash, ix):
    """Return a human-readable deploy status string.

    Compares the full deployed version (hash + diff) against
    the local state built by :func:`~cmdeploy.util.get_version_string`.
    *ix* is accepted for call-site symmetry but not used here.
    """
    deployed = ct.deployed_version()
    if deployed is None:
        return "NOT DEPLOYED"

    # A container launched from the relay image has the same
    # git hash but a different domain - always redeploy.
    deployed_domain = ct.deployed_domain()
    if deployed_domain and deployed_domain != ct.domain:
        return f"DOMAIN-MISMATCH (deployed: {deployed_domain})"

    # First line of the version file is the git hash.
    version_lines = deployed.splitlines()
    deployed_hash = version_lines[0] if version_lines else ""
    short = deployed_hash[:12]

    if not local_hash:
        return f"UNKNOWN (deployed: {short})"

    local_short = local_hash[:12]
    if deployed_hash != local_hash:
        return f"STALE (deployed: {short}, local: {local_short})"

    # Hash matches - check for uncommitted diffs
    if deployed != get_version_string():
        return f"DIRTY ({local_short}, undeployed changes)"

    return f"IN-SYNC ({short})"
|
||||||
|
|
||||||
|
|
||||||
|
def _add_name_args(parser, help_text=None):
|
||||||
|
"""Add optional positional NAME arguments."""
|
||||||
|
parser.add_argument(
|
||||||
|
"names",
|
||||||
|
nargs="*",
|
||||||
|
metavar="NAME",
|
||||||
|
help=help_text or "Relay name(s) to operate on.",
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _build_cmdeploy_cmd(subcmd, ct, ix, extra=None):
    """Build the ``cmdeploy <subcmd>`` command string.

    *extra* is an optional list of additional CLI argument strings.
    NOTE(review): ``collapse`` presumably folds the multi-line template
    into a single command line - confirm against its definition.
    """
    extra_str = " ".join(extra) if extra else ""
    return collapse(f"""\
        cmdeploy {subcmd}
        --config {ct.ini}
        --ssh-config {ix.ssh_config_path}
        --ssh-host {ct.domain}
        {extra_str}
    """)
|
||||||
|
|
||||||
|
|
||||||
|
def _run_cmdeploy(subcmd, ct, ix, out, extra=None, **kwargs):
    """Run ``cmdeploy <subcmd>`` with standard --config/--ssh flags.

    *ct* is a Container (uses ``ct.ini`` and ``ct.domain``).
    Returns the subprocess exit code.
    """
    # default to running from the project root unless the caller overrides
    kwargs.setdefault("cwd", str(ix.project_root))
    cmd = _build_cmdeploy_cmd(subcmd, ct, ix, extra=extra)
    out.print(f" [$ {cmd}]")
    result = shell(cmd, capture_output=False, **kwargs)
    return result.returncode
|
||||||
|
|
||||||
|
|
||||||
|
# Number of tail lines to print on failure
# (see _run_cmdeploy_parallel, which slices each process's output).
_FAIL_CONTEXT_LINES = 40
|
||||||
|
|
||||||
|
|
||||||
|
def _run_cmdeploy_parallel(subcmd, containers, ix, out, extra=None):
    """Run ``cmdeploy <subcmd>`` for every container in parallel.

    Output is captured and filtered: only lines containing
    ``"Starting operation"`` are printed (prefixed with the relay
    short-name). On failure the last *_FAIL_CONTEXT_LINES*
    lines of that process's output are shown.

    Returns the exit code of the first failing process, or 0 when
    all succeed.
    """
    procs = []  # list of (container, Popen, collected_lines)
    cwd = str(ix.project_root)

    # Spawn one shell per container; stderr is merged into stdout so a
    # single reader thread per process sees everything.
    for ct in containers:
        cmd = _build_cmdeploy_cmd(subcmd, ct, ix, extra=extra)
        out.print(f" [{ct.sname}] $ {cmd}")
        proc = subprocess.Popen(
            cmd,
            shell=True,
            text=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            cwd=cwd,
        )
        procs.append((ct, proc, []))

    def _reader(ct, proc, lines):
        # Drain one process's output: collect every line, echo only
        # progress markers so parallel output stays readable.
        prefix = f" [{ct.sname}]"
        for raw in proc.stdout:
            line = raw.rstrip("\n")
            lines.append(line)
            if "Starting operation" in line:
                out.print(f"{prefix} {line}")

    threads = []
    for ct, proc, lines in procs:
        t = threading.Thread(
            target=_reader,
            args=(ct, proc, lines),
            daemon=True,
        )
        t.start()
        threads.append(t)

    # Wait for output to drain, then for the processes to exit so
    # returncode is populated.
    for t in threads:
        t.join()
    for _, proc, _ in procs:
        proc.wait()

    # Check results
    first_failure = 0
    for ct, proc, lines in procs:
        if proc.returncode:
            out.red(f"Deploy to {ct.sname} failed " f"(exit {proc.returncode})")
            tail = lines[-_FAIL_CONTEXT_LINES:]
            for tl in tail:
                out.print(f" [{ct.sname}] {tl}")
            if not first_failure:
                first_failure = proc.returncode
    return first_failure
|
||||||
754
cmdeploy/src/cmdeploy/lxc/incus.py
Normal file
754
cmdeploy/src/cmdeploy/lxc/incus.py
Normal file
@@ -0,0 +1,754 @@
|
|||||||
|
"""Core Incus operations for local chatmail LXC containers."""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import subprocess
|
||||||
|
import textwrap
|
||||||
|
import time
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from ..util import shell
|
||||||
|
|
||||||
|
# Incus config key marking containers managed by this tooling.
LABEL_KEY = "user.localchat-managed"
# Filename of the shared SSH key kept under lxconfigs/.
SSH_KEY_NAME = "id_localchat"
# All managed containers get domains under this suffix.
DOMAIN_SUFFIX = ".localchat"
# Upstream image the cached base image is built from.
UPSTREAM_IMAGE = "images:debian/12"
BASE_IMAGE_ALIAS = "localchat-base"
# Throwaway container name used while building the base image.
BASE_SETUP_NAME = "localchat-base-setup"
DNS_IMAGE_ALIAS = "localchat-ns"

DNS_CONTAINER_NAME = "ns-localchat"
DNS_DOMAIN = "ns.localchat"

# Static addressing on the incusbr0 bridge.
BRIDGE_IPV4 = "10.200.200.1/24"
DNS_IP = "10.200.200.2"
# Fixed IPv4 addresses for the known relay containers.
RELAY_IPS = {
    "test0": "10.200.200.10",
    "test1": "10.200.200.11",
    "test2": "10.200.200.12",
}
|
||||||
|
|
||||||
|
|
||||||
|
def _extract_ip(net_data, family="inet"):
|
||||||
|
"""Extract the first global-scope IP of *family* from network state data.
|
||||||
|
|
||||||
|
*net_data* is the ``state.network`` dict from ``incus list --format=json``.
|
||||||
|
*family* is ``"inet"`` for IPv4 or ``"inet6"`` for IPv6.
|
||||||
|
Returns the address string, or None.
|
||||||
|
"""
|
||||||
|
for iface_name, iface in net_data.items():
|
||||||
|
if iface_name == "lo":
|
||||||
|
continue
|
||||||
|
for addr in iface.get("addresses", []):
|
||||||
|
if addr["family"] == family and addr["scope"] == "global":
|
||||||
|
return addr["address"]
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
class Incus:
    """Gateway for all Incus container operations.

    Instantiated once per CLI command and passed around so that
    all modules share a single entry point for Incus interactions.
    """

    def __init__(self):
        # repo root: five levels up from .../cmdeploy/src/cmdeploy/lxc/incus.py
        self.project_root = Path(__file__).resolve().parent.parent.parent.parent.parent
        self.lxconfigs_dir = self.project_root / "lxconfigs"
        self.lxconfigs_dir.mkdir(exist_ok=True)
        self.ssh_key_path = self.lxconfigs_dir / SSH_KEY_NAME
        if not self.ssh_key_path.exists():
            # one-time creation of the shared passwordless localchat key
            shell(
                f"ssh-keygen -t ed25519 -f {self.ssh_key_path} -N '' -C localchat",
                check=True,
            )
        self.ssh_config_path = self.lxconfigs_dir / "ssh-config"

    def write_ssh_config(self):
        """Write ``lxconfigs/ssh-config`` mapping all containers to their IPs.

        Each Host block maps the container name, the domain name, and the
        short relay name (e.g. ``_test0``) to the container's IP, using the
        shared localchat SSH key. Returns the path to the file.
        """
        containers = self.list_managed()
        key_path = self.ssh_key_path
        lines = ["# Auto-generated by cmdeploy lxc-start - do not edit\n"]
        for c in containers:
            hosts = [c["name"]]
            domain = c.get("domain", "")
            if domain and domain != c["name"]:
                hosts.append(domain)
            # also allow the short host form, e.g. "_test0"
            short = domain.split(".")[0]
            if short and short not in hosts:
                hosts.append(short)
            lines.append(f"\nHost {' '.join(hosts)}\n")
            lines.append(f" Hostname {c['ip']}\n")
            lines.append(" User root\n")
            lines.append(f" IdentityFile {key_path}\n")
            lines.append(" IdentitiesOnly yes\n")
            lines.append(" StrictHostKeyChecking accept-new\n")
            lines.append(" UserKnownHostsFile /dev/null\n")
            lines.append(" LogLevel ERROR\n")
        path = self.ssh_config_path
        path.write_text("".join(lines))
        return path

    def check_ssh_include(self):
        """Check if the user's ~/.ssh/config already includes our ssh-config."""
        user_ssh_config = Path.home() / ".ssh" / "config"
        if not user_ssh_config.exists():
            return False
        # FIX: the previous implementation iterated an open() handle
        # without closing it; read_text() reads and closes the file.
        stripped = (line.strip() for line in user_ssh_config.read_text().splitlines())
        return f"Include {self.ssh_config_path}" in stripped

    def run(self, args, check=True, capture=True, input=None):
        """Run an incus command.

        With *capture* True (default) stdout/stderr are captured on the
        returned ``CompletedProcess``; otherwise output streams to the
        terminal.
        """
        cmd = ["incus"] + list(args)
        kwargs = dict(check=check, text=True, input=input)
        if capture:
            kwargs["capture_output"] = True
        else:
            kwargs["stdout"] = None
            kwargs["stderr"] = None
        return subprocess.run(cmd, **kwargs)  # noqa: PLW1510

    def run_json(self, args, check=True):
        """Run an incus command with ``--format=json``.

        Returns the parsed JSON on success.
        When *check* is True raises ``subprocess.CalledProcessError``
        on non-zero exit; when False returns *None* instead.
        """
        result = self.run(
            list(args) + ["--format=json"],
            check=check,
        )
        if result.returncode != 0:
            return None
        return json.loads(result.stdout)

    def run_output(self, args, check=True):
        """Run an incus command and return its stripped stdout.

        When *check* is False, returns *None* on non-zero exit
        instead of raising.
        """
        result = self.run(args, check=check)
        if result.returncode != 0:
            return None
        return result.stdout.strip()

    def _find_image(self, alias):
        """Return *alias* if an image with that alias exists, else None."""
        images = self.run_json(["image", "list"], check=False) or []
        for img in images:
            for a in img.get("aliases", []):
                if a.get("name") == alias:
                    return alias
        return None

    def find_dns_image(self):
        """Return the DNS image alias if it exists, else None."""
        return self._find_image(DNS_IMAGE_ALIAS)

    def delete_images(self):
        """Delete all cached localchat images."""
        for alias in (DNS_IMAGE_ALIAS, BASE_IMAGE_ALIAS):
            self.run(["image", "delete", alias], check=False)
        # also drop the cached per-relay images
        for name in RELAY_IPS:
            self.run(["image", "delete", f"localchat-{name}"], check=False)

    def list_managed(self):
        """Return list of dicts with name, ip, ipv6, domain, status, memory_usage."""
        containers = []
        for ct in self.run_json(["list"]):
            config = ct.get("config", {})
            # only containers tagged by Container.launch() are ours
            if config.get(LABEL_KEY) != "true":
                continue
            name = ct["name"]
            state = ct.get("state", {})
            net = state.get("network") or {}
            containers.append(
                {
                    "name": name,
                    "ip": _extract_ip(net, "inet"),
                    "ipv6": _extract_ip(net, "inet6"),
                    "domain": config.get(
                        "user.localchat-domain", f"{name}{DOMAIN_SUFFIX}"
                    ),
                    "status": ct.get("status", "Unknown"),
                    "memory_usage": state.get("memory", {}).get("usage", 0),
                }
            )
        return containers

    def ensure_base_image(self):
        """Build and cache a base image with openssh and the SSH key.

        The image is published as a local incus image with alias
        'localchat-base'. Subsequent container launches use this
        image instead of the upstream Debian 12, skipping the
        slow apt-get install step.
        Returns the image alias.
        """
        if self._find_image(BASE_IMAGE_ALIAS):
            return BASE_IMAGE_ALIAS

        print(" Building base image (one-time setup) ...")

        # remove leftovers from a previous aborted build
        self.run(["delete", BASE_SETUP_NAME, "--force"], check=False)
        self.run(["image", "delete", BASE_IMAGE_ALIAS], check=False)
        self.run(
            ["launch", UPSTREAM_IMAGE, BASE_SETUP_NAME, "-c", "limits.memory=512MiB"]
        )

        ct = Container(self, BASE_SETUP_NAME, memory="512MiB")
        ct.wait_ready()

        key_path = self.ssh_key_path
        pub_key = key_path.with_suffix(".pub").read_text().strip()
        print(" ── apt-get install (base image) ──")
        ct.bash(
            f"""\
            systemctl disable --now systemd-resolved 2>/dev/null || true
            rm -f /etc/resolv.conf
            echo 'nameserver 9.9.9.9' > /etc/resolv.conf
            while fuser /var/lib/apt/lists/lock >/dev/null 2>&1 ; do
                echo "Waiting for other apt-get instance to finish..."
                sleep 5
            done
            apt-get -o DPkg::Lock::Timeout=60 update
            DEBIAN_FRONTEND=noninteractive apt-get install -y openssh-server python3
            systemctl enable ssh
            apt-get clean
            mkdir -p /root/.ssh
            chmod 700 /root/.ssh
            echo '{pub_key}' > /root/.ssh/authorized_keys
            chmod 600 /root/.ssh/authorized_keys
            """,
            capture=False,
        )
        print(" ── base image install done ──")

        # publish the prepared container as the cached base image
        self.run(["stop", BASE_SETUP_NAME])
        self.run(["publish", BASE_SETUP_NAME, f"--alias={BASE_IMAGE_ALIAS}"])
        self.run(["delete", BASE_SETUP_NAME, "--force"])
        print(f" Base image '{BASE_IMAGE_ALIAS}' ready.")
        return BASE_IMAGE_ALIAS

    def ensure_bridge(self):
        """Ensure incusbr0 exists and uses our fixed IPv4 subnet."""
        bridge = self.run_json(["network", "show", "incusbr0"], check=False)
        if bridge and bridge.get("config", {}).get("ipv4.address") == BRIDGE_IPV4:
            return

        print(f" Configuring incusbr0 with static subnet {BRIDGE_IPV4} ...")
        if not bridge:
            self.run(["network", "create", "incusbr0"], check=False)

        self.run(
            [
                "network",
                "set",
                "incusbr0",
                f"ipv4.address={BRIDGE_IPV4}",
                "ipv4.nat=true",
                "ipv6.address=none",
                "dns.mode=none",
            ]
        )

    def get_container(self, name):
        """Return a container handle for the given name.

        Accepts both short relay names (``test0``) and full Incus
        container names (``test0-localchat``). Returns
        ``DNSContainer`` for the DNS container and
        ``RelayContainer`` for everything else.
        """
        if name == DNS_CONTAINER_NAME:
            return DNSContainer(self)
        return RelayContainer(self, name.removesuffix("-localchat"))

    def get_dns_container(self):
        """Return a DNSContainer handle."""
        return DNSContainer(self)
|
||||||
|
|
||||||
|
|
||||||
|
class Container:
    """Lightweight handle for an Incus container.

    Carries the container *name* and provides convenience methods
    for running commands, managing lifecycle, and extracting state
    so callers don't repeat the name everywhere.
    """

    def __init__(self, incus, name, domain=None, memory="200MiB", ipv4=None):
        self.incus = incus  # shared Incus gateway
        self.name = name
        # domain defaults to "<name>.localchat"
        self.domain = domain or f"{name}{DOMAIN_SUFFIX}"
        self.memory = memory  # incus limits.memory value
        # desired static IPv4 (may be None); both addresses are
        # refreshed from live state by wait_ready()
        self.ipv4 = ipv4
        self.ipv6 = None

    def bash(self, script, check=True, capture=True):
        """Returns stdout from executing ``bash -ec <script>`` inside this container.

        *script* is dedented and stripped so callers can use triple-quoted strings.
        When *check* is False, returns *None* on non-zero exit instead of raising.
        When *capture* is False, output streams to the terminal and None is returned.
        """
        cmd = ["exec", self.name, "--", "bash", "-ec", textwrap.dedent(script).strip()]
        if not capture:
            self.incus.run(cmd, check=check, capture=False)
            return None
        return self.incus.run_output(cmd, check=check)

    def run_cmd(self, *args, check=True):
        """Return stdout from running a command directly in the container (no shell).

        When *check* is False, returns *None* on non-zero exit instead of raising.
        """
        return self.incus.run_output(
            ["exec", self.name, "--", *args],
            check=check,
        )

    def start(self):
        # Start an existing (stopped) container.
        self.incus.run(["start", self.name])

    def stop(self, force=False):
        # check=False so stopping an already-stopped container is a no-op.
        cmd = ["stop", self.name]
        if force:
            cmd.append("--force")
        self.incus.run(cmd, check=False)

    def launch(self, image=None):
        """Launch from the specified image, or the base image if None."""
        self.incus.ensure_bridge()
        if image is None:
            image = self.incus.ensure_base_image()
        # Tag the container as localchat-managed and record its domain
        # so list_managed() can find it later.
        cfg = []
        cfg += ("-c", f"{LABEL_KEY}=true")
        cfg += ("-c", f"user.localchat-domain={self.domain}")
        cfg += ("-c", f"limits.memory={self.memory}")
        self.incus.run(["init", image, self.name, *cfg])
        if self.ipv4:
            # pin the static IPv4 on eth0 before first start
            self.incus.run(
                [
                    "config",
                    "device",
                    "override",
                    self.name,
                    "eth0",
                    f"ipv4.address={self.ipv4}",
                ]
            )
        self.incus.run(["start", self.name])
        return image

    def ensure(self):
        """Create/start this container from the cached base image.

        On first call, builds the base image (~30s).
        Subsequent containers launch in ~2s from the cached image.
        Returns ``self`` for chaining.
        """
        # NOTE(review): "incus list <name>" presumably matches by prefix,
        # hence the exact-name filter below - confirm.
        data = self.incus.run_json(["list", self.name], check=False) or []

        existing = [c for c in data if c["name"] == self.name]
        image = None
        if existing:
            status = existing[0]["status"]
            if status != "Running":
                print(f" Starting stopped {self.name} container ...")
                self.start()
            else:
                print(f" {self.name} already running")
        else:
            image = self.launch()
        self.wait_ready()
        if image:
            print(f" Ensured {self.name} (launched from {image!r} image)")
        return self

    def destroy(self):
        """Stop, delete, and clean up config files."""
        self.stop(force=True)
        self.incus.run(["delete", self.name, "--force"], check=False)

    def push_file_content(self, dest_path, content):
        """Write *content* to *dest_path* inside the container.

        *content* is dedented and stripped so callers can use
        indented triple-quoted strings.
        """
        content = textwrap.dedent(content).strip() + "\n"
        # "file push -" reads the payload from stdin
        self.incus.run(
            ["file", "push", "-", f"{self.name}{dest_path}"],
            input=content,
        )
        self.bash(f"chmod 644 {dest_path}")

    def wait_ready(self, timeout=60):
        """Wait until the container is running with an IPv4 address.

        Sets ``self.ipv4`` and ``self.ipv6`` (may be *None*),
        or raises ``TimeoutError``.
        """
        deadline = time.time() + timeout
        while time.time() < deadline:
            data = self.incus.run_json(
                ["list", self.name],
                check=False,
            )
            if data and data[0].get("status") == "Running":
                net = data[0].get("state", {}).get("network", {})
                self.ipv4 = _extract_ip(net, "inet")
                self.ipv6 = _extract_ip(net, "inet6")
                if self.ipv4:
                    return
            time.sleep(1)
        raise TimeoutError(
            f"Container {self.name!r} did not become ready within {timeout}s"
        )

    def rss_mib(self):
        """Return ``(used, total)`` memory from container (or None if unobtainable)."""
        output = self.bash("free -m", check=False)
        if output:
            for line in output.splitlines():
                if line.startswith("Mem:"):
                    # free -m "Mem:" row: columns are total, used, free, ...
                    parts = line.split()
                    return int(parts[2]), int(parts[1])
|
||||||
|
|
||||||
|
|
||||||
|
class RelayContainer(Container):
    """Container handle for a chatmail relay.

    Accepts the short relay name (e.g. ``test0``) and derives
    the Incus container name and mail domain automatically.
    """

    def __init__(self, incus, name):
        super().__init__(
            incus,
            f"{name}-localchat",
            domain=f"_{name}{DOMAIN_SUFFIX}",
            memory="500MiB",
            ipv4=RELAY_IPS.get(name),
        )
        self.sname = name  # short relay name, e.g. "test0"
        self.image_alias = f"localchat-{name}"  # cached per-relay image alias
        # per-relay config and DNS zone files under lxconfigs/
        self.ini = incus.lxconfigs_dir / f"chatmail-{name}.ini"
        self.zone = incus.lxconfigs_dir / f"{name}.zone"

    def launch(self):
        """Launch from a cached per-relay image if available, else from base."""
        cached = self.incus._find_image(self.image_alias)
        if cached:
            print(f" Using cached image {cached!r}")
        else:
            print(" No cached image, building from base")
        # cached is None when absent, which makes Container.launch()
        # fall back to the base image
        image = super().launch(image=cached)
        # drop the recorded version so a fresh launch reports NOT DEPLOYED
        # until cmdeploy runs (see deployed_version())
        self.bash("rm -f /etc/chatmail-version")
        return image

    def destroy(self):
        """Stop, delete, and clean up config files."""
        super().destroy()
        if self.ini.exists():
            self.ini.unlink()

    def disable_ipv6(self):
        """Disable IPv6 inside the container via sysctl."""
        # NOTE(review): the printf command below spans several lines
        # without shell line continuations - confirm it produces the
        # intended sysctl.d file.
        self.bash("""\
            sysctl -w net.ipv6.conf.all.disable_ipv6=1
            sysctl -w net.ipv6.conf.default.disable_ipv6=1
            mkdir -p /etc/sysctl.d
            printf 'net.ipv6.conf.all.disable_ipv6=1\\n
            net.ipv6.conf.default.disable_ipv6=1\\n'
            > /etc/sysctl.d/99-disable-ipv6.conf
            """)

    def configure_hosts(self, ip):
        """Set hostname and /etc/hosts inside the container."""
        # remove any stale entry for this domain before appending
        self.bash(f"""
            echo '{self.name}' > /etc/hostname
            hostname {self.name}
            sed -i '/ {self.domain}$/d' /etc/hosts
            echo '{ip} {self.name} {self.domain}' >> /etc/hosts
            """)

    def publish_image(self):
        """Publish this container as a reusable per-relay image.

        Returns True if an image was published,
        False if a cached image already existed.
        """
        if self.incus._find_image(self.image_alias):
            return False
        # shrink the image before publishing
        self.bash("apt-get clean && rm -rf /var/lib/apt/lists/*")
        print(f" Publishing {self.name!r} as {self.image_alias!r} image ...")
        self.incus.run(
            ["publish", self.name, f"--alias={self.image_alias}", "--force"],
            capture=False,
        )
        # NOTE(review): wait_ready() here presumably covers the container
        # coming back after a --force publish - confirm.
        self.wait_ready()
        print(f" Image {self.image_alias!r} ready.")
        return True

    def deployed_version(self):
        """Read /etc/chatmail-version, or None if absent."""
        return self.bash("cat /etc/chatmail-version", check=False)

    def deployed_domain(self):
        """Read the domain deployed on the container (postfix myhostname)."""
        return self.bash(
            "postconf -h myhostname 2>/dev/null",
            check=False,
        )

    def verify_ssh(self, ssh_config):
        """Verify SSH connectivity to this container."""
        cmd = f"ssh -F {ssh_config} -o ConnectTimeout=10 root@{self.domain} hostname"
        return shell(cmd, timeout=15).returncode == 0

    def configure_dns(self, dns_ip):
        """Point this container's resolver at *dns_ip*.

        Disables systemd-resolved to free port 53 and writes
        a static /etc/resolv.conf. Also configures unbound
        (if present) to forward .localchat queries.
        """
        # NOTE(review): the printf command below spans several lines
        # without shell line continuations - confirm the generated
        # unbound config is as intended.
        self.bash(f"""\
            systemctl disable --now systemd-resolved 2>/dev/null || true
            rm -f /etc/resolv.conf
            echo 'nameserver {dns_ip}' > /etc/resolv.conf
            mkdir -p /etc/unbound/unbound.conf.d
            printf 'server:\\n domain-insecure: "localchat"\\n\\n
            forward-zone:\\n name: "localchat"\\n
            forward-addr: {dns_ip}\\n'
            > /etc/unbound/unbound.conf.d/localchat-forward.conf
            systemctl restart unbound 2>/dev/null || true
            """)

    def check_dns(self, retries=5, delay=2):
        """Verify that external DNS resolution works inside the container."""
        for i in range(retries):
            result = self.bash(
                "getent hosts pypi.org",
                check=False,
            )
            if result:
                return True
            if i < retries - 1:
                # brief pause before retrying; no sleep after the last attempt
                time.sleep(delay)
        return False

    def write_ini(self, disable_ipv6=False):
        """Generate a chatmail.ini config file in lxconfigs/."""
        # local import: chatmaild is only needed for this one call
        from chatmaild.config import write_initial_config

        # raised send limits for test runs; mtail bound to localhost
        overrides = {
            "max_user_send_per_minute": 600,
            "max_user_send_burst_size": 100,
            "mtail_address": "127.0.0.1",
        }
        if disable_ipv6:
            overrides["disable_ipv6"] = "True"
        write_initial_config(self.ini, self.domain, overrides)
        return self.ini
|
||||||
|
|
||||||
|
|
||||||
|
class DNSContainer(Container):
|
||||||
|
"""Specialised container handle for the PowerDNS name server."""
|
||||||
|
|
||||||
|
    def __init__(self, incus):
        # Fixed name, domain, and IP so relays can always reach the resolver.
        super().__init__(
            incus, DNS_CONTAINER_NAME, domain=DNS_DOMAIN, memory="256MiB", ipv4=DNS_IP
        )
|
||||||
|
|
||||||
|
def launch(self):
|
||||||
|
"""Launch from cached DNS image if available, else from base image."""
|
||||||
|
cached = self.incus._find_image(DNS_IMAGE_ALIAS)
|
||||||
|
if cached:
|
||||||
|
print(f" Using cached image {cached!r}")
|
||||||
|
else:
|
||||||
|
print(" No cached image, building from base")
|
||||||
|
return super().launch(image=cached)
|
||||||
|
|
||||||
|
def publish_as_dns_image(self):
|
||||||
|
"""Publish this container as a reusable DNS image."""
|
||||||
|
if self.incus._find_image(DNS_IMAGE_ALIAS):
|
||||||
|
return
|
||||||
|
self.bash("apt-get clean && rm -rf /var/lib/apt/lists/*")
|
||||||
|
print(f" Publishing {self.name!r} as {DNS_IMAGE_ALIAS!r} image ...")
|
||||||
|
self.incus.run(
|
||||||
|
["publish", self.name, f"--alias={DNS_IMAGE_ALIAS}", "--force"],
|
||||||
|
capture=False,
|
||||||
|
)
|
||||||
|
self.wait_ready()
|
||||||
|
print(f" DNS image {DNS_IMAGE_ALIAS!r} ready.")
|
||||||
|
|
||||||
|
def pdnsutil(self, *args, check=True):
|
||||||
|
"""Run ``pdnsutil <args>`` inside the DNS container."""
|
||||||
|
return self.run_cmd("pdnsutil", *args, check=check)
|
||||||
|
|
||||||
|
def replace_rrset(self, zone, name, rtype, ttl, rdata):
|
||||||
|
"""Shortcut for ``pdnsutil replace-rrset``."""
|
||||||
|
self.pdnsutil("replace-rrset", zone, name, rtype, ttl, rdata)
|
||||||
|
|
||||||
|
def restart_services(self):
|
||||||
|
"""Restart pdns and pdns-recursor."""
|
||||||
|
self.bash("""\
|
||||||
|
systemctl restart pdns
|
||||||
|
systemctl restart pdns-recursor || true
|
||||||
|
""")
|
||||||
|
|
||||||
|
def ensure(self):
|
||||||
|
"""Create the DNS container with PowerDNS if needed.
|
||||||
|
|
||||||
|
Calls ``super().ensure()`` to create/start the container
|
||||||
|
and set up SSH, then installs PowerDNS and configures
|
||||||
|
the Incus bridge to use this container as DNS.
|
||||||
|
"""
|
||||||
|
super().ensure()
|
||||||
|
self._install_powerdns()
|
||||||
|
self.incus.run(
|
||||||
|
["network", "set", "incusbr0", "dns.mode=none"],
|
||||||
|
check=False,
|
||||||
|
)
|
||||||
|
self.incus.run(
|
||||||
|
["network", "set", "incusbr0", f"raw.dnsmasq=dhcp-option=6,{self.ipv4}"],
|
||||||
|
check=False,
|
||||||
|
)
|
||||||
|
|
||||||
|
def _install_powerdns(self):
|
||||||
|
"""Install and configure PowerDNS if not already present."""
|
||||||
|
if self.run_cmd("which", "pdns_server", check=False) is not None:
|
||||||
|
return
|
||||||
|
|
||||||
|
self.bash("""\
|
||||||
|
systemctl disable --now systemd-resolved 2>/dev/null || true
|
||||||
|
rm -f /etc/resolv.conf
|
||||||
|
echo 'nameserver 9.9.9.9' > /etc/resolv.conf
|
||||||
|
apt-get -o DPkg::Lock::Timeout=60 update
|
||||||
|
DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||||
|
pdns-server pdns-backend-sqlite3 sqlite3 pdns-recursor dnsutils
|
||||||
|
systemctl stop pdns pdns-recursor || true
|
||||||
|
mkdir -p /var/lib/powerdns
|
||||||
|
sqlite3 /var/lib/powerdns/pdns.sqlite3 \
|
||||||
|
</usr/share/doc/pdns-backend-sqlite3/schema.sqlite3.sql
|
||||||
|
chown -R pdns:pdns /var/lib/powerdns
|
||||||
|
""")
|
||||||
|
|
||||||
|
self.push_file_content(
|
||||||
|
"/etc/powerdns/pdns.conf",
|
||||||
|
"""\
|
||||||
|
launch=gsqlite3
|
||||||
|
gsqlite3-database=/var/lib/powerdns/pdns.sqlite3
|
||||||
|
local-address=127.0.0.1
|
||||||
|
local-port=5353
|
||||||
|
""",
|
||||||
|
)
|
||||||
|
|
||||||
|
self.push_file_content(
|
||||||
|
"/etc/powerdns/recursor.conf",
|
||||||
|
"""\
|
||||||
|
local-address=0.0.0.0
|
||||||
|
local-port=53
|
||||||
|
forward-zones=localchat=127.0.0.1:5353
|
||||||
|
forward-zones-recurse=.=9.9.9.9;149.112.112.112
|
||||||
|
allow-from=0.0.0.0/0
|
||||||
|
dont-query=
|
||||||
|
dnssec=off
|
||||||
|
""",
|
||||||
|
)
|
||||||
|
|
||||||
|
self.bash("""\
|
||||||
|
systemctl start pdns
|
||||||
|
systemctl start pdns-recursor
|
||||||
|
echo 'nameserver 127.0.0.1' > /etc/resolv.conf
|
||||||
|
""")
|
||||||
|
|
||||||
|
def reset_dns_records(self, dns_ip, domains):
|
||||||
|
"""Create DNS zones with initial A records via pdnsutil.
|
||||||
|
|
||||||
|
Only sets SOA, NS, and A records as the minimal set
|
||||||
|
needed for SSH connectivity. Full records (MX, TXT, SRV,
|
||||||
|
CNAME, DKIM) are added later by ``cmdeploy dns``.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
dns_ip: IP of the DNS container
|
||||||
|
domains: list of dicts with 'name', 'domain', 'ip'
|
||||||
|
"""
|
||||||
|
for d in domains:
|
||||||
|
domain = d["domain"]
|
||||||
|
ip = d["ip"]
|
||||||
|
print(f" {domain} -> {ip}")
|
||||||
|
|
||||||
|
# Delete and recreate zone fresh (removes stale records)
|
||||||
|
self.pdnsutil("delete-zone", domain, check=False)
|
||||||
|
self.pdnsutil("create-zone", domain, f"ns.{domain}")
|
||||||
|
|
||||||
|
serial = str(int(time.time()))
|
||||||
|
soa = f"ns.{domain} hostmaster.{domain} {serial} 3600 900 604800 300"
|
||||||
|
self.replace_rrset(domain, ".", "SOA", "3600", soa)
|
||||||
|
self.replace_rrset(domain, ".", "NS", "3600", f"ns.{domain}.")
|
||||||
|
self.replace_rrset(domain, ".", "A", "3600", ip)
|
||||||
|
self.replace_rrset(domain, "ns", "A", "3600", dns_ip)
|
||||||
|
|
||||||
|
# AAAA (domain -> container IPv6, if available)
|
||||||
|
ipv6 = d.get("ipv6")
|
||||||
|
if ipv6:
|
||||||
|
self.replace_rrset(domain, ".", "AAAA", "3600", ipv6)
|
||||||
|
print(f" zone reset: SOA, NS, A, AAAA ({ip}, {ipv6})")
|
||||||
|
else:
|
||||||
|
# Remove any stale AAAA record
|
||||||
|
self.pdnsutil("delete-rrset", domain, ".", "AAAA", check=False)
|
||||||
|
print(f" zone reset: SOA, NS, A ({ip}, IPv4-only)")
|
||||||
|
|
||||||
|
self.restart_services()
|
||||||
|
|
||||||
|
def set_dns_records(self, text):
|
||||||
|
"""Add or overwrite DNS records from standard BIND format.
|
||||||
|
|
||||||
|
Uses ``cmdeploy.dns.parse_zone_records`` to parse.
|
||||||
|
Zones are created automatically from the record names.
|
||||||
|
"""
|
||||||
|
from ..dns import parse_zone_records
|
||||||
|
|
||||||
|
zones_seen = set()
|
||||||
|
|
||||||
|
for name, ttl, rtype, rdata in parse_zone_records(text):
|
||||||
|
# Derive zone from name: find top-level .localchat domain
|
||||||
|
name_parts = name.split(".")
|
||||||
|
zone = name # fallback
|
||||||
|
for i in range(len(name_parts) - 1):
|
||||||
|
if name_parts[i + 1 :] == ["localchat"]:
|
||||||
|
zone = ".".join(name_parts[i:])
|
||||||
|
break
|
||||||
|
|
||||||
|
# Create zone if first time seeing it
|
||||||
|
if zone not in zones_seen:
|
||||||
|
self.pdnsutil(
|
||||||
|
"create-zone",
|
||||||
|
zone,
|
||||||
|
f"ns.{zone}",
|
||||||
|
check=False,
|
||||||
|
)
|
||||||
|
zones_seen.add(zone)
|
||||||
|
|
||||||
|
# Figure out the record name relative to zone
|
||||||
|
if name == zone:
|
||||||
|
relative = "."
|
||||||
|
elif name.endswith(f".{zone}"):
|
||||||
|
relative = name[: -(len(zone) + 1)]
|
||||||
|
else:
|
||||||
|
relative = name
|
||||||
|
|
||||||
|
self.replace_rrset(zone, relative, rtype, ttl, rdata)
|
||||||
|
|
||||||
|
if zones_seen:
|
||||||
|
self.restart_services()
|
||||||
@@ -1 +0,0 @@
|
|||||||
*/5 * * * * root {{ config.execpath }} {{ config.mailboxes_dir }} >/var/www/html/metrics
|
|
||||||
@@ -44,21 +44,37 @@ counter warning_count
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
counter filtered_mail_count
|
counter filtered_outgoing_mail_count
|
||||||
|
|
||||||
counter encrypted_mail_count
|
counter outgoing_encrypted_mail_count
|
||||||
/Filtering encrypted mail\./ {
|
/Outgoing: Filtering encrypted mail\./ {
|
||||||
encrypted_mail_count++
|
outgoing_encrypted_mail_count++
|
||||||
filtered_mail_count++
|
filtered_outgoing_mail_count++
|
||||||
}
|
}
|
||||||
|
|
||||||
counter unencrypted_mail_count
|
counter outgoing_unencrypted_mail_count
|
||||||
/Filtering unencrypted mail\./ {
|
/Outgoing: Filtering unencrypted mail\./ {
|
||||||
unencrypted_mail_count++
|
outgoing_unencrypted_mail_count++
|
||||||
filtered_mail_count++
|
filtered_outgoing_mail_count++
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
counter filtered_incoming_mail_count
|
||||||
|
|
||||||
|
counter incoming_encrypted_mail_count
|
||||||
|
/Incoming: Filtering encrypted mail\./ {
|
||||||
|
incoming_encrypted_mail_count++
|
||||||
|
filtered_incoming_mail_count++
|
||||||
|
}
|
||||||
|
|
||||||
|
counter incoming_unencrypted_mail_count
|
||||||
|
/Incoming: Filtering unencrypted mail\./ {
|
||||||
|
incoming_unencrypted_mail_count++
|
||||||
|
filtered_incoming_mail_count++
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
counter rejected_unencrypted_mail_count
|
counter rejected_unencrypted_mail_count
|
||||||
/Rejected unencrypted mail\./ {
|
/Rejected unencrypted mail/ {
|
||||||
rejected_unencrypted_mail_count++
|
rejected_unencrypted_mail_count++
|
||||||
}
|
}
|
||||||
|
|||||||
68
cmdeploy/src/cmdeploy/mtail/deployer.py
Normal file
68
cmdeploy/src/cmdeploy/mtail/deployer.py
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
from pyinfra import facts, host
|
||||||
|
from pyinfra.operations import apt, files, server, systemd
|
||||||
|
|
||||||
|
from cmdeploy.basedeploy import (
|
||||||
|
Deployer,
|
||||||
|
get_resource,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class MtailDeployer(Deployer):
|
||||||
|
def __init__(self, mtail_address):
|
||||||
|
self.mtail_address = mtail_address
|
||||||
|
|
||||||
|
def install(self):
|
||||||
|
# Uninstall mtail package to install a static binary.
|
||||||
|
apt.packages(name="Uninstall mtail", packages=["mtail"], present=False)
|
||||||
|
|
||||||
|
(url, sha256sum) = {
|
||||||
|
"x86_64": (
|
||||||
|
"https://github.com/google/mtail/releases/download/v3.0.8/mtail_3.0.8_linux_amd64.tar.gz",
|
||||||
|
"123c2ee5f48c3eff12ebccee38befd2233d715da736000ccde49e3d5607724e4",
|
||||||
|
),
|
||||||
|
"aarch64": (
|
||||||
|
"https://github.com/google/mtail/releases/download/v3.0.8/mtail_3.0.8_linux_arm64.tar.gz",
|
||||||
|
"aa04811c0929b6754408676de520e050c45dddeb3401881888a092c9aea89cae",
|
||||||
|
),
|
||||||
|
}[host.get_fact(facts.server.Arch)]
|
||||||
|
|
||||||
|
server.shell(
|
||||||
|
name="Download mtail",
|
||||||
|
commands=[
|
||||||
|
f"(echo '{sha256sum} /usr/local/bin/mtail' | sha256sum -c) || (curl -L {url} | gunzip | tar -x -f - mtail -O >/usr/local/bin/mtail.new && mv /usr/local/bin/mtail.new /usr/local/bin/mtail)",
|
||||||
|
"chmod 755 /usr/local/bin/mtail",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
def configure(self):
|
||||||
|
# Using our own systemd unit instead of `/usr/lib/systemd/system/mtail.service`.
|
||||||
|
# This allows to read from journalctl instead of log files.
|
||||||
|
files.template(
|
||||||
|
src=get_resource("mtail/mtail.service.j2"),
|
||||||
|
dest="/etc/systemd/system/mtail.service",
|
||||||
|
user="root",
|
||||||
|
group="root",
|
||||||
|
mode="644",
|
||||||
|
address=self.mtail_address or "127.0.0.1",
|
||||||
|
port=3903,
|
||||||
|
)
|
||||||
|
|
||||||
|
mtail_conf = files.put(
|
||||||
|
name="Mtail configuration",
|
||||||
|
src=get_resource("mtail/delivered_mail.mtail"),
|
||||||
|
dest="/etc/mtail/delivered_mail.mtail",
|
||||||
|
user="root",
|
||||||
|
group="root",
|
||||||
|
mode="644",
|
||||||
|
)
|
||||||
|
self.need_restart = mtail_conf.changed
|
||||||
|
|
||||||
|
def activate(self):
|
||||||
|
systemd.service(
|
||||||
|
name="Start and enable mtail",
|
||||||
|
service="mtail.service",
|
||||||
|
running=bool(self.mtail_address),
|
||||||
|
enabled=bool(self.mtail_address),
|
||||||
|
restarted=self.need_restart,
|
||||||
|
)
|
||||||
|
self.need_restart = False
|
||||||
@@ -1,47 +1,47 @@
|
|||||||
<?xml version="1.0" encoding="UTF-8"?>
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
|
||||||
<clientConfig version="1.1">
|
<clientConfig version="1.1">
|
||||||
<emailProvider id="{{ config.domain_name }}">
|
<emailProvider id="{{ config.mail_domain }}">
|
||||||
<domain>{{ config.domain_name }}</domain>
|
<domain>{{ config.mail_domain }}</domain>
|
||||||
<displayName>{{ config.domain_name }} chatmail</displayName>
|
<displayName>{{ config.mail_domain }} chatmail</displayName>
|
||||||
<displayShortName>{{ config.domain_name }}</displayShortName>
|
<displayShortName>{{ config.mail_domain }}</displayShortName>
|
||||||
<incomingServer type="imap">
|
<incomingServer type="imap">
|
||||||
<hostname>{{ config.domain_name }}</hostname>
|
<hostname>{{ config.mail_domain }}</hostname>
|
||||||
<port>993</port>
|
<port>993</port>
|
||||||
<socketType>SSL</socketType>
|
<socketType>SSL</socketType>
|
||||||
<authentication>password-cleartext</authentication>
|
<authentication>password-cleartext</authentication>
|
||||||
<username>%EMAILADDRESS%</username>
|
<username>%EMAILADDRESS%</username>
|
||||||
</incomingServer>
|
</incomingServer>
|
||||||
<incomingServer type="imap">
|
<incomingServer type="imap">
|
||||||
<hostname>{{ config.domain_name }}</hostname>
|
<hostname>{{ config.mail_domain }}</hostname>
|
||||||
<port>143</port>
|
<port>143</port>
|
||||||
<socketType>STARTTLS</socketType>
|
<socketType>STARTTLS</socketType>
|
||||||
<authentication>password-cleartext</authentication>
|
<authentication>password-cleartext</authentication>
|
||||||
<username>%EMAILADDRESS%</username>
|
<username>%EMAILADDRESS%</username>
|
||||||
</incomingServer>
|
</incomingServer>
|
||||||
<incomingServer type="imap">
|
<incomingServer type="imap">
|
||||||
<hostname>{{ config.domain_name }}</hostname>
|
<hostname>{{ config.mail_domain }}</hostname>
|
||||||
<port>443</port>
|
<port>443</port>
|
||||||
<socketType>SSL</socketType>
|
<socketType>SSL</socketType>
|
||||||
<authentication>password-cleartext</authentication>
|
<authentication>password-cleartext</authentication>
|
||||||
<username>%EMAILADDRESS%</username>
|
<username>%EMAILADDRESS%</username>
|
||||||
</incomingServer>
|
</incomingServer>
|
||||||
<outgoingServer type="smtp">
|
<outgoingServer type="smtp">
|
||||||
<hostname>{{ config.domain_name }}</hostname>
|
<hostname>{{ config.mail_domain }}</hostname>
|
||||||
<port>465</port>
|
<port>465</port>
|
||||||
<socketType>SSL</socketType>
|
<socketType>SSL</socketType>
|
||||||
<authentication>password-cleartext</authentication>
|
<authentication>password-cleartext</authentication>
|
||||||
<username>%EMAILADDRESS%</username>
|
<username>%EMAILADDRESS%</username>
|
||||||
</outgoingServer>
|
</outgoingServer>
|
||||||
<outgoingServer type="smtp">
|
<outgoingServer type="smtp">
|
||||||
<hostname>{{ config.domain_name }}</hostname>
|
<hostname>{{ config.mail_domain }}</hostname>
|
||||||
<port>587</port>
|
<port>587</port>
|
||||||
<socketType>STARTTLS</socketType>
|
<socketType>STARTTLS</socketType>
|
||||||
<authentication>password-cleartext</authentication>
|
<authentication>password-cleartext</authentication>
|
||||||
<username>%EMAILADDRESS%</username>
|
<username>%EMAILADDRESS%</username>
|
||||||
</outgoingServer>
|
</outgoingServer>
|
||||||
<outgoingServer type="smtp">
|
<outgoingServer type="smtp">
|
||||||
<hostname>{{ config.domain_name }}</hostname>
|
<hostname>{{ config.mail_domain }}</hostname>
|
||||||
<port>443</port>
|
<port>443</port>
|
||||||
<socketType>SSL</socketType>
|
<socketType>SSL</socketType>
|
||||||
<authentication>password-cleartext</authentication>
|
<authentication>password-cleartext</authentication>
|
||||||
|
|||||||
117
cmdeploy/src/cmdeploy/nginx/deployer.py
Normal file
117
cmdeploy/src/cmdeploy/nginx/deployer.py
Normal file
@@ -0,0 +1,117 @@
|
|||||||
|
from chatmaild.config import Config
|
||||||
|
from pyinfra.operations import apt, files, systemd
|
||||||
|
|
||||||
|
from cmdeploy.basedeploy import (
|
||||||
|
Deployer,
|
||||||
|
get_resource,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class NginxDeployer(Deployer):
|
||||||
|
def __init__(self, config):
|
||||||
|
self.config = config
|
||||||
|
|
||||||
|
def install(self):
|
||||||
|
#
|
||||||
|
# If we allow nginx to start up on install, it will grab port
|
||||||
|
# 80, which then will block acmetool from listening on the port.
|
||||||
|
# That in turn prevents getting certificates, which then causes
|
||||||
|
# an error when we try to start nginx on the custom config
|
||||||
|
# that leaves port 80 open but also requires certificates to
|
||||||
|
# be present. To avoid getting into that interlocking mess,
|
||||||
|
# we use policy-rc.d to prevent nginx from starting up when it
|
||||||
|
# is installed.
|
||||||
|
#
|
||||||
|
# This approach allows us to avoid performing any explicit
|
||||||
|
# systemd operations during the install stage (as opposed to
|
||||||
|
# allowing it to start and then forcing it to stop), which allows
|
||||||
|
# the install stage to run in non-systemd environments like a
|
||||||
|
# container image build.
|
||||||
|
#
|
||||||
|
# For documentation about policy-rc.d, see:
|
||||||
|
# https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
|
||||||
|
#
|
||||||
|
files.put(
|
||||||
|
src=get_resource("policy-rc.d"),
|
||||||
|
dest="/usr/sbin/policy-rc.d",
|
||||||
|
user="root",
|
||||||
|
group="root",
|
||||||
|
mode="755",
|
||||||
|
)
|
||||||
|
|
||||||
|
apt.packages(
|
||||||
|
name="Install nginx",
|
||||||
|
packages=["nginx", "libnginx-mod-stream"],
|
||||||
|
)
|
||||||
|
|
||||||
|
files.file("/usr/sbin/policy-rc.d", present=False)
|
||||||
|
|
||||||
|
def configure(self):
|
||||||
|
self.need_restart = _configure_nginx(self.config)
|
||||||
|
|
||||||
|
def activate(self):
|
||||||
|
systemd.service(
|
||||||
|
name="Start and enable nginx",
|
||||||
|
service="nginx.service",
|
||||||
|
running=True,
|
||||||
|
enabled=True,
|
||||||
|
restarted=self.need_restart,
|
||||||
|
)
|
||||||
|
self.need_restart = False
|
||||||
|
|
||||||
|
|
||||||
|
def _configure_nginx(config: Config, debug: bool = False) -> bool:
|
||||||
|
"""Configures nginx HTTP server."""
|
||||||
|
need_restart = False
|
||||||
|
|
||||||
|
main_config = files.template(
|
||||||
|
src=get_resource("nginx/nginx.conf.j2"),
|
||||||
|
dest="/etc/nginx/nginx.conf",
|
||||||
|
user="root",
|
||||||
|
group="root",
|
||||||
|
mode="644",
|
||||||
|
config=config,
|
||||||
|
disable_ipv6=config.disable_ipv6,
|
||||||
|
)
|
||||||
|
need_restart |= main_config.changed
|
||||||
|
|
||||||
|
autoconfig = files.template(
|
||||||
|
src=get_resource("nginx/autoconfig.xml.j2"),
|
||||||
|
dest="/var/www/html/.well-known/autoconfig/mail/config-v1.1.xml",
|
||||||
|
user="root",
|
||||||
|
group="root",
|
||||||
|
mode="644",
|
||||||
|
config=config,
|
||||||
|
)
|
||||||
|
need_restart |= autoconfig.changed
|
||||||
|
|
||||||
|
mta_sts_config = files.template(
|
||||||
|
src=get_resource("nginx/mta-sts.txt.j2"),
|
||||||
|
dest="/var/www/html/.well-known/mta-sts.txt",
|
||||||
|
user="root",
|
||||||
|
group="root",
|
||||||
|
mode="644",
|
||||||
|
config=config,
|
||||||
|
)
|
||||||
|
need_restart |= mta_sts_config.changed
|
||||||
|
|
||||||
|
# install CGI newemail script
|
||||||
|
#
|
||||||
|
cgi_dir = "/usr/lib/cgi-bin"
|
||||||
|
files.directory(
|
||||||
|
name=f"Ensure {cgi_dir} exists",
|
||||||
|
path=cgi_dir,
|
||||||
|
user="root",
|
||||||
|
group="root",
|
||||||
|
)
|
||||||
|
|
||||||
|
files.put(
|
||||||
|
name="Upload cgi newemail.py script",
|
||||||
|
src=get_resource("newemail.py", pkg="chatmaild").open("rb"),
|
||||||
|
dest=f"{cgi_dir}/newemail.py",
|
||||||
|
user="root",
|
||||||
|
group="root",
|
||||||
|
mode="755",
|
||||||
|
)
|
||||||
|
|
||||||
|
return need_restart
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
version: STSv1
|
version: STSv1
|
||||||
mode: enforce
|
mode: enforce
|
||||||
mx: {{ config.domain_name }}
|
mx: {{ config.mail_domain }}
|
||||||
max_age: 2419200
|
max_age: 2419200
|
||||||
|
|||||||
@@ -42,6 +42,9 @@ stream {
|
|||||||
}
|
}
|
||||||
|
|
||||||
http {
|
http {
|
||||||
|
{% if config.tls_cert_mode == "self" %}
|
||||||
|
limit_req_zone $binary_remote_addr zone=newaccount:10m rate=2r/s;
|
||||||
|
{% endif %}
|
||||||
sendfile on;
|
sendfile on;
|
||||||
tcp_nopush on;
|
tcp_nopush on;
|
||||||
|
|
||||||
@@ -51,10 +54,10 @@ http {
|
|||||||
include /etc/nginx/mime.types;
|
include /etc/nginx/mime.types;
|
||||||
default_type application/octet-stream;
|
default_type application/octet-stream;
|
||||||
|
|
||||||
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
|
ssl_protocols TLSv1.2 TLSv1.3;
|
||||||
ssl_prefer_server_ciphers on;
|
ssl_prefer_server_ciphers on;
|
||||||
ssl_certificate /var/lib/acme/live/{{ config.domain_name }}/fullchain;
|
ssl_certificate {{ config.tls_cert_path }};
|
||||||
ssl_certificate_key /var/lib/acme/live/{{ config.domain_name }}/privkey;
|
ssl_certificate_key {{ config.tls_key_path }};
|
||||||
|
|
||||||
gzip on;
|
gzip on;
|
||||||
|
|
||||||
@@ -66,7 +69,7 @@ http {
|
|||||||
|
|
||||||
index index.html index.htm;
|
index index.html index.htm;
|
||||||
|
|
||||||
server_name {{ config.domain_name }} www.{{ config.domain_name }} mta-sts.{{ config.domain_name }};
|
server_name {{ config.mail_domain }} www.{{ config.mail_domain }} mta-sts.{{ config.mail_domain }};
|
||||||
|
|
||||||
access_log syslog:server=unix:/dev/log,facility=local7;
|
access_log syslog:server=unix:/dev/log,facility=local7;
|
||||||
|
|
||||||
@@ -76,16 +79,16 @@ http {
|
|||||||
try_files $uri $uri/ =404;
|
try_files $uri $uri/ =404;
|
||||||
}
|
}
|
||||||
|
|
||||||
location /metrics {
|
|
||||||
default_type text/plain;
|
|
||||||
}
|
|
||||||
|
|
||||||
location /new {
|
location /new {
|
||||||
|
{% if config.tls_cert_mode != "self" %}
|
||||||
if ($request_method = GET) {
|
if ($request_method = GET) {
|
||||||
# Redirect to Delta Chat,
|
# Redirect to Delta Chat,
|
||||||
# which will in turn do a POST request.
|
# which will in turn do a POST request.
|
||||||
return 301 dcaccount:https://{{ config.domain_name }}/new;
|
return 301 dcaccount:https://{{ config.mail_domain }}/new;
|
||||||
}
|
}
|
||||||
|
{% else %}
|
||||||
|
limit_req zone=newaccount burst=5 nodelay;
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
fastcgi_pass unix:/run/fcgiwrap.socket;
|
fastcgi_pass unix:/run/fcgiwrap.socket;
|
||||||
include /etc/nginx/fastcgi_params;
|
include /etc/nginx/fastcgi_params;
|
||||||
@@ -99,9 +102,11 @@ http {
|
|||||||
#
|
#
|
||||||
# Redirects are only for browsers.
|
# Redirects are only for browsers.
|
||||||
location /cgi-bin/newemail.py {
|
location /cgi-bin/newemail.py {
|
||||||
|
{% if config.tls_cert_mode != "self" %}
|
||||||
if ($request_method = GET) {
|
if ($request_method = GET) {
|
||||||
return 301 dcaccount:https://{{ config.domain_name }}/new;
|
return 301 dcaccount:https://{{ config.mail_domain }}/new;
|
||||||
}
|
}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
fastcgi_pass unix:/run/fcgiwrap.socket;
|
fastcgi_pass unix:/run/fcgiwrap.socket;
|
||||||
include /etc/nginx/fastcgi_params;
|
include /etc/nginx/fastcgi_params;
|
||||||
@@ -132,8 +137,29 @@ http {
|
|||||||
# Redirect www. to non-www
|
# Redirect www. to non-www
|
||||||
server {
|
server {
|
||||||
listen 127.0.0.1:8443 ssl;
|
listen 127.0.0.1:8443 ssl;
|
||||||
server_name www.{{ config.domain_name }};
|
server_name www.{{ config.mail_domain }};
|
||||||
return 301 $scheme://{{ config.domain_name }}$request_uri;
|
return 301 $scheme://{{ config.mail_domain }}$request_uri;
|
||||||
access_log syslog:server=unix:/dev/log,facility=local7;
|
access_log syslog:server=unix:/dev/log,facility=local7;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
{% if not disable_ipv6 %}
|
||||||
|
listen [::]:80;
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if config.tls_cert_mode == "acme" %}
|
||||||
|
location /.well-known/acme-challenge/ {
|
||||||
|
proxy_pass http://acmetool;
|
||||||
|
}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
return 301 https://$host$request_uri;
|
||||||
|
}
|
||||||
|
|
||||||
|
{% if config.tls_cert_mode == "acme" %}
|
||||||
|
upstream acmetool {
|
||||||
|
server 127.0.0.1:402;
|
||||||
|
}
|
||||||
|
{% endif %}
|
||||||
}
|
}
|
||||||
|
|||||||
124
cmdeploy/src/cmdeploy/opendkim/deployer.py
Normal file
124
cmdeploy/src/cmdeploy/opendkim/deployer.py
Normal file
@@ -0,0 +1,124 @@
|
|||||||
|
"""
|
||||||
|
Installs OpenDKIM
|
||||||
|
"""
|
||||||
|
|
||||||
|
from pyinfra import host
|
||||||
|
from pyinfra.facts.files import File
|
||||||
|
from pyinfra.operations import apt, files, server, systemd
|
||||||
|
|
||||||
|
from cmdeploy.basedeploy import Deployer, get_resource
|
||||||
|
|
||||||
|
|
||||||
|
class OpendkimDeployer(Deployer):
|
||||||
|
required_users = [("opendkim", None, ["opendkim"])]
|
||||||
|
|
||||||
|
def __init__(self, mail_domain):
|
||||||
|
self.mail_domain = mail_domain
|
||||||
|
|
||||||
|
def install(self):
|
||||||
|
apt.packages(
|
||||||
|
name="apt install opendkim opendkim-tools",
|
||||||
|
packages=["opendkim", "opendkim-tools"],
|
||||||
|
)
|
||||||
|
|
||||||
|
def configure(self):
|
||||||
|
domain = self.mail_domain
|
||||||
|
dkim_selector = "opendkim"
|
||||||
|
"""Configures OpenDKIM"""
|
||||||
|
need_restart = False
|
||||||
|
|
||||||
|
main_config = files.template(
|
||||||
|
src=get_resource("opendkim/opendkim.conf"),
|
||||||
|
dest="/etc/opendkim.conf",
|
||||||
|
user="root",
|
||||||
|
group="root",
|
||||||
|
mode="644",
|
||||||
|
config={"domain_name": domain, "opendkim_selector": dkim_selector},
|
||||||
|
)
|
||||||
|
need_restart |= main_config.changed
|
||||||
|
|
||||||
|
screen_script = files.file(
|
||||||
|
path="/etc/opendkim/screen.lua",
|
||||||
|
present=False,
|
||||||
|
)
|
||||||
|
need_restart |= screen_script.changed
|
||||||
|
|
||||||
|
final_script = files.file(
|
||||||
|
path="/etc/opendkim/final.lua",
|
||||||
|
present=False,
|
||||||
|
)
|
||||||
|
need_restart |= final_script.changed
|
||||||
|
|
||||||
|
files.directory(
|
||||||
|
name="Add opendkim directory to /etc",
|
||||||
|
path="/etc/opendkim",
|
||||||
|
user="opendkim",
|
||||||
|
group="opendkim",
|
||||||
|
mode="750",
|
||||||
|
present=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
keytable = files.template(
|
||||||
|
src=get_resource("opendkim/KeyTable"),
|
||||||
|
dest="/etc/dkimkeys/KeyTable",
|
||||||
|
user="opendkim",
|
||||||
|
group="opendkim",
|
||||||
|
mode="644",
|
||||||
|
config={"domain_name": domain, "opendkim_selector": dkim_selector},
|
||||||
|
)
|
||||||
|
need_restart |= keytable.changed
|
||||||
|
|
||||||
|
signing_table = files.template(
|
||||||
|
src=get_resource("opendkim/SigningTable"),
|
||||||
|
dest="/etc/dkimkeys/SigningTable",
|
||||||
|
user="opendkim",
|
||||||
|
group="opendkim",
|
||||||
|
mode="644",
|
||||||
|
config={"domain_name": domain, "opendkim_selector": dkim_selector},
|
||||||
|
)
|
||||||
|
need_restart |= signing_table.changed
|
||||||
|
files.directory(
|
||||||
|
name="Add opendkim socket directory to /var/spool/postfix",
|
||||||
|
path="/var/spool/postfix/opendkim",
|
||||||
|
user="opendkim",
|
||||||
|
group="opendkim",
|
||||||
|
mode="750",
|
||||||
|
present=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
if not host.get_fact(File, f"/etc/dkimkeys/{dkim_selector}.private"):
|
||||||
|
server.shell(
|
||||||
|
name="Generate OpenDKIM domain keys",
|
||||||
|
commands=[
|
||||||
|
f"/usr/sbin/opendkim-genkey -D /etc/dkimkeys -d {domain} -s {dkim_selector}"
|
||||||
|
],
|
||||||
|
_use_su_login=True,
|
||||||
|
_su_user="opendkim",
|
||||||
|
)
|
||||||
|
|
||||||
|
service_file = files.put(
|
||||||
|
name="Configure opendkim to restart once a day",
|
||||||
|
src=get_resource("opendkim/systemd.conf"),
|
||||||
|
dest="/etc/systemd/system/opendkim.service.d/10-prevent-memory-leak.conf",
|
||||||
|
)
|
||||||
|
need_restart |= service_file.changed
|
||||||
|
|
||||||
|
files.file(
|
||||||
|
name="chown opendkim: /etc/dkimkeys/opendkim.private",
|
||||||
|
path="/etc/dkimkeys/opendkim.private",
|
||||||
|
user="opendkim",
|
||||||
|
group="opendkim",
|
||||||
|
)
|
||||||
|
|
||||||
|
self.need_restart = need_restart
|
||||||
|
|
||||||
|
def activate(self):
|
||||||
|
systemd.service(
|
||||||
|
name="Start and enable OpenDKIM",
|
||||||
|
service="opendkim.service",
|
||||||
|
running=True,
|
||||||
|
enabled=True,
|
||||||
|
daemon_reload=self.need_restart,
|
||||||
|
restarted=self.need_restart,
|
||||||
|
)
|
||||||
|
self.need_restart = False
|
||||||
@@ -1,28 +0,0 @@
|
|||||||
if odkim.internal_ip(ctx) == 1 then
|
|
||||||
-- Outgoing message will be signed,
|
|
||||||
-- no need to look for signatures.
|
|
||||||
return nil
|
|
||||||
end
|
|
||||||
|
|
||||||
nsigs = odkim.get_sigcount(ctx)
|
|
||||||
if nsigs == nil then
|
|
||||||
return nil
|
|
||||||
end
|
|
||||||
|
|
||||||
for i = 1, nsigs do
|
|
||||||
sig = odkim.get_sighandle(ctx, i - 1)
|
|
||||||
sigres = odkim.sig_result(sig)
|
|
||||||
|
|
||||||
-- All signatures that do not correspond to From:
|
|
||||||
-- were ignored in screen.lua and return sigres -1.
|
|
||||||
--
|
|
||||||
-- Any valid signature that was not ignored like this
|
|
||||||
-- means the message is acceptable.
|
|
||||||
if sigres == 0 then
|
|
||||||
return nil
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
odkim.set_reply(ctx, "554", "5.7.1", "No valid DKIM signature found")
|
|
||||||
odkim.set_result(ctx, SMFIS_REJECT)
|
|
||||||
return nil
|
|
||||||
@@ -45,12 +45,6 @@ SignHeaders *,+autocrypt,+content-type
|
|||||||
# Default is empty.
|
# Default is empty.
|
||||||
OversignHeaders from,reply-to,subject,date,to,cc,resent-date,resent-from,resent-sender,resent-to,resent-cc,in-reply-to,references,list-id,list-help,list-unsubscribe,list-subscribe,list-post,list-owner,list-archive,autocrypt
|
OversignHeaders from,reply-to,subject,date,to,cc,resent-date,resent-from,resent-sender,resent-to,resent-cc,in-reply-to,references,list-id,list-help,list-unsubscribe,list-subscribe,list-post,list-owner,list-archive,autocrypt
|
||||||
|
|
||||||
# Script to ignore signatures that do not correspond to the From: domain.
|
|
||||||
ScreenPolicyScript /etc/opendkim/screen.lua
|
|
||||||
|
|
||||||
# Script to reject mails without a valid DKIM signature.
|
|
||||||
FinalPolicyScript /etc/opendkim/final.lua
|
|
||||||
|
|
||||||
# In Debian, opendkim runs as user "opendkim". A umask of 007 is required when
|
# In Debian, opendkim runs as user "opendkim". A umask of 007 is required when
|
||||||
# using a local socket with MTAs that access the socket as a non-privileged
|
# using a local socket with MTAs that access the socket as a non-privileged
|
||||||
# user (for example, Postfix). You may need to add user "postfix" to group
|
# user (for example, Postfix). You may need to add user "postfix" to group
|
||||||
@@ -65,3 +59,9 @@ PidFile /run/opendkim/opendkim.pid
|
|||||||
# The trust anchor enables DNSSEC. In Debian, the trust anchor file is provided
|
# The trust anchor enables DNSSEC. In Debian, the trust anchor file is provided
|
||||||
# by the package dns-root-data.
|
# by the package dns-root-data.
|
||||||
TrustAnchorFile /usr/share/dns/root.key
|
TrustAnchorFile /usr/share/dns/root.key
|
||||||
|
|
||||||
|
# Sign messages when `-o milter_macro_daemon_name=ORIGINATING` is set.
|
||||||
|
MTA ORIGINATING
|
||||||
|
|
||||||
|
# No hosts are treated as internal, ORIGINATING daemon name should be set explicitly.
|
||||||
|
InternalHosts -
|
||||||
|
|||||||
@@ -1,21 +0,0 @@
|
|||||||
-- Ignore signatures that do not correspond to the From: domain.
|
|
||||||
|
|
||||||
from_domain = odkim.get_fromdomain(ctx)
|
|
||||||
if from_domain == nil then
|
|
||||||
return nil
|
|
||||||
end
|
|
||||||
|
|
||||||
n = odkim.get_sigcount(ctx)
|
|
||||||
if n == nil then
|
|
||||||
return nil
|
|
||||||
end
|
|
||||||
|
|
||||||
for i = 1, n do
|
|
||||||
sig = odkim.get_sighandle(ctx, i - 1)
|
|
||||||
sig_domain = odkim.sig_getdomain(sig)
|
|
||||||
if from_domain ~= sig_domain then
|
|
||||||
odkim.sig_ignore(sig)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
return nil
|
|
||||||
119
cmdeploy/src/cmdeploy/postfix/deployer.py
Normal file
119
cmdeploy/src/cmdeploy/postfix/deployer.py
Normal file
@@ -0,0 +1,119 @@
|
|||||||
|
from pyinfra.operations import apt, files, server, systemd
|
||||||
|
|
||||||
|
from cmdeploy.basedeploy import Deployer, get_resource
|
||||||
|
|
||||||
|
|
||||||
|
class PostfixDeployer(Deployer):
|
||||||
|
required_users = [("postfix", None, ["opendkim"])]
|
||||||
|
daemon_reload = False
|
||||||
|
|
||||||
|
def __init__(self, config, disable_mail):
|
||||||
|
self.config = config
|
||||||
|
self.disable_mail = disable_mail
|
||||||
|
|
||||||
|
def install(self):
|
||||||
|
apt.packages(
|
||||||
|
name="Install Postfix",
|
||||||
|
packages="postfix",
|
||||||
|
)
|
||||||
|
|
||||||
|
def configure(self):
|
||||||
|
config = self.config
|
||||||
|
need_restart = False
|
||||||
|
|
||||||
|
main_config = files.template(
|
||||||
|
src=get_resource("postfix/main.cf.j2"),
|
||||||
|
dest="/etc/postfix/main.cf",
|
||||||
|
user="root",
|
||||||
|
group="root",
|
||||||
|
mode="644",
|
||||||
|
config=config,
|
||||||
|
disable_ipv6=config.disable_ipv6,
|
||||||
|
)
|
||||||
|
need_restart |= main_config.changed
|
||||||
|
|
||||||
|
master_config = files.template(
|
||||||
|
src=get_resource("postfix/master.cf.j2"),
|
||||||
|
dest="/etc/postfix/master.cf",
|
||||||
|
user="root",
|
||||||
|
group="root",
|
||||||
|
mode="644",
|
||||||
|
debug=False,
|
||||||
|
config=config,
|
||||||
|
)
|
||||||
|
need_restart |= master_config.changed
|
||||||
|
|
||||||
|
header_cleanup = files.put(
|
||||||
|
src=get_resource("postfix/submission_header_cleanup"),
|
||||||
|
dest="/etc/postfix/submission_header_cleanup",
|
||||||
|
user="root",
|
||||||
|
group="root",
|
||||||
|
mode="644",
|
||||||
|
)
|
||||||
|
need_restart |= header_cleanup.changed
|
||||||
|
|
||||||
|
lmtp_header_cleanup = files.put(
|
||||||
|
src=get_resource("postfix/lmtp_header_cleanup"),
|
||||||
|
dest="/etc/postfix/lmtp_header_cleanup",
|
||||||
|
user="root",
|
||||||
|
group="root",
|
||||||
|
mode="644",
|
||||||
|
)
|
||||||
|
need_restart |= lmtp_header_cleanup.changed
|
||||||
|
|
||||||
|
tls_policy_map = files.put(
|
||||||
|
name="Upload SMTP TLS Policy that accepts self-signed certificates for IP-only hosts",
|
||||||
|
src=get_resource("postfix/smtp_tls_policy_map"),
|
||||||
|
dest="/etc/postfix/smtp_tls_policy_map",
|
||||||
|
user="root",
|
||||||
|
group="root",
|
||||||
|
mode="644",
|
||||||
|
)
|
||||||
|
need_restart |= tls_policy_map.changed
|
||||||
|
if tls_policy_map.changed:
|
||||||
|
server.shell(
|
||||||
|
commands=["postmap /etc/postfix/smtp_tls_policy_map"],
|
||||||
|
)
|
||||||
|
|
||||||
|
# Login map that 1:1 maps email address to login.
|
||||||
|
login_map = files.put(
|
||||||
|
src=get_resource("postfix/login_map"),
|
||||||
|
dest="/etc/postfix/login_map",
|
||||||
|
user="root",
|
||||||
|
group="root",
|
||||||
|
mode="644",
|
||||||
|
)
|
||||||
|
need_restart |= login_map.changed
|
||||||
|
|
||||||
|
restart_conf = files.put(
|
||||||
|
name="postfix: restart automatically on failure",
|
||||||
|
src=get_resource("service/10_restart.conf"),
|
||||||
|
dest="/etc/systemd/system/postfix@.service.d/10_restart.conf",
|
||||||
|
)
|
||||||
|
self.daemon_reload = restart_conf.changed
|
||||||
|
|
||||||
|
# Validate postfix configuration before restart
|
||||||
|
if need_restart:
|
||||||
|
server.shell(
|
||||||
|
name="Validate postfix configuration",
|
||||||
|
# Extract stderr and quit with error if non-zero
|
||||||
|
commands=[
|
||||||
|
"""bash -c 'w=$(postconf 2>&1 >/dev/null); [[ -z "$w" ]] || { echo "$w"; false; }'"""
|
||||||
|
],
|
||||||
|
)
|
||||||
|
self.need_restart = need_restart
|
||||||
|
|
||||||
|
def activate(self):
|
||||||
|
restart = False if self.disable_mail else self.need_restart
|
||||||
|
|
||||||
|
systemd.service(
|
||||||
|
name="disable postfix for now"
|
||||||
|
if self.disable_mail
|
||||||
|
else "Start and enable Postfix",
|
||||||
|
service="postfix.service",
|
||||||
|
running=False if self.disable_mail else True,
|
||||||
|
enabled=False if self.disable_mail else True,
|
||||||
|
restarted=restart,
|
||||||
|
daemon_reload=self.daemon_reload,
|
||||||
|
)
|
||||||
|
self.need_restart = False
|
||||||
3
cmdeploy/src/cmdeploy/postfix/lmtp_header_cleanup
Normal file
3
cmdeploy/src/cmdeploy/postfix/lmtp_header_cleanup
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
/^DKIM-Signature:/ IGNORE
|
||||||
|
/^Authentication-Results:/ IGNORE
|
||||||
|
/^Received:/ IGNORE
|
||||||
@@ -15,19 +15,19 @@ readme_directory = no
|
|||||||
compatibility_level = 3.6
|
compatibility_level = 3.6
|
||||||
|
|
||||||
# TLS parameters
|
# TLS parameters
|
||||||
smtpd_tls_cert_file=/var/lib/acme/live/{{ config.mail_domain }}/fullchain
|
smtpd_tls_cert_file={{ config.tls_cert_path }}
|
||||||
smtpd_tls_key_file=/var/lib/acme/live/{{ config.mail_domain }}/privkey
|
smtpd_tls_key_file={{ config.tls_key_path }}
|
||||||
smtpd_tls_security_level=may
|
smtpd_tls_security_level=may
|
||||||
|
|
||||||
smtp_tls_CApath=/etc/ssl/certs
|
smtp_tls_CApath=/etc/ssl/certs
|
||||||
smtp_tls_security_level=verify
|
smtp_tls_security_level={{ "verify" if config.tls_cert_mode == "acme" else "encrypt" }}
|
||||||
# Send SNI extension when connecting to other servers.
|
# Send SNI extension when connecting to other servers.
|
||||||
# <https://www.postfix.org/postconf.5.html#smtp_tls_servername>
|
# <https://www.postfix.org/postconf.5.html#smtp_tls_servername>
|
||||||
smtp_tls_servername = hostname
|
smtp_tls_servername = hostname
|
||||||
smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache
|
smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache
|
||||||
smtp_tls_policy_maps = inline:{nauta.cu=may}
|
smtp_tls_policy_maps = regexp:/etc/postfix/smtp_tls_policy_map
|
||||||
smtp_tls_protocols = >=TLSv1.2
|
smtp_tls_protocols = >=TLSv1.2
|
||||||
smtpd_tls_protocols = >=TLSv1.2
|
smtp_tls_mandatory_protocols = >=TLSv1.2
|
||||||
|
|
||||||
# Disable anonymous cipher suites
|
# Disable anonymous cipher suites
|
||||||
# and known insecure algorithms.
|
# and known insecure algorithms.
|
||||||
@@ -64,7 +64,20 @@ alias_database = hash:/etc/aliases
|
|||||||
mydestination =
|
mydestination =
|
||||||
|
|
||||||
relayhost =
|
relayhost =
|
||||||
|
{% if disable_ipv6 %}
|
||||||
|
mynetworks = 127.0.0.0/8
|
||||||
|
{% else %}
|
||||||
mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
|
mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
|
||||||
|
{% endif %}
|
||||||
|
{% if config.addr_v4 %}
|
||||||
|
smtp_bind_address = {{ config.addr_v4 }}
|
||||||
|
{% endif %}
|
||||||
|
{% if config.addr_v6 %}
|
||||||
|
smtp_bind_address6 = {{ config.addr_v6 }}
|
||||||
|
{% endif %}
|
||||||
|
{% if config.addr_v4 or config.addr_v6 %}
|
||||||
|
smtp_bind_address_enforce = yes
|
||||||
|
{% endif %}
|
||||||
mailbox_size_limit = 0
|
mailbox_size_limit = 0
|
||||||
message_size_limit = {{config.max_message_size}}
|
message_size_limit = {{config.max_message_size}}
|
||||||
recipient_delimiter = +
|
recipient_delimiter = +
|
||||||
@@ -77,6 +90,7 @@ inet_protocols = all
|
|||||||
|
|
||||||
virtual_transport = lmtp:unix:private/dovecot-lmtp
|
virtual_transport = lmtp:unix:private/dovecot-lmtp
|
||||||
virtual_mailbox_domains = {{ config.mail_domain }}
|
virtual_mailbox_domains = {{ config.mail_domain }}
|
||||||
|
lmtp_header_checks = regexp:/etc/postfix/lmtp_header_cleanup
|
||||||
|
|
||||||
mua_client_restrictions = permit_sasl_authenticated, reject
|
mua_client_restrictions = permit_sasl_authenticated, reject
|
||||||
mua_sender_restrictions = reject_sender_login_mismatch, permit_sasl_authenticated, reject
|
mua_sender_restrictions = reject_sender_login_mismatch, permit_sasl_authenticated, reject
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ smtp inet n - y - - smtpd -v
|
|||||||
smtp inet n - y - - smtpd
|
smtp inet n - y - - smtpd
|
||||||
{%- endif %}
|
{%- endif %}
|
||||||
-o smtpd_tls_security_level=encrypt
|
-o smtpd_tls_security_level=encrypt
|
||||||
|
-o smtpd_tls_mandatory_protocols=>=TLSv1.2
|
||||||
-o smtpd_proxy_filter=127.0.0.1:{{ config.filtermail_smtp_port_incoming }}
|
-o smtpd_proxy_filter=127.0.0.1:{{ config.filtermail_smtp_port_incoming }}
|
||||||
submission inet n - y - 5000 smtpd
|
submission inet n - y - 5000 smtpd
|
||||||
-o syslog_name=postfix/submission
|
-o syslog_name=postfix/submission
|
||||||
@@ -30,7 +31,6 @@ submission inet n - y - 5000 smtpd
|
|||||||
-o smtpd_sender_restrictions=$mua_sender_restrictions
|
-o smtpd_sender_restrictions=$mua_sender_restrictions
|
||||||
-o smtpd_recipient_restrictions=
|
-o smtpd_recipient_restrictions=
|
||||||
-o smtpd_relay_restrictions=permit_sasl_authenticated,reject
|
-o smtpd_relay_restrictions=permit_sasl_authenticated,reject
|
||||||
-o milter_macro_daemon_name=ORIGINATING
|
|
||||||
-o smtpd_client_connection_count_limit=1000
|
-o smtpd_client_connection_count_limit=1000
|
||||||
-o smtpd_proxy_filter=127.0.0.1:{{ config.filtermail_smtp_port }}
|
-o smtpd_proxy_filter=127.0.0.1:{{ config.filtermail_smtp_port }}
|
||||||
smtps inet n - y - 5000 smtpd
|
smtps inet n - y - 5000 smtpd
|
||||||
@@ -48,7 +48,6 @@ smtps inet n - y - 5000 smtpd
|
|||||||
-o smtpd_recipient_restrictions=
|
-o smtpd_recipient_restrictions=
|
||||||
-o smtpd_relay_restrictions=permit_sasl_authenticated,reject
|
-o smtpd_relay_restrictions=permit_sasl_authenticated,reject
|
||||||
-o smtpd_client_connection_count_limit=1000
|
-o smtpd_client_connection_count_limit=1000
|
||||||
-o milter_macro_daemon_name=ORIGINATING
|
|
||||||
-o smtpd_proxy_filter=127.0.0.1:{{ config.filtermail_smtp_port }}
|
-o smtpd_proxy_filter=127.0.0.1:{{ config.filtermail_smtp_port }}
|
||||||
#628 inet n - y - - qmqpd
|
#628 inet n - y - - qmqpd
|
||||||
pickup unix n - y 60 1 pickup
|
pickup unix n - y 60 1 pickup
|
||||||
@@ -80,13 +79,13 @@ filter unix - n n - - lmtp
|
|||||||
# Local SMTP server for reinjecting outgoing filtered mail.
|
# Local SMTP server for reinjecting outgoing filtered mail.
|
||||||
127.0.0.1:{{ config.postfix_reinject_port }} inet n - n - 100 smtpd
|
127.0.0.1:{{ config.postfix_reinject_port }} inet n - n - 100 smtpd
|
||||||
-o syslog_name=postfix/reinject
|
-o syslog_name=postfix/reinject
|
||||||
|
-o milter_macro_daemon_name=ORIGINATING
|
||||||
-o smtpd_milters=unix:opendkim/opendkim.sock
|
-o smtpd_milters=unix:opendkim/opendkim.sock
|
||||||
-o cleanup_service_name=authclean
|
-o cleanup_service_name=authclean
|
||||||
|
|
||||||
# Local SMTP server for reinjecting incoming filtered mail
|
# Local SMTP server for reinjecting incoming filtered mail
|
||||||
127.0.0.1:{{ config.postfix_reinject_port_incoming }} inet n - n - 100 smtpd
|
127.0.0.1:{{ config.postfix_reinject_port_incoming }} inet n - n - 100 smtpd
|
||||||
-o syslog_name=postfix/reinject_incoming
|
-o syslog_name=postfix/reinject_incoming
|
||||||
-o smtpd_milters=unix:opendkim/opendkim.sock
|
|
||||||
|
|
||||||
# Cleanup `Received` headers for authenticated mail
|
# Cleanup `Received` headers for authenticated mail
|
||||||
# to avoid leaking client IP.
|
# to avoid leaking client IP.
|
||||||
|
|||||||
3
cmdeploy/src/cmdeploy/postfix/smtp_tls_policy_map
Normal file
3
cmdeploy/src/cmdeploy/postfix/smtp_tls_policy_map
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
/^\[[^]]+\]$/ encrypt
|
||||||
|
/^_/ encrypt
|
||||||
|
/^nauta\.cu$/ may
|
||||||
@@ -37,7 +37,10 @@ def perform_initial_checks(mail_domain, pre_command=""):
|
|||||||
return res
|
return res
|
||||||
|
|
||||||
# parse out sts-id if exists, example: "v=STSv1; id=2090123"
|
# parse out sts-id if exists, example: "v=STSv1; id=2090123"
|
||||||
parts = query_dns("TXT", f"_mta-sts.{mail_domain}").split("id=")
|
mta_sts_txt = query_dns("TXT", f"_mta-sts.{mail_domain}")
|
||||||
|
if not mta_sts_txt:
|
||||||
|
return res
|
||||||
|
parts = mta_sts_txt.split("id=")
|
||||||
res["sts_id"] = parts[1].rstrip('"') if len(parts) == 2 else ""
|
res["sts_id"] = parts[1].rstrip('"') if len(parts) == 2 else ""
|
||||||
return res
|
return res
|
||||||
|
|
||||||
@@ -50,13 +53,13 @@ def get_dkim_entry(mail_domain, pre_command, dkim_selector):
|
|||||||
print=log_progress,
|
print=log_progress,
|
||||||
)
|
)
|
||||||
except CalledProcessError:
|
except CalledProcessError:
|
||||||
return
|
return None, None
|
||||||
dkim_value_raw = f"v=DKIM1;k=rsa;p={dkim_pubkey};s=email;t=s"
|
dkim_value_raw = f"v=DKIM1;k=rsa;p={dkim_pubkey};s=email;t=s"
|
||||||
dkim_value = '" "'.join(re.findall(".{1,255}", dkim_value_raw))
|
dkim_value = '" "'.join(re.findall(".{1,255}", dkim_value_raw))
|
||||||
web_dkim_value = "".join(re.findall(".{1,255}", dkim_value_raw))
|
web_dkim_value = "".join(re.findall(".{1,255}", dkim_value_raw))
|
||||||
return (
|
return (
|
||||||
f'{dkim_selector}._domainkey.{mail_domain}. TXT "{dkim_value}"',
|
f'{dkim_selector}._domainkey.{mail_domain}. 3600 IN TXT "{dkim_value}"',
|
||||||
f'{dkim_selector}._domainkey.{mail_domain}. TXT "{web_dkim_value}"',
|
f'{dkim_selector}._domainkey.{mail_domain}. 3600 IN TXT "{web_dkim_value}"',
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -91,9 +94,11 @@ def check_zonefile(zonefile, verbose=True):
|
|||||||
if not zf_line.strip() or zf_line.startswith(";"):
|
if not zf_line.strip() or zf_line.startswith(";"):
|
||||||
continue
|
continue
|
||||||
print(f"dns-checking {zf_line!r}") if verbose else log_progress("")
|
print(f"dns-checking {zf_line!r}") if verbose else log_progress("")
|
||||||
zf_domain, zf_typ, zf_value = zf_line.split(maxsplit=2)
|
parts = zf_line.split(None, 4)
|
||||||
zf_domain = zf_domain.rstrip(".")
|
zf_domain = parts[0].rstrip(".")
|
||||||
zf_value = zf_value.strip()
|
# parts[1]=TTL, parts[2]=IN, parts[3]=type, parts[4]=rdata
|
||||||
|
zf_typ = parts[3]
|
||||||
|
zf_value = parts[4].strip()
|
||||||
query_value = query_dns(zf_typ, zf_domain)
|
query_value = query_dns(zf_typ, zf_domain)
|
||||||
if zf_value != query_value:
|
if zf_value != query_value:
|
||||||
assert zf_typ in ("A", "AAAA", "CNAME", "CAA", "SRV", "MX", "TXT"), zf_line
|
assert zf_typ in ("A", "AAAA", "CNAME", "CAA", "SRV", "MX", "TXT"), zf_line
|
||||||
|
|||||||
@@ -40,5 +40,5 @@ def dovecot_recalc_quota(user):
|
|||||||
#
|
#
|
||||||
for line in output.split("\n"):
|
for line in output.split("\n"):
|
||||||
parts = line.split()
|
parts = line.split()
|
||||||
if parts[2] == "STORAGE":
|
if len(parts) >= 6 and parts[2] == "STORAGE":
|
||||||
return dict(value=int(parts[3]), limit=int(parts[4]), percent=int(parts[5]))
|
return dict(value=int(parts[3]), limit=int(parts[4]), percent=int(parts[5]))
|
||||||
|
|||||||
@@ -14,8 +14,9 @@ def main():
|
|||||||
importlib.resources.files("cmdeploy").joinpath("../../../chatmail.ini"),
|
importlib.resources.files("cmdeploy").joinpath("../../../chatmail.ini"),
|
||||||
)
|
)
|
||||||
disable_mail = bool(os.environ.get("CHATMAIL_DISABLE_MAIL"))
|
disable_mail = bool(os.environ.get("CHATMAIL_DISABLE_MAIL"))
|
||||||
|
website_only = bool(os.environ.get("CHATMAIL_WEBSITE_ONLY"))
|
||||||
|
|
||||||
deploy_chatmail(config_path, disable_mail)
|
deploy_chatmail(config_path, disable_mail, website_only)
|
||||||
|
|
||||||
|
|
||||||
if pyinfra.is_cli:
|
if pyinfra.is_cli:
|
||||||
|
|||||||
68
cmdeploy/src/cmdeploy/selfsigned/deployer.py
Normal file
68
cmdeploy/src/cmdeploy/selfsigned/deployer.py
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
import shlex
|
||||||
|
|
||||||
|
from pyinfra.operations import apt, server
|
||||||
|
|
||||||
|
from cmdeploy.basedeploy import Deployer
|
||||||
|
|
||||||
|
|
||||||
|
def openssl_selfsigned_args(domain, cert_path, key_path, days=36500):
|
||||||
|
"""Return the openssl argument list for a self-signed certificate.
|
||||||
|
|
||||||
|
The certificate uses an EC P-256 key with SAN entries for *domain*,
|
||||||
|
``www.<domain>`` and ``mta-sts.<domain>``.
|
||||||
|
"""
|
||||||
|
return [
|
||||||
|
"openssl",
|
||||||
|
"req",
|
||||||
|
"-x509",
|
||||||
|
"-newkey",
|
||||||
|
"ec",
|
||||||
|
"-pkeyopt",
|
||||||
|
"ec_paramgen_curve:P-256",
|
||||||
|
"-noenc",
|
||||||
|
"-days",
|
||||||
|
str(days),
|
||||||
|
"-keyout",
|
||||||
|
str(key_path),
|
||||||
|
"-out",
|
||||||
|
str(cert_path),
|
||||||
|
"-subj",
|
||||||
|
f"/CN={domain}",
|
||||||
|
# Mark as end-entity cert so it cannot be used as a CA to sign others.
|
||||||
|
"-addext",
|
||||||
|
"basicConstraints=critical,CA:FALSE",
|
||||||
|
"-addext",
|
||||||
|
"extendedKeyUsage=serverAuth,clientAuth",
|
||||||
|
"-addext",
|
||||||
|
f"subjectAltName=DNS:{domain},DNS:www.{domain},DNS:mta-sts.{domain}",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
class SelfSignedTlsDeployer(Deployer):
|
||||||
|
"""Generates a self-signed TLS certificate for all chatmail endpoints."""
|
||||||
|
|
||||||
|
def __init__(self, mail_domain):
|
||||||
|
self.mail_domain = mail_domain
|
||||||
|
self.cert_path = "/etc/ssl/certs/mailserver.pem"
|
||||||
|
self.key_path = "/etc/ssl/private/mailserver.key"
|
||||||
|
|
||||||
|
def install(self):
|
||||||
|
apt.packages(
|
||||||
|
name="Install openssl",
|
||||||
|
packages=["openssl"],
|
||||||
|
)
|
||||||
|
|
||||||
|
def configure(self):
|
||||||
|
args = openssl_selfsigned_args(
|
||||||
|
self.mail_domain,
|
||||||
|
self.cert_path,
|
||||||
|
self.key_path,
|
||||||
|
)
|
||||||
|
cmd = shlex.join(args)
|
||||||
|
server.shell(
|
||||||
|
name="Generate self-signed TLS certificate if not present",
|
||||||
|
commands=[f"[ -f {self.cert_path} ] || {cmd}"],
|
||||||
|
)
|
||||||
|
|
||||||
|
def activate(self):
|
||||||
|
pass
|
||||||
3
cmdeploy/src/cmdeploy/service/10_restart.conf
Normal file
3
cmdeploy/src/cmdeploy/service/10_restart.conf
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
RestartSec=30
|
||||||
@@ -5,5 +5,5 @@ After=network.target
|
|||||||
[Service]
|
[Service]
|
||||||
Type=oneshot
|
Type=oneshot
|
||||||
User=vmail
|
User=vmail
|
||||||
ExecStart=/usr/local/lib/chatmaild/venv/bin/chatmail-fsreport /usr/local/lib/chatmaild/chatmail.ini
|
ExecStart=/usr/local/lib/chatmaild/venv/bin/chatmail-fsreport /usr/local/lib/chatmaild/chatmail.ini
|
||||||
|
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ Description=Chatmail dict proxy for IMAP METADATA
|
|||||||
[Service]
|
[Service]
|
||||||
ExecStart={execpath} /run/chatmail-metadata/metadata.socket {config_path}
|
ExecStart={execpath} /run/chatmail-metadata/metadata.socket {config_path}
|
||||||
Restart=always
|
Restart=always
|
||||||
RestartSec=30
|
RestartSec=5
|
||||||
User=vmail
|
User=vmail
|
||||||
RuntimeDirectory=chatmail-metadata
|
RuntimeDirectory=chatmail-metadata
|
||||||
UMask=0077
|
UMask=0077
|
||||||
|
|||||||
@@ -1,67 +0,0 @@
|
|||||||
[Unit]
|
|
||||||
Description=Chatmail echo bot for testing it works
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
ExecStart={execpath} {config_path}
|
|
||||||
Environment="PATH={remote_venv_dir}:$PATH"
|
|
||||||
Restart=always
|
|
||||||
RestartSec=30
|
|
||||||
|
|
||||||
User=echobot
|
|
||||||
Group=echobot
|
|
||||||
|
|
||||||
# Create /var/lib/echobot
|
|
||||||
StateDirectory=echobot
|
|
||||||
|
|
||||||
# Create /run/echobot
|
|
||||||
#
|
|
||||||
# echobot stores /run/echobot/password
|
|
||||||
# with a password there, which doveauth then reads.
|
|
||||||
RuntimeDirectory=echobot
|
|
||||||
|
|
||||||
WorkingDirectory=/var/lib/echobot
|
|
||||||
|
|
||||||
# Apply security restrictions suggested by
|
|
||||||
# systemd-analyze security echobot.service
|
|
||||||
CapabilityBoundingSet=
|
|
||||||
LockPersonality=true
|
|
||||||
MemoryDenyWriteExecute=true
|
|
||||||
NoNewPrivileges=true
|
|
||||||
PrivateDevices=true
|
|
||||||
PrivateMounts=true
|
|
||||||
PrivateTmp=true
|
|
||||||
|
|
||||||
# We need to know about doveauth user to give it access to /run/echobot/password
|
|
||||||
PrivateUsers=false
|
|
||||||
|
|
||||||
ProtectClock=true
|
|
||||||
ProtectControlGroups=true
|
|
||||||
ProtectHostname=true
|
|
||||||
ProtectKernelLogs=true
|
|
||||||
ProtectKernelModules=true
|
|
||||||
ProtectKernelTunables=true
|
|
||||||
ProtectProc=noaccess
|
|
||||||
|
|
||||||
# Should be "strict", but we currently write /accounts folder in a protected path
|
|
||||||
ProtectSystem=full
|
|
||||||
|
|
||||||
RemoveIPC=true
|
|
||||||
RestrictAddressFamilies=AF_INET AF_INET6
|
|
||||||
RestrictNamespaces=true
|
|
||||||
RestrictRealtime=true
|
|
||||||
RestrictSUIDSGID=true
|
|
||||||
SystemCallArchitectures=native
|
|
||||||
SystemCallFilter=~@clock
|
|
||||||
SystemCallFilter=~@cpu-emulation
|
|
||||||
SystemCallFilter=~@debug
|
|
||||||
SystemCallFilter=~@module
|
|
||||||
SystemCallFilter=~@mount
|
|
||||||
SystemCallFilter=~@obsolete
|
|
||||||
SystemCallFilter=~@raw-io
|
|
||||||
SystemCallFilter=~@reboot
|
|
||||||
SystemCallFilter=~@resources
|
|
||||||
SystemCallFilter=~@swap
|
|
||||||
UMask=0077
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
||||||
@@ -49,8 +49,13 @@ class SSHExec:
|
|||||||
RemoteError = execnet.RemoteError
|
RemoteError = execnet.RemoteError
|
||||||
FuncError = FuncError
|
FuncError = FuncError
|
||||||
|
|
||||||
def __init__(self, host, verbose=False, python="python3", timeout=60):
|
def __init__(
|
||||||
self.gateway = execnet.makegateway(f"ssh=root@{host}//python={python}")
|
self, host, verbose=False, python="python3", timeout=60, ssh_config=None
|
||||||
|
):
|
||||||
|
spec = f"ssh=root@{host}//python={python}"
|
||||||
|
if ssh_config:
|
||||||
|
spec += f"//ssh_config={ssh_config}"
|
||||||
|
self.gateway = execnet.makegateway(spec)
|
||||||
self._remote_cmdloop_channel = bootstrap_remote(self.gateway, remote)
|
self._remote_cmdloop_channel = bootstrap_remote(self.gateway, remote)
|
||||||
self.timeout = timeout
|
self.timeout = timeout
|
||||||
self.verbose = verbose
|
self.verbose = verbose
|
||||||
@@ -85,16 +90,74 @@ class SSHExec:
|
|||||||
|
|
||||||
|
|
||||||
class LocalExec:
|
class LocalExec:
|
||||||
|
FuncError = FuncError
|
||||||
|
|
||||||
def __init__(self, verbose=False, docker=False):
|
def __init__(self, verbose=False, docker=False):
|
||||||
self.verbose = verbose
|
self.verbose = verbose
|
||||||
self.docker = docker
|
self.docker = docker
|
||||||
|
|
||||||
|
def __call__(self, call, kwargs=None, log_callback=None):
|
||||||
|
if kwargs is None:
|
||||||
|
kwargs = {}
|
||||||
|
return call(**kwargs)
|
||||||
|
|
||||||
def logged(self, call, kwargs: dict):
|
def logged(self, call, kwargs: dict):
|
||||||
|
title = call.__doc__
|
||||||
|
if not title:
|
||||||
|
title = call.__name__
|
||||||
where = "locally"
|
where = "locally"
|
||||||
if self.docker:
|
if self.docker:
|
||||||
if call == remote.rdns.perform_initial_checks:
|
if call == remote.rdns.perform_initial_checks:
|
||||||
kwargs["pre_command"] = "docker exec chatmail "
|
kwargs["pre_command"] = "docker exec chatmail "
|
||||||
where = "in docker"
|
where = "in docker"
|
||||||
if self.verbose:
|
if self.verbose:
|
||||||
print(f"Running {where}: {call.__name__}(**{kwargs})")
|
print_stderr(f"Running {where}: {title}(**{kwargs})")
|
||||||
return call(**kwargs)
|
return self(call, kwargs, log_callback=print_stderr)
|
||||||
|
else:
|
||||||
|
print_stderr(title, end="")
|
||||||
|
res = self(call, kwargs, log_callback=remote.rshell.log_progress)
|
||||||
|
print_stderr()
|
||||||
|
return res
|
||||||
|
|
||||||
|
|
||||||
|
# pyinfra exposes a ``ssh_config_file`` data key that *should* let
|
||||||
|
# paramiko parse an SSH config file directly. In practice it silently
|
||||||
|
# fails to connect (zero hosts / zero operations), so we resolve the
|
||||||
|
# hostname and identity-file ourselves and pass them via
|
||||||
|
# ``--data ssh_hostname`` / ``--data ssh_key`` instead.
|
||||||
|
# Execnet uses ssh natively (and not paramiko) and doesn't have this problem.
|
||||||
|
|
||||||
|
|
||||||
|
def _get_from_ssh_config(host, ssh_config_path, key):
|
||||||
|
"""Internal helper to parse a value for a specific key from ssh-config."""
|
||||||
|
current_hosts = []
|
||||||
|
found_value = None
|
||||||
|
with open(ssh_config_path) as f:
|
||||||
|
for raw_line in f:
|
||||||
|
line = raw_line.strip()
|
||||||
|
if not line or line.startswith("#"):
|
||||||
|
continue
|
||||||
|
parts = line.split(None, 1)
|
||||||
|
if not parts:
|
||||||
|
continue
|
||||||
|
directive = parts[0].lower()
|
||||||
|
if directive == "host":
|
||||||
|
if host in current_hosts and found_value:
|
||||||
|
return found_value
|
||||||
|
current_hosts = parts[1].split()
|
||||||
|
found_value = None
|
||||||
|
elif directive == key.lower():
|
||||||
|
found_value = parts[1]
|
||||||
|
if host in current_hosts and found_value:
|
||||||
|
return found_value
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def resolve_host_from_ssh_config(host, ssh_config_path):
|
||||||
|
"""Resolve a host alias to its IP from an ssh-config file."""
|
||||||
|
return _get_from_ssh_config(host, ssh_config_path, "Hostname") or host
|
||||||
|
|
||||||
|
|
||||||
|
def resolve_key_from_ssh_config(host, ssh_config_path):
|
||||||
|
"""Resolve a host alias to its IdentityFile from an ssh-config file."""
|
||||||
|
return _get_from_ssh_config(host, ssh_config_path, "IdentityFile")
|
||||||
|
|||||||
@@ -1,17 +1,18 @@
|
|||||||
; Required DNS entries for chatmail servers
|
; Required DNS entries
|
||||||
zftest.testrun.org. A 135.181.204.127
|
zftest.testrun.org. 3600 IN A 135.181.204.127
|
||||||
zftest.testrun.org. AAAA 2a01:4f9:c012:52f4::1
|
zftest.testrun.org. 3600 IN AAAA 2a01:4f9:c012:52f4::1
|
||||||
zftest.testrun.org. MX 10 zftest.testrun.org.
|
zftest.testrun.org. 3600 IN MX 10 zftest.testrun.org.
|
||||||
_mta-sts.zftest.testrun.org. TXT "v=STSv1; id=202403211706"
|
_mta-sts.zftest.testrun.org. 3600 IN TXT "v=STSv1; id=202403211706"
|
||||||
mta-sts.zftest.testrun.org. CNAME zftest.testrun.org.
|
mta-sts.zftest.testrun.org. 3600 IN CNAME zftest.testrun.org.
|
||||||
www.zftest.testrun.org. CNAME zftest.testrun.org.
|
www.zftest.testrun.org. 3600 IN CNAME zftest.testrun.org.
|
||||||
opendkim._domainkey.zftest.testrun.org. TXT "v=DKIM1;k=rsa;p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoYt82CVUyz2ouaqjX2kB+5J80knAyoOU3MGU5aWppmwUwwTvj/oSTSpkc5JMtVTRmKKr8NUDWAL1Yw7dfGqqPHdHfwwjS3BIvDzYx+hzgtz62RnfNgV+/2MAoNpfX7cAFIHdRzEHNtwugc3RDLquqPoupAE3Y2YRw2T5zG5fILh4vwIcJZL5Uq6B92j8wwJqOex" "33n+vm1NKQ9rxo/UsHAmZlJzpooXcG/4igTBxJyJlamVSRR6N7Nul1v//YJb7J6v2o0iPHW6uE0StzKaPPNC2IVosSRFbD9H2oqppltptFSNPlI0E+t0JBWHem6YK7xcugiO3ImMCaaU8g6Jt/wIDAQAB;s=email;t=s"
|
opendkim._domainkey.zftest.testrun.org. 3600 IN TXT "v=DKIM1;k=rsa;p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoYt82CVUyz2ouaqjX2kB+5J80knAyoOU3MGU5aWppmwUwwTvj/oSTSpkc5JMtVTRmKKr8NUDWAL1Yw7dfGqqPHdHfwwjS3BIvDzYx+hzgtz62RnfNgV+/2MAoNpfX7cAFIHdRzEHNtwugc3RDLquqPoupAE3Y2YRw2T5zG5fILh4vwIcJZL5Uq6B92j8wwJqOex" "33n+vm1NKQ9rxo/UsHAmZlJzpooXcG/4igTBxJyJlamVSRR6N7Nul1v//YJb7J6v2o0iPHW6uE0StzKaPPNC2IVosSRFbD9H2oqppltptFSNPlI0E+t0JBWHem6YK7xcugiO3ImMCaaU8g6Jt/wIDAQAB;s=email;t=s"
|
||||||
|
|
||||||
; Recommended DNS entries
|
; Recommended DNS entries
|
||||||
_submission._tcp.zftest.testrun.org. SRV 0 1 587 zftest.testrun.org.
|
zftest.testrun.org. 3600 IN TXT "v=spf1 a ~all"
|
||||||
_submissions._tcp.zftest.testrun.org. SRV 0 1 465 zftest.testrun.org.
|
_dmarc.zftest.testrun.org. 3600 IN TXT "v=DMARC1;p=reject;adkim=s;aspf=s"
|
||||||
_imap._tcp.zftest.testrun.org. SRV 0 1 143 zftest.testrun.org.
|
zftest.testrun.org. 3600 IN CAA 0 issue "letsencrypt.org;accounturi=https://acme-v02.api.letsencrypt.org/acme/acct/1371472956"
|
||||||
_imaps._tcp.zftest.testrun.org. SRV 0 1 993 zftest.testrun.org.
|
_adsp._domainkey.zftest.testrun.org. 3600 IN TXT "dkim=discardable"
|
||||||
zftest.testrun.org. CAA 0 issue "letsencrypt.org;accounturi=https://acme-v02.api.letsencrypt.org/acme/acct/1371472956"
|
_submission._tcp.zftest.testrun.org. 3600 IN SRV 0 1 587 zftest.testrun.org.
|
||||||
zftest.testrun.org. TXT "v=spf1 a:zftest.testrun.org ~all"
|
_submissions._tcp.zftest.testrun.org. 3600 IN SRV 0 1 465 zftest.testrun.org.
|
||||||
_dmarc.zftest.testrun.org. TXT "v=DMARC1;p=reject;adkim=s;aspf=s"
|
_imap._tcp.zftest.testrun.org. 3600 IN SRV 0 1 143 zftest.testrun.org.
|
||||||
_adsp._domainkey.zftest.testrun.org. TXT "dkim=discardable"
|
_imaps._tcp.zftest.testrun.org. 3600 IN SRV 0 1 993 zftest.testrun.org.
|
||||||
|
|||||||
@@ -41,9 +41,9 @@ class TestDC:
|
|||||||
|
|
||||||
def dc_ping_pong():
|
def dc_ping_pong():
|
||||||
chat.send_text("ping")
|
chat.send_text("ping")
|
||||||
msg = ac2._evtracker.wait_next_incoming_message()
|
msg = ac2.wait_for_incoming_msg()
|
||||||
msg.chat.send_text("pong")
|
msg.get_snapshot().chat.send_text("pong")
|
||||||
ac1._evtracker.wait_next_incoming_message()
|
ac1.wait_for_incoming_msg()
|
||||||
|
|
||||||
benchmark(dc_ping_pong, 5)
|
benchmark(dc_ping_pong, 5)
|
||||||
|
|
||||||
@@ -55,6 +55,6 @@ class TestDC:
|
|||||||
for i in range(10):
|
for i in range(10):
|
||||||
chat.send_text(f"hello {i}")
|
chat.send_text(f"hello {i}")
|
||||||
for i in range(10):
|
for i in range(10):
|
||||||
ac2._evtracker.wait_next_incoming_message()
|
ac2.wait_for_incoming_msg()
|
||||||
|
|
||||||
benchmark(dc_send_10_receive_10, 5)
|
benchmark(dc_send_10_receive_10, 5, cooldown="auto")
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
import pytest
|
||||||
import requests
|
import requests
|
||||||
|
|
||||||
from cmdeploy.genqr import gen_qr_png_data
|
from cmdeploy.genqr import gen_qr_png_data
|
||||||
@@ -8,18 +9,36 @@ def test_gen_qr_png_data(maildomain):
|
|||||||
assert data
|
assert data
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.filterwarnings("ignore::urllib3.exceptions.InsecureRequestWarning")
|
||||||
def test_fastcgi_working(maildomain, chatmail_config):
|
def test_fastcgi_working(maildomain, chatmail_config):
|
||||||
url = f"https://{maildomain}/new"
|
url = f"https://{maildomain}/new"
|
||||||
print(url)
|
print(url)
|
||||||
res = requests.post(url)
|
verify = chatmail_config.tls_cert_mode == "acme"
|
||||||
|
res = requests.post(url, verify=verify)
|
||||||
assert maildomain in res.json().get("email")
|
assert maildomain in res.json().get("email")
|
||||||
assert len(res.json().get("password")) > chatmail_config.password_min_length
|
assert len(res.json().get("password")) > chatmail_config.password_min_length
|
||||||
|
|
||||||
|
|
||||||
def test_newemail_configure(maildomain, rpc):
|
@pytest.mark.filterwarnings("ignore::urllib3.exceptions.InsecureRequestWarning")
|
||||||
|
def test_newemail_configure(maildomain, maildomain_ip, rpc, chatmail_config):
|
||||||
"""Test configuring accounts by scanning a QR code works."""
|
"""Test configuring accounts by scanning a QR code works."""
|
||||||
url = f"DCACCOUNT:https://{maildomain}/new"
|
url = f"DCACCOUNT:https://{maildomain}/new"
|
||||||
for i in range(3):
|
for i in range(3):
|
||||||
account_id = rpc.add_account()
|
account_id = rpc.add_account()
|
||||||
rpc.set_config_from_qr(account_id, url)
|
if chatmail_config.tls_cert_mode == "self":
|
||||||
rpc.configure(account_id)
|
# deltachat core's rustls rejects self-signed HTTPS certs during
|
||||||
|
# set_config_from_qr, so fetch credentials via requests instead
|
||||||
|
res = requests.post(f"https://{maildomain}/new", verify=False)
|
||||||
|
data = res.json()
|
||||||
|
rpc.add_or_update_transport(
|
||||||
|
account_id,
|
||||||
|
{
|
||||||
|
"addr": data["email"],
|
||||||
|
"password": data["password"],
|
||||||
|
"imapServer": maildomain_ip,
|
||||||
|
"smtpServer": maildomain_ip,
|
||||||
|
"certificateChecks": "acceptInvalidCertificates",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
rpc.add_transport_from_qr(account_id, url)
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
import datetime
|
import datetime
|
||||||
import os
|
|
||||||
import smtplib
|
import smtplib
|
||||||
import socket
|
import socket
|
||||||
import subprocess
|
import subprocess
|
||||||
@@ -8,14 +7,14 @@ import time
|
|||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
from cmdeploy import remote
|
from cmdeploy import remote
|
||||||
from cmdeploy.cmdeploy import main
|
from cmdeploy.cmdeploy import get_sshexec
|
||||||
from cmdeploy.sshexec import SSHExec
|
|
||||||
|
|
||||||
|
|
||||||
class TestSSHExecutor:
|
class TestSSHExecutor:
|
||||||
@pytest.fixture(scope="class")
|
@pytest.fixture(scope="class")
|
||||||
def sshexec(self, sshdomain):
|
def sshexec(self, sshdomain, pytestconfig):
|
||||||
return SSHExec(sshdomain)
|
ssh_config = pytestconfig.getoption("ssh_config")
|
||||||
|
return get_sshexec(sshdomain, ssh_config=ssh_config)
|
||||||
|
|
||||||
def test_ls(self, sshexec):
|
def test_ls(self, sshexec):
|
||||||
out = sshexec(call=remote.rdns.shell, kwargs=dict(command="ls"))
|
out = sshexec(call=remote.rdns.shell, kwargs=dict(command="ls"))
|
||||||
@@ -29,6 +28,7 @@ class TestSSHExecutor:
|
|||||||
assert res["A"] or res["AAAA"]
|
assert res["A"] or res["AAAA"]
|
||||||
|
|
||||||
def test_logged(self, sshexec, maildomain, capsys):
|
def test_logged(self, sshexec, maildomain, capsys):
|
||||||
|
sshexec.verbose = False
|
||||||
sshexec.logged(
|
sshexec.logged(
|
||||||
remote.rdns.perform_initial_checks, kwargs=dict(mail_domain=maildomain)
|
remote.rdns.perform_initial_checks, kwargs=dict(mail_domain=maildomain)
|
||||||
)
|
)
|
||||||
@@ -54,6 +54,8 @@ class TestSSHExecutor:
|
|||||||
remote.rdns.perform_initial_checks,
|
remote.rdns.perform_initial_checks,
|
||||||
kwargs=dict(mail_domain=None),
|
kwargs=dict(mail_domain=None),
|
||||||
)
|
)
|
||||||
|
except AssertionError:
|
||||||
|
pass
|
||||||
except sshexec.FuncError as e:
|
except sshexec.FuncError as e:
|
||||||
assert "rdns.py" in str(e)
|
assert "rdns.py" in str(e)
|
||||||
assert "AssertionError" in str(e)
|
assert "AssertionError" in str(e)
|
||||||
@@ -70,47 +72,6 @@ class TestSSHExecutor:
|
|||||||
assert (now - since_date).total_seconds() < 60 * 60 * 51
|
assert (now - since_date).total_seconds() < 60 * 60 * 51
|
||||||
|
|
||||||
|
|
||||||
def test_status_cmd(chatmail_config, capsys, request):
|
|
||||||
os.chdir(request.config.invocation_params.dir)
|
|
||||||
assert main(["status"]) == 0
|
|
||||||
status_out = capsys.readouterr()
|
|
||||||
print(status_out.out)
|
|
||||||
|
|
||||||
services = [
|
|
||||||
"acmetool-redirector",
|
|
||||||
"chatmail-metadata",
|
|
||||||
"doveauth",
|
|
||||||
"dovecot",
|
|
||||||
"echobot",
|
|
||||||
"fcgiwrap",
|
|
||||||
"filtermail-incoming",
|
|
||||||
"filtermail",
|
|
||||||
"lastlogin",
|
|
||||||
"nginx",
|
|
||||||
"opendkim",
|
|
||||||
"postfix@-",
|
|
||||||
"systemd-journald",
|
|
||||||
"turnserver",
|
|
||||||
"unbound",
|
|
||||||
]
|
|
||||||
not_running = []
|
|
||||||
for service in services:
|
|
||||||
active = False
|
|
||||||
for line in status_out:
|
|
||||||
if service in line:
|
|
||||||
active = True
|
|
||||||
if not "loaded" in line:
|
|
||||||
active = False
|
|
||||||
if not "active" in line:
|
|
||||||
active = False
|
|
||||||
if not "running" in line:
|
|
||||||
active = False
|
|
||||||
break
|
|
||||||
if not active:
|
|
||||||
not_running.append(service)
|
|
||||||
assert not_running == []
|
|
||||||
|
|
||||||
|
|
||||||
def test_timezone_env(remote):
|
def test_timezone_env(remote):
|
||||||
for line in remote.iter_output("env"):
|
for line in remote.iter_output("env"):
|
||||||
print(line)
|
print(line)
|
||||||
@@ -126,10 +87,8 @@ def test_remote(remote, imap_or_smtp):
|
|||||||
|
|
||||||
|
|
||||||
def test_use_two_chatmailservers(cmfactory, maildomain2):
|
def test_use_two_chatmailservers(cmfactory, maildomain2):
|
||||||
ac1 = cmfactory.new_online_configuring_account(cache=False)
|
ac1 = cmfactory.get_online_account()
|
||||||
cmfactory.switch_maildomain(maildomain2)
|
ac2 = cmfactory.get_online_account(domain=maildomain2)
|
||||||
ac2 = cmfactory.new_online_configuring_account(cache=False)
|
|
||||||
cmfactory.bring_accounts_online()
|
|
||||||
cmfactory.get_accepted_chat(ac1, ac2)
|
cmfactory.get_accepted_chat(ac1, ac2)
|
||||||
domain1 = ac1.get_config("addr").split("@")[1]
|
domain1 = ac1.get_config("addr").split("@")[1]
|
||||||
domain2 = ac2.get_config("addr").split("@")[1]
|
domain2 = ac2.get_config("addr").split("@")[1]
|
||||||
@@ -174,11 +133,10 @@ def test_authenticated_from(cmsetup, maildata):
|
|||||||
@pytest.mark.parametrize("from_addr", ["fake@example.org", "fake@testrun.org"])
|
@pytest.mark.parametrize("from_addr", ["fake@example.org", "fake@testrun.org"])
|
||||||
def test_reject_missing_dkim(cmsetup, maildata, from_addr):
|
def test_reject_missing_dkim(cmsetup, maildata, from_addr):
|
||||||
domain = cmsetup.maildomain
|
domain = cmsetup.maildomain
|
||||||
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
|
||||||
sock.settimeout(10)
|
|
||||||
try:
|
try:
|
||||||
sock.connect((domain, 25))
|
sock = socket.create_connection((domain, 25), timeout=10)
|
||||||
except socket.timeout:
|
sock.close()
|
||||||
|
except (socket.timeout, OSError):
|
||||||
pytest.skip(f"port 25 not reachable for {domain}")
|
pytest.skip(f"port 25 not reachable for {domain}")
|
||||||
|
|
||||||
recipient = cmsetup.gen_users(1)[0]
|
recipient = cmsetup.gen_users(1)[0]
|
||||||
@@ -189,7 +147,7 @@ def test_reject_missing_dkim(cmsetup, maildata, from_addr):
|
|||||||
conn.starttls()
|
conn.starttls()
|
||||||
|
|
||||||
with conn as s:
|
with conn as s:
|
||||||
with pytest.raises(smtplib.SMTPDataError, match="No valid DKIM signature"):
|
with pytest.raises(smtplib.SMTPDataError, match="No DKIM signature found"):
|
||||||
s.sendmail(from_addr=from_addr, to_addrs=recipient.addr, msg=msg)
|
s.sendmail(from_addr=from_addr, to_addrs=recipient.addr, msg=msg)
|
||||||
|
|
||||||
|
|
||||||
@@ -232,12 +190,14 @@ def test_exceed_rate_limit(cmsetup, gencreds, maildata, chatmail_config):
|
|||||||
mail = maildata(
|
mail = maildata(
|
||||||
"encrypted.eml", from_addr=user1.addr, to_addr=user2.addr
|
"encrypted.eml", from_addr=user1.addr, to_addr=user2.addr
|
||||||
).as_string()
|
).as_string()
|
||||||
for i in range(chatmail_config.max_user_send_per_minute + 5):
|
|
||||||
print("Sending mail", str(i))
|
start = time.time()
|
||||||
|
for i in range(chatmail_config.max_user_send_per_minute * 3):
|
||||||
|
print("Sending mail", str(i + 1), "at", time.time() - start, "s.")
|
||||||
try:
|
try:
|
||||||
user1.smtp.sendmail(user1.addr, [user2.addr], mail)
|
user1.smtp.sendmail(user1.addr, [user2.addr], mail)
|
||||||
except smtplib.SMTPException as e:
|
except smtplib.SMTPException as e:
|
||||||
if i < chatmail_config.max_user_send_per_minute:
|
if i < chatmail_config.max_user_send_burst_size:
|
||||||
pytest.fail(f"rate limit was exceeded too early with msg {i}")
|
pytest.fail(f"rate limit was exceeded too early with msg {i}")
|
||||||
outcome = e.recipients[user2.addr]
|
outcome = e.recipients[user2.addr]
|
||||||
assert outcome[0] == 450
|
assert outcome[0] == 450
|
||||||
@@ -259,7 +219,7 @@ def test_expunged(remote, chatmail_config):
|
|||||||
]
|
]
|
||||||
outdated_days = int(chatmail_config.delete_large_after) + 1
|
outdated_days = int(chatmail_config.delete_large_after) + 1
|
||||||
find_cmds.append(
|
find_cmds.append(
|
||||||
"find {chatmail_config.mailboxes_dir} -path '*/cur/*' -mtime +{outdated_days} -size +200k -type f"
|
f"find {chatmail_config.mailboxes_dir} -path '*/cur/*' -mtime +{outdated_days} -size +200k -type f"
|
||||||
)
|
)
|
||||||
for cmd in find_cmds:
|
for cmd in find_cmds:
|
||||||
for line in remote.iter_output(cmd):
|
for line in remote.iter_output(cmd):
|
||||||
|
|||||||
@@ -6,17 +6,19 @@ import imap_tools
|
|||||||
import pytest
|
import pytest
|
||||||
import requests
|
import requests
|
||||||
|
|
||||||
|
from cmdeploy.cmdeploy import get_sshexec
|
||||||
from cmdeploy.remote import rshell
|
from cmdeploy.remote import rshell
|
||||||
from cmdeploy.sshexec import SSHExec
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
def imap_mailbox(cmfactory):
|
def imap_mailbox(cmfactory, ssl_context):
|
||||||
(ac1,) = cmfactory.get_online_accounts(1)
|
(ac1,) = cmfactory.get_online_accounts(1)
|
||||||
user = ac1.get_config("addr")
|
user = ac1.get_config("addr")
|
||||||
password = ac1.get_config("mail_pw")
|
password = ac1.get_config("mail_pw")
|
||||||
mailbox = imap_tools.MailBox(user.split("@")[1])
|
host = user.split("@")[1]
|
||||||
|
mailbox = imap_tools.MailBox(host, ssl_context=ssl_context)
|
||||||
mailbox.login(user, password)
|
mailbox.login(user, password)
|
||||||
|
mailbox.dc_ac = ac1
|
||||||
return mailbox
|
return mailbox
|
||||||
|
|
||||||
|
|
||||||
@@ -25,6 +27,7 @@ class TestMetadataTokens:
|
|||||||
|
|
||||||
def test_set_get_metadata(self, imap_mailbox):
|
def test_set_get_metadata(self, imap_mailbox):
|
||||||
"set and get metadata token for an account"
|
"set and get metadata token for an account"
|
||||||
|
time.sleep(5) # make sure Metadata service had a chance to restart
|
||||||
client = imap_mailbox.client
|
client = imap_mailbox.client
|
||||||
client.send(b'a01 SETMETADATA INBOX (/private/devicetoken "1111" )\n')
|
client.send(b'a01 SETMETADATA INBOX (/private/devicetoken "1111" )\n')
|
||||||
res = client.readline()
|
res = client.readline()
|
||||||
@@ -60,11 +63,11 @@ class TestEndToEndDeltaChat:
|
|||||||
chat.send_text("message0")
|
chat.send_text("message0")
|
||||||
|
|
||||||
lp.sec("wait for ac2 to receive message")
|
lp.sec("wait for ac2 to receive message")
|
||||||
msg2 = ac2._evtracker.wait_next_incoming_message()
|
msg2 = ac2.wait_for_incoming_msg()
|
||||||
assert msg2.text == "message0"
|
assert msg2.get_snapshot().text == "message0"
|
||||||
|
|
||||||
def test_exceed_quota(
|
def test_exceed_quota(
|
||||||
self, cmfactory, lp, tmpdir, remote, chatmail_config, sshdomain
|
self, cmfactory, lp, tmpdir, remote, chatmail_config, sshdomain, pytestconfig
|
||||||
):
|
):
|
||||||
"""This is a very slow test as it needs to upload >100MB of mail data
|
"""This is a very slow test as it needs to upload >100MB of mail data
|
||||||
before quota is exceeded, and thus depends on the speed of the upload.
|
before quota is exceeded, and thus depends on the speed of the upload.
|
||||||
@@ -89,66 +92,85 @@ class TestEndToEndDeltaChat:
|
|||||||
lp.sec(f"filling remote inbox for {user}")
|
lp.sec(f"filling remote inbox for {user}")
|
||||||
fn = f"7743102289.M843172P2484002.c20,S={quota},W=2398:2,"
|
fn = f"7743102289.M843172P2484002.c20,S={quota},W=2398:2,"
|
||||||
path = chatmail_config.mailboxes_dir.joinpath(user, "cur", fn)
|
path = chatmail_config.mailboxes_dir.joinpath(user, "cur", fn)
|
||||||
sshexec = SSHExec(sshdomain)
|
sshexec = get_sshexec(
|
||||||
|
sshdomain, ssh_config=pytestconfig.getoption("ssh_config")
|
||||||
|
)
|
||||||
sshexec(call=rshell.write_numbytes, kwargs=dict(path=str(path), num=120))
|
sshexec(call=rshell.write_numbytes, kwargs=dict(path=str(path), num=120))
|
||||||
res = sshexec(call=rshell.dovecot_recalc_quota, kwargs=dict(user=user))
|
res = sshexec(call=rshell.dovecot_recalc_quota, kwargs=dict(user=user))
|
||||||
assert res["percent"] >= 100
|
assert res["percent"] >= 100
|
||||||
|
|
||||||
lp.sec("ac2: check quota is triggered")
|
lp.sec("ac2: check quota is triggered")
|
||||||
|
|
||||||
starting = True
|
def send_hello():
|
||||||
for line in remote.iter_output("journalctl -n0 -f -u dovecot"):
|
chat.send_text("hello")
|
||||||
if starting:
|
|
||||||
chat.send_text("hello")
|
for line in remote.iter_output(
|
||||||
starting = False
|
"journalctl -n1 -f -u dovecot", ready=send_hello
|
||||||
|
):
|
||||||
if user not in line:
|
if user not in line:
|
||||||
# print(line)
|
|
||||||
continue
|
continue
|
||||||
if "quota exceeded" in line:
|
if "quota exceeded" in line:
|
||||||
return
|
return
|
||||||
|
|
||||||
def test_securejoin(self, cmfactory, lp, maildomain2):
|
def test_securejoin(self, cmfactory, lp, maildomain2):
|
||||||
ac1 = cmfactory.new_online_configuring_account(cache=False)
|
ac1 = cmfactory.get_online_account()
|
||||||
cmfactory.switch_maildomain(maildomain2)
|
ac2 = cmfactory.get_online_account(domain=maildomain2)
|
||||||
ac2 = cmfactory.new_online_configuring_account(cache=False)
|
|
||||||
cmfactory.bring_accounts_online()
|
|
||||||
|
|
||||||
lp.sec("ac1: create QR code and let ac2 scan it, starting the securejoin")
|
lp.sec("ac1: create QR code and let ac2 scan it, starting the securejoin")
|
||||||
qr = ac1.get_setup_contact_qr()
|
qr = ac1.get_qr_code()
|
||||||
|
|
||||||
lp.sec("ac2: start QR-code based setup contact protocol")
|
lp.sec("ac2: start QR-code based setup contact protocol")
|
||||||
ch = ac2.qr_setup_contact(qr)
|
ch = ac2.secure_join(qr)
|
||||||
assert ch.id >= 10
|
assert ch.id >= 10
|
||||||
ac1._evtracker.wait_securejoin_inviter_progress(1000)
|
ac1.wait_for_securejoin_inviter_success()
|
||||||
|
|
||||||
|
def test_dkim_header_stripped(self, cmfactory, maildomain2, lp, imap_mailbox):
|
||||||
|
"""Test that if a DC address receives a message, it has no
|
||||||
|
DKIM-Signature and Authentication-Results headers."""
|
||||||
|
ac1 = cmfactory.get_online_account()
|
||||||
|
ac2 = cmfactory.get_online_account(domain=maildomain2)
|
||||||
|
chat = cmfactory.get_accepted_chat(ac1, imap_mailbox.dc_ac)
|
||||||
|
chat.send_text("message0")
|
||||||
|
chat2 = cmfactory.get_accepted_chat(ac2, imap_mailbox.dc_ac)
|
||||||
|
chat2.send_text("message1")
|
||||||
|
|
||||||
|
lp.sec("receive message with ac1...")
|
||||||
|
received = 0
|
||||||
|
while received < 2:
|
||||||
|
msgs = imap_mailbox.fetch()
|
||||||
|
for msg in msgs:
|
||||||
|
lp.sec(f"ac1 received msg from {msg.from_}")
|
||||||
|
received += 1
|
||||||
|
assert "authentication-results" not in msg.headers
|
||||||
|
assert "dkim-signature" not in msg.headers
|
||||||
|
|
||||||
def test_read_receipts_between_instances(self, cmfactory, lp, maildomain2):
|
def test_read_receipts_between_instances(self, cmfactory, lp, maildomain2):
|
||||||
ac1 = cmfactory.new_online_configuring_account(cache=False)
|
ac1 = cmfactory.get_online_account()
|
||||||
cmfactory.switch_maildomain(maildomain2)
|
ac2 = cmfactory.get_online_account(domain=maildomain2)
|
||||||
ac2 = cmfactory.new_online_configuring_account(cache=False)
|
|
||||||
cmfactory.bring_accounts_online()
|
|
||||||
|
|
||||||
lp.sec("setup encrypted comms between ac1 and ac2 on different instances")
|
lp.sec("setup encrypted comms between ac1 and ac2 on different instances")
|
||||||
qr = ac1.get_setup_contact_qr()
|
qr = ac1.get_qr_code()
|
||||||
ch = ac2.qr_setup_contact(qr)
|
ch = ac2.secure_join(qr)
|
||||||
assert ch.id >= 10
|
assert ch.id >= 10
|
||||||
ac1._evtracker.wait_securejoin_inviter_progress(1000)
|
ac1.wait_for_securejoin_inviter_success()
|
||||||
|
|
||||||
lp.sec("ac1 sends a message and ac2 marks it as seen")
|
lp.sec("ac1 sends a message and ac2 marks it as seen")
|
||||||
chat = ac1.create_chat(ac2)
|
chat = ac1.create_chat(ac2)
|
||||||
msg = chat.send_text("hi")
|
msg = chat.send_text("hi")
|
||||||
m = ac2._evtracker.wait_next_incoming_message()
|
m = ac2.wait_for_incoming_msg()
|
||||||
m.mark_seen()
|
m.mark_seen()
|
||||||
# we can only indirectly wait for mark-seen to cause an smtp-error
|
# we can only indirectly wait for mark-seen to cause an smtp-error
|
||||||
lp.sec("try to wait for markseen to complete and check error states")
|
lp.sec("try to wait for markseen to complete and check error states")
|
||||||
deadline = time.time() + 3.1
|
deadline = time.time() + 3.1
|
||||||
while time.time() < deadline:
|
while time.time() < deadline:
|
||||||
msgs = m.chat.get_messages()
|
m_snap = m.get_snapshot()
|
||||||
|
msgs = m_snap.chat.get_messages()
|
||||||
for msg in msgs:
|
for msg in msgs:
|
||||||
assert "error" not in m.get_message_info()
|
assert "error" not in m.get_info()
|
||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
|
|
||||||
|
|
||||||
def test_hide_senders_ip_address(cmfactory):
|
def test_hide_senders_ip_address(cmfactory, ssl_context):
|
||||||
public_ip = requests.get("http://icanhazip.com").content.decode().strip()
|
public_ip = requests.get("http://icanhazip.com").content.decode().strip()
|
||||||
assert ipaddress.ip_address(public_ip)
|
assert ipaddress.ip_address(public_ip)
|
||||||
|
|
||||||
@@ -156,26 +178,12 @@ def test_hide_senders_ip_address(cmfactory):
|
|||||||
chat = cmfactory.get_accepted_chat(user1, user2)
|
chat = cmfactory.get_accepted_chat(user1, user2)
|
||||||
|
|
||||||
chat.send_text("testing submission header cleanup")
|
chat.send_text("testing submission header cleanup")
|
||||||
user2._evtracker.wait_next_incoming_message()
|
user2.wait_for_incoming_msg()
|
||||||
user2.direct_imap.select_folder("Inbox")
|
addr = user2.get_config("addr")
|
||||||
msg = user2.direct_imap.get_all_messages()[0]
|
host = addr.split("@")[1]
|
||||||
assert public_ip not in msg.obj.as_string()
|
pw = user2.get_config("mail_pw")
|
||||||
|
mailbox = imap_tools.MailBox(host, ssl_context=ssl_context)
|
||||||
|
mailbox.login(addr, pw)
|
||||||
def test_echobot(cmfactory, chatmail_config, lp, sshdomain):
|
msgs = list(mailbox.fetch(mark_seen=False))
|
||||||
ac = cmfactory.get_online_accounts(1)[0]
|
assert msgs, "expected at least one message"
|
||||||
|
assert public_ip not in msgs[0].obj.as_string()
|
||||||
# establish contact with echobot
|
|
||||||
sshexec = SSHExec(sshdomain)
|
|
||||||
command = "cat /var/lib/echobot/invite-link.txt"
|
|
||||||
echo_invite_link = sshexec(call=rshell.shell, kwargs=dict(command=command))
|
|
||||||
chat = ac.qr_setup_contact(echo_invite_link)
|
|
||||||
ac._evtracker.wait_securejoin_joiner_progress(1000)
|
|
||||||
|
|
||||||
# send message and check it gets replied back
|
|
||||||
lp.sec("Send message to echobot")
|
|
||||||
text = "hi, I hope you text me back"
|
|
||||||
chat.send_text(text)
|
|
||||||
lp.sec("Wait for reply from echobot")
|
|
||||||
reply = ac._evtracker.wait_next_incoming_message()
|
|
||||||
assert reply.text == text
|
|
||||||
|
|||||||
56
cmdeploy/src/cmdeploy/tests/online/test_3_status.py
Normal file
56
cmdeploy/src/cmdeploy/tests/online/test_3_status.py
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
import os
|
||||||
|
|
||||||
|
from cmdeploy.cmdeploy import main
|
||||||
|
|
||||||
|
|
||||||
|
def test_status_cmd(chatmail_config, capsys, request, pytestconfig):
|
||||||
|
os.chdir(request.config.invocation_params.dir)
|
||||||
|
command = ["status"]
|
||||||
|
ssh_host = pytestconfig.getoption("ssh_host")
|
||||||
|
if ssh_host:
|
||||||
|
command.extend(["--ssh-host", ssh_host])
|
||||||
|
ssh_config = pytestconfig.getoption("ssh_config")
|
||||||
|
if ssh_config:
|
||||||
|
command.extend(["--ssh-config", ssh_config])
|
||||||
|
assert main(command) == 0
|
||||||
|
status_out = capsys.readouterr()
|
||||||
|
print(status_out.out)
|
||||||
|
|
||||||
|
assert len(status_out.out.splitlines()) > 5
|
||||||
|
|
||||||
|
"""
|
||||||
|
don't test actual server state:
|
||||||
|
|
||||||
|
services = [
|
||||||
|
"acmetool-redirector",
|
||||||
|
"chatmail-metadata",
|
||||||
|
"doveauth",
|
||||||
|
"dovecot",
|
||||||
|
"fcgiwrap",
|
||||||
|
"filtermail-incoming",
|
||||||
|
"filtermail",
|
||||||
|
"lastlogin",
|
||||||
|
"nginx",
|
||||||
|
"opendkim",
|
||||||
|
"postfix@-",
|
||||||
|
"systemd-journald",
|
||||||
|
"turnserver",
|
||||||
|
"unbound",
|
||||||
|
]
|
||||||
|
not_running = []
|
||||||
|
for service in services:
|
||||||
|
active = False
|
||||||
|
for line in status_out:
|
||||||
|
if service in line:
|
||||||
|
active = True
|
||||||
|
if not "loaded" in line:
|
||||||
|
active = False
|
||||||
|
if not "active" in line:
|
||||||
|
active = False
|
||||||
|
if not "running" in line:
|
||||||
|
active = False
|
||||||
|
break
|
||||||
|
if not active:
|
||||||
|
not_running.append(service)
|
||||||
|
assert not_running == []
|
||||||
|
"""
|
||||||
@@ -1,9 +1,11 @@
|
|||||||
import imaplib
|
import imaplib
|
||||||
import io
|
|
||||||
import itertools
|
import itertools
|
||||||
import os
|
import os
|
||||||
import random
|
import random
|
||||||
|
import re
|
||||||
import smtplib
|
import smtplib
|
||||||
|
import socket
|
||||||
|
import ssl
|
||||||
import subprocess
|
import subprocess
|
||||||
import time
|
import time
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
@@ -18,6 +20,76 @@ def pytest_addoption(parser):
|
|||||||
parser.addoption(
|
parser.addoption(
|
||||||
"--slow", action="store_true", default=False, help="also run slow tests"
|
"--slow", action="store_true", default=False, help="also run slow tests"
|
||||||
)
|
)
|
||||||
|
parser.addoption(
|
||||||
|
"--ssh-host",
|
||||||
|
dest="ssh_host",
|
||||||
|
default=None,
|
||||||
|
help="SSH host (overrides mail_domain for SSH operations).",
|
||||||
|
)
|
||||||
|
parser.addoption(
|
||||||
|
"--ssh-config",
|
||||||
|
dest="ssh_config",
|
||||||
|
default=None,
|
||||||
|
help="Path to an SSH config file (e.g. lxconfigs/ssh-config).",
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_ssh_config_hosts(path):
|
||||||
|
"""Parse an OpenSSH config file and return a dict of hostname -> IP."""
|
||||||
|
mapping = {}
|
||||||
|
current_names = []
|
||||||
|
for ln in Path(path).read_text().splitlines():
|
||||||
|
line = ln.strip()
|
||||||
|
m = re.match(r"^Host\s+(.+)", line)
|
||||||
|
if m:
|
||||||
|
current_names = m.group(1).split()
|
||||||
|
continue
|
||||||
|
m = re.match(r"^Hostname\s+(\S+)", line)
|
||||||
|
if m and current_names:
|
||||||
|
ip = m.group(1)
|
||||||
|
for name in current_names:
|
||||||
|
mapping[name] = ip
|
||||||
|
current_names = []
|
||||||
|
return mapping
|
||||||
|
|
||||||
|
|
||||||
|
_original_getaddrinfo = socket.getaddrinfo
|
||||||
|
|
||||||
|
|
||||||
|
def _make_patched_getaddrinfo(host_map):
|
||||||
|
"""Return a getaddrinfo that resolves hosts in host_map to their IPs."""
|
||||||
|
|
||||||
|
def patched_getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
|
||||||
|
if host in host_map:
|
||||||
|
ip = host_map[host]
|
||||||
|
return _original_getaddrinfo(ip, port, family, type, proto, flags)
|
||||||
|
return _original_getaddrinfo(host, port, family, type, proto, flags)
|
||||||
|
|
||||||
|
return patched_getaddrinfo
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(autouse=True, scope="session")
|
||||||
|
def _setup_localchat_dns(pytestconfig):
|
||||||
|
"""Monkey-patch socket.getaddrinfo to resolve .localchat via ssh-config."""
|
||||||
|
ssh_config = pytestconfig.getoption("ssh_config")
|
||||||
|
if not ssh_config or not Path(ssh_config).exists():
|
||||||
|
yield {}
|
||||||
|
return
|
||||||
|
host_map = _parse_ssh_config_hosts(ssh_config)
|
||||||
|
if not host_map:
|
||||||
|
yield {}
|
||||||
|
return
|
||||||
|
socket.getaddrinfo = _make_patched_getaddrinfo(host_map)
|
||||||
|
try:
|
||||||
|
yield host_map
|
||||||
|
finally:
|
||||||
|
socket.getaddrinfo = _original_getaddrinfo
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope="session")
|
||||||
|
def ssh_config_host_map(_setup_localchat_dns):
|
||||||
|
"""Return the host-name → IP map parsed from ssh-config."""
|
||||||
|
return _setup_localchat_dns
|
||||||
|
|
||||||
|
|
||||||
def pytest_configure(config):
|
def pytest_configure(config):
|
||||||
@@ -34,17 +106,29 @@ def pytest_runtest_setup(item):
|
|||||||
pytest.skip("skipping slow test, use --slow to run")
|
pytest.skip("skipping slow test, use --slow to run")
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="session")
|
def _get_chatmail_config():
|
||||||
def chatmail_config(pytestconfig):
|
ini = os.environ.get("CHATMAIL_INI")
|
||||||
current = basedir = Path().resolve()
|
if ini:
|
||||||
|
path = Path(ini).resolve()
|
||||||
|
if path.exists():
|
||||||
|
return read_config(path), path
|
||||||
|
current = Path().resolve()
|
||||||
while 1:
|
while 1:
|
||||||
path = current.joinpath("chatmail.ini").resolve()
|
path = current.joinpath("chatmail.ini").resolve()
|
||||||
if path.exists():
|
if path.exists():
|
||||||
return read_config(path)
|
return read_config(path), path
|
||||||
if current == current.parent:
|
if current == current.parent:
|
||||||
break
|
break
|
||||||
current = current.parent
|
current = current.parent
|
||||||
|
return None, None
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope="session")
|
||||||
|
def chatmail_config(pytestconfig):
|
||||||
|
config, path = _get_chatmail_config()
|
||||||
|
if config:
|
||||||
|
return config
|
||||||
|
basedir = Path().resolve()
|
||||||
pytest.skip(f"no chatmail.ini file found in {basedir} or parent dirs")
|
pytest.skip(f"no chatmail.ini file found in {basedir} or parent dirs")
|
||||||
|
|
||||||
|
|
||||||
@@ -54,8 +138,14 @@ def maildomain(chatmail_config):
|
|||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="session")
|
@pytest.fixture(scope="session")
|
||||||
def sshdomain(maildomain):
|
def sshdomain(maildomain, pytestconfig):
|
||||||
return os.environ.get("CHATMAIL_SSH", maildomain)
|
return pytestconfig.getoption("ssh_host") or maildomain
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope="session")
|
||||||
|
def maildomain_ip(maildomain, ssh_config_host_map):
|
||||||
|
"""Return the IP for maildomain from ssh-config, or maildomain itself."""
|
||||||
|
return ssh_config_host_map.get(maildomain, maildomain)
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
@@ -72,10 +162,17 @@ def sshdomain2(maildomain2):
|
|||||||
|
|
||||||
|
|
||||||
def pytest_report_header():
|
def pytest_report_header():
|
||||||
domain = os.environ.get("CHATMAIL_DOMAIN")
|
config, path = _get_chatmail_config()
|
||||||
if domain:
|
domain2 = os.environ.get("CHATMAIL_DOMAIN2", "NOT SET")
|
||||||
text = f"chatmail test instance: {domain}"
|
domain = config.mail_domain if config else "NOT SET"
|
||||||
return ["-" * len(text), text, "-" * len(text)]
|
path = path if path else "NOT SET"
|
||||||
|
|
||||||
|
lines = [
|
||||||
|
f"chatmail.ini {domain} location: {path}",
|
||||||
|
f"chatmail2: {domain2}",
|
||||||
|
]
|
||||||
|
sep = "-" * max(map(len, lines))
|
||||||
|
return [sep, *lines, sep]
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
@@ -90,15 +187,22 @@ def cm_data(request):
|
|||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
def benchmark(request):
|
def benchmark(request, chatmail_config):
|
||||||
def bench(func, num, name=None, reportfunc=None):
|
def bench(func, num, name=None, reportfunc=None, cooldown=0.0):
|
||||||
if name is None:
|
if name is None:
|
||||||
name = func.__name__
|
name = func.__name__
|
||||||
|
if cooldown == "auto":
|
||||||
|
per_minute = max(chatmail_config.max_user_send_per_minute, 1)
|
||||||
|
cooldown = chatmail_config.max_user_send_burst_size * 60 / per_minute
|
||||||
|
|
||||||
durations = []
|
durations = []
|
||||||
for i in range(num):
|
for i in range(num):
|
||||||
now = time.time()
|
now = time.time()
|
||||||
func()
|
func()
|
||||||
durations.append(time.time() - now)
|
durations.append(time.time() - now)
|
||||||
|
if cooldown > 0 and i + 1 < num:
|
||||||
|
# Keep post-run cooldown out of measured benchmark duration.
|
||||||
|
time.sleep(cooldown)
|
||||||
durations.sort()
|
durations.sort()
|
||||||
request.config._benchresults[name] = (reportfunc, durations)
|
request.config._benchresults[name] = (reportfunc, durations)
|
||||||
|
|
||||||
@@ -144,15 +248,25 @@ def pytest_terminal_summary(terminalreporter):
|
|||||||
tr.write_line(line)
|
tr.write_line(line)
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture(scope="session")
|
||||||
def imap(maildomain):
|
def ssl_context(chatmail_config):
|
||||||
return ImapConn(maildomain)
|
if chatmail_config.tls_cert_mode == "self":
|
||||||
|
ctx = ssl.create_default_context()
|
||||||
|
ctx.check_hostname = False
|
||||||
|
ctx.verify_mode = ssl.CERT_NONE
|
||||||
|
return ctx
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
def make_imap_connection(maildomain):
|
def imap(maildomain, ssl_context):
|
||||||
|
return ImapConn(maildomain, ssl_context=ssl_context)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def make_imap_connection(maildomain, ssl_context):
|
||||||
def make_imap_connection():
|
def make_imap_connection():
|
||||||
conn = ImapConn(maildomain)
|
conn = ImapConn(maildomain, ssl_context=ssl_context)
|
||||||
conn.connect()
|
conn.connect()
|
||||||
return conn
|
return conn
|
||||||
|
|
||||||
@@ -164,12 +278,13 @@ class ImapConn:
|
|||||||
logcmd = "journalctl -f -u dovecot"
|
logcmd = "journalctl -f -u dovecot"
|
||||||
name = "dovecot"
|
name = "dovecot"
|
||||||
|
|
||||||
def __init__(self, host):
|
def __init__(self, host, ssl_context=None):
|
||||||
self.host = host
|
self.host = host
|
||||||
|
self.ssl_context = ssl_context
|
||||||
|
|
||||||
def connect(self):
|
def connect(self):
|
||||||
print(f"imap-connect {self.host}")
|
print(f"imap-connect {self.host}")
|
||||||
self.conn = imaplib.IMAP4_SSL(self.host)
|
self.conn = imaplib.IMAP4_SSL(self.host, ssl_context=self.ssl_context)
|
||||||
|
|
||||||
def login(self, user, password):
|
def login(self, user, password):
|
||||||
print(f"imap-login {user!r} {password!r}")
|
print(f"imap-login {user!r} {password!r}")
|
||||||
@@ -195,14 +310,14 @@ class ImapConn:
|
|||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
def smtp(maildomain):
|
def smtp(maildomain, ssl_context):
|
||||||
return SmtpConn(maildomain)
|
return SmtpConn(maildomain, ssl_context=ssl_context)
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
def make_smtp_connection(maildomain):
|
def make_smtp_connection(maildomain, ssl_context):
|
||||||
def make_smtp_connection():
|
def make_smtp_connection():
|
||||||
conn = SmtpConn(maildomain)
|
conn = SmtpConn(maildomain, ssl_context=ssl_context)
|
||||||
conn.connect()
|
conn.connect()
|
||||||
return conn
|
return conn
|
||||||
|
|
||||||
@@ -214,12 +329,14 @@ class SmtpConn:
|
|||||||
logcmd = "journalctl -f -t postfix/smtpd -t postfix/smtp -t postfix/lmtp"
|
logcmd = "journalctl -f -t postfix/smtpd -t postfix/smtp -t postfix/lmtp"
|
||||||
name = "postfix"
|
name = "postfix"
|
||||||
|
|
||||||
def __init__(self, host):
|
def __init__(self, host, ssl_context=None):
|
||||||
self.host = host
|
self.host = host
|
||||||
|
self.ssl_context = ssl_context
|
||||||
|
|
||||||
def connect(self):
|
def connect(self):
|
||||||
print(f"smtp-connect {self.host}")
|
print(f"smtp-connect {self.host}")
|
||||||
self.conn = smtplib.SMTP_SSL(self.host)
|
context = self.ssl_context or ssl.create_default_context()
|
||||||
|
self.conn = smtplib.SMTP_SSL(self.host, context=context)
|
||||||
|
|
||||||
def login(self, user, password):
|
def login(self, user, password):
|
||||||
print(f"smtp-login {user!r} {password!r}")
|
print(f"smtp-login {user!r} {password!r}")
|
||||||
@@ -262,92 +379,146 @@ def gencreds(chatmail_config):
|
|||||||
|
|
||||||
|
|
||||||
#
|
#
|
||||||
# Delta Chat testplugin re-use
|
# Delta Chat RPC-based test support
|
||||||
# use the cmfactory fixture to get chatmail instance accounts
|
# use the cmfactory fixture to get chatmail instance accounts
|
||||||
#
|
#
|
||||||
|
|
||||||
|
from deltachat_rpc_client import DeltaChat, Rpc
|
||||||
|
|
||||||
class ChatmailTestProcess:
|
|
||||||
"""Provider for chatmail instance accounts as used by deltachat.testplugin.acfactory"""
|
|
||||||
|
|
||||||
def __init__(self, pytestconfig, maildomain, gencreds):
|
class ChatmailACFactory:
|
||||||
self.pytestconfig = pytestconfig
|
"""RPC-based account factory for chatmail testing."""
|
||||||
self.maildomain = maildomain
|
|
||||||
assert "." in self.maildomain, maildomain
|
def __init__(
|
||||||
|
self,
|
||||||
|
rpc,
|
||||||
|
maildomain,
|
||||||
|
maildomain_ip,
|
||||||
|
gencreds,
|
||||||
|
chatmail_config,
|
||||||
|
ssh_config_host_map,
|
||||||
|
):
|
||||||
|
self.dc = DeltaChat(rpc)
|
||||||
|
self.rpc = rpc
|
||||||
|
self._maildomain = maildomain
|
||||||
|
self._maildomain_ip = maildomain_ip
|
||||||
self.gencreds = gencreds
|
self.gencreds = gencreds
|
||||||
self._addr2files = {}
|
self.chatmail_config = chatmail_config
|
||||||
|
self._ssh_config_host_map = ssh_config_host_map
|
||||||
|
|
||||||
def get_liveconfig_producer(self):
|
def _make_transport(self, domain):
|
||||||
while 1:
|
"""Build a transport config dict for the given domain."""
|
||||||
user, password = self.gencreds(self.maildomain)
|
addr, password = self.gencreds(domain)
|
||||||
config = {
|
server = self._ssh_config_host_map.get(domain, domain)
|
||||||
"addr": user,
|
transport = {
|
||||||
"mail_pw": password,
|
"addr": addr,
|
||||||
}
|
"password": password,
|
||||||
# speed up account configuration
|
"imapServer": server,
|
||||||
config["mail_server"] = self.maildomain
|
"smtpServer": server,
|
||||||
config["send_server"] = self.maildomain
|
}
|
||||||
yield config
|
if self.chatmail_config.tls_cert_mode == "self":
|
||||||
|
transport["certificateChecks"] = "acceptInvalidCertificates"
|
||||||
|
return transport
|
||||||
|
|
||||||
def cache_maybe_retrieve_configured_db_files(self, cache_addr, db_target_path):
|
def get_online_account(self, domain=None):
|
||||||
pass
|
"""Create, configure and bring online a single account."""
|
||||||
|
return self.get_online_accounts(1, domain)[0]
|
||||||
|
|
||||||
def cache_maybe_store_configured_db_files(self, acc):
|
def get_online_accounts(self, num, domain=None):
|
||||||
pass
|
"""Create multiple online accounts in parallel."""
|
||||||
|
domain = domain or self._maildomain
|
||||||
|
futures = []
|
||||||
|
accounts = []
|
||||||
|
for _ in range(num):
|
||||||
|
account = self.dc.add_account()
|
||||||
|
future = account.add_or_update_transport.future(
|
||||||
|
self._make_transport(domain)
|
||||||
|
)
|
||||||
|
futures.append(future)
|
||||||
|
|
||||||
|
# ensure messages stay in INBOX so that they can be
|
||||||
|
# concurrently fetched via extra IMAP connections during tests
|
||||||
|
account.set_config("delete_server_after", "10")
|
||||||
|
accounts.append(account)
|
||||||
|
|
||||||
|
for future in futures:
|
||||||
|
future()
|
||||||
|
|
||||||
|
for account in accounts:
|
||||||
|
account.bring_online()
|
||||||
|
return accounts
|
||||||
|
|
||||||
|
def get_accepted_chat(self, ac1, ac2):
|
||||||
|
"""Create a 1:1 chat between ac1 and ac2 accepted on both sides."""
|
||||||
|
ac2.create_chat(ac1)
|
||||||
|
return ac1.create_chat(ac2)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope="session")
|
||||||
|
def rpc(tmp_path_factory):
|
||||||
|
"""Start a deltachat-rpc-server process for the test session."""
|
||||||
|
|
||||||
|
# NB: accounts_dir must NOT already exist as directory --
|
||||||
|
# core-rust only creates accounts.toml if the dir doesn't exist yet.
|
||||||
|
accounts_dir = str(tmp_path_factory.mktemp("dc") / "accounts")
|
||||||
|
rpc = Rpc(accounts_dir=accounts_dir)
|
||||||
|
rpc.start()
|
||||||
|
yield rpc
|
||||||
|
rpc.close()
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
def cmfactory(request, gencreds, tmpdir, maildomain):
|
def cmfactory(
|
||||||
# cloned from deltachat.testplugin.amfactory
|
rpc, gencreds, maildomain, maildomain_ip, chatmail_config, ssh_config_host_map
|
||||||
pytest.importorskip("deltachat")
|
):
|
||||||
from deltachat.testplugin import ACFactory
|
"""Return a ChatmailACFactory for creating online Delta Chat accounts."""
|
||||||
|
return ChatmailACFactory(
|
||||||
testproc = ChatmailTestProcess(request.config, maildomain, gencreds)
|
rpc=rpc,
|
||||||
|
maildomain=maildomain,
|
||||||
class Data:
|
maildomain_ip=maildomain_ip,
|
||||||
def read_path(self, path):
|
gencreds=gencreds,
|
||||||
return
|
chatmail_config=chatmail_config,
|
||||||
|
ssh_config_host_map=ssh_config_host_map,
|
||||||
am = ACFactory(request=request, tmpdir=tmpdir, testprocess=testproc, data=Data())
|
)
|
||||||
|
|
||||||
# nb. a bit hacky
|
|
||||||
# would probably be better if deltachat's test machinery grows native support
|
|
||||||
def switch_maildomain(maildomain2):
|
|
||||||
am.testprocess.maildomain = maildomain2
|
|
||||||
|
|
||||||
am.switch_maildomain = switch_maildomain
|
|
||||||
|
|
||||||
yield am
|
|
||||||
if hasattr(request.node, "rep_call") and request.node.rep_call.failed:
|
|
||||||
if testproc.pytestconfig.getoption("--extra-info"):
|
|
||||||
logfile = io.StringIO()
|
|
||||||
am.dump_imap_summary(logfile=logfile)
|
|
||||||
print(logfile.getvalue())
|
|
||||||
# request.node.add_report_section("call", "imap-server-state", s)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
def remote(sshdomain):
|
def remote(sshdomain, pytestconfig):
|
||||||
return Remote(sshdomain)
|
return Remote(sshdomain, ssh_config=pytestconfig.getoption("ssh_config"))
|
||||||
|
|
||||||
|
|
||||||
class Remote:
|
class Remote:
|
||||||
def __init__(self, sshdomain):
|
def __init__(self, sshdomain, ssh_config=None):
|
||||||
self.sshdomain = sshdomain
|
self.sshdomain = sshdomain
|
||||||
|
self.ssh_config = ssh_config
|
||||||
|
|
||||||
def iter_output(self, logcmd=""):
|
def iter_output(self, logcmd="", ready=None):
|
||||||
getjournal = "journalctl -f" if not logcmd else logcmd
|
getjournal = "journalctl -f" if not logcmd else logcmd
|
||||||
|
print(self.sshdomain)
|
||||||
|
match self.sshdomain:
|
||||||
|
case "@local":
|
||||||
|
command = []
|
||||||
|
case "localhost":
|
||||||
|
command = []
|
||||||
|
case _:
|
||||||
|
command = ["ssh"]
|
||||||
|
if self.ssh_config:
|
||||||
|
command.extend(["-F", self.ssh_config])
|
||||||
|
command.append(f"root@{self.sshdomain}")
|
||||||
|
[command.append(arg) for arg in getjournal.split()]
|
||||||
self.popen = subprocess.Popen(
|
self.popen = subprocess.Popen(
|
||||||
["ssh", f"root@{self.sshdomain}", getjournal],
|
command,
|
||||||
stdout=subprocess.PIPE,
|
stdout=subprocess.PIPE,
|
||||||
)
|
)
|
||||||
while 1:
|
while 1:
|
||||||
line = self.popen.stdout.readline()
|
line = self.popen.stdout.readline()
|
||||||
res = line.decode().strip().lower()
|
res = line.decode().strip().lower()
|
||||||
if res:
|
if not res:
|
||||||
yield res
|
|
||||||
else:
|
|
||||||
break
|
break
|
||||||
|
if ready is not None:
|
||||||
|
ready()
|
||||||
|
ready = None
|
||||||
|
yield res
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
@@ -363,38 +534,40 @@ def lp(request):
|
|||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
def cmsetup(maildomain, gencreds):
|
def cmsetup(maildomain, gencreds, ssl_context):
|
||||||
return CMSetup(maildomain, gencreds)
|
return CMSetup(maildomain, gencreds, ssl_context)
|
||||||
|
|
||||||
|
|
||||||
class CMSetup:
|
class CMSetup:
|
||||||
def __init__(self, maildomain, gencreds):
|
def __init__(self, maildomain, gencreds, ssl_context):
|
||||||
self.maildomain = maildomain
|
self.maildomain = maildomain
|
||||||
self.gencreds = gencreds
|
self.gencreds = gencreds
|
||||||
|
self.ssl_context = ssl_context
|
||||||
|
|
||||||
def gen_users(self, num):
|
def gen_users(self, num):
|
||||||
print(f"Creating {num} online users")
|
print(f"Creating {num} online users")
|
||||||
users = []
|
users = []
|
||||||
for i in range(num):
|
for i in range(num):
|
||||||
addr, password = self.gencreds()
|
addr, password = self.gencreds()
|
||||||
user = CMUser(self.maildomain, addr, password)
|
user = CMUser(self.maildomain, addr, password, self.ssl_context)
|
||||||
assert user.smtp
|
assert user.smtp
|
||||||
users.append(user)
|
users.append(user)
|
||||||
return users
|
return users
|
||||||
|
|
||||||
|
|
||||||
class CMUser:
|
class CMUser:
|
||||||
def __init__(self, maildomain, addr, password):
|
def __init__(self, maildomain, addr, password, ssl_context=None):
|
||||||
self.maildomain = maildomain
|
self.maildomain = maildomain
|
||||||
self.addr = addr
|
self.addr = addr
|
||||||
self.password = password
|
self.password = password
|
||||||
|
self.ssl_context = ssl_context
|
||||||
self._smtp = None
|
self._smtp = None
|
||||||
self._imap = None
|
self._imap = None
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def smtp(self):
|
def smtp(self):
|
||||||
if not self._smtp:
|
if not self._smtp:
|
||||||
handle = SmtpConn(self.maildomain)
|
handle = SmtpConn(self.maildomain, ssl_context=self.ssl_context)
|
||||||
handle.connect()
|
handle.connect()
|
||||||
handle.login(self.addr, self.password)
|
handle.login(self.addr, self.password)
|
||||||
self._smtp = handle
|
self._smtp = handle
|
||||||
@@ -403,7 +576,7 @@ class CMUser:
|
|||||||
@property
|
@property
|
||||||
def imap(self):
|
def imap(self):
|
||||||
if not self._imap:
|
if not self._imap:
|
||||||
imap = ImapConn(self.maildomain)
|
imap = ImapConn(self.maildomain, ssl_context=self.ssl_context)
|
||||||
imap.connect()
|
imap.connect()
|
||||||
imap.login(self.addr, self.password)
|
imap.login(self.addr, self.password)
|
||||||
self._imap = imap
|
self._imap = imap
|
||||||
|
|||||||
@@ -23,7 +23,10 @@ class TestCmdline:
|
|||||||
run = parser.parse_args(["run"])
|
run = parser.parse_args(["run"])
|
||||||
assert init and run
|
assert init and run
|
||||||
|
|
||||||
def test_init_not_overwrite(self, capsys):
|
def test_init_not_overwrite(self, tmp_path, capsys, monkeypatch):
|
||||||
|
monkeypatch.delenv("CHATMAIL_INI", raising=False)
|
||||||
|
monkeypatch.chdir(tmp_path)
|
||||||
|
|
||||||
assert main(["init", "chat.example.org"]) == 0
|
assert main(["init", "chat.example.org"]) == 0
|
||||||
capsys.readouterr()
|
capsys.readouterr()
|
||||||
|
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ from copy import deepcopy
|
|||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
from cmdeploy import remote
|
from cmdeploy import remote
|
||||||
from cmdeploy.dns import check_full_zone, check_initial_remote_data
|
from cmdeploy.dns import check_full_zone, check_initial_remote_data, parse_zone_records
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
@@ -60,6 +60,29 @@ def mockdns(request, mockdns_base, mockdns_expected):
|
|||||||
return mockdns_base
|
return mockdns_base
|
||||||
|
|
||||||
|
|
||||||
|
class TestGetDkimEntry:
|
||||||
|
def test_dkim_entry_returns_tuple_on_success(self, mockdns):
|
||||||
|
entry, web_entry = remote.rdns.get_dkim_entry(
|
||||||
|
"some.domain", "", dkim_selector="opendkim"
|
||||||
|
)
|
||||||
|
# May return None,None if openssl not available, but should never crash
|
||||||
|
if entry is not None:
|
||||||
|
assert "opendkim._domainkey.some.domain" in entry
|
||||||
|
assert "opendkim._domainkey.some.domain" in web_entry
|
||||||
|
|
||||||
|
def test_dkim_entry_returns_none_tuple_on_error(self, monkeypatch):
|
||||||
|
"""CalledProcessError must return (None, None), not bare None."""
|
||||||
|
from subprocess import CalledProcessError
|
||||||
|
|
||||||
|
def failing_shell(command, fail_ok=False, print=print):
|
||||||
|
raise CalledProcessError(1, command)
|
||||||
|
|
||||||
|
monkeypatch.setattr(remote.rdns, "shell", failing_shell)
|
||||||
|
result = remote.rdns.get_dkim_entry("some.domain", "", dkim_selector="opendkim")
|
||||||
|
assert result == (None, None)
|
||||||
|
assert result[0] is None and result[1] is None
|
||||||
|
|
||||||
|
|
||||||
class TestPerformInitialChecks:
|
class TestPerformInitialChecks:
|
||||||
def test_perform_initial_checks_ok1(self, mockdns, mockdns_expected):
|
def test_perform_initial_checks_ok1(self, mockdns, mockdns_expected):
|
||||||
remote_data = remote.rdns.perform_initial_checks("some.domain")
|
remote_data = remote.rdns.perform_initial_checks("some.domain")
|
||||||
@@ -91,19 +114,44 @@ class TestPerformInitialChecks:
|
|||||||
assert not res
|
assert not res
|
||||||
assert len(l) == 2
|
assert len(l) == 2
|
||||||
|
|
||||||
|
def test_perform_initial_checks_no_mta_sts_self_signed(self, mockdns):
|
||||||
|
del mockdns["CNAME"]["mta-sts.some.domain"]
|
||||||
|
remote_data = remote.rdns.perform_initial_checks("some.domain")
|
||||||
|
assert not remote_data["MTA_STS"]
|
||||||
|
|
||||||
|
l = []
|
||||||
|
res = check_initial_remote_data(remote_data, strict_tls=False, print=l.append)
|
||||||
|
assert res
|
||||||
|
assert not l
|
||||||
|
|
||||||
|
|
||||||
|
def test_parse_zone_records():
|
||||||
|
text = """
|
||||||
|
; This is a comment
|
||||||
|
some.domain. 3600 IN A 1.1.1.1
|
||||||
|
|
||||||
|
; Another comment
|
||||||
|
www.some.domain. 3600 IN CNAME some.domain.
|
||||||
|
"""
|
||||||
|
records = list(parse_zone_records(text))
|
||||||
|
assert records == [
|
||||||
|
("some.domain", "3600", "A", "1.1.1.1"),
|
||||||
|
("www.some.domain", "3600", "CNAME", "some.domain."),
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def test_parse_zone_records_invalid_line():
|
||||||
|
text = "invalid line"
|
||||||
|
with pytest.raises(ValueError, match="Bad zone record line"):
|
||||||
|
list(parse_zone_records(text))
|
||||||
|
|
||||||
|
|
||||||
def parse_zonefile_into_dict(zonefile, mockdns_base, only_required=False):
|
def parse_zonefile_into_dict(zonefile, mockdns_base, only_required=False):
|
||||||
for zf_line in zonefile.split("\n"):
|
if only_required:
|
||||||
if zf_line.startswith("#"):
|
# Only take records before the "; Recommended" section
|
||||||
if "Recommended" in zf_line and only_required:
|
zonefile = zonefile.split("; Recommended")[0]
|
||||||
return
|
for name, ttl, rtype, rdata in parse_zone_records(zonefile):
|
||||||
continue
|
mockdns_base.setdefault(rtype, {})[name] = rdata
|
||||||
if not zf_line.strip():
|
|
||||||
continue
|
|
||||||
zf_domain, zf_typ, zf_value = zf_line.split(maxsplit=2)
|
|
||||||
zf_domain = zf_domain.rstrip(".")
|
|
||||||
zf_value = zf_value.strip()
|
|
||||||
mockdns_base.setdefault(zf_typ, {})[zf_domain] = zf_value
|
|
||||||
|
|
||||||
|
|
||||||
class MockSSHExec:
|
class MockSSHExec:
|
||||||
|
|||||||
78
cmdeploy/src/cmdeploy/tests/test_external_tls.py
Normal file
78
cmdeploy/src/cmdeploy/tests/test_external_tls.py
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
"""Functional tests for tls_external_cert_and_key option."""
|
||||||
|
|
||||||
|
import json
|
||||||
|
|
||||||
|
import chatmaild.newemail
|
||||||
|
import pytest
|
||||||
|
from chatmaild.config import read_config, write_initial_config
|
||||||
|
|
||||||
|
|
||||||
|
def make_external_config(tmp_path, cert_key=None):
|
||||||
|
inipath = tmp_path / "chatmail.ini"
|
||||||
|
overrides = {}
|
||||||
|
if cert_key is not None:
|
||||||
|
overrides["tls_external_cert_and_key"] = cert_key
|
||||||
|
write_initial_config(inipath, "chat.example.org", overrides=overrides)
|
||||||
|
return inipath
|
||||||
|
|
||||||
|
|
||||||
|
def test_external_tls_config_reads_paths(tmp_path):
|
||||||
|
inipath = make_external_config(
|
||||||
|
tmp_path,
|
||||||
|
cert_key=(
|
||||||
|
"/etc/letsencrypt/live/chat.example.org/fullchain.pem"
|
||||||
|
" /etc/letsencrypt/live/chat.example.org/privkey.pem"
|
||||||
|
),
|
||||||
|
)
|
||||||
|
config = read_config(inipath)
|
||||||
|
assert config.tls_cert_mode == "external"
|
||||||
|
assert (
|
||||||
|
config.tls_cert_path == "/etc/letsencrypt/live/chat.example.org/fullchain.pem"
|
||||||
|
)
|
||||||
|
assert config.tls_key_path == "/etc/letsencrypt/live/chat.example.org/privkey.pem"
|
||||||
|
|
||||||
|
|
||||||
|
def test_external_tls_missing_option_uses_acme(tmp_path):
|
||||||
|
config = read_config(make_external_config(tmp_path))
|
||||||
|
assert config.tls_cert_mode == "acme"
|
||||||
|
|
||||||
|
|
||||||
|
def test_external_tls_bad_format_raises(tmp_path):
|
||||||
|
inipath = make_external_config(tmp_path, cert_key="/only/one/path.pem")
|
||||||
|
with pytest.raises(ValueError, match="two space-separated"):
|
||||||
|
read_config(inipath)
|
||||||
|
|
||||||
|
|
||||||
|
def test_external_tls_three_paths_raises(tmp_path):
|
||||||
|
inipath = make_external_config(tmp_path, cert_key="/a /b /c")
|
||||||
|
with pytest.raises(ValueError, match="two space-separated"):
|
||||||
|
read_config(inipath)
|
||||||
|
|
||||||
|
|
||||||
|
def test_external_tls_no_dclogin_url(tmp_path, capsys, monkeypatch):
|
||||||
|
inipath = make_external_config(
|
||||||
|
tmp_path, cert_key="/certs/fullchain.pem /certs/privkey.pem"
|
||||||
|
)
|
||||||
|
monkeypatch.setattr(chatmaild.newemail, "CONFIG_PATH", str(inipath))
|
||||||
|
chatmaild.newemail.print_new_account()
|
||||||
|
out, _ = capsys.readouterr()
|
||||||
|
lines = out.split("\n")
|
||||||
|
dic = json.loads(lines[2])
|
||||||
|
assert "dclogin_url" not in dic
|
||||||
|
|
||||||
|
|
||||||
|
def test_external_tls_selects_correct_deployer(tmp_path):
|
||||||
|
from cmdeploy.deployers import get_tls_deployer
|
||||||
|
from cmdeploy.external.deployer import ExternalTlsDeployer
|
||||||
|
from cmdeploy.selfsigned.deployer import SelfSignedTlsDeployer
|
||||||
|
|
||||||
|
inipath = make_external_config(
|
||||||
|
tmp_path, cert_key="/certs/fullchain.pem /certs/privkey.pem"
|
||||||
|
)
|
||||||
|
config = read_config(inipath)
|
||||||
|
deployer = get_tls_deployer(config, "chat.example.org")
|
||||||
|
|
||||||
|
assert isinstance(deployer, ExternalTlsDeployer)
|
||||||
|
assert not isinstance(deployer, SelfSignedTlsDeployer)
|
||||||
|
assert deployer.cert_path == "/certs/fullchain.pem"
|
||||||
|
assert deployer.key_path == "/certs/privkey.pem"
|
||||||
173
cmdeploy/src/cmdeploy/tests/test_lxc.py
Normal file
173
cmdeploy/src/cmdeploy/tests/test_lxc.py
Normal file
@@ -0,0 +1,173 @@
|
|||||||
|
"""Tests for cmdeploy lxc-* subcommands."""
|
||||||
|
|
||||||
|
import shutil
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from cmdeploy.lxc import cli
|
||||||
|
from cmdeploy.lxc.incus import Incus
|
||||||
|
|
||||||
|
pytestmark = pytest.mark.skipif(
|
||||||
|
not shutil.which("incus") or not shutil.which("lxc"),
|
||||||
|
reason="incus/lxc not installed",
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Fixtures
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def ix():
|
||||||
|
return Incus()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope="session")
|
||||||
|
def lxc_setup():
|
||||||
|
ix = Incus()
|
||||||
|
ix.get_dns_container().ensure()
|
||||||
|
return ix.list_managed()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope="session")
|
||||||
|
def relay_container(lxc_setup):
|
||||||
|
test_names = {f"{n}-localchat" for n in cli.RELAY_NAMES}
|
||||||
|
relays = [c for c in lxc_setup if c["name"] in test_names and c.get("ip")]
|
||||||
|
if not relays:
|
||||||
|
pytest.skip("no test relay containers running")
|
||||||
|
return relays[0]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def cmdeploy():
|
||||||
|
def run(*args):
|
||||||
|
return subprocess.run(
|
||||||
|
[sys.executable, "-m", "cmdeploy.cmdeploy", *args],
|
||||||
|
capture_output=True,
|
||||||
|
text=True,
|
||||||
|
check=False,
|
||||||
|
)
|
||||||
|
|
||||||
|
return run
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Tests
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"subcmd, expected, absent",
|
||||||
|
[
|
||||||
|
(None, ["lxc-start", "lxc-stop", "lxc-test", "lxc-status"], ["lxc-destroy"]),
|
||||||
|
("lxc-start", ["--ipv4-only", "--run"], ["--config"]),
|
||||||
|
("lxc-stop", ["--destroy", "--destroy-all"], ["--config"]),
|
||||||
|
("lxc-test", ["--one"], ["--config"]),
|
||||||
|
("lxc-status", [], ["--config"]),
|
||||||
|
("run", ["--ssh-config"], ["--lxc"]),
|
||||||
|
("dns", ["--ssh-config"], []),
|
||||||
|
("test", ["--ssh-config"], []),
|
||||||
|
("status", ["--ssh-config"], []),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
def test_help_options(cmdeploy, subcmd, expected, absent):
|
||||||
|
args = [subcmd, "--help"] if subcmd else ["--help"]
|
||||||
|
result = cmdeploy(*args)
|
||||||
|
output = result.stdout + result.stderr
|
||||||
|
assert result.returncode == 0
|
||||||
|
for flag in expected:
|
||||||
|
assert flag in output
|
||||||
|
for flag in absent:
|
||||||
|
assert flag not in output
|
||||||
|
|
||||||
|
|
||||||
|
class TestSSHConfig:
|
||||||
|
def test_lxconfigs(self, ix, lxc_setup):
|
||||||
|
d = ix.lxconfigs_dir
|
||||||
|
assert d.name == "lxconfigs"
|
||||||
|
assert d.exists()
|
||||||
|
path = ix.ssh_config_path
|
||||||
|
assert path.name == "ssh-config"
|
||||||
|
assert path.parent.name == "lxconfigs"
|
||||||
|
|
||||||
|
def test_write_ssh_config(self, ix, lxc_setup):
|
||||||
|
path = ix.write_ssh_config()
|
||||||
|
assert path.exists()
|
||||||
|
text = path.read_text()
|
||||||
|
|
||||||
|
for c in lxc_setup:
|
||||||
|
if c.get("ip"):
|
||||||
|
assert c["name"] in text
|
||||||
|
assert f"Hostname {c['ip']}" in text
|
||||||
|
|
||||||
|
assert "User root" in text
|
||||||
|
assert "IdentityFile" in text
|
||||||
|
assert "StrictHostKeyChecking accept-new" in text
|
||||||
|
|
||||||
|
|
||||||
|
def test_dns(ix, relay_container):
|
||||||
|
def dig(qname, qtype):
|
||||||
|
ct = ix.get_dns_container()
|
||||||
|
return ct.bash(f"dig @127.0.0.1 {qname} {qtype} +short").strip()
|
||||||
|
|
||||||
|
domain = relay_container["domain"]
|
||||||
|
assert dig(domain, "A") == relay_container["ip"]
|
||||||
|
assert domain in dig(domain, "MX")
|
||||||
|
assert "587" in dig(f"_submission._tcp.{domain}", "SRV")
|
||||||
|
|
||||||
|
|
||||||
|
class TestLxcStatus:
|
||||||
|
def test_cli_lxc_status_help(self, cmdeploy):
|
||||||
|
result = cmdeploy("lxc-status", "--help")
|
||||||
|
assert result.returncode == 0
|
||||||
|
assert "status" in result.stdout.lower()
|
||||||
|
|
||||||
|
def test_shows_containers(self, lxc_setup, capsys):
|
||||||
|
from cmdeploy.cmdeploy import Out
|
||||||
|
|
||||||
|
class QuietOut(Out):
|
||||||
|
def red(self, msg, **kw):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def green(self, msg, **kw):
|
||||||
|
pass
|
||||||
|
|
||||||
|
ret = cli.lxc_status_cmd(None, QuietOut())
|
||||||
|
assert ret == 0
|
||||||
|
captured = capsys.readouterr().out
|
||||||
|
assert "ns-localchat" in captured
|
||||||
|
assert "running" in captured
|
||||||
|
|
||||||
|
def test_deploy_freshness(self, ix, monkeypatch):
|
||||||
|
ct = ix.get_container("x")
|
||||||
|
|
||||||
|
monkeypatch.setattr(
|
||||||
|
"cmdeploy.lxc.incus.RelayContainer.deployed_version",
|
||||||
|
lambda _self: "abc123def456",
|
||||||
|
)
|
||||||
|
monkeypatch.setattr(
|
||||||
|
"cmdeploy.lxc.incus.RelayContainer.deployed_domain",
|
||||||
|
lambda _self: ct.domain,
|
||||||
|
)
|
||||||
|
monkeypatch.setattr(
|
||||||
|
"cmdeploy.lxc.cli.get_version_string",
|
||||||
|
lambda: "abc123def456",
|
||||||
|
)
|
||||||
|
assert "IN-SYNC" in cli._deploy_status(ct, "abc123def456", ix)
|
||||||
|
assert "STALE" in cli._deploy_status(ct, "other_hash_here", ix)
|
||||||
|
|
||||||
|
# Hash matches but local has uncommitted changes
|
||||||
|
monkeypatch.setattr(
|
||||||
|
"cmdeploy.lxc.cli.get_version_string",
|
||||||
|
lambda: "abc123def456\ndiff --git a/foo",
|
||||||
|
)
|
||||||
|
assert "DIRTY" in cli._deploy_status(ct, "abc123def456", ix)
|
||||||
|
|
||||||
|
monkeypatch.setattr(
|
||||||
|
"cmdeploy.lxc.incus.RelayContainer.deployed_version",
|
||||||
|
lambda _self: None,
|
||||||
|
)
|
||||||
|
assert "NOT DEPLOYED" in cli._deploy_status(ct, "abc123", ix)
|
||||||
68
cmdeploy/src/cmdeploy/tests/test_rshell.py
Normal file
68
cmdeploy/src/cmdeploy/tests/test_rshell.py
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
from unittest.mock import patch
|
||||||
|
|
||||||
|
from cmdeploy.remote.rshell import dovecot_recalc_quota
|
||||||
|
|
||||||
|
|
||||||
|
def test_dovecot_recalc_quota_normal_output():
|
||||||
|
"""Normal doveadm output returns parsed dict."""
|
||||||
|
normal_output = (
|
||||||
|
"Quota name Type Value Limit %\n"
|
||||||
|
"User quota STORAGE 5 102400 0\n"
|
||||||
|
"User quota MESSAGE 2 - 0\n"
|
||||||
|
)
|
||||||
|
|
||||||
|
with patch("cmdeploy.remote.rshell.shell", return_value=normal_output):
|
||||||
|
result = dovecot_recalc_quota("user@example.org")
|
||||||
|
|
||||||
|
# shell is called twice (recalc + get), patch returns same for both
|
||||||
|
assert result == {"value": 5, "limit": 102400, "percent": 0}
|
||||||
|
|
||||||
|
|
||||||
|
def test_dovecot_recalc_quota_empty_output():
|
||||||
|
"""Empty doveadm output (trailing newline) must not IndexError."""
|
||||||
|
call_count = [0]
|
||||||
|
|
||||||
|
def mock_shell(cmd):
|
||||||
|
call_count[0] += 1
|
||||||
|
if "recalc" in cmd:
|
||||||
|
return ""
|
||||||
|
# quota get returns only empty lines
|
||||||
|
return "\n\n"
|
||||||
|
|
||||||
|
with patch("cmdeploy.remote.rshell.shell", side_effect=mock_shell):
|
||||||
|
result = dovecot_recalc_quota("user@example.org")
|
||||||
|
|
||||||
|
assert result is None
|
||||||
|
|
||||||
|
|
||||||
|
def test_dovecot_recalc_quota_malformed_output():
|
||||||
|
"""Malformed output with too few columns must not crash."""
|
||||||
|
call_count = [0]
|
||||||
|
|
||||||
|
def mock_shell(cmd):
|
||||||
|
call_count[0] += 1
|
||||||
|
if "recalc" in cmd:
|
||||||
|
return ""
|
||||||
|
# partial line, fewer than 6 parts
|
||||||
|
return "Quota name\nUser quota STORAGE\n"
|
||||||
|
|
||||||
|
with patch("cmdeploy.remote.rshell.shell", side_effect=mock_shell):
|
||||||
|
result = dovecot_recalc_quota("user@example.org")
|
||||||
|
|
||||||
|
assert result is None
|
||||||
|
|
||||||
|
|
||||||
|
def test_dovecot_recalc_quota_header_only():
|
||||||
|
"""Only header line, no data rows."""
|
||||||
|
call_count = [0]
|
||||||
|
|
||||||
|
def mock_shell(cmd):
|
||||||
|
call_count[0] += 1
|
||||||
|
if "recalc" in cmd:
|
||||||
|
return ""
|
||||||
|
return "Quota name Type Value Limit %\n"
|
||||||
|
|
||||||
|
with patch("cmdeploy.remote.rshell.shell", side_effect=mock_shell):
|
||||||
|
result = dovecot_recalc_quota("user@example.org")
|
||||||
|
|
||||||
|
assert result is None
|
||||||
97
cmdeploy/src/cmdeploy/tests/test_util.py
Normal file
97
cmdeploy/src/cmdeploy/tests/test_util.py
Normal file
@@ -0,0 +1,97 @@
|
|||||||
|
import pytest
|
||||||
|
|
||||||
|
from cmdeploy.util import (
|
||||||
|
build_chatmaild_sdist,
|
||||||
|
collapse,
|
||||||
|
get_chatmaild_sdist,
|
||||||
|
get_git_hash,
|
||||||
|
get_version_string,
|
||||||
|
shell,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def test_collapse():
|
||||||
|
text = """
|
||||||
|
line 1
|
||||||
|
line 2
|
||||||
|
"""
|
||||||
|
assert collapse(text) == "line 1 line 2"
|
||||||
|
assert collapse(" single line ") == "single line"
|
||||||
|
|
||||||
|
|
||||||
|
def test_git_helpers_no_git(tmp_path):
|
||||||
|
# Not a git repo
|
||||||
|
assert get_git_hash(root=tmp_path) is None
|
||||||
|
assert get_version_string(root=tmp_path) == "unknown"
|
||||||
|
|
||||||
|
|
||||||
|
def test_git_helpers_empty_repo(tmp_path):
|
||||||
|
shell("git init", cwd=tmp_path, check=True)
|
||||||
|
# No commits yet
|
||||||
|
assert get_git_hash(root=tmp_path) is None
|
||||||
|
assert get_version_string(root=tmp_path) == "unknown"
|
||||||
|
|
||||||
|
|
||||||
|
def test_git_helpers_with_commits_and_diffs(tmp_path):
|
||||||
|
shell("git init", cwd=tmp_path, check=True)
|
||||||
|
shell("git config user.email you@example.com", cwd=tmp_path, check=True)
|
||||||
|
shell("git config user.name 'Your Name'", cwd=tmp_path, check=True)
|
||||||
|
|
||||||
|
# First commit
|
||||||
|
path = tmp_path / "file.txt"
|
||||||
|
path.write_text("content")
|
||||||
|
shell("git add file.txt", cwd=tmp_path, check=True)
|
||||||
|
shell("git commit -m initial", cwd=tmp_path, check=True)
|
||||||
|
|
||||||
|
git_hash = get_git_hash(root=tmp_path)
|
||||||
|
assert len(git_hash) >= 7 # usually 40, but git is git
|
||||||
|
assert get_version_string(root=tmp_path) == git_hash
|
||||||
|
|
||||||
|
# Create a diff
|
||||||
|
path.write_text("new content")
|
||||||
|
v = get_version_string(root=tmp_path)
|
||||||
|
assert v.startswith(git_hash + "\n")
|
||||||
|
assert "new content" in v
|
||||||
|
assert not v.endswith("\n")
|
||||||
|
|
||||||
|
# Commit again -> no diff
|
||||||
|
shell("git add file.txt", cwd=tmp_path, check=True)
|
||||||
|
shell("git commit -m second", cwd=tmp_path, check=True)
|
||||||
|
new_hash = get_git_hash(root=tmp_path)
|
||||||
|
assert new_hash != git_hash
|
||||||
|
assert get_version_string(root=tmp_path) == new_hash
|
||||||
|
|
||||||
|
# Diffs inside excluded test dirs are invisible to the version string
|
||||||
|
test_dir = tmp_path / "cmdeploy" / "src" / "cmdeploy" / "tests"
|
||||||
|
test_dir.mkdir(parents=True)
|
||||||
|
test_file = test_dir / "test_foo.py"
|
||||||
|
test_file.write_text("pass")
|
||||||
|
shell("git add .", cwd=tmp_path, check=True)
|
||||||
|
shell("git commit -m 'add test file'", cwd=tmp_path, check=True)
|
||||||
|
test_file.write_text("assert True")
|
||||||
|
assert get_version_string(root=tmp_path) == get_git_hash(root=tmp_path)
|
||||||
|
|
||||||
|
|
||||||
|
def test_build_chatmaild_sdist(tmp_path):
|
||||||
|
dist_dir = tmp_path / "dist"
|
||||||
|
|
||||||
|
# First call builds the sdist
|
||||||
|
result = build_chatmaild_sdist(dist_dir)
|
||||||
|
assert result.name.endswith(".tar.gz")
|
||||||
|
assert result.stat().st_size > 0
|
||||||
|
|
||||||
|
# Second call is idempotent - returns the same file, no rebuild
|
||||||
|
mtime = result.stat().st_mtime
|
||||||
|
result2 = build_chatmaild_sdist(dist_dir)
|
||||||
|
assert result2 == result
|
||||||
|
assert result2.stat().st_mtime == mtime
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_chatmaild_sdist_errors(tmp_path):
|
||||||
|
with pytest.raises(FileNotFoundError):
|
||||||
|
get_chatmaild_sdist(tmp_path / "nonexistent")
|
||||||
|
|
||||||
|
empty = tmp_path / "empty"
|
||||||
|
empty.mkdir()
|
||||||
|
with pytest.raises(FileNotFoundError):
|
||||||
|
get_chatmaild_sdist(empty)
|
||||||
4
cmdeploy/src/cmdeploy/unbound/unbound.conf.j2
Normal file
4
cmdeploy/src/cmdeploy/unbound/unbound.conf.j2
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
# Managed by cmdeploy: disable IPv6 in unbound.
|
||||||
|
server:
|
||||||
|
interface: 127.0.0.1
|
||||||
|
do-ip6: no
|
||||||
126
cmdeploy/src/cmdeploy/util.py
Normal file
126
cmdeploy/src/cmdeploy/util.py
Normal file
@@ -0,0 +1,126 @@
|
|||||||
|
"""Shared utility functions for cmdeploy."""
|
||||||
|
|
||||||
|
import fcntl
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import textwrap
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
|
||||||
|
def _project_root():
|
||||||
|
"""Return the project root directory."""
|
||||||
|
return Path(__file__).resolve().parent.parent.parent.parent
|
||||||
|
|
||||||
|
|
||||||
|
def collapse(text):
|
||||||
|
"""Dedent, join lines, and strip a (triple-quoted) string.
|
||||||
|
|
||||||
|
Handy for writing shell commands across multiple lines::
|
||||||
|
|
||||||
|
cmd = collapse(f\"""
|
||||||
|
cmdeploy run
|
||||||
|
--config {ct.ini}
|
||||||
|
--ssh-host {ct.domain}
|
||||||
|
\""")
|
||||||
|
"""
|
||||||
|
return textwrap.dedent(text).replace("\n", " ").strip()
|
||||||
|
|
||||||
|
|
||||||
|
def shell(cmd, check=False, **kwargs):
|
||||||
|
"""Run a shell command string with sensible defaults.
|
||||||
|
|
||||||
|
*cmd* is passed through :func:`collapse` first, so callers
|
||||||
|
can use triple-quoted f-strings freely.
|
||||||
|
Captures stdout/stderr by default; pass ``capture_output=False``
|
||||||
|
to stream output to the terminal instead.
|
||||||
|
"""
|
||||||
|
if "capture_output" not in kwargs and "stdout" not in kwargs:
|
||||||
|
kwargs["capture_output"] = True
|
||||||
|
return subprocess.run(collapse(cmd), shell=True, text=True, check=check, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def get_git_hash(root=None):
    """Return the local HEAD commit hash, or None.

    :param root: directory to run ``git`` in; defaults to the project root.
    """
    root = _project_root() if root is None else root
    proc = shell("git rev-parse HEAD", cwd=str(root))
    # A non-zero exit (not a git checkout, git missing, ...) yields None.
    return proc.stdout.strip() if proc.returncode == 0 else None
|
||||||
|
|
||||||
|
|
||||||
|
# Test-only directories: editing files under these paths must never change
# the deploy-relevant diff computed in get_version_string() below.
DIFF_EXCLUDES = (
    ":(exclude)cmdeploy/src/cmdeploy/tests",
    ":(exclude)chatmaild/src/chatmaild/tests",
)
"""Git pathspecs appended to ``git diff`` so that changes
limited to test files do not affect the deployed version string."""
|
||||||
|
|
||||||
|
|
||||||
|
def get_version_string(root=None):
    """Return ``git_hash\\ngit_diff`` for the local working tree.

    Used by :class:`~cmdeploy.deployers.GithashDeployer` to write
    ``/etc/chatmail-version`` and by ``lxc-status`` to compare
    the deployed state against the local checkout.

    Changes inside directories listed in :data:`DIFF_EXCLUDES`
    are ignored so that test-only edits do not trigger
    a redeployment.
    """
    root = _project_root() if root is None else root
    git_hash = get_git_hash(root=root) or "unknown"
    # Quote each pathspec so the shell passes it to git as one argument.
    quoted_excludes = " ".join(f"'{spec}'" for spec in DIFF_EXCLUDES)
    try:
        git_diff = shell(
            f"git diff -- . {quoted_excludes}",
            cwd=str(root),
        ).stdout.strip()
    except Exception:
        # Best effort: if git cannot produce a diff, treat the tree as clean.
        git_diff = ""
    if not git_diff:
        return git_hash
    return f"{git_hash}\n{git_diff}"
|
||||||
|
|
||||||
|
|
||||||
|
def _chatmaild_default_dist_dir():
    """Return the default output directory for chatmaild sdists."""
    return _project_root().joinpath("chatmaild", "dist")
|
||||||
|
|
||||||
|
|
||||||
|
def build_chatmaild_sdist(dist_dir=None):
    """Build the chatmaild sdist if not already present (idempotent, process-safe).

    :param dist_dir: output directory; defaults to ``chatmaild/dist``
        under the project root.
    :returns: path to the (possibly pre-existing) sdist archive.
    """

    if dist_dir is None:
        dist_dir = _chatmaild_default_dist_dir()
    dist_dir = Path(dist_dir).resolve()
    dist_dir.mkdir(parents=True, exist_ok=True)

    # Serialize concurrent builders (e.g. parallel test workers) on an
    # exclusive flock.  The lock file lives *next to* dist_dir so it is
    # never mistaken for a build artifact; the lock is released when the
    # "with" block closes the file handle.
    lockfile = dist_dir.parent / ".dist.lock"
    with open(lockfile, "w") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        # Another process may have finished the build while we waited.
        existing = [p for p in dist_dir.iterdir() if p.suffix == ".gz"]
        if existing:
            return existing[0]
        # NOTE(review): "-n" presumably disables build isolation, which
        # assumes the build dependencies are already installed -- confirm.
        subprocess.check_output(
            [sys.executable, "-m", "build", "-n"]
            + ["--sdist", "chatmaild", "--outdir", str(dist_dir)],
            cwd=str(_project_root()),
        )
        return get_chatmaild_sdist(dist_dir)
|
||||||
|
|
||||||
|
|
||||||
|
def get_chatmaild_sdist(dist_dir=None):
    """Return the path to the pre-built chatmaild sdist.

    :param dist_dir: directory to look in; defaults to ``chatmaild/dist``.
    :raises FileNotFoundError: if the directory contains no files.
    :raises ValueError: if the directory contains more than one file.
    """
    if dist_dir is None:
        dist_dir = _chatmaild_default_dist_dir()

    # Sorting is harmless here: a successful return requires exactly one
    # entry anyway, and it makes iteration order explicit.
    found = sorted(Path(dist_dir).iterdir())
    if not found:
        raise FileNotFoundError(f"dist directory is empty: {dist_dir}")
    if len(found) > 1:
        raise ValueError(f"expected one file in {dist_dir}, found {len(found)}")
    return found[0]
|
||||||
@@ -140,34 +140,34 @@ def main():
|
|||||||
config.webdev = True
|
config.webdev = True
|
||||||
assert config.mail_domain
|
assert config.mail_domain
|
||||||
|
|
||||||
# start web page generation, open a browser and wait for changes
|
|
||||||
www_path, src_path, build_dir = get_paths(config)
|
www_path, src_path, build_dir = get_paths(config)
|
||||||
build_dir = build_webpages(src_path, build_dir, config)
|
build_dir = build_webpages(src_path, build_dir, config)
|
||||||
index_path = build_dir.joinpath("index.html")
|
index_path = build_dir.joinpath("index.html")
|
||||||
webbrowser.open(str(index_path))
|
webbrowser.open(str(index_path))
|
||||||
stats = snapshot_dir_stats(src_path)
|
|
||||||
print(f"\nOpened URL: file://{index_path.resolve()}\n")
|
print(f"\nOpened URL: file://{index_path.resolve()}\n")
|
||||||
print(f"watching {src_path} directory for changes")
|
print(f"Watching {src_path} directory for changes...")
|
||||||
|
|
||||||
|
stats = snapshot_dir_stats(src_path)
|
||||||
changenum = 0
|
changenum = 0
|
||||||
count = 0
|
debounce_time = 0.5 # wait 0.5s after detecting a change
|
||||||
|
|
||||||
while True:
|
while True:
|
||||||
|
time.sleep(1)
|
||||||
newstats = snapshot_dir_stats(src_path)
|
newstats = snapshot_dir_stats(src_path)
|
||||||
if newstats == stats and count % 60 != 0:
|
|
||||||
count += 1
|
|
||||||
time.sleep(1.0)
|
|
||||||
continue
|
|
||||||
|
|
||||||
for key in newstats:
|
if newstats != stats:
|
||||||
if stats[key] != newstats[key]:
|
changed_files = [f for f in newstats if stats.get(f) != newstats[f]]
|
||||||
print(f"*** CHANGED: {key}")
|
for f in changed_files:
|
||||||
changenum += 1
|
print(f"*** CHANGED: {f}")
|
||||||
|
|
||||||
stats = newstats
|
stats = newstats
|
||||||
build_webpages(src_path, build_dir, config)
|
changenum += 1
|
||||||
print(f"[{changenum}] regenerated web pages at: {index_path}")
|
build_webpages(src_path, build_dir, config)
|
||||||
print(f"URL: file://{index_path.resolve()}\n\n")
|
print(f"[{changenum}] regenerated web pages at: {index_path}")
|
||||||
count = 0
|
print(f"URL: file://{index_path.resolve()}\n\n")
|
||||||
|
|
||||||
|
time.sleep(debounce_time) # simple debounce
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ You can use the `make` command and `make html` to build web pages.
|
|||||||
|
|
||||||
You need a Python environment where the following install was executed:
|
You need a Python environment where the following install was executed:
|
||||||
|
|
||||||
pip install sphinx-build furo sphinx-autobuild
|
pip install furo sphinx-autobuild
|
||||||
|
|
||||||
To develop/change documentation, you can then do:
|
To develop/change documentation, you can then do:
|
||||||
|
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user