mirror of
https://github.com/chatmail/relay.git
synced 2026-05-10 16:04:37 +00:00
Compare commits
52 Commits
j4n/hpk-lx
...
ipv4-only-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d9e61cb8fd | ||
|
|
a69cbfcd26 | ||
|
|
de77c17f9d | ||
|
|
27df0a407e | ||
|
|
3d5b7b3b2b | ||
|
|
f310e92be9 | ||
|
|
0a2c5368bf | ||
|
|
92d3efb2f1 | ||
|
|
69c5de4503 | ||
|
|
ef82a186d8 | ||
|
|
e0f2254234 | ||
|
|
18342bcb66 | ||
|
|
6864d5bd93 | ||
|
|
e35d6dff6b | ||
|
|
b05e26819f | ||
|
|
1db586b3eb | ||
|
|
44fe2dc08f | ||
|
|
8721600d13 | ||
|
|
dfed2b4681 | ||
|
|
f5fd286663 | ||
|
|
16b00da373 | ||
|
|
75606f5eb8 | ||
|
|
d256538f81 | ||
|
|
fdf8e5e345 | ||
|
|
81a161d433 | ||
|
|
454ac6248a | ||
|
|
85915652b3 | ||
|
|
1e8c56e08a | ||
|
|
a65f082817 | ||
|
|
6c18d37772 | ||
|
|
df4ff92133 | ||
|
|
825831ee81 | ||
|
|
0aa08b7413 | ||
|
|
14dfabf2ff | ||
|
|
0a77b3339b | ||
|
|
001d8c80fc | ||
|
|
1e376f7945 | ||
|
|
1ae92e0639 | ||
|
|
56386c231b | ||
|
|
2bdfecff72 | ||
|
|
cef739e3b3 | ||
|
|
3d128d3c64 | ||
|
|
79f68342f4 | ||
|
|
54863453c2 | ||
|
|
74326a8c54 | ||
|
|
59e5dea597 | ||
|
|
d7d89d66c1 | ||
|
|
00d723bd6e | ||
|
|
c257bfca4b | ||
|
|
82c9831369 | ||
|
|
b835318ce9 | ||
|
|
b4a46d23e6 |
35
.github/workflows/ci-no-dns.yaml
vendored
Normal file
35
.github/workflows/ci-no-dns.yaml
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
name: No-DNS
|
||||
|
||||
on:
|
||||
# Triggers when a PR is merged into main or a direct push occurs
|
||||
push:
|
||||
branches: [ "main" ]
|
||||
|
||||
# Triggers for any PR (and its subsequent commits) targeting the main branch
|
||||
pull_request:
|
||||
branches: [ "main" ]
|
||||
|
||||
# Newest push wins: Prevents multiple runs from clashing and wasting runner efforts
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
|
||||
jobs:
|
||||
no-dns:
|
||||
name: LXC deploy and test
|
||||
uses: chatmail/cmlxc/.github/workflows/lxc-test.yml@d39fe34c39cee6d760c3479325e8dc82b66a8928
|
||||
with:
|
||||
cmlxc_commands: |
|
||||
cmlxc init
|
||||
# single cmdeploy relay test
|
||||
cmlxc -v deploy-cmdeploy --source ./repo --ipv4-only --no-dns cm0
|
||||
cmlxc -v test-cmdeploy --no-dns cm0
|
||||
|
||||
# cross cmdeploy relay test
|
||||
cmlxc -v deploy-cmdeploy --source ./repo cm1
|
||||
cmlxc -v test-cmdeploy --no-dns cm0 cm1
|
||||
|
||||
# cross cmdeploy/madmail relay tests
|
||||
cmlxc -v deploy-madmail mad0
|
||||
cmlxc -v test-cmdeploy --no-dns cm0 mad0
|
||||
47
.github/workflows/ci.yaml
vendored
47
.github/workflows/ci.yaml
vendored
@@ -1,21 +1,35 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
# Triggers when a PR is merged into main or a direct push occurs
|
||||
push:
|
||||
branches: [ "main" ]
|
||||
|
||||
# Triggers for any PR (and its subsequent commits) targeting the main branch
|
||||
pull_request:
|
||||
branches: [ "main" ]
|
||||
|
||||
permissions: {}
|
||||
|
||||
# Newest push wins: Prevents multiple runs from clashing and wasting runner efforts
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
|
||||
jobs:
|
||||
tox:
|
||||
name: isolated chatmaild tests
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v6
|
||||
# Checkout pull request HEAD commit instead of merge commit
|
||||
# Otherwise `test_deployed_state` will be unhappy.
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
persist-credentials: false
|
||||
- name: download filtermail
|
||||
run: curl -L https://github.com/chatmail/filtermail/releases/download/v0.6.0/filtermail-x86_64 -o /usr/local/bin/filtermail && chmod +x /usr/local/bin/filtermail
|
||||
run: curl -L https://github.com/chatmail/filtermail/releases/download/v0.6.4/filtermail-x86_64 -o /usr/local/bin/filtermail && chmod +x /usr/local/bin/filtermail
|
||||
- name: run chatmaild tests
|
||||
working-directory: chatmaild
|
||||
run: pipx run tox
|
||||
@@ -24,7 +38,10 @@ jobs:
|
||||
name: deploy-chatmail tests
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
persist-credentials: false
|
||||
|
||||
- name: initenv
|
||||
run: scripts/initenv.sh
|
||||
@@ -38,5 +55,23 @@ jobs:
|
||||
- name: run deploy-chatmail offline tests
|
||||
run: pytest --pyargs cmdeploy
|
||||
|
||||
# all other cmdeploy commands require a staging server
|
||||
# see https://github.com/deltachat/chatmail/issues/100
|
||||
lxc-test:
|
||||
name: LXC deploy and test
|
||||
uses: chatmail/cmlxc/.github/workflows/lxc-test.yml@d39fe34c39cee6d760c3479325e8dc82b66a8928
|
||||
with:
|
||||
cmlxc_commands: |
|
||||
cmlxc init
|
||||
# single cmdeploy relay test
|
||||
cmlxc -v deploy-cmdeploy --source ./repo cm0
|
||||
cmlxc -v test-mini cm0
|
||||
cmlxc -v test-cmdeploy cm0
|
||||
|
||||
# cross cmdeploy relay test
|
||||
cmlxc -v deploy-cmdeploy --source ./repo --ipv4-only cm1
|
||||
cmlxc -v test-cmdeploy cm0 cm1
|
||||
|
||||
# cross cmdeploy/madmail relay tests
|
||||
cmlxc -v deploy-madmail mad0
|
||||
cmlxc -v test-cmdeploy cm0 mad0
|
||||
cmlxc -v test-mini cm0 mad0
|
||||
cmlxc -v test-mini mad0 cm0
|
||||
|
||||
37
.github/workflows/docker-dispatch.yaml
vendored
Normal file
37
.github/workflows/docker-dispatch.yaml
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
# Notify the docker repo to build and test a new image after relay CI passes.
|
||||
#
|
||||
# Sends a repository_dispatch event to chatmail/docker with the relay ref
|
||||
# and short SHA, which triggers docker-ci.yaml to build, push to GHCR,
|
||||
# and run integration tests via cmlxc.
|
||||
|
||||
name: Trigger Docker build
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
workflow_dispatch:
|
||||
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
dispatch:
|
||||
name: Dispatch build to chatmail/docker
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository == 'chatmail/relay'
|
||||
steps:
|
||||
- name: Compute short SHA
|
||||
id: sha
|
||||
run: echo "short=$(echo '${{ github.sha }}' | cut -c1-7)" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Send repository_dispatch
|
||||
uses: peter-evans/repository-dispatch@ff45666b9427631e3450c54a1bcbee4d9ff4d7c0 # v3
|
||||
with:
|
||||
token: ${{ secrets.CHATMAIL_DOCKER_DISPATCH_TOKEN }}
|
||||
repository: chatmail/docker
|
||||
event-type: relay-updated
|
||||
client-payload: >-
|
||||
{
|
||||
"relay_ref": "${{ github.ref_name }}",
|
||||
"relay_sha": "${{ github.sha }}",
|
||||
"relay_sha_short": "${{ steps.sha.outputs.short }}"
|
||||
}
|
||||
14
.github/workflows/docs-preview.yaml
vendored
14
.github/workflows/docs-preview.yaml
vendored
@@ -7,6 +7,8 @@ on:
|
||||
- 'scripts/build-docs.sh'
|
||||
- '.github/workflows/docs-preview.yaml'
|
||||
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
scripts:
|
||||
name: build
|
||||
@@ -16,6 +18,8 @@ jobs:
|
||||
url: https://staging.chatmail.at/doc/relay/${{ steps.prepare.outputs.prid }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: initenv
|
||||
run: scripts/initenv.sh
|
||||
@@ -34,18 +38,22 @@ jobs:
|
||||
- name: Get Pullrequest ID
|
||||
id: prepare
|
||||
run: |
|
||||
export PULLREQUEST_ID=$(echo "${{ github.ref }}" | cut -d "/" -f3)
|
||||
export PULLREQUEST_ID=$(echo "${GITHUB_REF}" | cut -d "/" -f3)
|
||||
echo "prid=$PULLREQUEST_ID" >> $GITHUB_OUTPUT
|
||||
if [ $(expr length "${{ secrets.USERNAME }}") -gt "1" ]; then echo "uploadtoserver=true" >> $GITHUB_OUTPUT; fi
|
||||
- run: |
|
||||
echo "baseurl: /${{ steps.prepare.outputs.prid }}" >> _config.yml
|
||||
echo "baseurl: /${STEPS_PREPARE_OUTPUTS_PRID}" >> _config.yml
|
||||
env:
|
||||
STEPS_PREPARE_OUTPUTS_PRID: ${{ steps.prepare.outputs.prid }}
|
||||
|
||||
- name: Upload preview
|
||||
run: |
|
||||
mkdir -p "$HOME/.ssh"
|
||||
echo "${{ secrets.CHATMAIL_STAGING_SSHKEY }}" > "$HOME/.ssh/key"
|
||||
chmod 600 "$HOME/.ssh/key"
|
||||
rsync -rILvh -e "ssh -i $HOME/.ssh/key -o StrictHostKeyChecking=no" $GITHUB_WORKSPACE/doc/build/ "${{ secrets.USERNAME }}@chatmail.at:/var/www/html/staging.chatmail.at/doc/relay/${{ steps.prepare.outputs.prid }}/"
|
||||
rsync -rILvh -e "ssh -i $HOME/.ssh/key -o StrictHostKeyChecking=no" $GITHUB_WORKSPACE/doc/build/ "${{ secrets.USERNAME }}@chatmail.at:/var/www/html/staging.chatmail.at/doc/relay/${STEPS_PREPARE_OUTPUTS_PRID}/"
|
||||
env:
|
||||
STEPS_PREPARE_OUTPUTS_PRID: ${{ steps.prepare.outputs.prid }}
|
||||
|
||||
- name: check links
|
||||
working-directory: doc
|
||||
|
||||
4
.github/workflows/docs.yaml
vendored
4
.github/workflows/docs.yaml
vendored
@@ -10,6 +10,8 @@ on:
|
||||
- 'scripts/build-docs.sh'
|
||||
- '.github/workflows/docs.yaml'
|
||||
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
scripts:
|
||||
name: build
|
||||
@@ -19,6 +21,8 @@ jobs:
|
||||
url: https://chatmail.at/doc/relay/
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: initenv
|
||||
run: scripts/initenv.sh
|
||||
|
||||
104
.github/workflows/test-and-deploy-ipv4only.yaml
vendored
104
.github/workflows/test-and-deploy-ipv4only.yaml
vendored
@@ -1,104 +0,0 @@
|
||||
name: deploy on staging-ipv4.testrun.org, and run tests
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
paths-ignore:
|
||||
- 'scripts/**'
|
||||
- '**/README.md'
|
||||
- 'CHANGELOG.md'
|
||||
- 'LICENSE'
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
name: deploy on staging-ipv4.testrun.org, and run tests
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
environment:
|
||||
name: staging-ipv4.testrun.org
|
||||
url: https://staging-ipv4.testrun.org/
|
||||
concurrency: staging-ipv4.testrun.org
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: prepare SSH
|
||||
run: |
|
||||
mkdir ~/.ssh
|
||||
echo "${{ secrets.STAGING_SSH_KEY }}" >> ~/.ssh/id_ed25519
|
||||
chmod 600 ~/.ssh/id_ed25519
|
||||
ssh-keyscan staging-ipv4.testrun.org > ~/.ssh/known_hosts
|
||||
# save previous acme & dkim state
|
||||
rsync -avz root@staging-ipv4.testrun.org:/var/lib/acme acme-ipv4 || true
|
||||
rsync -avz root@staging-ipv4.testrun.org:/etc/dkimkeys dkimkeys-ipv4 || true
|
||||
# store previous acme & dkim state on ns.testrun.org, if it contains useful certs
|
||||
if [ -f dkimkeys-ipv4/dkimkeys/opendkim.private ]; then rsync -avz -e "ssh -o StrictHostKeyChecking=accept-new" dkimkeys-ipv4 root@ns.testrun.org:/tmp/ || true; fi
|
||||
if [ "$(ls -A acme-ipv4/acme/certs)" ]; then rsync -avz -e "ssh -o StrictHostKeyChecking=accept-new" acme-ipv4 root@ns.testrun.org:/tmp/ || true; fi
|
||||
# make sure CAA record isn't set
|
||||
scp -o StrictHostKeyChecking=accept-new .github/workflows/staging-ipv4.testrun.org-default.zone root@ns.testrun.org:/etc/nsd/staging-ipv4.testrun.org.zone
|
||||
ssh root@ns.testrun.org sed -i '/CAA/d' /etc/nsd/staging-ipv4.testrun.org.zone
|
||||
ssh root@ns.testrun.org nsd-checkzone staging-ipv4.testrun.org /etc/nsd/staging-ipv4.testrun.org.zone
|
||||
ssh root@ns.testrun.org systemctl reload nsd
|
||||
|
||||
- name: rebuild staging-ipv4.testrun.org to have a clean VPS
|
||||
run: |
|
||||
curl -X POST \
|
||||
-H "Authorization: Bearer ${{ secrets.HETZNER_API_TOKEN }}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"image":"debian-12"}' \
|
||||
"https://api.hetzner.cloud/v1/servers/${{ secrets.STAGING_IPV4_SERVER_ID }}/actions/rebuild"
|
||||
|
||||
- run: scripts/initenv.sh
|
||||
|
||||
- name: append venv/bin to PATH
|
||||
run: echo venv/bin >>$GITHUB_PATH
|
||||
|
||||
- name: upload TLS cert after rebuilding
|
||||
run: |
|
||||
echo " --- wait until staging-ipv4.testrun.org VPS is rebuilt --- "
|
||||
rm ~/.ssh/known_hosts
|
||||
while ! ssh -o ConnectTimeout=180 -o StrictHostKeyChecking=accept-new -v root@staging-ipv4.testrun.org id -u ; do sleep 1 ; done
|
||||
ssh -o StrictHostKeyChecking=accept-new -v root@staging-ipv4.testrun.org id -u
|
||||
# download acme & dkim state from ns.testrun.org
|
||||
rsync -e "ssh -o StrictHostKeyChecking=accept-new" -avz root@ns.testrun.org:/tmp/acme-ipv4/acme acme-restore || true
|
||||
rsync -avz root@ns.testrun.org:/tmp/dkimkeys-ipv4/dkimkeys dkimkeys-restore || true
|
||||
# restore acme & dkim state to staging2.testrun.org
|
||||
rsync -avz acme-restore/acme root@staging-ipv4.testrun.org:/var/lib/ || true
|
||||
rsync -avz dkimkeys-restore/dkimkeys root@staging-ipv4.testrun.org:/etc/ || true
|
||||
ssh -o StrictHostKeyChecking=accept-new -v root@staging-ipv4.testrun.org chown root:root -R /var/lib/acme || true
|
||||
|
||||
- name: run deploy-chatmail offline tests
|
||||
run: pytest --pyargs cmdeploy
|
||||
|
||||
- name: setup dependencies
|
||||
run: |
|
||||
ssh root@staging-ipv4.testrun.org apt update
|
||||
ssh root@staging-ipv4.testrun.org apt install -y git python3.11-venv python3-dev gcc
|
||||
ssh root@staging-ipv4.testrun.org git clone https://github.com/chatmail/relay
|
||||
ssh root@staging-ipv4.testrun.org "cd relay && git checkout " ${{ github.head_ref }}
|
||||
ssh root@staging-ipv4.testrun.org "cd relay && scripts/initenv.sh"
|
||||
|
||||
- name: initialize config
|
||||
run: |
|
||||
ssh root@staging-ipv4.testrun.org "cd relay && scripts/cmdeploy init staging-ipv4.testrun.org"
|
||||
ssh root@staging-ipv4.testrun.org "sed -i 's#disable_ipv6 = False#disable_ipv6 = True#' relay/chatmail.ini"
|
||||
ssh root@staging-ipv4.testrun.org "sed -i 's/#\s*mtail_address/mtail_address/' relay/chatmail.ini"
|
||||
|
||||
- run: ssh root@staging-ipv4.testrun.org "cd relay && scripts/cmdeploy run --verbose --skip-dns-check --ssh-host localhost"
|
||||
|
||||
- name: set DNS entries
|
||||
run: |
|
||||
ssh root@staging-ipv4.testrun.org "cd relay && scripts/cmdeploy dns --zonefile staging-generated.zone --ssh-host localhost"
|
||||
ssh root@staging-ipv4.testrun.org cat relay/staging-generated.zone >> .github/workflows/staging-ipv4.testrun.org-default.zone
|
||||
cat .github/workflows/staging-ipv4.testrun.org-default.zone
|
||||
scp .github/workflows/staging-ipv4.testrun.org-default.zone root@ns.testrun.org:/etc/nsd/staging-ipv4.testrun.org.zone
|
||||
ssh root@ns.testrun.org nsd-checkzone staging-ipv4.testrun.org /etc/nsd/staging-ipv4.testrun.org.zone
|
||||
ssh root@ns.testrun.org systemctl reload nsd
|
||||
|
||||
- name: cmdeploy test
|
||||
run: ssh root@staging-ipv4.testrun.org "cd relay && CHATMAIL_DOMAIN2=ci-chatmail.testrun.org scripts/cmdeploy test --slow --ssh-host localhost"
|
||||
|
||||
- name: cmdeploy dns
|
||||
run: ssh root@staging-ipv4.testrun.org "cd relay && scripts/cmdeploy dns -v --ssh-host localhost"
|
||||
|
||||
97
.github/workflows/test-and-deploy.yaml
vendored
97
.github/workflows/test-and-deploy.yaml
vendored
@@ -1,97 +0,0 @@
|
||||
name: deploy on staging2.testrun.org, and run tests
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
paths-ignore:
|
||||
- 'scripts/**'
|
||||
- '**/README.md'
|
||||
- 'CHANGELOG.md'
|
||||
- 'LICENSE'
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
name: deploy on staging2.testrun.org, and run tests
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
environment:
|
||||
name: staging2.testrun.org
|
||||
url: https://staging2.testrun.org/
|
||||
concurrency: staging2.testrun.org
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: prepare SSH
|
||||
run: |
|
||||
mkdir ~/.ssh
|
||||
echo "${{ secrets.STAGING_SSH_KEY }}" >> ~/.ssh/id_ed25519
|
||||
chmod 600 ~/.ssh/id_ed25519
|
||||
ssh-keyscan staging2.testrun.org > ~/.ssh/known_hosts
|
||||
# save previous acme & dkim state
|
||||
rsync -avz root@staging2.testrun.org:/var/lib/acme . || true
|
||||
rsync -avz root@staging2.testrun.org:/etc/dkimkeys . || true
|
||||
# store previous acme & dkim state on ns.testrun.org, if it contains useful certs
|
||||
if [ -f dkimkeys/opendkim.private ]; then rsync -avz -e "ssh -o StrictHostKeyChecking=accept-new" dkimkeys root@ns.testrun.org:/tmp/ || true; fi
|
||||
if [ "$(ls -A acme/certs)" ]; then rsync -avz -e "ssh -o StrictHostKeyChecking=accept-new" acme root@ns.testrun.org:/tmp/ || true; fi
|
||||
# make sure CAA record isn't set
|
||||
scp -o StrictHostKeyChecking=accept-new .github/workflows/staging.testrun.org-default.zone root@ns.testrun.org:/etc/nsd/staging2.testrun.org.zone
|
||||
ssh root@ns.testrun.org sed -i '/CAA/d' /etc/nsd/staging2.testrun.org.zone
|
||||
ssh root@ns.testrun.org nsd-checkzone staging2.testrun.org /etc/nsd/staging2.testrun.org.zone
|
||||
ssh root@ns.testrun.org systemctl reload nsd
|
||||
|
||||
- name: rebuild staging2.testrun.org to have a clean VPS
|
||||
run: |
|
||||
curl -X POST \
|
||||
-H "Authorization: Bearer ${{ secrets.HETZNER_API_TOKEN }}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"image":"debian-12"}' \
|
||||
"https://api.hetzner.cloud/v1/servers/${{ secrets.STAGING_SERVER_ID }}/actions/rebuild"
|
||||
|
||||
- run: scripts/initenv.sh
|
||||
|
||||
- name: append venv/bin to PATH
|
||||
run: echo venv/bin >>$GITHUB_PATH
|
||||
|
||||
- name: upload TLS cert after rebuilding
|
||||
run: |
|
||||
echo " --- wait until staging2.testrun.org VPS is rebuilt --- "
|
||||
rm ~/.ssh/known_hosts
|
||||
while ! ssh -o ConnectTimeout=180 -o StrictHostKeyChecking=accept-new -v root@staging2.testrun.org id -u ; do sleep 1 ; done
|
||||
ssh -o StrictHostKeyChecking=accept-new -v root@staging2.testrun.org id -u
|
||||
# download acme & dkim state from ns.testrun.org
|
||||
rsync -e "ssh -o StrictHostKeyChecking=accept-new" -avz root@ns.testrun.org:/tmp/acme acme-restore || true
|
||||
rsync -avz root@ns.testrun.org:/tmp/dkimkeys dkimkeys-restore || true
|
||||
# restore acme & dkim state to staging2.testrun.org
|
||||
rsync -avz acme-restore/acme root@staging2.testrun.org:/var/lib/ || true
|
||||
rsync -avz dkimkeys-restore/dkimkeys root@staging2.testrun.org:/etc/ || true
|
||||
ssh -o StrictHostKeyChecking=accept-new -v root@staging2.testrun.org chown root:root -R /var/lib/acme || true
|
||||
|
||||
- name: add hpk42 key to staging server
|
||||
run: ssh root@staging2.testrun.org 'curl -s https://github.com/hpk42.keys >> .ssh/authorized_keys'
|
||||
|
||||
- name: run deploy-chatmail offline tests
|
||||
run: pytest --pyargs cmdeploy
|
||||
|
||||
- run: |
|
||||
cmdeploy init staging2.testrun.org
|
||||
sed -i 's/#\s*mtail_address/mtail_address/' chatmail.ini
|
||||
|
||||
- run: cmdeploy run --verbose --skip-dns-check
|
||||
|
||||
- name: set DNS entries
|
||||
run: |
|
||||
cmdeploy dns --zonefile staging-generated.zone --verbose
|
||||
cat staging-generated.zone >> .github/workflows/staging.testrun.org-default.zone
|
||||
cat .github/workflows/staging.testrun.org-default.zone
|
||||
scp .github/workflows/staging.testrun.org-default.zone root@ns.testrun.org:/etc/nsd/staging2.testrun.org.zone
|
||||
ssh root@ns.testrun.org nsd-checkzone staging2.testrun.org /etc/nsd/staging2.testrun.org.zone
|
||||
ssh root@ns.testrun.org systemctl reload nsd
|
||||
|
||||
- name: cmdeploy test
|
||||
run: CHATMAIL_DOMAIN2=ci-chatmail.testrun.org cmdeploy test --slow
|
||||
|
||||
- name: cmdeploy dns
|
||||
run: cmdeploy dns -v
|
||||
|
||||
26
.github/workflows/zizmor-scan.yml
vendored
Normal file
26
.github/workflows/zizmor-scan.yml
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
name: GitHub Actions Security Analysis with zizmor
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: ["main"]
|
||||
pull_request:
|
||||
branches: ["**"]
|
||||
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
zizmor:
|
||||
name: Run zizmor
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
security-events: write # Required for upload-sarif (used by zizmor-action) to upload SARIF files.
|
||||
contents: read
|
||||
actions: read
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Run zizmor
|
||||
uses: zizmorcore/zizmor-action@b1d7e1fb5de872772f31590499237e7cce841e8e # v0.5.3
|
||||
7
.github/zizmor.yml
vendored
Normal file
7
.github/zizmor.yml
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
rules:
|
||||
unpinned-uses:
|
||||
config:
|
||||
policies:
|
||||
actions/*: ref-pin
|
||||
dependabot/*: ref-pin
|
||||
chatmail/*: ref-pin
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -5,7 +5,6 @@ __pycache__/
|
||||
*.swp
|
||||
*qr-*.png
|
||||
chatmail*.ini
|
||||
lxconfigs/
|
||||
|
||||
|
||||
# C extensions
|
||||
|
||||
84
CHANGELOG.md
84
CHANGELOG.md
@@ -1,5 +1,89 @@
|
||||
# Changelog for chatmail deployment
|
||||
|
||||
## 1.10.0 2026-04-30
|
||||
|
||||
* start mtail after networking is fully up <https://github.com/chatmail/relay/pull/942>
|
||||
* support specifying custom filtermail binary through environment variable <https://github.com/chatmail/relay/pull/941>
|
||||
* add automated zizmor scanning of github workflows <https://github.com/chatmail/relay/pull/938>
|
||||
* added dispatch for *automated builds of chatmail relay docker images* <https://github.com/chatmail/relay/pull/934>
|
||||
* do not bind SMTP client sockets to public addresses <https://github.com/chatmail/relay/pull/932>
|
||||
* underline in docs that scripts/initenv.sh should be used for building the docs <https://github.com/chatmail/relay/pull/933>
|
||||
* automatic oldest-first message removal from mailboxes to always stay under max_mailbox_size <https://github.com/chatmail/relay/pull/929>
|
||||
* remove --slow from cmdeploy test <https://github.com/chatmail/relay/pull/931>
|
||||
* handle missing inotify sysctl keys in containers <https://github.com/chatmail/relay/pull/930>
|
||||
* replace resolvconf with static resolv.conf <https://github.com/chatmail/relay/pull/928>
|
||||
* disable fsync for LMTP and IMAP services <https://github.com/chatmail/relay/pull/925>
|
||||
* re-use cmlxc workflow, replacing CI with hetzner staging servers with local lxc containers <https://github.com/chatmail/relay/pull/917>
|
||||
* explicitly install resolvconf <https://github.com/chatmail/relay/pull/924>
|
||||
* detect stale dovecot binary and force restart in activate() <https://github.com/chatmail/relay/pull/922>
|
||||
* Rename filtermail_http_port to filtermail_http_port_incoming <https://github.com/chatmail/relay/pull/921>
|
||||
* consolidated is_in_container() check https://github.com/chatmail/relay/pull/920>
|
||||
* restart dovecot after package replacement (rebase, test condense) <https://github.com/chatmail/relay/pull/913>
|
||||
* Set permissions on dovecot pin prefs <https://github.com/chatmail/relay/pull/915>
|
||||
* Route `/mxdeliv/` to configurable port <https://github.com/chatmail/relay/pull/901>
|
||||
* fix VM detection, automated testing fixes, use newer chatmail-turn and move to standard BIND DNS zone format <https://github.com/chatmail/relay/pull/912>
|
||||
* Upgrade to filtermail 0.6.1 <https://github.com/chatmail/relay/pull/910>
|
||||
* pin dovecot packages to prevent apt upgrades <https://github.com/chatmail/relay/pull/908>
|
||||
* add rpc server to cmdeploy along with client <https://github.com/chatmail/relay/pull/906>
|
||||
* remove unused deps from chatmaild <https://github.com/chatmail/relay/pull/905>
|
||||
* set default smtp_tls_security_level to "verify" unconditionally <https://github.com/chatmail/relay/pull/902>
|
||||
* featprefer IPv4 in SMTP client <https://github.com/chatmail/relay/pull/900>
|
||||
* Install dovecot .deb packages atomically <https://github.com/chatmail/relay/pull/899>
|
||||
* stop installing cron package <https://github.com/chatmail/relay/pull/898>
|
||||
* Rewrite dovecot install logic, update <https://github.com/chatmail/relay/pull/862>
|
||||
* fix a test and some linting fixes <https://github.com/chatmail/relay/pull/897>
|
||||
* Disable IP verification on domain-literal addresses <https://github.com/chatmail/relay/pull/895>
|
||||
* disable installing recommended packages globally on the relay <https://github.com/chatmail/relay/pull/887>
|
||||
* multiple bug fixes across chatmaild and cmdeploy <https://github.com/chatmail/relay/pull/883>
|
||||
* remove /metrics from the website <https://github.com/chatmail/relay/pull/703>
|
||||
* add Prometheus textfile output to fsreport <https://github.com/chatmail/relay/pull/881>
|
||||
* chown opendkim: private key <https://github.com/chatmail/relay/pull/879>
|
||||
* make sure chatmail-metadata was started <https://github.com/chatmail/relay/pull/882>
|
||||
* dovecot update url <https://github.com/chatmail/relay/pull/880>
|
||||
* upgrade to filtermail v0.5.2 <https://github.com/chatmail/relay/pull/876>
|
||||
* download dovecot packages from github release <https://github.com/chatmail/relay/pull/875>
|
||||
* replace DKIM verification with filtermail v0.5 <https://github.com/chatmail/relay/pull/831>
|
||||
* remove CFFI deltachat bindings usage, and consolidate test support with rpc-bindings <https://github.com/chatmail/relay/pull/872>
|
||||
* prepare chatmaild/cmdeploy changes for Docker support <https://github.com/chatmail/relay/pull/857>
|
||||
* stabilize online benchmark timing adding rate-limit-aware cooldown between iterations <https://github.com/chatmail/relay/pull/867>
|
||||
* move rate-limit cooldown to benchmark fixture <https://github.com/chatmail/relay/pull/868>
|
||||
* reconfigure acmetool from redirector to proxy mode <https://github.com/chatmail/relay/pull/861>
|
||||
* make tests work with `--ssh-host localhost` <https://github.com/chatmail/relay/pull/856>
|
||||
* mark f-string with f prefix in test_expunged <https://github.com/chatmail/relay/pull/863>
|
||||
* install also if dovecot.service=False in SystemdEnabled Fact <https://github.com/chatmail/relay/pull/841>
|
||||
* Introduce support for self-signed chatmail relays <https://github.com/chatmail/relay/pull/855>
|
||||
* Strip Received headers before delivery <https://github.com/chatmail/relay/pull/849>
|
||||
* upgrade to filtermail v0.3 <https://github.com/chatmail/relay/pull/850>
|
||||
* fix link to Maddy and update madmail URL <https://github.com/chatmail/relay/pull/847>
|
||||
* accept self-signed certificates for IP-only relays <https://github.com/chatmail/relay/pull/846>
|
||||
* enforce sending from public IP addresses <https://github.com/chatmail/relay/pull/845>
|
||||
* port check: check addresses, fix single services <https://github.com/chatmail/relay/pull/844>
|
||||
* remediates issue with improper concat on resolver injection <https://github.com/chatmail/relay/pull/834>
|
||||
* ipv6 boolean not being respected during operations <https://github.com/chatmail/relay/pull/832>
|
||||
* upgrade to filtermail v0.2 by <https://github.com/chatmail/relay/pull/825>
|
||||
* fix link to filtermail <https://github.com/chatmail/relay/pull/824>
|
||||
* print timestamps when sending messages <https://github.com/chatmail/relay/pull/823>
|
||||
* fix flaky test_exceed_rate_limit <https://github.com/chatmail/relay/pull/822>
|
||||
* Replace filtermail with rust reimplementation <https://github.com/chatmail/relay/pull/808>
|
||||
* Set default internal SMTP ports in Config <https://github.com/chatmail/relay/pull/819>
|
||||
* separate metrics for incoming and outgoing messages <https://github.com/chatmail/relay/pull/820>
|
||||
* disable appending the Received header <https://github.com/chatmail/relay/pull/815>
|
||||
* fail on errors in postfix/dovecot config <https://github.com/chatmail/relay/pull/813>
|
||||
* tweak idle/hibernate metrics some more <https://github.com/chatmail/relay/pull/811>
|
||||
* add config flag to export statistics <https://github.com/chatmail/relay/pull/806>
|
||||
* add --website-only option to run subcommand <https://github.com/chatmail/relay/pull/768>
|
||||
* Strip DKIM-Signature header before LMTP <https://github.com/chatmail/relay/pull/803>
|
||||
* properly make sure that postfix gets restarted on failure <https://github.com/chatmail/relay/pull/802>
|
||||
* expire.py: use absolute path to maildirsize <https://github.com/chatmail/relay/pull/807>
|
||||
* pin Dovecot documentation URLs to version 2.3 <https://github.com/chatmail/relay/pull/800>
|
||||
* try to use "build machine" and "deployment server" consistently <https://github.com/chatmail/relay/pull/797>
|
||||
* adds instructions for migrating control machines <https://github.com/chatmail/relay/pull/795>
|
||||
* use consistent naming schema in getting started <https://github.com/chatmail/relay/pull/793>
|
||||
* remove jsok/serialize-workflow-action dependency <https://github.com/chatmail/relay/pull/790>
|
||||
* streamline migration guide wording, provide titled steps <https://github.com/chatmail/relay/pull/789>
|
||||
* increases default max mailbox size <https://github.com/chatmail/relay/pull/792>
|
||||
* use daemon_name for OpenDKIM sign-verify decision instead of IP <https://github.com/chatmail/relay/pull/784>
|
||||
|
||||
## 1.9.0 2025-12-18
|
||||
|
||||
### Documentation
|
||||
|
||||
@@ -10,6 +10,7 @@ dependencies = [
|
||||
"filelock",
|
||||
"requests",
|
||||
"crypt-r >= 3.13.1 ; python_version >= '3.11'",
|
||||
"domain-validator",
|
||||
]
|
||||
|
||||
[tool.setuptools]
|
||||
@@ -21,7 +22,8 @@ where = ['src']
|
||||
[project.scripts]
|
||||
doveauth = "chatmaild.doveauth:main"
|
||||
chatmail-metadata = "chatmaild.metadata:main"
|
||||
chatmail-expire = "chatmaild.expire:main"
|
||||
chatmail-expire = "chatmaild.expire:daily_expire_main"
|
||||
chatmail-quota-expire = "chatmaild.expire:quota_expire_main"
|
||||
chatmail-fsreport = "chatmaild.fsreport:main"
|
||||
lastlogin = "chatmaild.lastlogin:main"
|
||||
turnserver = "chatmaild.turnserver:main"
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
import os
|
||||
import ipaddress
|
||||
from pathlib import Path
|
||||
|
||||
import iniconfig
|
||||
from domain_validator import DomainValidator
|
||||
|
||||
from chatmaild.user import User
|
||||
|
||||
@@ -20,7 +21,19 @@ def read_config(inipath):
|
||||
class Config:
|
||||
def __init__(self, inipath, params):
|
||||
self._inipath = inipath
|
||||
self.mail_domain = params["mail_domain"]
|
||||
raw_domain = params["mail_domain"]
|
||||
self.mail_domain_bare = raw_domain
|
||||
|
||||
if is_valid_ipv4(raw_domain):
|
||||
self.ipv4_relay = raw_domain
|
||||
self.mail_domain = f"[{raw_domain}]"
|
||||
self.postfix_myhostname = ipaddress.IPv4Address(raw_domain).reverse_pointer
|
||||
else:
|
||||
DomainValidator().validate_domain_re(raw_domain)
|
||||
self.ipv4_relay = None
|
||||
self.mail_domain = raw_domain
|
||||
self.postfix_myhostname = raw_domain
|
||||
|
||||
self.max_user_send_per_minute = int(params.get("max_user_send_per_minute", 60))
|
||||
self.max_user_send_burst_size = int(params.get("max_user_send_burst_size", 10))
|
||||
self.max_mailbox_size = params["max_mailbox_size"]
|
||||
@@ -38,19 +51,23 @@ class Config:
|
||||
self.filtermail_smtp_port_incoming = int(
|
||||
params.get("filtermail_smtp_port_incoming", "10081")
|
||||
)
|
||||
self.filtermail_http_port_incoming = int(
|
||||
params.get("filtermail_http_port_incoming", "10082")
|
||||
)
|
||||
self.filtermail_lmtp_port_transport = int(
|
||||
params.get("filtermail_lmtp_port_transport", "10083")
|
||||
)
|
||||
self.postfix_reinject_port = int(params.get("postfix_reinject_port", "10025"))
|
||||
self.postfix_reinject_port_incoming = int(
|
||||
params.get("postfix_reinject_port_incoming", "10026")
|
||||
)
|
||||
self.mtail_address = params.get("mtail_address")
|
||||
self.disable_ipv6 = params.get("disable_ipv6", "false").lower() == "true"
|
||||
self.addr_v4 = os.environ.get("CHATMAIL_ADDR_V4", "")
|
||||
self.addr_v6 = os.environ.get("CHATMAIL_ADDR_V6", "")
|
||||
self.acme_email = params.get("acme_email", "")
|
||||
self.imap_rawlog = params.get("imap_rawlog", "false").lower() == "true"
|
||||
self.imap_compress = params.get("imap_compress", "false").lower() == "true"
|
||||
if "iroh_relay" not in params:
|
||||
self.iroh_relay = "https://" + params["mail_domain"]
|
||||
self.iroh_relay = "https://" + raw_domain
|
||||
self.enable_iroh_relay = True
|
||||
else:
|
||||
self.iroh_relay = params["iroh_relay"].strip()
|
||||
@@ -76,22 +93,27 @@ class Config:
|
||||
)
|
||||
self.tls_cert_mode = "external"
|
||||
self.tls_cert_path, self.tls_key_path = parts
|
||||
elif self.mail_domain.startswith("_"):
|
||||
elif raw_domain.startswith("_") or self.ipv4_relay:
|
||||
self.tls_cert_mode = "self"
|
||||
self.tls_cert_path = "/etc/ssl/certs/mailserver.pem"
|
||||
self.tls_key_path = "/etc/ssl/private/mailserver.key"
|
||||
else:
|
||||
self.tls_cert_mode = "acme"
|
||||
self.tls_cert_path = f"/var/lib/acme/live/{self.mail_domain}/fullchain"
|
||||
self.tls_key_path = f"/var/lib/acme/live/{self.mail_domain}/privkey"
|
||||
self.tls_cert_path = f"/var/lib/acme/live/{raw_domain}/fullchain"
|
||||
self.tls_key_path = f"/var/lib/acme/live/{raw_domain}/privkey"
|
||||
|
||||
# deprecated option
|
||||
mbdir = params.get("mailboxes_dir", f"/home/vmail/mail/{self.mail_domain}")
|
||||
mbdir = params.get("mailboxes_dir", f"/home/vmail/mail/{raw_domain}")
|
||||
self.mailboxes_dir = Path(mbdir.strip())
|
||||
|
||||
# old unused option (except for first migration from sqlite to maildir store)
|
||||
self.passdb_path = Path(params.get("passdb_path", "/home/vmail/passdb.sqlite"))
|
||||
|
||||
@property
|
||||
def max_mailbox_size_mb(self):
|
||||
"""Return max_mailbox_size as an integer in megabytes."""
|
||||
return parse_size_mb(self.max_mailbox_size)
|
||||
|
||||
def _getbytefile(self):
|
||||
return open(self._inipath, "rb")
|
||||
|
||||
@@ -105,6 +127,16 @@ class Config:
|
||||
return User(maildir, addr, password_path, uid="vmail", gid="vmail")
|
||||
|
||||
|
||||
def parse_size_mb(limit):
|
||||
"""Parse a size string like ``500M`` or ``2G`` and return megabytes."""
|
||||
value = limit.strip().upper().removesuffix("B")
|
||||
if value.endswith("G"):
|
||||
return int(value[:-1]) * 1024
|
||||
if value.endswith("M"):
|
||||
return int(value[:-1])
|
||||
return int(value)
|
||||
|
||||
|
||||
def write_initial_config(inipath, mail_domain, overrides):
|
||||
"""Write out default config file, using the specified config value overrides."""
|
||||
content = get_default_config_content(mail_domain, **overrides)
|
||||
@@ -157,3 +189,27 @@ def get_default_config_content(mail_domain, **overrides):
|
||||
lines.append(line)
|
||||
content = "\n".join(lines)
|
||||
return content
|
||||
|
||||
|
||||
def is_valid_ipv4(address: str) -> bool:
|
||||
"""Check if a mail_domain is an IPv4 address."""
|
||||
try:
|
||||
ipaddress.IPv4Address(address)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
|
||||
def format_arpa_address(address: str) -> str:
|
||||
if is_valid_ipv4(address):
|
||||
return ipaddress.IPv4Address(address).reverse_pointer
|
||||
DomainValidator().validate_domain_re(address)
|
||||
return address
|
||||
|
||||
|
||||
def format_mail_domain(raw_domain: str) -> str:
|
||||
if is_valid_ipv4(raw_domain):
|
||||
return f"[{raw_domain}]"
|
||||
DomainValidator().validate_domain_re(raw_domain)
|
||||
return raw_domain
|
||||
|
||||
@@ -4,17 +4,26 @@ Expire old messages and addresses.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
from argparse import ArgumentParser
|
||||
from collections import namedtuple
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from stat import S_ISREG
|
||||
|
||||
from chatmaild.config import read_config
|
||||
|
||||
FileEntry = namedtuple("FileEntry", ("path", "mtime", "size"))
|
||||
QuotaFileEntry = namedtuple("QuotaFileEntry", ("mtime", "quota_size", "path"))
|
||||
|
||||
# Quota cleanup factor of max_mailbox_size. The mailbox is reset to this size.
|
||||
QUOTA_CLEANUP_FACTOR = 0.7
|
||||
|
||||
# e.g. "cur/1775324677.M448978P3029757.exam,S=3235,W=3305:2,S"
|
||||
_dovecot_fn_rex = re.compile(r".+/(\d+)\..+,S=(\d+)")
|
||||
|
||||
|
||||
def iter_mailboxes(basedir, maxnum):
|
||||
@@ -74,6 +83,42 @@ class MailboxStat:
|
||||
self.extrafiles.sort(key=lambda x: -x.size)
|
||||
|
||||
|
||||
def parse_dovecot_filename(relpath):
|
||||
m = _dovecot_fn_rex.match(relpath)
|
||||
if not m:
|
||||
return None
|
||||
return QuotaFileEntry(int(m.group(1)), int(m.group(2)), relpath)
|
||||
|
||||
|
||||
def scan_mailbox_messages(mbox):
|
||||
messages = []
|
||||
for sub in ("cur", "new"):
|
||||
for name in os_listdir_if_exists(mbox / sub):
|
||||
if entry := parse_dovecot_filename(f"{sub}/{name}"):
|
||||
messages.append(entry)
|
||||
return messages
|
||||
|
||||
|
||||
def expire_to_target(mbox, target_bytes):
|
||||
messages = scan_mailbox_messages(mbox)
|
||||
total_size = sum(m.quota_size for m in messages)
|
||||
# Keep recent 24 hours of messages protected from expiry because
|
||||
# likely something is wrong with interactions on that address
|
||||
# and quota-full signal can help the address owner's device to notice it
|
||||
undeletable_messages_cutoff = time.time() - (3600 * 24)
|
||||
removed = 0
|
||||
for entry in sorted(messages):
|
||||
if total_size <= target_bytes:
|
||||
break
|
||||
if entry.mtime > undeletable_messages_cutoff:
|
||||
break
|
||||
(mbox / entry.path).unlink(missing_ok=True)
|
||||
total_size -= entry.quota_size
|
||||
removed += 1
|
||||
|
||||
return removed
|
||||
|
||||
|
||||
def print_info(msg):
|
||||
print(msg, file=sys.stderr)
|
||||
|
||||
@@ -143,6 +188,19 @@ class Expiry:
|
||||
else:
|
||||
continue
|
||||
changed = True
|
||||
|
||||
target_bytes = (
|
||||
self.config.max_mailbox_size_mb * 1024 * 1024 * QUOTA_CLEANUP_FACTOR
|
||||
)
|
||||
removed = expire_to_target(Path(mbox.basedir), target_bytes)
|
||||
if removed:
|
||||
changed = True
|
||||
self.del_files += removed
|
||||
if self.verbose:
|
||||
print_info(
|
||||
f"quota-expire: removed {removed} message(s) from {mboxname}"
|
||||
)
|
||||
|
||||
if changed:
|
||||
self.remove_file(f"{mbox.basedir}/maildirsize")
|
||||
|
||||
@@ -154,9 +212,9 @@ class Expiry:
|
||||
)
|
||||
|
||||
|
||||
def main(args=None):
|
||||
def daily_expire_main(args=None):
|
||||
"""Expire mailboxes and messages according to chatmail config"""
|
||||
parser = ArgumentParser(description=main.__doc__)
|
||||
parser = ArgumentParser(description=daily_expire_main.__doc__)
|
||||
ini = "/usr/local/lib/chatmaild/chatmail.ini"
|
||||
parser.add_argument(
|
||||
"chatmail_ini",
|
||||
@@ -202,5 +260,33 @@ def main(args=None):
|
||||
print(exp.get_summary())
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(sys.argv[1:])
|
||||
def quota_expire_main(args=None):
|
||||
"""Remove mailbox messages to stay within a megabyte target.
|
||||
|
||||
This entry point is called by dovecot when a quota threshold is passed.
|
||||
"""
|
||||
|
||||
parser = ArgumentParser(description=quota_expire_main.__doc__)
|
||||
parser.add_argument(
|
||||
"target_mb",
|
||||
type=int,
|
||||
help="target mailbox size in megabytes",
|
||||
)
|
||||
parser.add_argument(
|
||||
"mailbox_path",
|
||||
type=Path,
|
||||
help="path to a user mailbox",
|
||||
)
|
||||
args = parser.parse_args(args)
|
||||
|
||||
target_bytes = args.target_mb * 1024 * 1024
|
||||
|
||||
removed_count = expire_to_target(args.mailbox_path, target_bytes)
|
||||
if removed_count:
|
||||
(args.mailbox_path / "maildirsize").unlink(missing_ok=True)
|
||||
print(
|
||||
f"quota-expire: removed {removed_count} message(s)"
|
||||
f" from {args.mailbox_path.name}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
return 0
|
||||
|
||||
@@ -18,6 +18,7 @@ max_user_send_per_minute = 60
|
||||
max_user_send_burst_size = 10
|
||||
|
||||
# maximum mailbox size of a chatmail address
|
||||
# Oldest messages will be removed automatically, so mailboxes never run full.
|
||||
max_mailbox_size = 500M
|
||||
|
||||
# maximum message size for an e-mail in bytes
|
||||
|
||||
@@ -25,13 +25,19 @@ def create_newemail_dict(config: Config):
|
||||
return dict(email=f"{user}@{config.mail_domain}", password=f"{password}")
|
||||
|
||||
|
||||
def create_dclogin_url(email, password):
|
||||
def create_dclogin_url(config, email, password):
|
||||
"""Build a dclogin: URL with credentials and self-signed cert acceptance.
|
||||
|
||||
Uses ic=3 (AcceptInvalidCertificates) so chatmail clients
|
||||
can connect to servers with self-signed TLS certificates.
|
||||
"""
|
||||
return f"dclogin:{quote(email, safe='@')}?p={quote(password, safe='')}&v=1&ic=3"
|
||||
if config.ipv4_relay:
|
||||
imap_host = "&ih=" + config.ipv4_relay
|
||||
smtp_host = "&sh=" + config.ipv4_relay
|
||||
else:
|
||||
imap_host = ""
|
||||
smtp_host = ""
|
||||
return f"dclogin:{quote(email, safe='@[]')}?p={quote(password, safe='')}&v=1{imap_host}{smtp_host}&ic=3"
|
||||
|
||||
|
||||
def print_new_account():
|
||||
@@ -40,7 +46,9 @@ def print_new_account():
|
||||
|
||||
result = dict(email=creds["email"], password=creds["password"])
|
||||
if config.tls_cert_mode == "self":
|
||||
result["dclogin_url"] = create_dclogin_url(creds["email"], creds["password"])
|
||||
result["dclogin_url"] = create_dclogin_url(
|
||||
config, creds["email"], creds["password"]
|
||||
)
|
||||
|
||||
print("Content-Type: application/json")
|
||||
print("")
|
||||
|
||||
@@ -31,6 +31,11 @@ def example_config(make_config):
|
||||
return make_config("chat.example.org")
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def ipv4_config(make_config):
|
||||
return make_config("1.3.3.7")
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def maildomain(example_config):
|
||||
return example_config.mail_domain
|
||||
@@ -85,13 +90,13 @@ def mockout():
|
||||
captured_green = []
|
||||
captured_plain = []
|
||||
|
||||
def red(self, msg, **kw):
|
||||
def red(self, msg):
|
||||
self.captured_red.append(msg)
|
||||
|
||||
def green(self, msg, **kw):
|
||||
def green(self, msg):
|
||||
self.captured_green.append(msg)
|
||||
|
||||
def print(self, msg="", **kw):
|
||||
def __call__(self, msg):
|
||||
self.captured_plain.append(msg)
|
||||
|
||||
return MockOut()
|
||||
|
||||
@@ -1,6 +1,14 @@
|
||||
from contextlib import nullcontext as does_not_raise
|
||||
|
||||
import pytest
|
||||
|
||||
from chatmaild.config import read_config
|
||||
from chatmaild.config import (
|
||||
format_arpa_address,
|
||||
format_mail_domain,
|
||||
is_valid_ipv4,
|
||||
parse_size_mb,
|
||||
read_config,
|
||||
)
|
||||
|
||||
|
||||
def test_read_config_basic(example_config):
|
||||
@@ -13,6 +21,12 @@ def test_read_config_basic(example_config):
|
||||
example_config = read_config(inipath)
|
||||
assert example_config.max_user_send_per_minute == 37
|
||||
assert example_config.mail_domain == "chat.example.org"
|
||||
assert example_config.ipv4_relay is None
|
||||
|
||||
|
||||
def test_read_config_ipv4(ipv4_config):
|
||||
assert ipv4_config.ipv4_relay == "1.3.3.7"
|
||||
assert ipv4_config.mail_domain == "[1.3.3.7]"
|
||||
|
||||
|
||||
def test_read_config_basic_using_defaults(tmp_path, maildomain):
|
||||
@@ -121,3 +135,59 @@ def test_config_tls_external_bad_format(make_config):
|
||||
"tls_external_cert_and_key": "/only/one/path.pem",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def test_parse_size_mb():
|
||||
assert parse_size_mb("500M") == 500
|
||||
assert parse_size_mb("2G") == 2048
|
||||
assert parse_size_mb(" 1g ") == 1024
|
||||
assert parse_size_mb("100MB") == 100
|
||||
assert parse_size_mb("256") == 256
|
||||
|
||||
|
||||
def test_max_mailbox_size_mb(make_config):
|
||||
config = make_config("chat.example.org")
|
||||
assert config.max_mailbox_size == "500M"
|
||||
assert config.max_mailbox_size_mb == 500
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
["input", "result"],
|
||||
[
|
||||
("example.org", False),
|
||||
("1.3.3.7", True),
|
||||
("fe::1", False),
|
||||
("ad.1e.dag.adf", False),
|
||||
("12394142", False),
|
||||
],
|
||||
)
|
||||
def test_is_valid_ipv4(input, result):
|
||||
assert result == is_valid_ipv4(input)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
["input", "result", "exception"],
|
||||
[
|
||||
("example.org", "example.org", does_not_raise()),
|
||||
("1.3.3.7", "7.3.3.1.in-addr.arpa", does_not_raise()),
|
||||
("fe::1", None, pytest.raises(ValueError)),
|
||||
("12394142", None, pytest.raises(ValueError)),
|
||||
],
|
||||
)
|
||||
def test_format_arpa_address(input, result, exception):
|
||||
with exception:
|
||||
assert result == format_arpa_address(input)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
["input", "result", "exception"],
|
||||
[
|
||||
("example.org", "example.org", does_not_raise()),
|
||||
("1.3.3.7", "[1.3.3.7]", does_not_raise()),
|
||||
("fe::1", None, pytest.raises(ValueError)),
|
||||
("12394142", None, pytest.raises(ValueError)),
|
||||
],
|
||||
)
|
||||
def test_format_mail_domain(input, result, exception):
|
||||
with exception:
|
||||
assert result == format_mail_domain(input)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import time
|
||||
|
||||
from chatmaild.doveauth import AuthDictProxy
|
||||
from chatmaild.expire import main as main_expire
|
||||
from chatmaild.expire import daily_expire_main as main_expire
|
||||
|
||||
|
||||
def test_login_timestamps(example_config):
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
import itertools
|
||||
import os
|
||||
import random
|
||||
import time
|
||||
from datetime import datetime
|
||||
from fnmatch import fnmatch
|
||||
from pathlib import Path
|
||||
@@ -9,13 +11,19 @@ import pytest
|
||||
from chatmaild.expire import (
|
||||
FileEntry,
|
||||
MailboxStat,
|
||||
expire_to_target,
|
||||
get_file_entry,
|
||||
iter_mailboxes,
|
||||
os_listdir_if_exists,
|
||||
parse_dovecot_filename,
|
||||
quota_expire_main,
|
||||
scan_mailbox_messages,
|
||||
)
|
||||
from chatmaild.expire import main as expiry_main
|
||||
from chatmaild.expire import daily_expire_main as expiry_main
|
||||
from chatmaild.fsreport import main as report_main
|
||||
|
||||
MB = 1024 * 1024
|
||||
|
||||
|
||||
def fill_mbox(folderdir):
|
||||
password = folderdir.joinpath("password")
|
||||
@@ -196,3 +204,51 @@ def test_os_listdir_if_exists(tmp_path):
|
||||
tmp_path.joinpath("x").write_text("hello")
|
||||
assert len(os_listdir_if_exists(str(tmp_path))) == 1
|
||||
assert len(os_listdir_if_exists(str(tmp_path.joinpath("123123")))) == 0
|
||||
|
||||
|
||||
# --- quota expire tests ---
|
||||
|
||||
_msg_counter = itertools.count(1)
|
||||
|
||||
|
||||
def _create_message(basedir, sub, size, days_old=0, disk_size=None):
|
||||
seq = next(_msg_counter)
|
||||
mtime = int(time.time() - days_old * 86400)
|
||||
name = f"{mtime}.M1P1Q{seq}.hostname,S={size},W={size}:2,S"
|
||||
path = basedir / sub / name
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
path.write_bytes(b"x" * (disk_size if disk_size is not None else size))
|
||||
os.utime(path, (mtime, mtime))
|
||||
return path
|
||||
|
||||
|
||||
def test_parse_dovecot_filename():
|
||||
e = parse_dovecot_filename("cur/1775324677.M448978P3029757.exam,S=3235,W=3305:2,S")
|
||||
assert e.path == "cur/1775324677.M448978P3029757.exam,S=3235,W=3305:2,S"
|
||||
assert e.mtime == 1775324677
|
||||
assert e.quota_size == 3235
|
||||
assert parse_dovecot_filename("cur/msg_without_structure") is None
|
||||
|
||||
|
||||
def test_expire_to_target(tmp_path):
|
||||
_create_message(tmp_path, "cur", MB, days_old=10, disk_size=100)
|
||||
_create_message(tmp_path, "new", MB, days_old=5)
|
||||
_create_message(tmp_path, "cur", MB, days_old=0) # undeletable (<1 hour)
|
||||
assert len(scan_mailbox_messages(tmp_path)) == 3
|
||||
# removes oldest first, uses S= size not disk size
|
||||
removed = expire_to_target(tmp_path, MB)
|
||||
assert removed == 2
|
||||
msgs = scan_mailbox_messages(tmp_path)
|
||||
assert len(msgs) == 1
|
||||
# the surviving message is the fresh undeletable one
|
||||
assert msgs[0].mtime > time.time() - 3600
|
||||
|
||||
|
||||
def test_quota_expire_main(tmp_path, capsys):
|
||||
mbox = tmp_path / "user@example.org"
|
||||
_create_message(mbox, "cur", 2 * MB, days_old=5)
|
||||
(mbox / "maildirsize").write_text("x")
|
||||
quota_expire_main([str(1), str(mbox)])
|
||||
_, err = capsys.readouterr()
|
||||
assert "quota-expire: removed 1 message(s) from user@example.org" in err
|
||||
assert not (mbox / "maildirsize").exists()
|
||||
|
||||
@@ -19,18 +19,35 @@ def test_create_newemail_dict(example_config):
|
||||
assert ac1["password"] != ac2["password"]
|
||||
|
||||
|
||||
def test_create_dclogin_url():
|
||||
url = create_dclogin_url("user@example.org", "p@ss w+rd")
|
||||
def test_create_newemail_dict_ip(ipv4_config):
|
||||
ac = create_newemail_dict(ipv4_config)
|
||||
assert ac["email"].endswith("@[1.3.3.7]")
|
||||
|
||||
|
||||
def test_create_dclogin_url(example_config):
|
||||
addr = "user@example.org"
|
||||
password = "p@ss w+rd"
|
||||
url = create_dclogin_url(example_config, addr, password)
|
||||
assert url.startswith("dclogin:")
|
||||
assert "v=1" in url
|
||||
assert "ic=3" in url
|
||||
|
||||
assert "user@example.org" in url
|
||||
assert addr in url
|
||||
# password special chars must be encoded
|
||||
assert "p%40ss" in url
|
||||
assert "w%2Brd" in url
|
||||
|
||||
|
||||
def test_create_dclogin_url_ipv4(ipv4_config):
|
||||
addr = "user@[1.3.3.7]"
|
||||
password = "p@ss w+rd"
|
||||
url = create_dclogin_url(ipv4_config, addr, password)
|
||||
assert url.startswith("dclogin:")
|
||||
assert "v=1" in url
|
||||
assert "ic=3" in url
|
||||
assert addr in url
|
||||
|
||||
|
||||
def test_print_new_account(capsys, monkeypatch, maildomain, tmpdir, example_config):
|
||||
monkeypatch.setattr(chatmaild.newemail, "CONFIG_PATH", str(example_config._inipath))
|
||||
print_new_account()
|
||||
|
||||
@@ -3,6 +3,8 @@ import io
|
||||
import os
|
||||
from contextlib import contextmanager
|
||||
|
||||
from pyinfra import host
|
||||
from pyinfra.facts.server import Command
|
||||
from pyinfra.operations import files, server, systemd
|
||||
|
||||
|
||||
@@ -11,6 +13,17 @@ def has_systemd():
|
||||
return os.path.isdir("/run/systemd/system")
|
||||
|
||||
|
||||
def is_in_container() -> bool:
|
||||
"""Return True if running inside a container (Docker, LXC, etc.)."""
|
||||
return (
|
||||
host.get_fact(
|
||||
Command,
|
||||
"systemd-detect-virt --container --quiet 2>/dev/null && echo yes || true",
|
||||
)
|
||||
== "yes"
|
||||
)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def blocked_service_startup():
|
||||
"""Prevent services from auto-starting during package installation.
|
||||
|
||||
@@ -15,27 +15,10 @@ from pathlib import Path
|
||||
import pyinfra
|
||||
from chatmaild.config import read_config, write_initial_config
|
||||
from packaging import version
|
||||
from termcolor import colored
|
||||
|
||||
from . import dns, remote
|
||||
from .lxc.cli import (
|
||||
lxc_start_cmd,
|
||||
lxc_start_cmd_options,
|
||||
lxc_status_cmd,
|
||||
lxc_status_cmd_options,
|
||||
lxc_stop_cmd,
|
||||
lxc_stop_cmd_options,
|
||||
lxc_test_cmd,
|
||||
lxc_test_cmd_options,
|
||||
)
|
||||
from .lxc.incus import DNSConfigurationError
|
||||
from .sshexec import (
|
||||
LocalExec,
|
||||
SSHExec,
|
||||
resolve_host_from_ssh_config,
|
||||
resolve_key_from_ssh_config,
|
||||
)
|
||||
from .util import Out
|
||||
from .www import main as webdev_main
|
||||
from .sshexec import LocalExec, SSHExec
|
||||
|
||||
#
|
||||
# cmdeploy sub commands and options
|
||||
@@ -99,21 +82,20 @@ def run_cmd_options(parser):
|
||||
help="disable checks nslookup for dns",
|
||||
)
|
||||
add_ssh_host_option(parser)
|
||||
add_ssh_config_option(parser)
|
||||
|
||||
|
||||
def run_cmd(args, out):
|
||||
"""Deploy chatmail services on the remote server."""
|
||||
|
||||
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain
|
||||
sshexec = get_sshexec(ssh_host, ssh_config=args.ssh_config)
|
||||
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain_bare
|
||||
sshexec = get_sshexec(ssh_host)
|
||||
require_iroh = args.config.enable_iroh_relay
|
||||
strict_tls = args.config.tls_cert_mode == "acme"
|
||||
if args.config.ipv4_relay:
|
||||
args.dns_check_disabled = True
|
||||
if not args.dns_check_disabled:
|
||||
remote_data = dns.get_initial_remote_data(sshexec, args.config.mail_domain)
|
||||
if not dns.check_initial_remote_data(
|
||||
remote_data, strict_tls=strict_tls, print=out.red
|
||||
):
|
||||
if not dns.check_initial_remote_data(remote_data, strict_tls=strict_tls, print=out.red):
|
||||
return 1
|
||||
|
||||
env = os.environ.copy()
|
||||
@@ -121,31 +103,11 @@ def run_cmd(args, out):
|
||||
env["CHATMAIL_WEBSITE_ONLY"] = "True" if args.website_only else ""
|
||||
env["CHATMAIL_DISABLE_MAIL"] = "True" if args.disable_mail else ""
|
||||
env["CHATMAIL_REQUIRE_IROH"] = "True" if require_iroh else ""
|
||||
if not args.dns_check_disabled:
|
||||
env["CHATMAIL_ADDR_V4"] = remote_data.get("A") or ""
|
||||
env["CHATMAIL_ADDR_V6"] = remote_data.get("AAAA") or ""
|
||||
env["DEBIAN_FRONTEND"] = "noninteractive"
|
||||
env["TERM"] = "linux"
|
||||
deploy_path = importlib.resources.files(__package__).joinpath("run.py").resolve()
|
||||
pyinf = "pyinfra --dry" if args.dry_run else "pyinfra"
|
||||
|
||||
cmd = f"{pyinf} --ssh-user root {ssh_host} {deploy_path} -y"
|
||||
ssh_config = args.ssh_config
|
||||
if ssh_config:
|
||||
ssh_config = str(Path(ssh_config).resolve())
|
||||
|
||||
# Use pyinfra's native SSH data keys to configure the connection directly
|
||||
# rather than relying on paramiko config parsing (see also sshexec.py)
|
||||
ip = resolve_host_from_ssh_config(ssh_host, ssh_config)
|
||||
key = resolve_key_from_ssh_config(ssh_host, ssh_config)
|
||||
data_args = f"--data ssh_hostname={ip} --data ssh_known_hosts_file=/dev/null"
|
||||
if key:
|
||||
data_args += f" --data ssh_key={key}"
|
||||
cmd = f"{pyinf} --ssh-user root {ssh_host} {deploy_path} -y {data_args}"
|
||||
if ssh_host in ["localhost", "@docker"]:
|
||||
if ssh_host == "@docker":
|
||||
env["CHATMAIL_NOPORTCHECK"] = "True"
|
||||
env["CHATMAIL_NOSYSCTL"] = "True"
|
||||
if ssh_host == "localhost":
|
||||
cmd = f"{pyinf} @local {deploy_path} -y"
|
||||
|
||||
if version.parse(pyinfra.__version__) < version.parse("3"):
|
||||
@@ -153,19 +115,14 @@ def run_cmd(args, out):
|
||||
return 1
|
||||
|
||||
try:
|
||||
ret = out.shell(cmd, env=env)
|
||||
if ret:
|
||||
out.red("Deploy failed")
|
||||
return 1
|
||||
out.check_call(cmd, env=env)
|
||||
if args.website_only:
|
||||
out.green("Website deployment completed.")
|
||||
elif (
|
||||
not args.dns_check_disabled
|
||||
and strict_tls
|
||||
and not remote_data["acme_account_url"]
|
||||
):
|
||||
elif not args.dns_check_disabled and strict_tls and not remote_data["acme_account_url"]:
|
||||
out.red("Deploy completed but letsencrypt not configured")
|
||||
out.red("Run 'cmdeploy run' again")
|
||||
elif args.config.ipv4_relay:
|
||||
out.green("Deploy completed.")
|
||||
else:
|
||||
out.green("Deploy completed, call `cmdeploy dns` next.")
|
||||
return 0
|
||||
@@ -180,16 +137,19 @@ def dns_cmd_options(parser):
|
||||
dest="zonefile",
|
||||
type=pathlib.Path,
|
||||
default=None,
|
||||
help="write DNS records in standard BIND format to the given file",
|
||||
help="write out a zonefile",
|
||||
)
|
||||
add_ssh_host_option(parser)
|
||||
add_ssh_config_option(parser)
|
||||
|
||||
|
||||
def dns_cmd(args, out):
|
||||
"""Check DNS entries and optionally generate dns zone file."""
|
||||
if args.config.ipv4_relay:
|
||||
ipv4 = args.config.ipv4_relay
|
||||
print(f"[WARNING] {ipv4} is not a domain, skipping DNS checks.")
|
||||
return 0
|
||||
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain
|
||||
sshexec = get_sshexec(ssh_host, verbose=args.verbose, ssh_config=args.ssh_config)
|
||||
sshexec = get_sshexec(ssh_host, verbose=args.verbose)
|
||||
tls_cert_mode = args.config.tls_cert_mode
|
||||
strict_tls = tls_cert_mode == "acme"
|
||||
remote_data = dns.get_initial_remote_data(sshexec, args.config.mail_domain)
|
||||
@@ -220,14 +180,13 @@ def dns_cmd(args, out):
|
||||
|
||||
def status_cmd_options(parser):
|
||||
add_ssh_host_option(parser)
|
||||
add_ssh_config_option(parser)
|
||||
|
||||
|
||||
def status_cmd(args, out):
|
||||
"""Display status for online chatmail instance."""
|
||||
|
||||
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain
|
||||
sshexec = get_sshexec(ssh_host, verbose=args.verbose, ssh_config=args.ssh_config)
|
||||
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain_bare
|
||||
sshexec = get_sshexec(ssh_host, verbose=args.verbose)
|
||||
|
||||
out.green(f"chatmail domain: {args.config.mail_domain}")
|
||||
if args.config.privacy_mail:
|
||||
@@ -240,21 +199,16 @@ def status_cmd(args, out):
|
||||
|
||||
|
||||
def test_cmd_options(parser):
|
||||
parser.add_argument(
|
||||
"--slow",
|
||||
dest="slow",
|
||||
action="store_true",
|
||||
help="also run slow tests",
|
||||
)
|
||||
add_ssh_host_option(parser)
|
||||
add_ssh_config_option(parser)
|
||||
|
||||
|
||||
def test_cmd(args, out):
|
||||
"""Run local and online tests for chatmail deployment."""
|
||||
|
||||
env = os.environ.copy()
|
||||
env["CHATMAIL_INI"] = str(args.inipath.resolve())
|
||||
env["CHATMAIL_INI"] = str(args.inipath.absolute())
|
||||
if args.ssh_host:
|
||||
env["CHATMAIL_SSH"] = args.ssh_host
|
||||
|
||||
pytest_path = shutil.which("pytest")
|
||||
pytest_args = [
|
||||
@@ -266,13 +220,7 @@ def test_cmd(args, out):
|
||||
"-v",
|
||||
"--durations=5",
|
||||
]
|
||||
if args.slow:
|
||||
pytest_args.append("--slow")
|
||||
if args.ssh_host:
|
||||
pytest_args.extend(["--ssh-host", args.ssh_host])
|
||||
if args.ssh_config:
|
||||
pytest_args.extend(["--ssh-config", str(Path(args.ssh_config).resolve())])
|
||||
ret = out.shell(" ".join(pytest_args), env=env)
|
||||
ret = out.run_ret(pytest_args, env=env)
|
||||
return ret
|
||||
|
||||
|
||||
@@ -309,8 +257,8 @@ def fmt_cmd(args, out):
|
||||
format_args.extend(sources)
|
||||
check_args.extend(sources)
|
||||
|
||||
out.shell(" ".join(format_args), quiet=not args.verbose)
|
||||
out.shell(" ".join(check_args), quiet=not args.verbose)
|
||||
out.check_call(" ".join(format_args), quiet=not args.verbose)
|
||||
out.check_call(" ".join(check_args), quiet=not args.verbose)
|
||||
|
||||
|
||||
def bench_cmd(args, out):
|
||||
@@ -323,7 +271,9 @@ def bench_cmd(args, out):
|
||||
|
||||
def webdev_cmd(args, out):
|
||||
"""Run local web development loop for static web pages."""
|
||||
webdev_main()
|
||||
from .www import main
|
||||
|
||||
main()
|
||||
|
||||
|
||||
#
|
||||
@@ -331,25 +281,41 @@ def webdev_cmd(args, out):
|
||||
#
|
||||
|
||||
|
||||
class Out:
|
||||
"""Convenience output printer providing coloring."""
|
||||
|
||||
def red(self, msg, file=sys.stderr):
|
||||
print(colored(msg, "red"), file=file)
|
||||
|
||||
def green(self, msg, file=sys.stderr):
|
||||
print(colored(msg, "green"), file=file)
|
||||
|
||||
def __call__(self, msg, red=False, green=False, file=sys.stdout):
|
||||
color = "red" if red else ("green" if green else None)
|
||||
print(colored(msg, color), file=file)
|
||||
|
||||
def check_call(self, arg, env=None, quiet=False):
|
||||
if not quiet:
|
||||
self(f"[$ {arg}]", file=sys.stderr)
|
||||
return subprocess.check_call(arg, shell=True, env=env)
|
||||
|
||||
def run_ret(self, args, env=None, quiet=False):
|
||||
if not quiet:
|
||||
cmdstring = " ".join(args)
|
||||
self(f"[$ {cmdstring}]", file=sys.stderr)
|
||||
proc = subprocess.run(args, env=env, check=False)
|
||||
return proc.returncode
|
||||
|
||||
|
||||
def add_ssh_host_option(parser):
|
||||
parser.add_argument(
|
||||
"--ssh-host",
|
||||
dest="ssh_host",
|
||||
help="Run commands on 'localhost', via '@docker', or on a specific SSH host "
|
||||
help="Run commands on 'localhost' or on a specific SSH host "
|
||||
"instead of chatmail.ini's mail_domain.",
|
||||
)
|
||||
|
||||
|
||||
def add_ssh_config_option(parser):
|
||||
parser.add_argument(
|
||||
"--ssh-config",
|
||||
dest="ssh_config",
|
||||
type=Path,
|
||||
default=None,
|
||||
help="Path to an SSH config file (e.g. lxconfigs/ssh-config).",
|
||||
)
|
||||
|
||||
|
||||
def add_config_option(parser):
|
||||
parser.add_argument(
|
||||
"--config",
|
||||
@@ -359,26 +325,25 @@ def add_config_option(parser):
|
||||
type=Path,
|
||||
help="path to the chatmail.ini file",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--verbose",
|
||||
"-v",
|
||||
dest="verbose",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="provide verbose logging",
|
||||
)
|
||||
|
||||
|
||||
def add_subcommand(subparsers, func, add_config=True):
|
||||
def add_subcommand(subparsers, func):
|
||||
name = func.__name__
|
||||
assert name.endswith("_cmd")
|
||||
name = name[:-4].replace("_", "-")
|
||||
name = name[:-4]
|
||||
doc = func.__doc__.strip()
|
||||
help = doc.split("\n")[0].strip(".")
|
||||
p = subparsers.add_parser(name, description=doc, help=help)
|
||||
p.set_defaults(func=func)
|
||||
if add_config:
|
||||
add_config_option(p)
|
||||
p.add_argument(
|
||||
"-v",
|
||||
"--verbose",
|
||||
dest="verbose",
|
||||
action="count",
|
||||
default=0,
|
||||
help="increase verbosity (can be repeated: -v, -vv)",
|
||||
)
|
||||
add_config_option(p)
|
||||
return p
|
||||
|
||||
|
||||
@@ -387,60 +352,43 @@ Setup your chatmail server configuration and
|
||||
deploy it via SSH to your remote location.
|
||||
"""
|
||||
|
||||
# Explicit subcommand registry: (cmd_func, options_func_or_None, needs_config).
|
||||
# LXC commands don't need a chatmail.ini (no config); all others do.
|
||||
SUBCOMMANDS = [
|
||||
(init_cmd, init_cmd_options, True),
|
||||
(run_cmd, run_cmd_options, True),
|
||||
(dns_cmd, dns_cmd_options, True),
|
||||
(status_cmd, status_cmd_options, True),
|
||||
(test_cmd, test_cmd_options, True),
|
||||
(fmt_cmd, fmt_cmd_options, True),
|
||||
(bench_cmd, None, True),
|
||||
(webdev_cmd, None, True),
|
||||
(lxc_start_cmd, lxc_start_cmd_options, False),
|
||||
(lxc_stop_cmd, lxc_stop_cmd_options, False),
|
||||
(lxc_status_cmd, lxc_status_cmd_options, False),
|
||||
(lxc_test_cmd, lxc_test_cmd_options, False),
|
||||
]
|
||||
|
||||
|
||||
def get_parser():
|
||||
"""Return an ArgumentParser for the 'cmdeploy' CLI"""
|
||||
|
||||
parser = argparse.ArgumentParser(description=description.strip())
|
||||
parser.set_defaults(func=None, inipath=None)
|
||||
subparsers = parser.add_subparsers(title="subcommands")
|
||||
|
||||
for func, addopts, needs_config in SUBCOMMANDS:
|
||||
subparser = add_subcommand(subparsers, func, add_config=needs_config)
|
||||
if addopts is not None:
|
||||
addopts(subparser)
|
||||
# find all subcommands in the module namespace
|
||||
glob = globals()
|
||||
for name, func in glob.items():
|
||||
if name.endswith("_cmd"):
|
||||
subparser = add_subcommand(subparsers, func)
|
||||
addopts = glob.get(name + "_options")
|
||||
if addopts is not None:
|
||||
addopts(subparser)
|
||||
|
||||
return parser
|
||||
|
||||
|
||||
def get_sshexec(ssh_host: str, verbose=True, ssh_config=None):
|
||||
def get_sshexec(ssh_host: str, verbose=True):
|
||||
if ssh_host in ["localhost", "@local"]:
|
||||
return LocalExec(verbose, docker=False)
|
||||
elif ssh_host == "@docker":
|
||||
return LocalExec(verbose, docker=True)
|
||||
return LocalExec(verbose)
|
||||
if verbose:
|
||||
print(f"[ssh] login to {ssh_host}")
|
||||
return SSHExec(ssh_host, verbose=verbose, ssh_config=ssh_config)
|
||||
return SSHExec(ssh_host, verbose=verbose)
|
||||
|
||||
|
||||
def main(args=None):
|
||||
"""Provide main entry point for 'cmdeploy' CLI invocation."""
|
||||
parser = get_parser()
|
||||
args = parser.parse_args(args=args)
|
||||
if args.func is None:
|
||||
if not hasattr(args, "func"):
|
||||
return parser.parse_args(["-h"])
|
||||
|
||||
out = Out(verbosity=args.verbose)
|
||||
out = Out()
|
||||
kwargs = {}
|
||||
|
||||
if args.inipath is not None and args.func.__name__ not in ("init_cmd", "fmt_cmd"):
|
||||
if args.func.__name__ not in ("init_cmd", "fmt_cmd"):
|
||||
if not args.inipath.exists():
|
||||
out.red(f"expecting {args.inipath} to exist, run init first?")
|
||||
raise SystemExit(1)
|
||||
@@ -455,9 +403,6 @@ def main(args=None):
|
||||
if res is None:
|
||||
res = 0
|
||||
return res
|
||||
except DNSConfigurationError as exc:
|
||||
out.red(str(exc))
|
||||
return 1
|
||||
except KeyboardInterrupt:
|
||||
out.red("KeyboardInterrupt")
|
||||
sys.exit(130)
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
Chat Mail pyinfra deploy.
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
@@ -17,6 +16,8 @@ from pyinfra.facts.files import Sha256File
|
||||
from pyinfra.facts.systemd import SystemdEnabled
|
||||
from pyinfra.operations import apt, files, pip, server, systemd
|
||||
|
||||
from cmdeploy.cmdeploy import Out
|
||||
|
||||
from .acmetool import AcmetoolDeployer
|
||||
from .basedeploy import (
|
||||
Deployer,
|
||||
@@ -26,6 +27,7 @@ from .basedeploy import (
|
||||
configure_remote_units,
|
||||
get_resource,
|
||||
has_systemd,
|
||||
is_in_container,
|
||||
)
|
||||
from .dovecot.deployer import DovecotDeployer
|
||||
from .external.deployer import ExternalTlsDeployer
|
||||
@@ -35,7 +37,6 @@ from .nginx.deployer import NginxDeployer
|
||||
from .opendkim.deployer import OpendkimDeployer
|
||||
from .postfix.deployer import PostfixDeployer
|
||||
from .selfsigned.deployer import SelfSignedTlsDeployer
|
||||
from .util import Out, get_version_string
|
||||
from .www import build_webpages, find_merge_conflict, get_paths
|
||||
|
||||
|
||||
@@ -149,9 +150,6 @@ class UnboundDeployer(Deployer):
|
||||
self.need_restart = False
|
||||
|
||||
def install(self):
|
||||
# Run local DNS resolver `unbound`. `resolvconf` takes care of
|
||||
# setting up /etc/resolv.conf to use 127.0.0.1 as the resolver.
|
||||
|
||||
# On an IPv4-only system, if unbound is started but not configured,
|
||||
# it causes subsequent steps to fail to resolve hosts.
|
||||
with blocked_service_startup():
|
||||
@@ -161,6 +159,31 @@ class UnboundDeployer(Deployer):
|
||||
)
|
||||
|
||||
def configure(self):
|
||||
# Remove dynamic resolver managers that compete for /etc/resolv.conf.
|
||||
apt.packages(
|
||||
name="Purge resolvconf",
|
||||
packages=["resolvconf"],
|
||||
present=False,
|
||||
extra_uninstall_args="--purge",
|
||||
)
|
||||
# systemd-resolved can't be purged due to dependencies; stop and mask.
|
||||
server.shell(
|
||||
name="Stop and mask systemd-resolved",
|
||||
commands=[
|
||||
"systemctl stop systemd-resolved.service || true",
|
||||
"systemctl mask systemd-resolved.service",
|
||||
],
|
||||
)
|
||||
# Configure unbound resolver with Quad9 fallback and a trailing newline
|
||||
# (SolusVM bug).
|
||||
files.put(
|
||||
name="Write static resolv.conf",
|
||||
src=BytesIO(b"nameserver 127.0.0.1\nnameserver 9.9.9.9\n"),
|
||||
dest="/etc/resolv.conf",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
server.shell(
|
||||
name="Generate root keys for validating DNSSEC",
|
||||
commands=[
|
||||
@@ -254,14 +277,8 @@ class WebsiteDeployer(Deployer):
|
||||
logger.warning("Web page build failed, skipping website deployment")
|
||||
return
|
||||
# if it is not a hugo page, upload it as is
|
||||
# pyinfra files.rsync (experimental) causes problems with ssh-config configuration
|
||||
# the stable files.sync should do
|
||||
files.sync(
|
||||
src=str(www_path),
|
||||
dest="/var/www/html",
|
||||
user="www-data",
|
||||
group="www-data",
|
||||
delete=True,
|
||||
files.rsync(
|
||||
f"{www_path}/", "/var/www/html", flags=["-avz", "--chown=www-data"]
|
||||
)
|
||||
|
||||
|
||||
@@ -451,7 +468,7 @@ class ChatmailVenvDeployer(Deployer):
|
||||
|
||||
def configure(self):
|
||||
_configure_remote_venv_with_chatmaild(self.config)
|
||||
configure_remote_units(self.config.mail_domain, self.units)
|
||||
configure_remote_units(self.config.mail_domain_bare, self.units)
|
||||
|
||||
def activate(self):
|
||||
activate_remote_units(self.units)
|
||||
@@ -490,11 +507,10 @@ class ChatmailDeployer(Deployer):
|
||||
)
|
||||
|
||||
def configure(self):
|
||||
# Ensure the per-domain mailbox directory exists before
|
||||
# chatmail-metadata starts (it crashes without it).
|
||||
# metadata crashes if the mailboxes dir does not exist
|
||||
files.directory(
|
||||
name="Ensure vmail mailbox directory exists",
|
||||
path=f"/home/vmail/mail/{self.mail_domain}",
|
||||
path=str(self.config.mailboxes_dir),
|
||||
user="vmail",
|
||||
group="vmail",
|
||||
mode="700",
|
||||
@@ -510,15 +526,6 @@ class ChatmailDeployer(Deployer):
|
||||
],
|
||||
)
|
||||
|
||||
files.directory(
|
||||
name=f"Ensure mailboxes directory {self.config.mailboxes_dir} exists",
|
||||
path=str(self.config.mailboxes_dir),
|
||||
user="vmail",
|
||||
group="vmail",
|
||||
mode="700",
|
||||
present=True,
|
||||
)
|
||||
|
||||
|
||||
class FcgiwrapDeployer(Deployer):
|
||||
def install(self):
|
||||
@@ -538,9 +545,17 @@ class FcgiwrapDeployer(Deployer):
|
||||
|
||||
class GithashDeployer(Deployer):
|
||||
def activate(self):
|
||||
try:
|
||||
git_hash = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode()
|
||||
except Exception:
|
||||
git_hash = "unknown\n"
|
||||
try:
|
||||
git_diff = subprocess.check_output(["git", "diff"]).decode()
|
||||
except Exception:
|
||||
git_diff = ""
|
||||
files.put(
|
||||
name="Upload chatmail relay git commit hash",
|
||||
src=StringIO(get_version_string()),
|
||||
src=StringIO(git_hash + git_diff),
|
||||
dest="/etc/chatmail-version",
|
||||
mode="700",
|
||||
)
|
||||
@@ -569,35 +584,21 @@ def deploy_chatmail(config_path: Path, disable_mail: bool, website_only: bool) -
|
||||
"""
|
||||
config = read_config(config_path)
|
||||
check_config(config)
|
||||
mail_domain = config.mail_domain
|
||||
bare_host = config.mail_domain_bare
|
||||
|
||||
if website_only:
|
||||
Deployment().perform_stages([WebsiteDeployer(config)])
|
||||
return
|
||||
|
||||
if host.get_fact(Port, port=53) != "unbound":
|
||||
files.line(
|
||||
name="Add 9.9.9.9 to resolv.conf",
|
||||
path="/etc/resolv.conf",
|
||||
# Guard against resolv.conf missing a trailing newline (SolusVM bug).
|
||||
line="\nnameserver 9.9.9.9",
|
||||
)
|
||||
|
||||
# Check if mtail_address interface is available (if configured)
|
||||
if config.mtail_address and config.mtail_address not in (
|
||||
"127.0.0.1",
|
||||
"::1",
|
||||
"localhost",
|
||||
):
|
||||
if config.mtail_address and config.mtail_address not in ('127.0.0.1', '::1', 'localhost'):
|
||||
ipv4_addrs = host.get_fact(hardware.Ipv4Addrs)
|
||||
all_addresses = [addr for addrs in ipv4_addrs.values() for addr in addrs]
|
||||
if config.mtail_address not in all_addresses:
|
||||
Out().red(
|
||||
f"Deploy failed: mtail_address {config.mtail_address} is not available (VPN up?).\n"
|
||||
)
|
||||
Out().red(f"Deploy failed: mtail_address {config.mtail_address} is not available (VPN up?).\n")
|
||||
exit(1)
|
||||
|
||||
if not os.environ.get("CHATMAIL_NOPORTCHECK"):
|
||||
if not is_in_container():
|
||||
port_services = [
|
||||
(["master", "smtpd"], 25),
|
||||
("unbound", 53),
|
||||
@@ -634,7 +635,7 @@ def deploy_chatmail(config_path: Path, disable_mail: bool, website_only: bool) -
|
||||
)
|
||||
exit(1)
|
||||
|
||||
tls_deployer = get_tls_deployer(config, mail_domain)
|
||||
tls_deployer = get_tls_deployer(config, bare_host)
|
||||
|
||||
all_deployers = [
|
||||
ChatmailDeployer(config),
|
||||
@@ -642,13 +643,13 @@ def deploy_chatmail(config_path: Path, disable_mail: bool, website_only: bool) -
|
||||
FiltermailDeployer(),
|
||||
JournaldDeployer(),
|
||||
UnboundDeployer(config),
|
||||
TurnDeployer(mail_domain),
|
||||
TurnDeployer(bare_host),
|
||||
IrohDeployer(config.enable_iroh_relay),
|
||||
tls_deployer,
|
||||
WebsiteDeployer(config),
|
||||
ChatmailVenvDeployer(config),
|
||||
MtastsDeployer(),
|
||||
OpendkimDeployer(mail_domain),
|
||||
OpendkimDeployer(config.mail_domain),
|
||||
# Dovecot should be started before Postfix
|
||||
# because it creates authentication socket
|
||||
# required by Postfix.
|
||||
|
||||
@@ -4,11 +4,7 @@ from . import remote
|
||||
|
||||
|
||||
def parse_zone_records(text):
|
||||
"""Yield ``(name, ttl, rtype, rdata)`` from standard BIND-format text.
|
||||
|
||||
Skips comment lines (starting with ``;``) and blank lines.
|
||||
Each record line must have the format ``name TTL IN type rdata``.
|
||||
"""
|
||||
"""Yield ``(name, ttl, rtype, rdata)`` from standard BIND-format text."""
|
||||
for raw_line in text.splitlines():
|
||||
line = raw_line.strip()
|
||||
if not line or line.startswith(";"):
|
||||
@@ -48,39 +44,35 @@ def get_filled_zone_file(remote_data):
|
||||
|
||||
d = remote_data["mail_domain"]
|
||||
|
||||
def rec(name, rtype, rdata, ttl=3600):
|
||||
return f"{name:<40} {ttl:<6} IN {rtype:<5} {rdata}"
|
||||
def append_record(name, rtype, rdata, ttl=3600):
|
||||
lines.append(f"{name:<40} {ttl:<6} IN {rtype:<5} {rdata}")
|
||||
|
||||
lines = ["; Required DNS entries"]
|
||||
if remote_data.get("A"):
|
||||
lines.append(rec(f"{d}.", "A", remote_data["A"]))
|
||||
append_record(f"{d}.", "A", remote_data["A"])
|
||||
if remote_data.get("AAAA"):
|
||||
lines.append(rec(f"{d}.", "AAAA", remote_data["AAAA"]))
|
||||
lines.append(rec(f"{d}.", "MX", f"10 {d}."))
|
||||
append_record(f"{d}.", "AAAA", remote_data["AAAA"])
|
||||
append_record(f"{d}.", "MX", f"10 {d}.")
|
||||
if remote_data.get("strict_tls"):
|
||||
lines.append(
|
||||
rec(f"_mta-sts.{d}.", "TXT", f'"v=STSv1; id={remote_data["sts_id"]}"')
|
||||
)
|
||||
lines.append(rec(f"mta-sts.{d}.", "CNAME", f"{d}."))
|
||||
lines.append(rec(f"www.{d}.", "CNAME", f"{d}."))
|
||||
append_record(f"_mta-sts.{d}.", "TXT", f'"v=STSv1; id={remote_data["sts_id"]}"')
|
||||
append_record(f"mta-sts.{d}.", "CNAME", f"{d}.")
|
||||
append_record(f"www.{d}.", "CNAME", f"{d}.")
|
||||
lines.append(remote_data["dkim_entry"])
|
||||
lines.append("")
|
||||
lines.append("; Recommended DNS entries")
|
||||
lines.append(rec(f"{d}.", "TXT", '"v=spf1 a ~all"'))
|
||||
lines.append(rec(f"_dmarc.{d}.", "TXT", '"v=DMARC1;p=reject;adkim=s;aspf=s"'))
|
||||
append_record(f"{d}.", "TXT", '"v=spf1 a ~all"')
|
||||
append_record(f"_dmarc.{d}.", "TXT", '"v=DMARC1;p=reject;adkim=s;aspf=s"')
|
||||
if remote_data.get("acme_account_url"):
|
||||
lines.append(
|
||||
rec(
|
||||
f"{d}.",
|
||||
"CAA",
|
||||
f'0 issue "letsencrypt.org;accounturi={remote_data["acme_account_url"]}"',
|
||||
)
|
||||
append_record(
|
||||
f"{d}.",
|
||||
"CAA",
|
||||
f'0 issue "letsencrypt.org;accounturi={remote_data["acme_account_url"]}"',
|
||||
)
|
||||
lines.append(rec(f"_adsp._domainkey.{d}.", "TXT", '"dkim=discardable"'))
|
||||
lines.append(rec(f"_submission._tcp.{d}.", "SRV", f"0 1 587 {d}."))
|
||||
lines.append(rec(f"_submissions._tcp.{d}.", "SRV", f"0 1 465 {d}."))
|
||||
lines.append(rec(f"_imap._tcp.{d}.", "SRV", f"0 1 143 {d}."))
|
||||
lines.append(rec(f"_imaps._tcp.{d}.", "SRV", f"0 1 993 {d}."))
|
||||
append_record(f"_adsp._domainkey.{d}.", "TXT", '"dkim=discardable"')
|
||||
append_record(f"_submission._tcp.{d}.", "SRV", f"0 1 587 {d}.")
|
||||
append_record(f"_submissions._tcp.{d}.", "SRV", f"0 1 465 {d}.")
|
||||
append_record(f"_imap._tcp.{d}.", "SRV", f"0 1 143 {d}.")
|
||||
append_record(f"_imaps._tcp.{d}.", "SRV", f"0 1 993 {d}.")
|
||||
lines.append("")
|
||||
return "\n".join(lines)
|
||||
|
||||
@@ -98,19 +90,19 @@ def check_full_zone(sshexec, remote_data, out, zonefile) -> int:
|
||||
if required_diff:
|
||||
out.red("Please set required DNS entries at your DNS provider:\n")
|
||||
for line in required_diff:
|
||||
out.print(line)
|
||||
out.print()
|
||||
out(line)
|
||||
out("")
|
||||
returncode = 1
|
||||
if remote_data.get("dkim_entry") in required_diff:
|
||||
out.print(
|
||||
out(
|
||||
"If the DKIM entry above does not work with your DNS provider,"
|
||||
" you can try this one:\n"
|
||||
)
|
||||
out.print(remote_data.get("web_dkim_entry") + "\n")
|
||||
out(remote_data.get("web_dkim_entry") + "\n")
|
||||
if recommended_diff:
|
||||
out.print("WARNING: these recommended DNS entries are not set:\n")
|
||||
out("WARNING: these recommended DNS entries are not set:\n")
|
||||
for line in recommended_diff:
|
||||
out.print(line)
|
||||
out(line)
|
||||
|
||||
if not (recommended_diff or required_diff):
|
||||
out.green("Great! All your DNS entries are verified and correct.")
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import io
|
||||
import urllib.request
|
||||
|
||||
from chatmaild.config import Config
|
||||
@@ -12,9 +13,11 @@ from cmdeploy.basedeploy import (
|
||||
blocked_service_startup,
|
||||
configure_remote_units,
|
||||
get_resource,
|
||||
is_in_container,
|
||||
)
|
||||
|
||||
DOVECOT_VERSION = "2.3.21+dfsg1-3"
|
||||
DOVECOT_ARCHIVE_VERSION = "2.3.21+dfsg1-3"
|
||||
DOVECOT_PACKAGE_VERSION = f"1:{DOVECOT_ARCHIVE_VERSION}"
|
||||
|
||||
DOVECOT_SHA256 = {
|
||||
("core", "amd64"): "dd060706f52a306fa863d874717210b9fe10536c824afe1790eec247ded5b27d",
|
||||
@@ -39,11 +42,14 @@ class DovecotDeployer(Deployer):
|
||||
with blocked_service_startup():
|
||||
debs = []
|
||||
for pkg in ("core", "imapd", "lmtpd"):
|
||||
deb = _download_dovecot_package(pkg, arch)
|
||||
deb, changed = _download_dovecot_package(pkg, arch)
|
||||
self.need_restart |= changed
|
||||
if deb:
|
||||
debs.append(deb)
|
||||
if debs:
|
||||
deb_list = " ".join(debs)
|
||||
# First dpkg may fail on missing dependencies (stderr suppressed);
|
||||
# apt-get --fix-broken pulls them in, then dpkg retries cleanly.
|
||||
server.shell(
|
||||
name="Install dovecot packages",
|
||||
commands=[
|
||||
@@ -52,14 +58,39 @@ class DovecotDeployer(Deployer):
|
||||
f"dpkg --force-confdef --force-confold -i {deb_list}",
|
||||
],
|
||||
)
|
||||
self.need_restart = True
|
||||
files.put(
|
||||
name="Pin dovecot packages to block Debian dist-upgrades",
|
||||
src=io.StringIO(
|
||||
"Package: dovecot-*\n"
|
||||
"Pin: version *\n"
|
||||
"Pin-Priority: -1\n"
|
||||
),
|
||||
dest="/etc/apt/preferences.d/pin-dovecot",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
|
||||
def configure(self):
|
||||
configure_remote_units(self.config.mail_domain, self.units)
|
||||
self.need_restart, self.daemon_reload = _configure_dovecot(self.config)
|
||||
configure_remote_units(self.config.mail_domain_bare, self.units)
|
||||
config_restart, self.daemon_reload = _configure_dovecot(self.config)
|
||||
self.need_restart |= config_restart
|
||||
|
||||
def activate(self):
|
||||
activate_remote_units(self.units)
|
||||
|
||||
# Detect stale binary: package installed but service still runs old (deleted) binary.
|
||||
if not self.disable_mail and not self.need_restart:
|
||||
stale = host.get_fact(
|
||||
Command,
|
||||
'pid=$(systemctl show -p MainPID --value dovecot.service 2>/dev/null);'
|
||||
' [ "${pid:-0}" != "0" ] && readlink "/proc/$pid/exe" 2>/dev/null | grep -q "(deleted)"'
|
||||
" && echo STALE || true",
|
||||
)
|
||||
if stale == "STALE":
|
||||
self.need_restart = True
|
||||
|
||||
restart = False if self.disable_mail else self.need_restart
|
||||
|
||||
systemd.service(
|
||||
@@ -84,22 +115,22 @@ def _pick_url(primary, fallback):
|
||||
return fallback
|
||||
|
||||
|
||||
def _download_dovecot_package(package: str, arch: str):
|
||||
"""Download a dovecot .deb if needed, return its path (or None)."""
|
||||
def _download_dovecot_package(package: str, arch: str) -> tuple[str | None, bool]:
|
||||
"""Download a dovecot .deb if needed, return (path, changed)."""
|
||||
arch = "amd64" if arch == "x86_64" else arch
|
||||
arch = "arm64" if arch == "aarch64" else arch
|
||||
|
||||
pkg_name = f"dovecot-{package}"
|
||||
sha256 = DOVECOT_SHA256.get((package, arch))
|
||||
if sha256 is None:
|
||||
apt.packages(packages=[pkg_name])
|
||||
return None
|
||||
op = apt.packages(packages=[pkg_name])
|
||||
return None, bool(getattr(op, "changed", False))
|
||||
|
||||
installed_versions = host.get_fact(DebPackages).get(pkg_name, [])
|
||||
if DOVECOT_VERSION in installed_versions:
|
||||
return None
|
||||
if DOVECOT_PACKAGE_VERSION in installed_versions:
|
||||
return None, False
|
||||
|
||||
url_version = DOVECOT_VERSION.replace("+", "%2B")
|
||||
url_version = DOVECOT_ARCHIVE_VERSION.replace("+", "%2B")
|
||||
deb_base = f"{pkg_name}_{url_version}_{arch}.deb"
|
||||
primary_url = f"https://download.delta.chat/dovecot/{deb_base}"
|
||||
fallback_url = f"https://github.com/chatmail/dovecot/releases/download/upstream%2F{url_version}/{deb_base}"
|
||||
@@ -114,10 +145,10 @@ def _download_dovecot_package(package: str, arch: str):
|
||||
cache_time=60 * 60 * 24 * 365 * 10, # never redownload the package
|
||||
)
|
||||
|
||||
return deb_filename
|
||||
return deb_filename, True
|
||||
|
||||
|
||||
def _configure_dovecot(config: Config, debug: bool = False) -> (bool, bool):
|
||||
def _configure_dovecot(config: Config, debug: bool = False) -> tuple[bool, bool]:
|
||||
"""Configures Dovecot IMAP server."""
|
||||
need_restart = False
|
||||
daemon_reload = False
|
||||
@@ -152,15 +183,15 @@ def _configure_dovecot(config: Config, debug: bool = False) -> (bool, bool):
|
||||
|
||||
# as per https://doc.dovecot.org/2.3/configuration_manual/os/
|
||||
# it is recommended to set the following inotify limits
|
||||
can_modify = host.get_fact(Command, "systemd-detect-virt -c || true") == "none"
|
||||
can_modify = not is_in_container()
|
||||
for name in ("max_user_instances", "max_user_watches"):
|
||||
key = f"fs.inotify.{name}"
|
||||
value = host.get_fact(Sysctl)[key]
|
||||
value = host.get_fact(Sysctl).get(key, 0)
|
||||
if value > 65534:
|
||||
continue
|
||||
if not can_modify:
|
||||
print(
|
||||
"\n!!!! refusing to attempt sysctl setting in shared-kernel containers\n"
|
||||
"\n!!!! refusing to attempt sysctl setting in containers\n"
|
||||
f"!!!! dovecot: sysctl {key!r}={value}, should be >65534 for production setups\n"
|
||||
"!!!!"
|
||||
)
|
||||
|
||||
@@ -7,6 +7,7 @@ listen = 0.0.0.0
|
||||
protocols = imap lmtp
|
||||
|
||||
auth_mechanisms = plain
|
||||
auth_username_chars = abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890.-_@[]
|
||||
|
||||
{% if debug == true %}
|
||||
auth_verbose = yes
|
||||
@@ -133,6 +134,11 @@ protocol lmtp {
|
||||
# mail_lua and push_notification_lua are needed for Lua push notification handler.
|
||||
# <https://doc.dovecot.org/2.3/configuration_manual/push_notification/#configuration>
|
||||
mail_plugins = $mail_plugins mail_lua notify push_notification push_notification_lua
|
||||
|
||||
# Disable fsync for LMTP. May lose delivered message,
|
||||
# but unlikely to cause problems with multiple relays.
|
||||
# https://doc.dovecot.org/2.3/admin_manual/mailbox_formats/#fsyncing
|
||||
mail_fsync = never
|
||||
}
|
||||
|
||||
plugin {
|
||||
@@ -144,12 +150,26 @@ plugin {
|
||||
}
|
||||
|
||||
plugin {
|
||||
# for now we define static quota-rules for all users
|
||||
quota = maildir:User quota
|
||||
quota_rule = *:storage={{ config.max_mailbox_size }}
|
||||
quota_max_mail_size={{ config.max_message_size }}
|
||||
quota_grace = 0
|
||||
# quota_over_flag_value = TRUE
|
||||
|
||||
quota_rule = *:storage={{ config.max_mailbox_size_mb }}M
|
||||
|
||||
# Trigger at 75%% of quota, expire oldest messages down to 70%%.
|
||||
# The percentages are chosen to prevent current Delta Chat users
|
||||
# from seeing "quota warnings" which trigger at 80% and 95%.
|
||||
|
||||
quota_warning = storage=75%% quota-warning {{ config.max_mailbox_size_mb * 70 // 100 }} {{ config.mailboxes_dir }}/%u
|
||||
}
|
||||
|
||||
service quota-warning {
|
||||
executable = script /usr/local/lib/chatmaild/venv/bin/chatmail-quota-expire
|
||||
user = vmail
|
||||
unix_listener quota-warning {
|
||||
user = vmail
|
||||
mode = 0600
|
||||
}
|
||||
}
|
||||
|
||||
# push_notification configuration
|
||||
@@ -252,6 +272,9 @@ protocol imap {
|
||||
# sort -sn <(sed 's/ / C: /' *.in) <(sed 's/ / S: /' cat *.out)
|
||||
|
||||
rawlog_dir = %h
|
||||
|
||||
# Disable fsync for IMAP. May lose IMAP changes like setting flags.
|
||||
mail_fsync = never
|
||||
}
|
||||
{% endif %}
|
||||
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
import os
|
||||
|
||||
from pyinfra import facts, host
|
||||
from pyinfra.operations import files, systemd
|
||||
|
||||
@@ -5,7 +7,7 @@ from cmdeploy.basedeploy import Deployer, get_resource
|
||||
|
||||
|
||||
class FiltermailDeployer(Deployer):
|
||||
services = ["filtermail", "filtermail-incoming"]
|
||||
services = ["filtermail", "filtermail-incoming", "filtermail-transport"]
|
||||
bin_path = "/usr/local/bin/filtermail"
|
||||
config_path = "/usr/local/lib/chatmaild/chatmail.ini"
|
||||
|
||||
@@ -13,11 +15,21 @@ class FiltermailDeployer(Deployer):
|
||||
self.need_restart = False
|
||||
|
||||
def install(self):
|
||||
local_bin = os.environ.get("CHATMAIL_FILTERMAIL_BINARY")
|
||||
if local_bin:
|
||||
self.need_restart |= files.put(
|
||||
name="Upload locally built filtermail",
|
||||
src=local_bin,
|
||||
dest=self.bin_path,
|
||||
mode="755",
|
||||
).changed
|
||||
return
|
||||
|
||||
arch = host.get_fact(facts.server.Arch)
|
||||
url = f"https://github.com/chatmail/filtermail/releases/download/v0.6.0/filtermail-{arch}"
|
||||
url = f"https://github.com/chatmail/filtermail/releases/download/v0.6.4/filtermail-{arch}"
|
||||
sha256sum = {
|
||||
"x86_64": "3fd8b18282252c75a5bbfa603d8c1b65f6563e5e920bddf3e64e451b7cdb43ce",
|
||||
"aarch64": "2bd191de205f7fd60158dd8e3516ab7e3efb14627696f3d7dc186bdcd9e10a43",
|
||||
"x86_64": "5295115952c72e4c4ec3c85546e094b4155a4c702c82bd71fcdcb744dc73adf6",
|
||||
"aarch64": "6892244f17b8f26ccb465766e96028e7222b3c8adefca9fc6bfe9ff332ca8dff",
|
||||
}[arch]
|
||||
self.need_restart |= files.download(
|
||||
name="Download filtermail",
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
[Unit]
|
||||
Description=Chatmail transport service
|
||||
|
||||
[Service]
|
||||
ExecStart={{ bin_path }} {{ config_path }} transport
|
||||
Restart=always
|
||||
RestartSec=30
|
||||
User=vmail
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,475 +0,0 @@
|
||||
"""lxc-start/stop/status/test subcommands for testing with local containers."""
|
||||
|
||||
import os
|
||||
import time
|
||||
|
||||
from ..util import get_git_hash, get_version_string, shell
|
||||
from .incus import RELAY_IMAGE_ALIAS, Incus, RelayContainer
|
||||
|
||||
RELAY_NAMES = ("test0", "test1")
|
||||
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# lxc-start
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
|
||||
def lxc_start_cmd_options(parser):
|
||||
_add_name_args(
|
||||
parser,
|
||||
help_text="User relay name(s) to create (default: test0).",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--ipv4-only",
|
||||
dest="ipv4_only",
|
||||
action="store_true",
|
||||
help="Create an IPv4-only container.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--run",
|
||||
action="store_true",
|
||||
help="Run 'cmdeploy run' on each container after starting it.",
|
||||
)
|
||||
|
||||
|
||||
def lxc_start_cmd(args, out):
|
||||
"""Create/Ensure and start LXC relay and DNS containers."""
|
||||
|
||||
with out.section("Preparing container setup"):
|
||||
_lxc_start_cmd(args, out)
|
||||
|
||||
|
||||
def _lxc_start_cmd(args, out):
|
||||
ix = Incus(out)
|
||||
sub = out.new_prefixed_out()
|
||||
out.green("Ensuring base image ...")
|
||||
ix.ensure_base_image()
|
||||
out.green("Ensuring DNS container (ns-localchat) ...")
|
||||
dns_ct = ix.get_dns_container()
|
||||
dns_ct.ensure()
|
||||
sub.print(f"DNS container IP: {dns_ct.ipv4}")
|
||||
|
||||
names = args.names if args.names else RELAY_NAMES
|
||||
relays = list(ix.get_container(n) for n in names)
|
||||
for ct in relays:
|
||||
out.green(f"Ensuring container {ct.name!r} ({ct.domain}) ...")
|
||||
ct.ensure()
|
||||
ip = ct.ipv4
|
||||
|
||||
sub.print("Configuring container hostname ...")
|
||||
ct.configure_hosts(ip)
|
||||
|
||||
sub.print(f"Writing {ct.ini.name} ...")
|
||||
ct.write_ini(disable_ipv6=args.ipv4_only)
|
||||
sub.print(f"Config: {ct.ini}")
|
||||
if args.ipv4_only:
|
||||
ct.disable_ipv6()
|
||||
ipv6 = None
|
||||
else:
|
||||
output = ct.bash(
|
||||
"ip -6 addr show scope global -deprecated"
|
||||
" | grep -oP '(?<=inet6 )[^/]+'",
|
||||
check=False,
|
||||
)
|
||||
ipv6 = output.strip() if output else None
|
||||
sub.print(f"{_format_addrs(ip, ipv6)}")
|
||||
|
||||
sub.green(f"Container {ct.name!r} ready: {ct.domain} -> {ip}")
|
||||
out.print()
|
||||
|
||||
# Reset DNS zones only for the containers we just started
|
||||
started_cnames = {ct.name for ct in relays}
|
||||
managed = ix.list_managed()
|
||||
started = [c for c in managed if c["name"] in started_cnames]
|
||||
|
||||
if started:
|
||||
out.print(
|
||||
f"Resetting DNS zones for {len(started)} domain(s) (A + AAAA records) ..."
|
||||
)
|
||||
dns_ct.reset_dns_records(dns_ct.ipv4, started)
|
||||
|
||||
for ct in relays:
|
||||
if ct.name in started_cnames:
|
||||
sub.print(f"Configuring DNS in {ct.name} ...")
|
||||
ct.configure_dns(dns_ct.ipv4)
|
||||
|
||||
# Generate the unified SSH config
|
||||
out.green("Writing ssh-config ...")
|
||||
ssh_cfg = ix.write_ssh_config()
|
||||
sub.print(f"{ssh_cfg}")
|
||||
|
||||
# Verify SSH via the generated config
|
||||
for ct in relays:
|
||||
sub.print(f"Verifying SSH to {ct.name} via ssh-config ...")
|
||||
if ct.verify_ssh(ssh_cfg):
|
||||
sub.print(f"SSH OK: ssh -F lxconfigs/ssh-config {ct.domain}")
|
||||
else:
|
||||
sub.red(f"WARNING: SSH verification failed for {ct.name}")
|
||||
|
||||
# Print integration suggestions
|
||||
ssh_cfg = ix.ssh_config_path
|
||||
if not ix.check_ssh_include():
|
||||
sub.green(
|
||||
"\n(Optional) To use containers from any SSH client, add to ~/.ssh/config:"
|
||||
)
|
||||
sub.green(f" Include {ssh_cfg}")
|
||||
|
||||
# Optionally run cmdeploy run + dns on each relay
|
||||
if args.run:
|
||||
for ct in relays:
|
||||
with out.section(f"cmdeploy run: {ct.sname} ({ct.domain})"):
|
||||
ret = _run_cmdeploy("run", ct, ix, out, extra=["--skip-dns-check"])
|
||||
if ret:
|
||||
out.red(f"Deploy to {ct.sname} failed (exit {ret})")
|
||||
return ret
|
||||
|
||||
with out.section("loading DNS zones"):
|
||||
for ct in relays:
|
||||
ret = _run_cmdeploy(
|
||||
"dns", ct, ix, out,
|
||||
extra=["--zonefile", str(ct.zone)],
|
||||
)
|
||||
if ret:
|
||||
out.red(f"DNS for {ct.sname} failed (exit {ret})")
|
||||
return ret
|
||||
if ct.zone.exists():
|
||||
dns_ct.set_dns_records(ct.zone.read_text())
|
||||
out.print(f"Restarting filtermail-incoming on {ct.name}")
|
||||
ct.bash("systemctl restart filtermail-incoming")
|
||||
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# lxc-stop
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
|
||||
def lxc_stop_cmd_options(parser):
|
||||
parser.add_argument(
|
||||
"--destroy",
|
||||
action="store_true",
|
||||
help="Delete containers and their config files after stopping.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--destroy-all",
|
||||
dest="destroy_all",
|
||||
action="store_true",
|
||||
help="Like --destroy, but also remove the ns-localchat DNS container.",
|
||||
)
|
||||
_add_name_args(
|
||||
parser,
|
||||
help_text="Container name(s) to stop (default: test0 + test1).",
|
||||
)
|
||||
|
||||
|
||||
def lxc_stop_cmd(args, out):
|
||||
"""Stop (and optionally destroy) local LXC relay containers."""
|
||||
ix = Incus(out)
|
||||
names = args.names or RELAY_NAMES
|
||||
destroy = args.destroy or args.destroy_all
|
||||
|
||||
for ct in map(ix.get_container, names):
|
||||
if destroy:
|
||||
out.green(f"Destroying container {ct.name!r} ...")
|
||||
ct.destroy()
|
||||
else:
|
||||
out.green(f"Stopping container {ct.name!r} ...")
|
||||
ct.stop(force=True)
|
||||
|
||||
if args.destroy_all:
|
||||
dns_ct = ix.get_dns_container()
|
||||
out.green(f"Destroying DNS container {dns_ct.name!r} ...")
|
||||
dns_ct.destroy()
|
||||
ix.delete_images()
|
||||
|
||||
if destroy:
|
||||
ix.write_ssh_config()
|
||||
out.green("LXC containers destroyed.")
|
||||
else:
|
||||
out.green("LXC containers stopped.")
|
||||
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# lxc-test
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
|
||||
def lxc_test_cmd_options(parser):
|
||||
parser.add_argument(
|
||||
"--one",
|
||||
action="store_true",
|
||||
help="Only deploy and test against test0 (skip test1).",
|
||||
)
|
||||
|
||||
|
||||
def lxc_test_cmd(args, out):
|
||||
"""Run full LXC pipeline: start, deploy, DNS, zone files, and tests.
|
||||
|
||||
All commands run directly on the host using
|
||||
``--ssh-config lxconfigs/ssh-config`` for SSH access.
|
||||
"""
|
||||
ix = Incus(out)
|
||||
t_total = time.time()
|
||||
relay_names = list(RELAY_NAMES)
|
||||
if args.one:
|
||||
relay_names = relay_names[:1]
|
||||
|
||||
local_hash = get_git_hash()
|
||||
|
||||
# Per-relay: start, deploy, then snapshot the first relay as a
|
||||
# reusable image so the second relay launches pre-deployed.
|
||||
ipv4_only_flags = {RELAY_NAMES[0]: False, RELAY_NAMES[1]: True}
|
||||
|
||||
for ct in map(ix.get_container, relay_names):
|
||||
name = ct.sname
|
||||
ipv4_only = ipv4_only_flags.get(name, False)
|
||||
v_flag = " -" + "v" * out.verbosity if out.verbosity > 0 else ""
|
||||
start_cmd = f"cmdeploy lxc-start{v_flag} {name}"
|
||||
if ipv4_only:
|
||||
start_cmd += " --ipv4-only"
|
||||
with out.section(f"cmdeploy lxc-start: {name}"):
|
||||
ret = out.shell(start_cmd, cwd=str(ix.project_root))
|
||||
if ret:
|
||||
return ret
|
||||
|
||||
status = _deploy_status(ct, local_hash, ix)
|
||||
with out.section(f"cmdeploy run: {name}"):
|
||||
if "IN-SYNC" in status:
|
||||
out.print(f"{name} is {status}, skipping")
|
||||
else:
|
||||
ret = _run_cmdeploy("run", ct, ix, out, extra=["--skip-dns-check"])
|
||||
if ret:
|
||||
out.red(f"Deploy to {name} failed (exit {ret})")
|
||||
return ret
|
||||
|
||||
# Snapshot the first relay so subsequent ones launch pre-deployed
|
||||
if not ix.find_image([RELAY_IMAGE_ALIAS]):
|
||||
with out.section("lxc-test: caching relay image"):
|
||||
ct.publish_as_relay_image()
|
||||
|
||||
for ct in map(ix.get_container, relay_names):
|
||||
with out.section(f"cmdeploy dns: {ct.sname} ({ct.domain})"):
|
||||
ret = _run_cmdeploy("dns", ct, ix, out, extra=["--zonefile", str(ct.zone)])
|
||||
if ret:
|
||||
out.red(f"DNS for {ct.sname} failed (exit {ret})")
|
||||
return ret
|
||||
|
||||
with out.section(f"lxc-test: loading DNS zones {' & '.join(relay_names)}"):
|
||||
dns_ct = ix.get_dns_container()
|
||||
for ct in map(ix.get_container, relay_names):
|
||||
if ct.zone.exists():
|
||||
zone_data = ct.zone.read_text()
|
||||
out.print(f"Loading {ct.zone} into PowerDNS ...")
|
||||
dns_ct.set_dns_records(zone_data)
|
||||
|
||||
# Restart filtermail so its in-process DNS cache
|
||||
# does not hold stale negative DKIM responses
|
||||
# from before the zones were loaded.
|
||||
for ct in map(ix.get_container, relay_names):
|
||||
out.print(f"Restarting filtermail-incoming on {ct.name} ...")
|
||||
ct.bash("systemctl restart filtermail-incoming")
|
||||
|
||||
with out.section("cmdeploy test"):
|
||||
first = ix.get_container(relay_names[0])
|
||||
env = None
|
||||
if len(relay_names) > 1:
|
||||
env = os.environ.copy()
|
||||
env["CHATMAIL_DOMAIN2"] = ix.get_container(relay_names[1]).domain
|
||||
ret = _run_cmdeploy("test", first, ix, out, **({"env": env} if env else {}))
|
||||
if ret:
|
||||
out.red(f"Tests failed (exit {ret})")
|
||||
return ret
|
||||
|
||||
elapsed = time.time() - t_total
|
||||
out.section_line(f"lxc-test complete ({elapsed:.1f}s)")
|
||||
if out.section_timings:
|
||||
out.print("Section timings:")
|
||||
for name, secs in out.section_timings:
|
||||
out.print(f" {name:.<50s} {secs:5.1f}s")
|
||||
out.print(f" {'total':.<50s} {elapsed:5.1f}s")
|
||||
out.section_timings.clear()
|
||||
return 0
|
||||
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# lxc-status
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
|
||||
def lxc_status_cmd_options(parser):
|
||||
pass
|
||||
|
||||
|
||||
def lxc_status_cmd(args, out):
|
||||
"""Show status of local LXC chatmail containers."""
|
||||
ix = Incus(out)
|
||||
containers = ix.list_managed()
|
||||
if not containers:
|
||||
out.red("No LXC containers found. Run 'cmdeploy lxc-start' first.")
|
||||
return 1
|
||||
|
||||
local_hash = get_git_hash()
|
||||
|
||||
# Get storage pool path for display
|
||||
storage_path = None
|
||||
data = ix.run_json(["storage", "show", "default"], check=False)
|
||||
if data:
|
||||
storage_path = data.get("config", {}).get("source")
|
||||
msg = "Container status"
|
||||
if storage_path:
|
||||
msg += f": {storage_path}"
|
||||
out.section_line(msg)
|
||||
|
||||
dns_ip = None
|
||||
for c in containers:
|
||||
_print_container_status(out, c, ix, local_hash)
|
||||
if c["name"] == ix.get_dns_container().name:
|
||||
dns_ip = c["ip"]
|
||||
|
||||
out.section_line("Host ssh and DNS configuration")
|
||||
_print_ssh_status(out, ix)
|
||||
_print_dns_forwarding_status(out, dns_ip)
|
||||
return 0
|
||||
|
||||
|
||||
def _print_container_status(out, c, ix, local_hash):
|
||||
"""Print name/status, domain/IPs, and RAM for one container."""
|
||||
cname = c["name"]
|
||||
is_running = c.get("status") == "Running"
|
||||
ct = ix.get_container(cname)
|
||||
|
||||
# First line: name + running/STOPPED + deploy status
|
||||
if not is_running:
|
||||
tag = "STOPPED"
|
||||
elif not isinstance(ct, RelayContainer):
|
||||
tag = "running"
|
||||
else:
|
||||
tag = f"running {_deploy_status(ct, local_hash, ix)}"
|
||||
out.print(f"{cname:20s} {tag}")
|
||||
|
||||
# Second line: domain, IPv4, IPv6
|
||||
domain = c.get("domain", "")
|
||||
ip = c.get("ip") or "?"
|
||||
ipv6 = c.get("ipv6")
|
||||
out.print(f"{domain:20s} {_format_addrs(ip, ipv6)}")
|
||||
|
||||
# Third line: RAM (RSS), config
|
||||
detail_out = out.new_prefixed_out(" " * 21)
|
||||
try:
|
||||
used, total = ct.rss_mib()
|
||||
except Exception:
|
||||
ram_str = "RSS ?"
|
||||
else:
|
||||
ram_str = f"RSS {used}/{total} MiB ({used * 100 // total}%)"
|
||||
|
||||
if isinstance(ct, RelayContainer):
|
||||
detail = f"{ram_str}, config: {os.path.relpath(ct.ini)}"
|
||||
else:
|
||||
detail = ram_str
|
||||
|
||||
detail_out.print(detail)
|
||||
out.print()
|
||||
|
||||
|
||||
def _print_ssh_status(out, ix):
|
||||
"""Print SSH integration status."""
|
||||
ssh_cfg = ix.ssh_config_path
|
||||
if ix.check_ssh_include():
|
||||
out.green("SSH: ~/.ssh/config includes lxconfigs/ssh-config ✓")
|
||||
else:
|
||||
out.red("SSH: ~/.ssh/config does NOT include lxconfigs/ssh-config")
|
||||
sub = out.new_prefixed_out()
|
||||
sub.print("Add to ~/.ssh/config:")
|
||||
sub.print(f" Include {ssh_cfg}")
|
||||
|
||||
|
||||
def _print_dns_forwarding_status(out, dns_ip):
|
||||
"""Print host DNS forwarding status for .localchat."""
|
||||
sub = out.new_prefixed_out()
|
||||
if not dns_ip:
|
||||
out.red("DNS: ns-localchat container not found")
|
||||
return
|
||||
try:
|
||||
rv = shell("resolvectl status incusbr0")
|
||||
dns_ok = dns_ip in rv.stdout and "localchat" in rv.stdout
|
||||
except Exception:
|
||||
dns_ok = None
|
||||
if dns_ok is True:
|
||||
out.green(f"DNS: .localchat forwarding to {dns_ip} ✓")
|
||||
elif dns_ok is False:
|
||||
out.red("DNS: .localchat forwarding NOT configured")
|
||||
sub.print("Run:")
|
||||
sub.print(f" sudo resolvectl dns incusbr0 {dns_ip}")
|
||||
sub.print(" sudo resolvectl domain incusbr0 ~localchat")
|
||||
else:
|
||||
sub.print("DNS: .localchat forwarding status UNKNOWN")
|
||||
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Internal helpers
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
|
||||
def _format_addrs(ip, ipv6=None):
|
||||
parts = [f"IPv4 {ip}"]
|
||||
if ipv6:
|
||||
parts.append(f"IPv6 {ipv6}")
|
||||
return ", ".join(parts)
|
||||
|
||||
|
||||
def _deploy_status(ct, local_hash, ix):
|
||||
"""Return a human-readable deploy status string.
|
||||
|
||||
Compares the full deployed version (hash + diff) against
|
||||
the local state built by :func:`~cmdeploy.util.get_version_string`.
|
||||
"""
|
||||
deployed = ct.deployed_version()
|
||||
if deployed is None:
|
||||
return "NOT DEPLOYED"
|
||||
|
||||
# A container launched from the relay image has the same
|
||||
# git hash but a different domain — always redeploy.
|
||||
deployed_domain = ct.deployed_domain()
|
||||
if deployed_domain and deployed_domain != ct.domain:
|
||||
return f"DOMAIN-MISMATCH (deployed: {deployed_domain})"
|
||||
|
||||
deployed_lines = deployed.splitlines()
|
||||
deployed_hash = deployed_lines[0] if deployed_lines else ""
|
||||
short = deployed_hash[:12]
|
||||
|
||||
if not local_hash:
|
||||
return f"UNKNOWN (deployed: {short})"
|
||||
|
||||
local_short = local_hash[:12]
|
||||
if deployed_hash != local_hash:
|
||||
return f"STALE (deployed: {short}, local: {local_short})"
|
||||
|
||||
# Hash matches — check for uncommitted diffs
|
||||
local_version = get_version_string()
|
||||
if deployed != local_version:
|
||||
return f"DIRTY ({local_short}, undeployed changes)"
|
||||
|
||||
return f"IN-SYNC ({short})"
|
||||
|
||||
|
||||
def _add_name_args(parser, help_text):
|
||||
parser.add_argument("names", nargs="*", metavar="NAME", help=help_text)
|
||||
|
||||
|
||||
def _run_cmdeploy(subcmd, ct, ix, out, extra=None, **kwargs):
|
||||
"""Run ``cmdeploy <subcmd>`` with standard --config/--ssh flags.
|
||||
|
||||
*ct* is a Container (uses ``ct.ini`` and ``ct.domain``).
|
||||
Returns the subprocess exit code.
|
||||
"""
|
||||
extra_str = " ".join(extra) if extra else ""
|
||||
v_flag = " -" + "v" * out.verbosity if out.verbosity > 0 else ""
|
||||
cmd = f"""
|
||||
cmdeploy {subcmd}{v_flag}
|
||||
--config {ct.ini}
|
||||
--ssh-config {ix.ssh_config_path}
|
||||
--ssh-host {ct.domain}
|
||||
{extra_str}
|
||||
"""
|
||||
if "cwd" not in kwargs:
|
||||
kwargs["cwd"] = str(ix.project_root)
|
||||
return out.shell(cmd, **kwargs)
|
||||
@@ -1,768 +0,0 @@
|
||||
"""Core Incus operations for local chatmail LXC containers."""
|
||||
|
||||
import json
|
||||
import subprocess
|
||||
import textwrap
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
from ..util import shell
|
||||
|
||||
LABEL_KEY = "user.localchat-managed"
|
||||
SSH_KEY_NAME = "id_localchat"
|
||||
DOMAIN_SUFFIX = ".localchat"
|
||||
UPSTREAM_IMAGE = "images:debian/12"
|
||||
BASE_IMAGE_ALIAS = "localchat-base"
|
||||
BASE_SETUP_NAME = "localchat-base-setup"
|
||||
RELAY_IMAGE_ALIAS = "localchat-relay"
|
||||
|
||||
DNS_CONTAINER_NAME = "ns-localchat"
|
||||
DNS_DOMAIN = "ns.localchat"
|
||||
|
||||
|
||||
class DNSConfigurationError(Exception):
|
||||
"""Raised when the DNS container is not reachable or not answering."""
|
||||
|
||||
|
||||
def _extract_ip(net_data, family="inet"):
|
||||
"""Extract the first global-scope IP of *family* from network state data.
|
||||
|
||||
*net_data* is the ``state.network`` dict from ``incus list --format=json``.
|
||||
*family* is ``"inet"`` for IPv4 or ``"inet6"`` for IPv6.
|
||||
Returns the address string, or None.
|
||||
"""
|
||||
for iface_name, iface in net_data.items():
|
||||
if iface_name == "lo":
|
||||
continue
|
||||
for addr in iface.get("addresses", []):
|
||||
if addr["family"] == family and addr["scope"] == "global":
|
||||
return addr["address"]
|
||||
return None
|
||||
|
||||
|
||||
class Incus:
|
||||
"""Gateway for all Incus container operations.
|
||||
|
||||
Instantiated once per CLI command and passed around so that
|
||||
all modules share a single entry point for Incus interactions.
|
||||
"""
|
||||
|
||||
def __init__(self, out):
|
||||
self.out = out
|
||||
self.project_root = Path(__file__).resolve().parent.parent.parent.parent.parent
|
||||
self.lxconfigs_dir = self.project_root / "lxconfigs"
|
||||
self.lxconfigs_dir.mkdir(exist_ok=True)
|
||||
self.ssh_key_path = self.lxconfigs_dir / SSH_KEY_NAME
|
||||
if not self.ssh_key_path.exists():
|
||||
shell(
|
||||
f"ssh-keygen -t ed25519 -f {self.ssh_key_path} -N '' -C localchat",
|
||||
check=True,
|
||||
)
|
||||
self.ssh_config_path = self.lxconfigs_dir / "ssh-config"
|
||||
|
||||
def write_ssh_config(self):
|
||||
"""Write ``lxconfigs/ssh-config`` mapping all containers to their IPs.
|
||||
|
||||
Each Host block maps the container name, the domain name, and the
|
||||
short relay name (e.g. ``_test0``) to the container's IP, using the
|
||||
shared localchat SSH key. Returns the path to the file.
|
||||
"""
|
||||
containers = self.list_managed()
|
||||
key_path = self.ssh_key_path
|
||||
lines = ["# Auto-generated by cmdeploy lxc-start — do not edit\n"]
|
||||
for c in containers:
|
||||
hosts = [c["name"]]
|
||||
domain = c.get("domain", "")
|
||||
if domain and domain != c["name"]:
|
||||
hosts.append(domain)
|
||||
short = domain.split(".")[0]
|
||||
if short and short not in hosts:
|
||||
hosts.append(short)
|
||||
lines.append(f"\nHost {' '.join(hosts)}\n")
|
||||
lines.append(f" Hostname {c['ip']}\n")
|
||||
lines.append(" User root\n")
|
||||
lines.append(f" IdentityFile {key_path}\n")
|
||||
lines.append(" IdentitiesOnly yes\n")
|
||||
lines.append(" StrictHostKeyChecking accept-new\n")
|
||||
lines.append(" UserKnownHostsFile /dev/null\n")
|
||||
lines.append(" LogLevel ERROR\n")
|
||||
path = self.ssh_config_path
|
||||
path.write_text("".join(lines))
|
||||
return path
|
||||
|
||||
def check_ssh_include(self):
|
||||
"""Check if the user's ~/.ssh/config already includes our ssh-config."""
|
||||
user_ssh_config = Path.home() / ".ssh" / "config"
|
||||
if not user_ssh_config.exists():
|
||||
return False
|
||||
lines = user_ssh_config.read_text().splitlines()
|
||||
target = f"include {self.ssh_config_path}".lower()
|
||||
return any(line.strip().lower() == target for line in lines)
|
||||
|
||||
def get_host_nameservers(self):
|
||||
"""Return upstream nameservers found on the host."""
|
||||
ns = []
|
||||
for path in ["/run/systemd/resolve/resolv.conf", "/etc/resolv.conf"]:
|
||||
p = Path(path)
|
||||
if p.exists():
|
||||
for line in p.read_text().splitlines():
|
||||
if line.strip().startswith("nameserver "):
|
||||
addr = line.split()[1]
|
||||
if addr not in ("127.0.0.1", "127.0.0.53", "::1"):
|
||||
if addr not in ns:
|
||||
ns.append(addr)
|
||||
if ns:
|
||||
break
|
||||
return ns
|
||||
|
||||
def run(self, args, check=True, capture=True, input=None):
|
||||
"""Run an incus command.
|
||||
|
||||
When *capture* is True and *verbosity* >= 1, output is streamed
|
||||
to the terminal line-by-line while also being captured for
|
||||
later return via result.stdout.
|
||||
"""
|
||||
cmd = ["incus", "--quiet"] + list(args)
|
||||
sub = self.out.new_prefixed_out(" ")
|
||||
|
||||
if not capture:
|
||||
# Simple case: let subprocess handle streams (no capture)
|
||||
if self.out.verbosity >= 1:
|
||||
sub.print(f"$ {' '.join(cmd)}")
|
||||
return subprocess.run(
|
||||
cmd, text=True, input=input, check=check, stdout=None, stderr=None
|
||||
)
|
||||
|
||||
# Capture case: we may need to stream while capturing
|
||||
if sub.verbosity >= 1:
|
||||
cmd_lines = " ".join(cmd).splitlines()
|
||||
sub.print(f"$ {cmd_lines.pop(0)}")
|
||||
if sub.verbosity >= 2:
|
||||
for line in cmd_lines:
|
||||
sub.print(f" {line}")
|
||||
|
||||
proc = subprocess.Popen(
|
||||
cmd,
|
||||
text=True,
|
||||
stdin=subprocess.PIPE if input else subprocess.DEVNULL,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
)
|
||||
|
||||
stdout_lines = []
|
||||
if input:
|
||||
proc.stdin.write(input)
|
||||
proc.stdin.close()
|
||||
|
||||
for line in proc.stdout:
|
||||
stdout_lines.append(line)
|
||||
if sub.verbosity >= 2:
|
||||
sub.print(f" > {line.rstrip()}")
|
||||
|
||||
stderr = proc.stderr.read()
|
||||
ret = proc.wait()
|
||||
stdout = "".join(stdout_lines)
|
||||
if check and ret != 0:
|
||||
full_output = stdout + stderr
|
||||
for line in full_output.splitlines():
|
||||
if sub.verbosity < 1: # and we haven't printed it yet
|
||||
sub.red(line)
|
||||
raise subprocess.CalledProcessError(ret, cmd, output=stdout, stderr=stderr)
|
||||
|
||||
return subprocess.CompletedProcess(cmd, ret, stdout=stdout, stderr=stderr)
|
||||
|
||||
def run_json(self, args, check=True):
|
||||
"""Run an incus command with ``--format=json``.
|
||||
|
||||
Returns the parsed JSON on success.
|
||||
When *check* is True raises ``subprocess.CalledProcessError``
|
||||
on non-zero exit; when False returns *None* instead.
|
||||
"""
|
||||
result = self.run(
|
||||
list(args) + ["--format=json"],
|
||||
check=check,
|
||||
)
|
||||
if result.returncode != 0:
|
||||
return None
|
||||
return json.loads(result.stdout)
|
||||
|
||||
def run_output(self, args, check=True):
|
||||
"""Run an incus command and return its stripped stdout.
|
||||
|
||||
When *check* is False, returns *None* on non-zero exit
|
||||
instead of raising.
|
||||
"""
|
||||
result = self.run(args, check=check)
|
||||
if result.returncode != 0:
|
||||
return None
|
||||
return result.stdout.strip()
|
||||
|
||||
def find_image(self, aliases):
|
||||
"""Return the first alias from *aliases* that exists, else None."""
|
||||
images = self.run_json(["image", "list"], check=False) or []
|
||||
existing = {a.get("name") for img in images for a in img.get("aliases", [])}
|
||||
for alias in aliases:
|
||||
if alias in existing:
|
||||
return alias
|
||||
return None
|
||||
|
||||
def delete_images(self):
|
||||
"""Delete the cached base and relay images."""
|
||||
for alias in (RELAY_IMAGE_ALIAS, BASE_IMAGE_ALIAS):
|
||||
self.run(["image", "delete", alias], check=False) # ok if absent
|
||||
|
||||
def list_managed(self):
|
||||
"""Return list of dicts with name, ip, ipv6, domain, status, memory_usage."""
|
||||
containers = []
|
||||
for ct in self.run_json(["list"]):
|
||||
config = ct.get("config", {})
|
||||
if config.get(LABEL_KEY) != "true":
|
||||
continue
|
||||
name = ct["name"]
|
||||
state = ct.get("state", {})
|
||||
net = state.get("network") or {}
|
||||
containers.append(
|
||||
{
|
||||
"name": name,
|
||||
"ip": _extract_ip(net, "inet"),
|
||||
"ipv6": _extract_ip(net, "inet6"),
|
||||
"domain": config.get(
|
||||
"user.localchat-domain", f"{name}{DOMAIN_SUFFIX}"
|
||||
),
|
||||
"status": ct.get("status", "Unknown"),
|
||||
"memory_usage": state.get("memory", {}).get("usage", 0),
|
||||
}
|
||||
)
|
||||
return containers
|
||||
|
||||
def ensure_base_image(self):
|
||||
"""Build and cache a base image with openssh and the SSH key.
|
||||
|
||||
The image is published as a local incus image with alias
|
||||
'localchat-base'. Subsequent container launches use this
|
||||
image instead of the upstream Debian 12, skipping the
|
||||
slow apt-get install step.
|
||||
Returns the image alias.
|
||||
"""
|
||||
if self.find_image([BASE_IMAGE_ALIAS]):
|
||||
self.out.print(f" Base image '{BASE_IMAGE_ALIAS}' already cached.")
|
||||
return BASE_IMAGE_ALIAS
|
||||
|
||||
self.out.print(" Building base image (one-time setup) ...")
|
||||
|
||||
self.run(["delete", BASE_SETUP_NAME, "--force"], check=False)
|
||||
self.run(["image", "delete", BASE_IMAGE_ALIAS], check=False)
|
||||
self.run(["launch", UPSTREAM_IMAGE, BASE_SETUP_NAME])
|
||||
|
||||
ct = Container(self, BASE_SETUP_NAME)
|
||||
ct.wait_ready()
|
||||
|
||||
key_path = self.ssh_key_path
|
||||
pub_key = key_path.with_suffix(".pub").read_text().strip()
|
||||
host_ns = self.get_host_nameservers()
|
||||
ns_lines = "\n".join(f"nameserver {n}" for n in host_ns)
|
||||
ct.bash(f"""
|
||||
printf '{ns_lines}\n' > /etc/resolv.conf
|
||||
apt-get -o DPkg::Lock::Timeout=60 update
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y openssh-server python3
|
||||
systemctl enable ssh
|
||||
apt-get clean
|
||||
mkdir -p /root/.ssh
|
||||
chmod 700 /root/.ssh
|
||||
echo '{pub_key}' > /root/.ssh/authorized_keys
|
||||
chmod 600 /root/.ssh/authorized_keys
|
||||
""")
|
||||
|
||||
self.run(["stop", BASE_SETUP_NAME])
|
||||
self.run(["publish", BASE_SETUP_NAME, f"--alias={BASE_IMAGE_ALIAS}"])
|
||||
self.run(["delete", BASE_SETUP_NAME, "--force"])
|
||||
self.out.print(f" Base image '{BASE_IMAGE_ALIAS}' ready.")
|
||||
return BASE_IMAGE_ALIAS
|
||||
|
||||
def get_container(self, name):
|
||||
"""Return a container handle for the given name.
|
||||
|
||||
Accepts both short relay names (``test0``) and full Incus
|
||||
container names (``test0-localchat``). Returns
|
||||
``DNSContainer`` for the DNS container and
|
||||
``RelayContainer`` for everything else.
|
||||
"""
|
||||
if name == DNS_CONTAINER_NAME:
|
||||
return DNSContainer(self)
|
||||
return RelayContainer(self, name.removesuffix("-localchat"))
|
||||
|
||||
def get_dns_container(self):
|
||||
"""Return a DNSContainer handle."""
|
||||
return DNSContainer(self)
|
||||
|
||||
|
||||
class Container:
|
||||
"""The base container handle wraps all interactions with incus."""
|
||||
|
||||
def __init__(self, incus, name, domain=None):
|
||||
self.incus = incus
|
||||
self.out = incus.out
|
||||
self.name = name
|
||||
self.domain = domain or f"{name}{DOMAIN_SUFFIX}"
|
||||
self.ipv4 = None
|
||||
self.ipv6 = None
|
||||
|
||||
def bash(self, script, check=True):
|
||||
"""Returns stdout from executing ``bash -ec <script>`` inside this container.
|
||||
|
||||
*script* is dedented and stripped so callers can use triple-quoted strings.
|
||||
When *check* is False, returns *None* on non-zero exit instead of raising.
|
||||
"""
|
||||
script = textwrap.dedent(script).strip()
|
||||
cmd = ["exec", self.name, "--", "bash", "-ec", script]
|
||||
return self.incus.run_output(cmd, check=check)
|
||||
|
||||
def run_cmd(self, *args, check=True):
|
||||
"""Return stdout from running a command directly in the container (no shell).
|
||||
|
||||
When *check* is False, returns *None* on non-zero exit instead of raising.
|
||||
"""
|
||||
return self.incus.run_output(
|
||||
["exec", self.name, "--", *args],
|
||||
check=check,
|
||||
)
|
||||
|
||||
def start(self):
|
||||
self.incus.run(["start", self.name])
|
||||
|
||||
def stop(self, force=False):
|
||||
cmd = ["stop", self.name]
|
||||
if force:
|
||||
cmd.append("--force")
|
||||
self.incus.run(cmd, check=False)
|
||||
|
||||
def launch(self):
|
||||
"""Launch from the best available image, return the alias used."""
|
||||
image = self.incus.find_image([RELAY_IMAGE_ALIAS, BASE_IMAGE_ALIAS])
|
||||
if not image:
|
||||
raise RuntimeError(
|
||||
f"No base image '{BASE_IMAGE_ALIAS}' found. "
|
||||
"Call ensure_base_image() before launching containers."
|
||||
)
|
||||
self.out.print(f" Launching from '{image}' image ...")
|
||||
cfg = []
|
||||
cfg += ("-c", f"{LABEL_KEY}=true")
|
||||
cfg += ("-c", f"user.localchat-domain={self.domain}")
|
||||
self.incus.run(["launch", image, self.name, *cfg])
|
||||
return image
|
||||
|
||||
def ensure(self):
|
||||
"""Create/start this container from the cached base image.
|
||||
|
||||
On first call, builds the base image (~30s).
|
||||
Subsequent containers launch in ~2s from the cached image.
|
||||
Returns ``self`` for chaining.
|
||||
"""
|
||||
data = self.incus.run_json(["list", self.name], check=False) or []
|
||||
|
||||
existing = [c for c in data if c["name"] == self.name]
|
||||
if existing:
|
||||
if existing[0]["status"] != "Running":
|
||||
self.start()
|
||||
else:
|
||||
self.launch()
|
||||
self.wait_ready()
|
||||
return self
|
||||
|
||||
def destroy(self):
|
||||
"""Stop, delete, and clean up config files."""
|
||||
self.stop(force=True)
|
||||
self.incus.run(["delete", self.name, "--force"], check=False)
|
||||
|
||||
def push_file_content(self, dest_path, content):
|
||||
"""Write *content* to *dest_path* inside the container.
|
||||
|
||||
*content* is dedented and stripped so callers can use
|
||||
indented triple-quoted strings.
|
||||
"""
|
||||
content = textwrap.dedent(content).strip() + "\n"
|
||||
self.incus.run(
|
||||
["file", "push", "-", f"{self.name}{dest_path}"],
|
||||
input=content,
|
||||
)
|
||||
self.bash(f"chmod 644 {dest_path}")
|
||||
|
||||
def wait_ready(self, timeout=60):
|
||||
"""Wait until the container is running with an IPv4 address.
|
||||
|
||||
Sets ``self.ipv4`` and ``self.ipv6`` (may be *None*),
|
||||
or raises ``TimeoutError``.
|
||||
"""
|
||||
deadline = time.time() + timeout
|
||||
while time.time() < deadline:
|
||||
data = self.incus.run_json(
|
||||
["list", self.name],
|
||||
check=False,
|
||||
)
|
||||
if data and data[0].get("status") == "Running":
|
||||
net = data[0].get("state", {}).get("network", {})
|
||||
self.ipv4 = _extract_ip(net, "inet")
|
||||
self.ipv6 = _extract_ip(net, "inet6")
|
||||
if self.ipv4:
|
||||
return
|
||||
time.sleep(1)
|
||||
raise TimeoutError(
|
||||
f"Container {self.name!r} did not become ready within {timeout}s"
|
||||
)
|
||||
|
||||
def rss_mib(self):
|
||||
"""Return ``(used, total)`` memory from container (or None if unobtainable)."""
|
||||
output = self.bash("free -m", check=False)
|
||||
if output:
|
||||
for line in output.splitlines():
|
||||
if line.startswith("Mem:"):
|
||||
parts = line.split()
|
||||
return int(parts[2]), int(parts[1])
|
||||
|
||||
|
||||
class RelayContainer(Container):
|
||||
"""Container handle for a chatmail relay.
|
||||
|
||||
Accepts the short relay name (e.g. ``test0``) and derives
|
||||
the Incus container name and mail domain automatically.
|
||||
"""
|
||||
|
||||
def __init__(self, incus, name):
|
||||
super().__init__(
|
||||
incus,
|
||||
f"{name}-localchat",
|
||||
domain=f"_{name}{DOMAIN_SUFFIX}",
|
||||
)
|
||||
self.sname = name
|
||||
self.ini = incus.lxconfigs_dir / f"chatmail-{name}.ini"
|
||||
self.zone = incus.lxconfigs_dir / f"{name}.zone"
|
||||
|
||||
def launch(self):
|
||||
"""Launch (from a potentially cached image) and clear inherited chatmail-version."""
|
||||
image = super().launch()
|
||||
self.bash("rm -f /etc/chatmail-version")
|
||||
return image
|
||||
|
||||
def destroy(self):
|
||||
"""Stop, delete, and clean up config files."""
|
||||
super().destroy()
|
||||
if self.ini.exists():
|
||||
self.ini.unlink()
|
||||
|
||||
def disable_ipv6(self):
|
||||
"""Disable IPv6 inside the container via sysctl."""
|
||||
# incus provides net.* virtualization for LXC containers so that
|
||||
# these sysctls only affect the container's network namespace.
|
||||
self.bash("""
|
||||
sysctl -w net.ipv6.conf.all.disable_ipv6=1
|
||||
sysctl -w net.ipv6.conf.default.disable_ipv6=1
|
||||
""")
|
||||
self.push_file_content(
|
||||
"/etc/sysctl.d/99-disable-ipv6.conf",
|
||||
"""
|
||||
net.ipv6.conf.all.disable_ipv6=1
|
||||
net.ipv6.conf.default.disable_ipv6=1
|
||||
""",
|
||||
)
|
||||
|
||||
def configure_hosts(self, ip):
|
||||
"""Set hostname and /etc/hosts inside the container."""
|
||||
self.bash(f"""
|
||||
echo '{self.name}' > /etc/hostname
|
||||
hostname {self.name}
|
||||
sed -i '/ {self.domain}$/d' /etc/hosts
|
||||
echo '{ip} {self.name} {self.domain}' >> /etc/hosts
|
||||
""")
|
||||
|
||||
def publish_as_relay_image(self):
|
||||
"""Publish this container as a reusable relay image.
|
||||
|
||||
Stops the container, 'publishes' it as 'localchat-relay', then restarts it.
|
||||
"""
|
||||
if self.incus.find_image([RELAY_IMAGE_ALIAS]):
|
||||
return
|
||||
self.out.print(
|
||||
f" Locally caching {self.name!r} as '{RELAY_IMAGE_ALIAS}' image ..."
|
||||
)
|
||||
self.incus.run(
|
||||
["publish", self.name, f"--alias={RELAY_IMAGE_ALIAS}", "--force"]
|
||||
)
|
||||
self.wait_ready()
|
||||
self.out.print(f" Relay image '{RELAY_IMAGE_ALIAS}' ready.")
|
||||
|
||||
def deployed_version(self):
|
||||
"""Read /etc/chatmail-version, or None if absent."""
|
||||
return self.bash("cat /etc/chatmail-version", check=False)
|
||||
|
||||
def deployed_domain(self):
|
||||
"""Read the domain deployed on the container (postfix myhostname)."""
|
||||
return self.bash(
|
||||
"postconf -h myhostname 2>/dev/null",
|
||||
check=False,
|
||||
)
|
||||
|
||||
def verify_ssh(self, ssh_config):
|
||||
"""Verify SSH connectivity to this container."""
|
||||
cmd = f"ssh -F {ssh_config} -o ConnectTimeout=60 root@{self.domain} hostname"
|
||||
return shell(cmd, timeout=60).returncode == 0
|
||||
|
||||
def configure_dns(self, dns_ip):
|
||||
"""Point this container's resolver at *dns_ip* and verify DNS is reachable."""
|
||||
self.bash(f"""
|
||||
systemctl disable --now systemd-resolved 2>/dev/null || true
|
||||
rm -f /etc/resolv.conf
|
||||
printf 'nameserver {dns_ip}\\n' >/etc/resolv.conf
|
||||
mkdir -p /etc/unbound/unbound.conf.d
|
||||
""")
|
||||
self.push_file_content(
|
||||
"/etc/unbound/unbound.conf.d/localchat-forward.conf",
|
||||
f"""
|
||||
server:
|
||||
domain-insecure: "localchat"
|
||||
|
||||
forward-zone:
|
||||
name: "localchat"
|
||||
forward-addr: {dns_ip}
|
||||
""",
|
||||
)
|
||||
self.bash("systemctl restart unbound 2>/dev/null || true")
|
||||
self._wait_dns_reachable(dns_ip)
|
||||
|
||||
def _wait_dns_reachable(self, dns_ip, timeout=10):
|
||||
"""Poll until *dns_ip* answers a DNS query from this container."""
|
||||
if self.bash("which dig", check=False) is None:
|
||||
self.bash(
|
||||
"DEBIAN_FRONTEND=noninteractive "
|
||||
"apt-get install -y dnsutils 2>/dev/null || true"
|
||||
)
|
||||
deadline = time.time() + timeout
|
||||
while time.time() < deadline:
|
||||
result = self.bash(
|
||||
f"dig @{dns_ip} . SOA +short +time=1 +tries=1",
|
||||
check=False,
|
||||
)
|
||||
if result and result.strip():
|
||||
return
|
||||
time.sleep(0.5)
|
||||
raise DNSConfigurationError(
|
||||
f"DNS at {dns_ip} not reachable from {self.name} after {timeout}s"
|
||||
)
|
||||
|
||||
def write_ini(self, disable_ipv6=False):
|
||||
"""Generate a chatmail.ini config file in lxconfigs/."""
|
||||
from chatmaild.config import write_initial_config
|
||||
|
||||
overrides = {
|
||||
"max_user_send_per_minute": 600,
|
||||
"max_user_send_burst_size": 100,
|
||||
"mtail_address": "127.0.0.1",
|
||||
}
|
||||
if disable_ipv6:
|
||||
overrides["disable_ipv6"] = "True"
|
||||
write_initial_config(self.ini, self.domain, overrides)
|
||||
return self.ini
|
||||
|
||||
|
||||
class DNSContainer(Container):
|
||||
"""Container handle for the PowerDNS name server.
|
||||
|
||||
Manages the authoritative and recursive DNS services required for
|
||||
name resolution in the local testing environment.
|
||||
"""
|
||||
|
||||
def __init__(self, incus):
|
||||
super().__init__(incus, DNS_CONTAINER_NAME, domain=DNS_DOMAIN)
|
||||
|
||||
def pdnsutil(self, *args, check=True):
|
||||
"""Run ``pdnsutil <args>`` inside the DNS container."""
|
||||
return self.run_cmd("pdnsutil", *args, check=check)
|
||||
|
||||
def replace_rrset(self, zone, name, rtype, ttl, rdata):
|
||||
"""Shortcut for ``pdnsutil replace-rrset``."""
|
||||
self.pdnsutil("replace-rrset", zone, name, rtype, ttl, rdata)
|
||||
|
||||
def restart_services(self):
|
||||
"""Restart pdns and pdns-recursor, then wait until DNS is answering."""
|
||||
self.bash("""
|
||||
systemctl restart pdns
|
||||
systemctl restart pdns-recursor || true
|
||||
""")
|
||||
self._wait_dns_ready()
|
||||
|
||||
def _wait_dns_ready(self, timeout=60):
|
||||
"""Poll until the recursor answers a query on port 53."""
|
||||
deadline = time.time() + timeout
|
||||
while time.time() < deadline:
|
||||
result = self.bash(
|
||||
"dig @127.0.0.1 . SOA +short +time=1 +tries=1",
|
||||
check=False,
|
||||
)
|
||||
if result and result.strip():
|
||||
return
|
||||
time.sleep(0.5)
|
||||
raise DNSConfigurationError(f"DNS recursor not answering after {timeout}s")
|
||||
|
||||
def ensure(self):
|
||||
"""Create the DNS container with PowerDNS if needed.
|
||||
|
||||
Calls ``super().ensure()`` to create/start the container
|
||||
and set up SSH, then installs PowerDNS and configures
|
||||
the Incus bridge to use this container as DNS.
|
||||
"""
|
||||
super().ensure()
|
||||
self._install_powerdns()
|
||||
self.incus.run(
|
||||
["network", "set", "incusbr0", "dns.mode=none"],
|
||||
check=False,
|
||||
)
|
||||
self.incus.run(
|
||||
["network", "set", "incusbr0", f"raw.dnsmasq=dhcp-option=6,{self.ipv4}"],
|
||||
check=False,
|
||||
)
|
||||
|
||||
def destroy(self):
|
||||
"""Stop, delete, and reset bridge DNS config."""
|
||||
super().destroy()
|
||||
self.incus.run(["network", "unset", "incusbr0", "dns.mode"], check=False)
|
||||
self.incus.run(["network", "unset", "incusbr0", "raw.dnsmasq"], check=False)
|
||||
|
||||
def _install_powerdns(self):
|
||||
"""Install and configure PowerDNS if not already present."""
|
||||
if self.run_cmd("which", "pdns_server", check=False) is not None:
|
||||
return
|
||||
|
||||
host_ns = self.incus.get_host_nameservers()
|
||||
ns_lines = "\n".join(f"nameserver {n}" for n in host_ns)
|
||||
|
||||
self.bash(f"""
|
||||
systemctl disable --now systemd-resolved 2>/dev/null || true
|
||||
rm -f /etc/resolv.conf
|
||||
printf '{ns_lines}\n' > /etc/resolv.conf
|
||||
|
||||
# Block automatic service startup during package installation
|
||||
printf '#!/bin/sh\\nexit 101\\n' > /usr/sbin/policy-rc.d
|
||||
chmod +x /usr/sbin/policy-rc.d
|
||||
|
||||
apt-get -o DPkg::Lock::Timeout=60 update
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||
pdns-server pdns-backend-sqlite3 sqlite3 pdns-recursor dnsutils
|
||||
|
||||
# Remove the startup block
|
||||
rm /usr/sbin/policy-rc.d
|
||||
|
||||
systemctl stop pdns pdns-recursor || true
|
||||
mkdir -p /var/lib/powerdns
|
||||
sqlite3 /var/lib/powerdns/pdns.sqlite3 \
|
||||
</usr/share/doc/pdns-backend-sqlite3/schema.sqlite3.sql
|
||||
chown -R pdns:pdns /var/lib/powerdns
|
||||
""")
|
||||
|
||||
self.push_file_content(
|
||||
"/etc/powerdns/pdns.conf",
|
||||
"""
|
||||
launch=gsqlite3
|
||||
gsqlite3-database=/var/lib/powerdns/pdns.sqlite3
|
||||
local-address=127.0.0.1
|
||||
local-port=5353
|
||||
""",
|
||||
)
|
||||
|
||||
self.push_file_content(
|
||||
"/etc/powerdns/recursor.conf",
|
||||
"""
|
||||
local-address=0.0.0.0
|
||||
local-port=53
|
||||
forward-zones=localchat=127.0.0.1:5353
|
||||
allow-from=0.0.0.0/0
|
||||
dont-query=
|
||||
dnssec=off
|
||||
""",
|
||||
)
|
||||
|
||||
self.bash("""
|
||||
systemctl start pdns
|
||||
systemctl start pdns-recursor
|
||||
echo 'nameserver 127.0.0.1' > /etc/resolv.conf
|
||||
""")
|
||||
self._wait_dns_ready()
|
||||
|
||||
def reset_dns_records(self, dns_ip, domains):
|
||||
"""Create DNS zones with initial A records via pdnsutil.
|
||||
|
||||
Only sets SOA, NS, and A records as the minimal set
|
||||
needed for SSH connectivity. Full records (MX, TXT, SRV,
|
||||
CNAME, DKIM) are added later by ``cmdeploy dns``.
|
||||
|
||||
Args:
|
||||
dns_ip: IP of the DNS container
|
||||
domains: list of dicts with 'name', 'domain', 'ip'
|
||||
"""
|
||||
for d in domains:
|
||||
domain = d["domain"]
|
||||
ip = d["ip"]
|
||||
self.out.print(f" {domain} -> {ip}")
|
||||
|
||||
# Delete and recreate zone fresh (removes stale records)
|
||||
self.pdnsutil("delete-zone", domain, check=False)
|
||||
self.pdnsutil("create-zone", domain, f"ns.{domain}")
|
||||
|
||||
serial = str(int(time.time()))
|
||||
soa = f"ns.{domain} hostmaster.{domain} {serial} 3600 900 604800 300"
|
||||
self.replace_rrset(domain, ".", "SOA", "3600", soa)
|
||||
self.replace_rrset(domain, ".", "NS", "3600", f"ns.{domain}.")
|
||||
self.replace_rrset(domain, ".", "A", "3600", ip)
|
||||
self.replace_rrset(domain, "ns", "A", "3600", dns_ip)
|
||||
|
||||
# AAAA (domain -> container IPv6, if available)
|
||||
ipv6 = d.get("ipv6")
|
||||
if ipv6:
|
||||
self.replace_rrset(domain, ".", "AAAA", "3600", ipv6)
|
||||
self.out.print(f" zone reset: SOA, NS, A, AAAA ({ip}, {ipv6})")
|
||||
else:
|
||||
# Remove any stale AAAA record
|
||||
self.pdnsutil("delete-rrset", domain, ".", "AAAA", check=False)
|
||||
self.out.print(f" zone reset: SOA, NS, A ({ip}, IPv4-only)")
|
||||
|
||||
self.restart_services()
|
||||
|
||||
def set_dns_records(self, text):
|
||||
"""Add or overwrite DNS records from standard BIND format.
|
||||
|
||||
Uses ``cmdeploy.dns.parse_zone_records`` to parse.
|
||||
Zones are created automatically from the record names.
|
||||
"""
|
||||
from ..dns import parse_zone_records
|
||||
|
||||
zones_seen = set()
|
||||
|
||||
for name, ttl, rtype, rdata in parse_zone_records(text):
|
||||
# Derive zone from name: find top-level .localchat domain
|
||||
name_parts = name.split(".")
|
||||
zone = name # fallback
|
||||
for i in range(len(name_parts) - 1):
|
||||
if name_parts[i + 1 :] == ["localchat"]:
|
||||
zone = ".".join(name_parts[i:])
|
||||
break
|
||||
|
||||
# Create zone if first time seeing it
|
||||
if zone not in zones_seen:
|
||||
self.pdnsutil(
|
||||
"create-zone",
|
||||
zone,
|
||||
f"ns.{zone}",
|
||||
check=False,
|
||||
)
|
||||
zones_seen.add(zone)
|
||||
|
||||
# Figure out the record name relative to zone
|
||||
if name == zone:
|
||||
relative = "."
|
||||
elif name.endswith(f".{zone}"):
|
||||
relative = name[: -(len(zone) + 1)]
|
||||
else:
|
||||
relative = name
|
||||
|
||||
self.replace_rrset(zone, relative, rtype, ttl, rdata)
|
||||
|
||||
if zones_seen:
|
||||
self.restart_services()
|
||||
@@ -78,3 +78,11 @@ counter rejected_unencrypted_mail_count
|
||||
/Rejected unencrypted mail/ {
|
||||
rejected_unencrypted_mail_count++
|
||||
}
|
||||
|
||||
counter quota_expire_runs
|
||||
counter quota_expire_removed_files
|
||||
|
||||
/quota-expire: removed (?P<count>\d+) message\(s\)/ {
|
||||
quota_expire_runs++
|
||||
quota_expire_removed_files += $count
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
[Unit]
|
||||
Description=mtail
|
||||
After=multi-user.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
|
||||
@@ -73,6 +73,10 @@ http {
|
||||
|
||||
access_log syslog:server=unix:/dev/log,facility=local7;
|
||||
|
||||
location /mxdeliv {
|
||||
proxy_pass http://127.0.0.1:{{ config.filtermail_http_port_incoming }};
|
||||
}
|
||||
|
||||
location / {
|
||||
# First attempt to serve request as file, then
|
||||
# as directory, then fall back to displaying a 404.
|
||||
|
||||
@@ -54,14 +54,16 @@ smtpd_tls_exclude_ciphers = aNULL, RC4, MD5, DES
|
||||
tls_preempt_cipherlist = yes
|
||||
|
||||
smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination
|
||||
myhostname = {{ config.mail_domain }}
|
||||
myhostname = {{ config.postfix_myhostname }}
|
||||
alias_maps = hash:/etc/aliases
|
||||
alias_database = hash:/etc/aliases
|
||||
|
||||
# Postfix does not deliver mail for any domain by itself.
|
||||
# Primary domain is listed in `virtual_mailbox_domains` instead
|
||||
# and handed over to Dovecot.
|
||||
mydestination =
|
||||
# When postfix receives mail for $mydestination,
|
||||
# it hands it over to dovecot via $local_transport.
|
||||
mydestination = {{ config.mail_domain }}
|
||||
local_transport = lmtp:unix:private/dovecot-lmtp
|
||||
# postfix doesn't check whether local users exist or not:
|
||||
local_recipient_maps =
|
||||
|
||||
relayhost =
|
||||
{% if disable_ipv6 %}
|
||||
@@ -69,15 +71,6 @@ mynetworks = 127.0.0.0/8
|
||||
{% else %}
|
||||
mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
|
||||
{% endif %}
|
||||
{% if config.addr_v4 %}
|
||||
smtp_bind_address = {{ config.addr_v4 }}
|
||||
{% endif %}
|
||||
{% if config.addr_v6 %}
|
||||
smtp_bind_address6 = {{ config.addr_v6 }}
|
||||
{% endif %}
|
||||
{% if config.addr_v4 or config.addr_v6 %}
|
||||
smtp_bind_address_enforce = yes
|
||||
{% endif %}
|
||||
mailbox_size_limit = 0
|
||||
message_size_limit = {{config.max_message_size}}
|
||||
recipient_delimiter = +
|
||||
@@ -88,24 +81,6 @@ inet_protocols = ipv4
|
||||
inet_protocols = all
|
||||
{% endif %}
|
||||
|
||||
# Postfix does not try IPv4 and IPv6 connections
|
||||
# concurrently as of version 3.7.11.
|
||||
#
|
||||
# When relay has both A (IPv4) and AAAA (IPv6) records,
|
||||
# but broken IPv6 connectivity,
|
||||
# every second message is delayed by the connection timeout
|
||||
# <https://www.postfix.org/postconf.5.html#smtp_connect_timeout>
|
||||
# which defaults to 30 seconds. Reducing timeouts is not a solution
|
||||
# as this will result in a failure to connect to slow servers.
|
||||
#
|
||||
# As a workaround we always prefer IPv4 when it is available.
|
||||
#
|
||||
# The setting is documented at
|
||||
# <https://www.postfix.org/postconf.5.html#smtp_address_preference>
|
||||
smtp_address_preference=ipv4
|
||||
|
||||
virtual_transport = lmtp:unix:private/dovecot-lmtp
|
||||
virtual_mailbox_domains = {{ config.mail_domain }}
|
||||
lmtp_header_checks = regexp:/etc/postfix/lmtp_header_cleanup
|
||||
|
||||
mua_client_restrictions = permit_sasl_authenticated, reject
|
||||
@@ -118,3 +93,12 @@ smtpd_sender_login_maps = regexp:/etc/postfix/login_map
|
||||
# Do not lookup SMTP client hostnames to reduce delays
|
||||
# and avoid unnecessary DNS requests.
|
||||
smtpd_peername_lookup = no
|
||||
|
||||
# Use filtermail-transport to relay messages.
|
||||
# We can't force postfix to split messages per destination,
|
||||
# when specifying a custom next-hop,
|
||||
# so instead this is handled in filtermail.
|
||||
# We use LMTP instead SMTP so we can communicate per-recipient errors back to postfix.
|
||||
default_transport = lmtp-filtermail:inet:[127.0.0.1]:{{ config.filtermail_lmtp_port_transport }}
|
||||
lmtp-filtermail_initial_destination_concurrency=10000
|
||||
lmtp-filtermail_destination_concurrency_limit=10000
|
||||
|
||||
@@ -80,8 +80,9 @@ filter unix - n n - - lmtp
|
||||
127.0.0.1:{{ config.postfix_reinject_port }} inet n - n - 100 smtpd
|
||||
-o syslog_name=postfix/reinject
|
||||
-o milter_macro_daemon_name=ORIGINATING
|
||||
-o smtpd_milters=unix:opendkim/opendkim.sock
|
||||
-o cleanup_service_name=authclean
|
||||
{% if not config.ipv4_relay %} -o smtpd_milters=unix:opendkim/opendkim.sock
|
||||
{% endif %}
|
||||
|
||||
# Local SMTP server for reinjecting incoming filtered mail
|
||||
127.0.0.1:{{ config.postfix_reinject_port_incoming }} inet n - n - 100 smtpd
|
||||
@@ -100,3 +101,8 @@ filter unix - n n - - lmtp
|
||||
# cannot send unprotected Subject.
|
||||
authclean unix n - - - 0 cleanup
|
||||
-o header_checks=regexp:/etc/postfix/submission_header_cleanup
|
||||
|
||||
lmtp-filtermail unix - - y - 10000 lmtp
|
||||
-o syslog_name=postfix/lmtp-filtermail
|
||||
-o lmtp_header_checks=
|
||||
-o lmtp_tls_security_level=none
|
||||
|
||||
@@ -95,11 +95,9 @@ def check_zonefile(zonefile, verbose=True):
|
||||
if not zf_line.strip() or zf_line.startswith(";"):
|
||||
continue
|
||||
print(f"dns-checking {zf_line!r}") if verbose else log_progress("")
|
||||
parts = zf_line.split(None, 4)
|
||||
zf_domain = parts[0].rstrip(".")
|
||||
# parts[1]=TTL, parts[2]=IN, parts[3]=type, parts[4]=rdata
|
||||
zf_typ = parts[3]
|
||||
zf_value = parts[4].strip()
|
||||
zf_domain, _ttl, _in, zf_typ, zf_value = zf_line.split(None, 4)
|
||||
zf_domain = zf_domain.rstrip(".")
|
||||
zf_value = zf_value.strip()
|
||||
query_value = query_dns(zf_typ, zf_domain)
|
||||
if zf_value != query_value:
|
||||
assert zf_typ in ("A", "AAAA", "CNAME", "CAA", "SRV", "MX", "TXT"), zf_line
|
||||
|
||||
@@ -12,27 +12,15 @@ def openssl_selfsigned_args(domain, cert_path, key_path, days=36500):
|
||||
``www.<domain>`` and ``mta-sts.<domain>``.
|
||||
"""
|
||||
return [
|
||||
"openssl",
|
||||
"req",
|
||||
"-x509",
|
||||
"-newkey",
|
||||
"ec",
|
||||
"-pkeyopt",
|
||||
"ec_paramgen_curve:P-256",
|
||||
"-noenc",
|
||||
"-days",
|
||||
str(days),
|
||||
"-keyout",
|
||||
str(key_path),
|
||||
"-out",
|
||||
str(cert_path),
|
||||
"-subj",
|
||||
f"/CN={domain}",
|
||||
"openssl", "req", "-x509",
|
||||
"-newkey", "ec", "-pkeyopt", "ec_paramgen_curve:P-256",
|
||||
"-noenc", "-days", str(days),
|
||||
"-keyout", str(key_path),
|
||||
"-out", str(cert_path),
|
||||
"-subj", f"/CN={domain}",
|
||||
# Mark as end-entity cert so it cannot be used as a CA to sign others.
|
||||
"-addext",
|
||||
"basicConstraints=critical,CA:FALSE",
|
||||
"-addext",
|
||||
"extendedKeyUsage=serverAuth,clientAuth",
|
||||
"-addext", "basicConstraints=critical,CA:FALSE",
|
||||
"-addext", "extendedKeyUsage=serverAuth,clientAuth",
|
||||
"-addext",
|
||||
f"subjectAltName=DNS:{domain},DNS:www.{domain},DNS:mta-sts.{domain}",
|
||||
]
|
||||
@@ -54,9 +42,7 @@ class SelfSignedTlsDeployer(Deployer):
|
||||
|
||||
def configure(self):
|
||||
args = openssl_selfsigned_args(
|
||||
self.mail_domain,
|
||||
self.cert_path,
|
||||
self.key_path,
|
||||
self.mail_domain, self.cert_path, self.key_path,
|
||||
)
|
||||
cmd = shlex.join(args)
|
||||
server.shell(
|
||||
|
||||
@@ -49,13 +49,8 @@ class SSHExec:
|
||||
RemoteError = execnet.RemoteError
|
||||
FuncError = FuncError
|
||||
|
||||
def __init__(
|
||||
self, host, verbose=False, python="python3", timeout=60, ssh_config=None
|
||||
):
|
||||
spec = f"ssh=root@{host}//python={python}"
|
||||
if ssh_config:
|
||||
spec += f"//ssh_config={ssh_config}"
|
||||
self.gateway = execnet.makegateway(spec)
|
||||
def __init__(self, host, verbose=False, python="python3", timeout=60):
|
||||
self.gateway = execnet.makegateway(f"ssh=root@{host}//python={python}")
|
||||
self._remote_cmdloop_channel = bootstrap_remote(self.gateway, remote)
|
||||
self.timeout = timeout
|
||||
self.verbose = verbose
|
||||
@@ -92,9 +87,8 @@ class SSHExec:
|
||||
class LocalExec:
|
||||
FuncError = FuncError
|
||||
|
||||
def __init__(self, verbose=False, docker=False):
|
||||
def __init__(self, verbose=False):
|
||||
self.verbose = verbose
|
||||
self.docker = docker
|
||||
|
||||
def __call__(self, call, kwargs=None, log_callback=None):
|
||||
if kwargs is None:
|
||||
@@ -106,10 +100,6 @@ class LocalExec:
|
||||
if not title:
|
||||
title = call.__name__
|
||||
where = "locally"
|
||||
if self.docker:
|
||||
if call == remote.rdns.perform_initial_checks:
|
||||
kwargs["pre_command"] = "docker exec chatmail "
|
||||
where = "in docker"
|
||||
if self.verbose:
|
||||
print_stderr(f"Running {where}: {title}(**{kwargs})")
|
||||
return self(call, kwargs, log_callback=print_stderr)
|
||||
@@ -118,46 +108,3 @@ class LocalExec:
|
||||
res = self(call, kwargs, log_callback=remote.rshell.log_progress)
|
||||
print_stderr()
|
||||
return res
|
||||
|
||||
|
||||
# pyinfra exposes a ``ssh_config_file`` data key that *should* let
|
||||
# paramiko parse an SSH config file directly. In practice it silently
|
||||
# fails to connect (zero hosts / zero operations), so we resolve the
|
||||
# hostname and identity-file ourselves and pass them via
|
||||
# ``--data ssh_hostname`` / ``--data ssh_key`` instead.
|
||||
# Execnet uses ssh natively (and not paramiko) and doesn't have this problem.
|
||||
|
||||
|
||||
def _get_from_ssh_config(host, ssh_config_path, key):
|
||||
"""Internal helper to parse a value for a specific key from ssh-config."""
|
||||
current_hosts = []
|
||||
found_value = None
|
||||
with open(ssh_config_path) as f:
|
||||
for raw_line in f:
|
||||
line = raw_line.strip()
|
||||
if not line or line.startswith("#"):
|
||||
continue
|
||||
parts = line.split(None, 1)
|
||||
if not parts:
|
||||
continue
|
||||
directive = parts[0].lower()
|
||||
if directive == "host":
|
||||
if host in current_hosts and found_value:
|
||||
return found_value
|
||||
current_hosts = parts[1].split()
|
||||
found_value = None
|
||||
elif directive == key.lower():
|
||||
found_value = parts[1]
|
||||
if host in current_hosts and found_value:
|
||||
return found_value
|
||||
return None
|
||||
|
||||
|
||||
def resolve_host_from_ssh_config(host, ssh_config_path):
|
||||
"""Resolve a host alias to its IP from an ssh-config file."""
|
||||
return _get_from_ssh_config(host, ssh_config_path, "Hostname") or host
|
||||
|
||||
|
||||
def resolve_key_from_ssh_config(host, ssh_config_path):
|
||||
"""Resolve a host alias to its IdentityFile from an ssh-config file."""
|
||||
return _get_from_ssh_config(host, ssh_config_path, "IdentityFile")
|
||||
|
||||
@@ -89,12 +89,11 @@ def test_concurrent_logins_same_account(
|
||||
assert login_results.get()
|
||||
|
||||
|
||||
def test_no_vrfy(cmfactory, chatmail_config):
|
||||
def test_no_vrfy(cmfactory, chatmail_config, maildomain):
|
||||
ac = cmfactory.get_online_account()
|
||||
addr = ac.get_config("addr")
|
||||
domain = chatmail_config.mail_domain
|
||||
|
||||
s = smtplib.SMTP(domain)
|
||||
s = smtplib.SMTP(maildomain)
|
||||
s.starttls()
|
||||
|
||||
s.putcmd("vrfy", f"wrongaddress@{chatmail_config.mail_domain}")
|
||||
|
||||
@@ -20,7 +20,7 @@ def test_fastcgi_working(maildomain, chatmail_config):
|
||||
|
||||
|
||||
@pytest.mark.filterwarnings("ignore::urllib3.exceptions.InsecureRequestWarning")
|
||||
def test_newemail_configure(maildomain, maildomain_ip, rpc, chatmail_config):
|
||||
def test_newemail_configure(maildomain, rpc, chatmail_config):
|
||||
"""Test configuring accounts by scanning a QR code works."""
|
||||
url = f"DCACCOUNT:https://{maildomain}/new"
|
||||
for i in range(3):
|
||||
@@ -30,15 +30,12 @@ def test_newemail_configure(maildomain, maildomain_ip, rpc, chatmail_config):
|
||||
# set_config_from_qr, so fetch credentials via requests instead
|
||||
res = requests.post(f"https://{maildomain}/new", verify=False)
|
||||
data = res.json()
|
||||
rpc.add_or_update_transport(
|
||||
account_id,
|
||||
{
|
||||
"addr": data["email"],
|
||||
"password": data["password"],
|
||||
"imapServer": maildomain_ip,
|
||||
"smtpServer": maildomain_ip,
|
||||
"certificateChecks": "acceptInvalidCertificates",
|
||||
},
|
||||
)
|
||||
rpc.add_or_update_transport(account_id, {
|
||||
"addr": data["email"],
|
||||
"password": data["password"],
|
||||
"imapServer": maildomain,
|
||||
"smtpServer": maildomain,
|
||||
"certificateChecks": "acceptInvalidCertificates",
|
||||
})
|
||||
else:
|
||||
rpc.add_transport_from_qr(account_id, url)
|
||||
|
||||
@@ -8,13 +8,13 @@ import pytest
|
||||
|
||||
from cmdeploy import remote
|
||||
from cmdeploy.cmdeploy import get_sshexec
|
||||
from chatmaild.config import is_valid_ipv4
|
||||
|
||||
|
||||
class TestSSHExecutor:
|
||||
@pytest.fixture(scope="class")
|
||||
def sshexec(self, sshdomain, pytestconfig):
|
||||
ssh_config = pytestconfig.getoption("ssh_config")
|
||||
return get_sshexec(sshdomain, ssh_config=ssh_config)
|
||||
def sshexec(self, sshdomain):
|
||||
return get_sshexec(sshdomain)
|
||||
|
||||
def test_ls(self, sshexec):
|
||||
out = sshexec(call=remote.rdns.shell, kwargs=dict(command="ls"))
|
||||
@@ -22,6 +22,8 @@ class TestSSHExecutor:
|
||||
assert out == out2
|
||||
|
||||
def test_perform_initial(self, sshexec, maildomain):
|
||||
if is_valid_ipv4(maildomain):
|
||||
pytest.skip(f"{maildomain} is not a domain")
|
||||
res = sshexec(
|
||||
remote.rdns.perform_initial_checks, kwargs=dict(mail_domain=maildomain)
|
||||
)
|
||||
@@ -72,6 +74,44 @@ class TestSSHExecutor:
|
||||
assert (now - since_date).total_seconds() < 60 * 60 * 51
|
||||
|
||||
|
||||
def test_dovecot_main_process_matches_installed_binary(sshdomain):
|
||||
sshexec = get_sshexec(sshdomain)
|
||||
main_pid = int(
|
||||
sshexec(
|
||||
call=remote.rshell.shell,
|
||||
kwargs=dict(
|
||||
command="timeout 10 systemctl show -p MainPID --value dovecot.service"
|
||||
),
|
||||
).strip()
|
||||
)
|
||||
assert main_pid != 0, "dovecot.service MainPID is 0 -- service not running?"
|
||||
|
||||
exe = sshexec(
|
||||
call=remote.rshell.shell,
|
||||
kwargs=dict(command=f"timeout 10 readlink /proc/{main_pid}/exe"),
|
||||
).strip()
|
||||
status_text = sshexec(
|
||||
call=remote.rshell.shell,
|
||||
kwargs=dict(
|
||||
command="timeout 10 systemctl show -p StatusText --value dovecot.service"
|
||||
),
|
||||
).strip()
|
||||
installed_version = sshexec(
|
||||
call=remote.rshell.shell, kwargs=dict(command="timeout 10 dovecot --version")
|
||||
).strip()
|
||||
|
||||
assert not exe.endswith("(deleted)"), (
|
||||
f"running dovecot binary was deleted (stale after upgrade): {exe}"
|
||||
)
|
||||
expected_status_text = f"v{installed_version}"
|
||||
assert status_text == expected_status_text or status_text.startswith(
|
||||
f"{expected_status_text} "
|
||||
), (
|
||||
f"dovecot status version mismatch: "
|
||||
f"StatusText={status_text!r}, installed={installed_version!r}"
|
||||
)
|
||||
|
||||
|
||||
def test_timezone_env(remote):
|
||||
for line in remote.iter_output("env"):
|
||||
print(line)
|
||||
@@ -133,10 +173,11 @@ def test_authenticated_from(cmsetup, maildata):
|
||||
@pytest.mark.parametrize("from_addr", ["fake@example.org", "fake@testrun.org"])
|
||||
def test_reject_missing_dkim(cmsetup, maildata, from_addr):
|
||||
domain = cmsetup.maildomain
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
sock.settimeout(10)
|
||||
try:
|
||||
sock = socket.create_connection((domain, 25), timeout=10)
|
||||
sock.close()
|
||||
except (socket.timeout, OSError):
|
||||
sock.connect((domain, 25))
|
||||
except socket.timeout:
|
||||
pytest.skip(f"port 25 not reachable for {domain}")
|
||||
|
||||
recipient = cmsetup.gen_users(1)[0]
|
||||
@@ -183,7 +224,6 @@ def test_rewrite_subject(cmsetup, maildata):
|
||||
assert "Subject: Unencrypted subject" not in rcvd_msg
|
||||
|
||||
|
||||
@pytest.mark.slow
|
||||
def test_exceed_rate_limit(cmsetup, gencreds, maildata, chatmail_config):
|
||||
"""Test that the per-account send-mail limit is exceeded."""
|
||||
user1, user2 = cmsetup.gen_users(2)
|
||||
@@ -206,7 +246,6 @@ def test_exceed_rate_limit(cmsetup, gencreds, maildata, chatmail_config):
|
||||
pytest.fail("Rate limit was not exceeded")
|
||||
|
||||
|
||||
@pytest.mark.slow
|
||||
def test_expunged(remote, chatmail_config):
|
||||
outdated_days = int(chatmail_config.delete_mails_after) + 1
|
||||
find_cmds = [
|
||||
|
||||
@@ -15,7 +15,7 @@ def imap_mailbox(cmfactory, ssl_context):
|
||||
(ac1,) = cmfactory.get_online_accounts(1)
|
||||
user = ac1.get_config("addr")
|
||||
password = ac1.get_config("mail_pw")
|
||||
host = user.split("@")[1]
|
||||
host = user.split("@")[1].strip("[").strip("]")
|
||||
mailbox = imap_tools.MailBox(host, ssl_context=ssl_context)
|
||||
mailbox.login(user, password)
|
||||
mailbox.dc_ac = ac1
|
||||
@@ -67,7 +67,7 @@ class TestEndToEndDeltaChat:
|
||||
assert msg2.get_snapshot().text == "message0"
|
||||
|
||||
def test_exceed_quota(
|
||||
self, cmfactory, lp, tmpdir, remote, chatmail_config, sshdomain, pytestconfig
|
||||
self, cmfactory, lp, tmpdir, remote, chatmail_config, sshdomain
|
||||
):
|
||||
"""This is a very slow test as it needs to upload >100MB of mail data
|
||||
before quota is exceeded, and thus depends on the speed of the upload.
|
||||
@@ -92,9 +92,7 @@ class TestEndToEndDeltaChat:
|
||||
lp.sec(f"filling remote inbox for {user}")
|
||||
fn = f"7743102289.M843172P2484002.c20,S={quota},W=2398:2,"
|
||||
path = chatmail_config.mailboxes_dir.joinpath(user, "cur", fn)
|
||||
sshexec = get_sshexec(
|
||||
sshdomain, ssh_config=pytestconfig.getoption("ssh_config")
|
||||
)
|
||||
sshexec = get_sshexec(sshdomain)
|
||||
sshexec(call=rshell.write_numbytes, kwargs=dict(path=str(path), num=120))
|
||||
res = sshexec(call=rshell.dovecot_recalc_quota, kwargs=dict(user=user))
|
||||
assert res["percent"] >= 100
|
||||
@@ -180,7 +178,7 @@ def test_hide_senders_ip_address(cmfactory, ssl_context):
|
||||
chat.send_text("testing submission header cleanup")
|
||||
user2.wait_for_incoming_msg()
|
||||
addr = user2.get_config("addr")
|
||||
host = addr.split("@")[1]
|
||||
host = addr.split("@")[1].strip("[").strip("]")
|
||||
pw = user2.get_config("mail_pw")
|
||||
mailbox = imap_tools.MailBox(host, ssl_context=ssl_context)
|
||||
mailbox.login(addr, pw)
|
||||
|
||||
@@ -3,15 +3,12 @@ import os
|
||||
from cmdeploy.cmdeploy import main
|
||||
|
||||
|
||||
def test_status_cmd(chatmail_config, capsys, request, pytestconfig):
|
||||
def test_status_cmd(chatmail_config, capsys, request):
|
||||
os.chdir(request.config.invocation_params.dir)
|
||||
command = ["status"]
|
||||
ssh_host = pytestconfig.getoption("ssh_host")
|
||||
if ssh_host:
|
||||
command.extend(["--ssh-host", ssh_host])
|
||||
ssh_config = pytestconfig.getoption("ssh_config")
|
||||
if ssh_config:
|
||||
command.extend(["--ssh-config", ssh_config])
|
||||
if os.getenv("CHATMAIL_SSH"):
|
||||
command.append("--ssh-host")
|
||||
command.append(os.getenv("CHATMAIL_SSH"))
|
||||
assert main(command) == 0
|
||||
status_out = capsys.readouterr()
|
||||
print(status_out.out)
|
||||
|
||||
@@ -1,95 +1,27 @@
|
||||
import imaplib
|
||||
import ipaddress
|
||||
import itertools
|
||||
import os
|
||||
import random
|
||||
import re
|
||||
import smtplib
|
||||
import socket
|
||||
import ssl
|
||||
import subprocess
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
from chatmaild.config import read_config
|
||||
from chatmaild.config import read_config, format_mail_domain, is_valid_ipv4
|
||||
|
||||
|
||||
conftestdir = Path(__file__).parent
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
parser.addoption(
|
||||
"--slow", action="store_true", default=False, help="also run slow tests"
|
||||
)
|
||||
parser.addoption(
|
||||
"--ssh-host",
|
||||
dest="ssh_host",
|
||||
default=None,
|
||||
help="SSH host (overrides mail_domain for SSH operations).",
|
||||
)
|
||||
parser.addoption(
|
||||
"--ssh-config",
|
||||
dest="ssh_config",
|
||||
default=None,
|
||||
help="Path to an SSH config file (e.g. lxconfigs/ssh-config).",
|
||||
)
|
||||
|
||||
|
||||
def _parse_ssh_config_hosts(path):
|
||||
"""Parse an OpenSSH config file and return a dict of hostname -> IP."""
|
||||
mapping = {}
|
||||
current_names = []
|
||||
for ln in Path(path).read_text().splitlines():
|
||||
line = ln.strip()
|
||||
m = re.match(r"^Host\s+(.+)", line)
|
||||
if m:
|
||||
current_names = m.group(1).split()
|
||||
continue
|
||||
m = re.match(r"^Hostname\s+(\S+)", line)
|
||||
if m and current_names:
|
||||
ip = m.group(1)
|
||||
for name in current_names:
|
||||
mapping[name] = ip
|
||||
current_names = []
|
||||
return mapping
|
||||
|
||||
|
||||
_original_getaddrinfo = socket.getaddrinfo
|
||||
|
||||
|
||||
def _make_patched_getaddrinfo(host_map):
|
||||
"""Return a getaddrinfo that resolves hosts in host_map to their IPs."""
|
||||
|
||||
def patched_getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
|
||||
if host in host_map:
|
||||
ip = host_map[host]
|
||||
return _original_getaddrinfo(ip, port, family, type, proto, flags)
|
||||
return _original_getaddrinfo(host, port, family, type, proto, flags)
|
||||
|
||||
return patched_getaddrinfo
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True, scope="session")
|
||||
def _setup_localchat_dns(pytestconfig):
|
||||
"""Monkey-patch socket.getaddrinfo to resolve .localchat via ssh-config."""
|
||||
ssh_config = pytestconfig.getoption("ssh_config")
|
||||
if not ssh_config or not Path(ssh_config).exists():
|
||||
yield {}
|
||||
return
|
||||
host_map = _parse_ssh_config_hosts(ssh_config)
|
||||
if not host_map:
|
||||
yield {}
|
||||
return
|
||||
socket.getaddrinfo = _make_patched_getaddrinfo(host_map)
|
||||
def _is_ip(domain):
|
||||
try:
|
||||
yield host_map
|
||||
finally:
|
||||
socket.getaddrinfo = _original_getaddrinfo
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def ssh_config_host_map(_setup_localchat_dns):
|
||||
"""Return the host-name → IP map parsed from ssh-config."""
|
||||
return _setup_localchat_dns
|
||||
ipaddress.ip_address(domain)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
@@ -99,19 +31,12 @@ def pytest_configure(config):
|
||||
)
|
||||
|
||||
|
||||
def pytest_runtest_setup(item):
|
||||
markers = list(item.iter_markers(name="slow"))
|
||||
if markers:
|
||||
if not item.config.getoption("--slow"):
|
||||
pytest.skip("skipping slow test, use --slow to run")
|
||||
|
||||
|
||||
def _get_chatmail_config():
|
||||
ini = os.environ.get("CHATMAIL_INI")
|
||||
if ini:
|
||||
path = Path(ini).resolve()
|
||||
if path.exists():
|
||||
return read_config(path), path
|
||||
inipath = os.environ.get("CHATMAIL_INI")
|
||||
if inipath:
|
||||
path = Path(inipath).resolve()
|
||||
return read_config(path), path
|
||||
|
||||
current = Path().resolve()
|
||||
while 1:
|
||||
path = current.joinpath("chatmail.ini").resolve()
|
||||
@@ -134,18 +59,17 @@ def chatmail_config(pytestconfig):
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def maildomain(chatmail_config):
|
||||
return chatmail_config.mail_domain
|
||||
return chatmail_config.mail_domain_bare
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def sshdomain(maildomain, pytestconfig):
|
||||
return pytestconfig.getoption("ssh_host") or maildomain
|
||||
def maildomain_deliverable(maildomain):
|
||||
return format_mail_domain(maildomain)
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def maildomain_ip(maildomain, ssh_config_host_map):
|
||||
"""Return the IP for maildomain from ssh-config, or maildomain itself."""
|
||||
return ssh_config_host_map.get(maildomain, maildomain)
|
||||
def sshdomain(maildomain):
|
||||
return os.environ.get("CHATMAIL_SSH", maildomain)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
@@ -389,37 +313,26 @@ from deltachat_rpc_client import DeltaChat, Rpc
|
||||
class ChatmailACFactory:
|
||||
"""RPC-based account factory for chatmail testing."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
rpc,
|
||||
maildomain,
|
||||
maildomain_ip,
|
||||
gencreds,
|
||||
chatmail_config,
|
||||
ssh_config_host_map,
|
||||
):
|
||||
def __init__(self, rpc, maildomain, gencreds, chatmail_config):
|
||||
self.dc = DeltaChat(rpc)
|
||||
self.rpc = rpc
|
||||
self._maildomain = maildomain
|
||||
self._maildomain_ip = maildomain_ip
|
||||
self.gencreds = gencreds
|
||||
self.chatmail_config = chatmail_config
|
||||
self._ssh_config_host_map = ssh_config_host_map
|
||||
|
||||
def _make_transport(self, domain):
|
||||
"""Build a transport config dict for the given domain."""
|
||||
addr, password = self.gencreds(domain)
|
||||
domain_deliverable = format_mail_domain(domain)
|
||||
addr, password = self.gencreds(domain_deliverable)
|
||||
transport = {
|
||||
"addr": addr,
|
||||
"password": password,
|
||||
# Setting server explicitly skips requesting autoconfig XML,
|
||||
# see https://datatracker.ietf.org/doc/draft-ietf-mailmaint-autoconfig/
|
||||
"imapServer": domain,
|
||||
"smtpServer": domain,
|
||||
}
|
||||
# To support running against local relays without host DNS resolution
|
||||
# we attempt resolving the domain via ssh-config
|
||||
# because otherwise core fails to find the address
|
||||
server = self._ssh_config_host_map.get(domain)
|
||||
if server is not None:
|
||||
transport.update({"imapServer": server, "smtpServer": server})
|
||||
if self.chatmail_config.tls_cert_mode == "self":
|
||||
if domain.startswith("_") or is_valid_ipv4(domain):
|
||||
transport["certificateChecks"] = "acceptInvalidCertificates"
|
||||
return transport
|
||||
|
||||
@@ -434,9 +347,23 @@ class ChatmailACFactory:
|
||||
accounts = []
|
||||
for _ in range(num):
|
||||
account = self.dc.add_account()
|
||||
future = account.add_or_update_transport.future(
|
||||
self._make_transport(domain)
|
||||
)
|
||||
domain_deliverable = format_mail_domain(domain)
|
||||
addr, password = self.gencreds(domain_deliverable)
|
||||
if _is_ip(domain):
|
||||
# Use DCLOGIN scheme with explicit server hosts,
|
||||
# matching how madmail presents its addresses to users.
|
||||
qr = (
|
||||
f"dclogin:{addr}"
|
||||
f"?p={password}&v=1"
|
||||
f"&ih={domain}&ip=993"
|
||||
f"&sh={domain}&sp=465"
|
||||
f"&ic=3&ss=default"
|
||||
)
|
||||
future = account.add_transport_from_qr.future(qr)
|
||||
else:
|
||||
future = account.add_or_update_transport.future(
|
||||
self._make_transport(domain)
|
||||
)
|
||||
futures.append(future)
|
||||
|
||||
# ensure messages stay in INBOX so that they can be
|
||||
@@ -471,46 +398,35 @@ def rpc(tmp_path_factory):
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def cmfactory(
|
||||
rpc, gencreds, maildomain, maildomain_ip, chatmail_config, ssh_config_host_map
|
||||
):
|
||||
def cmfactory(rpc, gencreds, maildomain, chatmail_config):
|
||||
"""Return a ChatmailACFactory for creating online Delta Chat accounts."""
|
||||
return ChatmailACFactory(
|
||||
rpc=rpc,
|
||||
maildomain=maildomain,
|
||||
maildomain_ip=maildomain_ip,
|
||||
gencreds=gencreds,
|
||||
chatmail_config=chatmail_config,
|
||||
ssh_config_host_map=ssh_config_host_map,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def remote(sshdomain, pytestconfig):
|
||||
r = Remote(sshdomain, ssh_config=pytestconfig.getoption("ssh_config"))
|
||||
def remote(sshdomain):
|
||||
r = Remote(sshdomain)
|
||||
yield r
|
||||
r.close()
|
||||
|
||||
|
||||
class Remote:
|
||||
def __init__(self, sshdomain, ssh_config=None):
|
||||
def __init__(self, sshdomain):
|
||||
self.sshdomain = sshdomain
|
||||
self.ssh_config = ssh_config
|
||||
self._procs = []
|
||||
|
||||
def iter_output(self, logcmd="", ready=None):
|
||||
getjournal = "journalctl -f" if not logcmd else logcmd
|
||||
print(self.sshdomain)
|
||||
match self.sshdomain:
|
||||
case "@local":
|
||||
command = []
|
||||
case "localhost":
|
||||
command = []
|
||||
case _:
|
||||
command = ["ssh"]
|
||||
if self.ssh_config:
|
||||
command.extend(["-F", self.ssh_config])
|
||||
command.append(f"root@{self.sshdomain}")
|
||||
case "@local": command = []
|
||||
case "localhost": command = []
|
||||
case _: command = ["ssh", f"root@{self.sshdomain}"]
|
||||
[command.append(arg) for arg in getjournal.split()]
|
||||
popen = subprocess.Popen(
|
||||
command,
|
||||
@@ -519,15 +435,19 @@ class Remote:
|
||||
stderr=subprocess.DEVNULL,
|
||||
)
|
||||
self._procs.append(popen)
|
||||
while 1:
|
||||
line = popen.stdout.readline()
|
||||
res = line.decode().strip().lower()
|
||||
if not res:
|
||||
break
|
||||
if ready is not None:
|
||||
ready()
|
||||
ready = None
|
||||
yield res
|
||||
try:
|
||||
while 1:
|
||||
line = popen.stdout.readline()
|
||||
res = line.decode().strip().lower()
|
||||
if not res:
|
||||
break
|
||||
if ready is not None:
|
||||
ready()
|
||||
ready = None
|
||||
yield res
|
||||
finally:
|
||||
popen.terminate()
|
||||
popen.wait()
|
||||
|
||||
def close(self):
|
||||
while self._procs:
|
||||
|
||||
@@ -23,21 +23,30 @@ class TestCmdline:
|
||||
run = parser.parse_args(["run"])
|
||||
assert init and run
|
||||
|
||||
def test_init_not_overwrite(self, tmp_path, capsys, monkeypatch):
|
||||
def test_init_not_overwrite(self, capsys, tmp_path, monkeypatch):
|
||||
monkeypatch.delenv("CHATMAIL_INI", raising=False)
|
||||
monkeypatch.chdir(tmp_path)
|
||||
|
||||
assert main(["init", "chat.example.org"]) == 0
|
||||
inipath = tmp_path / "chatmail.ini"
|
||||
args = ["init", "--config", str(inipath), "chat.example.org"]
|
||||
assert main(args) == 0
|
||||
capsys.readouterr()
|
||||
|
||||
assert main(["init", "chat.example.org"]) == 1
|
||||
assert main(args) == 1
|
||||
out, err = capsys.readouterr()
|
||||
assert "path exists" in out.lower()
|
||||
|
||||
assert main(["init", "chat.example.org", "--force"]) == 0
|
||||
args.insert(1, "--force")
|
||||
assert main(args) == 0
|
||||
out, err = capsys.readouterr()
|
||||
assert "deleting config file" in out.lower()
|
||||
|
||||
def test_dns_skip_on_ip(self, capsys, tmp_path, monkeypatch):
|
||||
monkeypatch.delenv("CHATMAIL_INI", raising=False)
|
||||
inipath = tmp_path / "chatmail.ini"
|
||||
assert main(["init", "--config", str(inipath), "1.3.3.7"]) == 0
|
||||
assert main(["dns", "--config", str(inipath)]) == 0
|
||||
out, err = capsys.readouterr()
|
||||
assert out == "[WARNING] 1.3.3.7 is not a domain, skipping DNS checks.\n"
|
||||
|
||||
|
||||
def test_www_folder(example_config, tmp_path):
|
||||
reporoot = importlib.resources.files(__package__).joinpath("../../../../").resolve()
|
||||
|
||||
@@ -165,7 +165,6 @@ def test_parse_zone_records_invalid_line():
|
||||
|
||||
def parse_zonefile_into_dict(zonefile, mockdns_base, only_required=False):
|
||||
if only_required:
|
||||
# Only take records before the "; Recommended" section
|
||||
zonefile = zonefile.split("; Recommended")[0]
|
||||
for name, ttl, rtype, rdata in parse_zone_records(zonefile):
|
||||
mockdns_base.setdefault(rtype, {})[name] = rdata
|
||||
|
||||
238
cmdeploy/src/cmdeploy/tests/test_dovecot_deployer.py
Normal file
238
cmdeploy/src/cmdeploy/tests/test_dovecot_deployer.py
Normal file
@@ -0,0 +1,238 @@
|
||||
from contextlib import nullcontext
|
||||
from types import SimpleNamespace
|
||||
|
||||
import pytest
|
||||
from pyinfra.facts.deb import DebPackages
|
||||
|
||||
from cmdeploy.dovecot import deployer as dovecot_deployer
|
||||
|
||||
|
||||
def make_host(*fact_pairs):
|
||||
"""Build a mock host; get_fact(cls) dispatches to the provided facts mapping.
|
||||
|
||||
Args:
|
||||
*fact_pairs: tuples of (fact_class, fact_value) to register
|
||||
|
||||
Returns:
|
||||
SimpleNamespace with get_fact that raises a clear error if an
|
||||
unexpected fact type is requested.
|
||||
"""
|
||||
facts = dict(fact_pairs)
|
||||
|
||||
def get_fact(cls):
|
||||
if cls not in facts:
|
||||
registered = ", ".join(c.__name__ for c in facts)
|
||||
raise LookupError(
|
||||
f"unexpected get_fact({cls.__name__}); "
|
||||
f"only registered: {registered}"
|
||||
)
|
||||
return facts[cls]
|
||||
|
||||
return SimpleNamespace(get_fact=get_fact)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def deployer():
|
||||
return dovecot_deployer.DovecotDeployer(
|
||||
SimpleNamespace(mail_domain="chat.example.org"),
|
||||
disable_mail=False,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def patch_blocked(monkeypatch):
|
||||
monkeypatch.setattr(dovecot_deployer, "blocked_service_startup", nullcontext)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_files_put(monkeypatch):
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer.files,
|
||||
"put",
|
||||
lambda **kwargs: SimpleNamespace(changed=False),
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def track_shell(monkeypatch):
|
||||
calls = []
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer.server,
|
||||
"shell",
|
||||
lambda **kwargs: calls.append(kwargs) or SimpleNamespace(changed=False),
|
||||
)
|
||||
return calls
|
||||
|
||||
|
||||
def test_download_dovecot_package_skips_epoch_matched_install(monkeypatch):
|
||||
epoch_version = dovecot_deployer.DOVECOT_PACKAGE_VERSION
|
||||
downloads = []
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"host",
|
||||
make_host((DebPackages, {"dovecot-core": [epoch_version]})),
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"_pick_url",
|
||||
lambda primary, fallback: primary,
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer.files,
|
||||
"download",
|
||||
lambda **kwargs: downloads.append(kwargs),
|
||||
)
|
||||
|
||||
deb, changed = dovecot_deployer._download_dovecot_package("core", "amd64")
|
||||
|
||||
assert deb is None, f"expected no deb path when version matches, got {deb!r}"
|
||||
assert changed is False, "should not flag changed when version already installed"
|
||||
assert downloads == [], "should not download when version already installed"
|
||||
|
||||
|
||||
def test_download_dovecot_package_uses_archive_version_for_url_and_filename(
|
||||
monkeypatch,
|
||||
):
|
||||
downloads = []
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"host",
|
||||
make_host((DebPackages, {})),
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"_pick_url",
|
||||
lambda primary, fallback: primary,
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer.files,
|
||||
"download",
|
||||
lambda **kwargs: downloads.append(kwargs),
|
||||
)
|
||||
|
||||
deb, changed = dovecot_deployer._download_dovecot_package("core", "amd64")
|
||||
|
||||
archive_version = dovecot_deployer.DOVECOT_ARCHIVE_VERSION.replace("+", "%2B")
|
||||
expected_deb = f"/root/dovecot-core_{archive_version}_amd64.deb"
|
||||
|
||||
# Verify the returned path uses archive version, not package version (with epoch)
|
||||
assert changed is True, "should flag changed when package not yet installed"
|
||||
assert deb == expected_deb, f"deb path mismatch: {deb!r} != {expected_deb!r}"
|
||||
assert dovecot_deployer.DOVECOT_PACKAGE_VERSION not in deb, (
|
||||
f"deb path should use archive version (no epoch), got {deb!r}"
|
||||
)
|
||||
assert len(downloads) == 1, "files.download should be called exactly once"
|
||||
|
||||
|
||||
def test_install_skips_dpkg_path_when_epoch_matched_packages_present(
|
||||
deployer, patch_blocked, mock_files_put, track_shell, monkeypatch
|
||||
):
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"host",
|
||||
make_host(
|
||||
(
|
||||
dovecot_deployer.DebPackages,
|
||||
{
|
||||
"dovecot-core": [dovecot_deployer.DOVECOT_PACKAGE_VERSION],
|
||||
"dovecot-imapd": [dovecot_deployer.DOVECOT_PACKAGE_VERSION],
|
||||
"dovecot-lmtpd": [dovecot_deployer.DOVECOT_PACKAGE_VERSION],
|
||||
},
|
||||
),
|
||||
(dovecot_deployer.Arch, "x86_64"),
|
||||
),
|
||||
)
|
||||
downloads = []
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer.files,
|
||||
"download",
|
||||
lambda **kwargs: downloads.append(kwargs),
|
||||
)
|
||||
|
||||
deployer.install()
|
||||
|
||||
assert downloads == [], "should not download when all packages epoch-matched"
|
||||
assert track_shell == [], "should not run dpkg when all packages epoch-matched"
|
||||
assert deployer.need_restart is False, (
|
||||
"need_restart should be False when nothing changed"
|
||||
)
|
||||
|
||||
|
||||
def test_install_unsupported_arch_falls_back_to_apt(
|
||||
deployer, patch_blocked, mock_files_put, track_shell, monkeypatch
|
||||
):
|
||||
# For unsupported architectures, all fact lookups return the arch string.
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"host",
|
||||
SimpleNamespace(get_fact=lambda cls: "riscv64"),
|
||||
)
|
||||
apt_calls = []
|
||||
|
||||
# Mirrors apt.packages() return value: OperationMeta with .changed property.
|
||||
# Only lmtpd triggers a change to verify |= accumulation of changed flags.
|
||||
def fake_apt(**kwargs):
|
||||
apt_calls.append(kwargs)
|
||||
changed = "lmtpd" in kwargs["packages"][0]
|
||||
return SimpleNamespace(changed=changed)
|
||||
|
||||
monkeypatch.setattr(dovecot_deployer.apt, "packages", fake_apt)
|
||||
|
||||
deployer.install()
|
||||
|
||||
actual_pkgs = [c["packages"] for c in apt_calls]
|
||||
assert actual_pkgs == [["dovecot-core"], ["dovecot-imapd"], ["dovecot-lmtpd"]], (
|
||||
f"expected apt install of core/imapd/lmtpd, got {actual_pkgs}"
|
||||
)
|
||||
assert track_shell == [], "should not run dpkg for unsupported arch"
|
||||
assert deployer.need_restart is True, (
|
||||
"need_restart should be True when apt installed a package"
|
||||
)
|
||||
|
||||
|
||||
def test_install_runs_dpkg_when_packages_need_download(
|
||||
deployer, patch_blocked, mock_files_put, track_shell, monkeypatch
|
||||
):
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"host",
|
||||
make_host(
|
||||
(dovecot_deployer.DebPackages, {}),
|
||||
(dovecot_deployer.Arch, "x86_64"),
|
||||
),
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer,
|
||||
"_pick_url",
|
||||
lambda primary, fallback: primary,
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
dovecot_deployer.files,
|
||||
"download",
|
||||
lambda **kwargs: SimpleNamespace(changed=True),
|
||||
)
|
||||
|
||||
deployer.install()
|
||||
|
||||
assert len(track_shell) == 1, (
|
||||
f"expected one server.shell() call for dpkg install, got {len(track_shell)}"
|
||||
)
|
||||
cmds = track_shell[0]["commands"]
|
||||
assert len(cmds) == 3, f"expected 3 dpkg/apt commands, got: {cmds}"
|
||||
assert cmds[0].startswith("dpkg --force-confdef --force-confold -i ")
|
||||
assert "apt-get -y --fix-broken install" in cmds[1]
|
||||
assert cmds[2].startswith("dpkg --force-confdef --force-confold -i ")
|
||||
assert deployer.need_restart is True, (
|
||||
"need_restart should be True after dpkg install"
|
||||
)
|
||||
|
||||
|
||||
def test_pick_url_falls_back_on_primary_error(monkeypatch):
|
||||
def raise_error(req, timeout):
|
||||
raise OSError("connection timeout")
|
||||
|
||||
monkeypatch.setattr(dovecot_deployer.urllib.request, "urlopen", raise_error)
|
||||
result = dovecot_deployer._pick_url("http://primary", "http://fallback")
|
||||
assert result == "http://fallback", (
|
||||
f"should fall back when primary fails, got {result!r}"
|
||||
)
|
||||
@@ -1,11 +1,10 @@
|
||||
import importlib.resources
|
||||
from pathlib import Path
|
||||
|
||||
from cmdeploy.www import build_webpages
|
||||
|
||||
|
||||
def test_build_webpages(tmp_path, make_config):
|
||||
pkgroot = importlib.resources.files("cmdeploy")
|
||||
src_dir = pkgroot.joinpath("../../../www/src").resolve()
|
||||
src_dir = (Path(__file__).resolve() / "../../../../../www/src").resolve()
|
||||
assert src_dir.exists(), src_dir
|
||||
config = make_config("chat.example.org")
|
||||
build_dir = tmp_path.joinpath("build")
|
||||
|
||||
@@ -1,174 +0,0 @@
|
||||
"""Tests for cmdeploy lxc-* subcommands."""
|
||||
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
import pytest
|
||||
|
||||
from cmdeploy.lxc import cli
|
||||
from cmdeploy.lxc.incus import Incus
|
||||
from cmdeploy.util import Out
|
||||
|
||||
pytestmark = pytest.mark.skipif(
|
||||
not shutil.which("incus"),
|
||||
reason="incus not installed",
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Fixtures
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def ix():
|
||||
out = Out()
|
||||
return Incus(out)
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def lxc_setup():
|
||||
out = Out()
|
||||
ix = Incus(out)
|
||||
ix.get_dns_container().ensure()
|
||||
return ix.list_managed()
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def relay_container(lxc_setup):
|
||||
test_names = {f"{n}-localchat" for n in cli.RELAY_NAMES}
|
||||
relays = [c for c in lxc_setup if c["name"] in test_names and c.get("ip")]
|
||||
if not relays:
|
||||
pytest.skip("no test relay containers running")
|
||||
return relays[0]
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def cmdeploy():
|
||||
def run(*args):
|
||||
return subprocess.run(
|
||||
[sys.executable, "-m", "cmdeploy.cmdeploy", *args],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
check=False,
|
||||
)
|
||||
|
||||
return run
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"subcmd, expected, absent",
|
||||
[
|
||||
(None, ["lxc-start", "lxc-stop", "lxc-test", "lxc-status"], ["lxc-destroy"]),
|
||||
("lxc-start", ["--ipv4-only", "--run"], ["--config"]),
|
||||
("lxc-stop", ["--destroy", "--destroy-all"], ["--config"]),
|
||||
("lxc-test", ["--one"], ["--config"]),
|
||||
("lxc-status", [], ["--config"]),
|
||||
("run", ["--ssh-config"], ["--lxc"]),
|
||||
("dns", ["--ssh-config"], []),
|
||||
("test", ["--ssh-config"], []),
|
||||
("status", ["--ssh-config"], []),
|
||||
],
|
||||
)
|
||||
def test_help_options(cmdeploy, subcmd, expected, absent):
|
||||
args = [subcmd, "--help"] if subcmd else ["--help"]
|
||||
result = cmdeploy(*args)
|
||||
output = result.stdout + result.stderr
|
||||
assert result.returncode == 0
|
||||
for flag in expected:
|
||||
assert flag in output
|
||||
for flag in absent:
|
||||
assert flag not in output
|
||||
|
||||
|
||||
class TestSSHConfig:
|
||||
def test_lxconfigs(self, ix, lxc_setup):
|
||||
d = ix.lxconfigs_dir
|
||||
assert d.name == "lxconfigs"
|
||||
assert d.exists()
|
||||
path = ix.ssh_config_path
|
||||
assert path.name == "ssh-config"
|
||||
assert path.parent.name == "lxconfigs"
|
||||
|
||||
def test_write_ssh_config(self, ix, lxc_setup):
|
||||
path = ix.write_ssh_config()
|
||||
assert path.exists()
|
||||
text = path.read_text()
|
||||
|
||||
for c in lxc_setup:
|
||||
if c.get("ip"):
|
||||
assert c["name"] in text
|
||||
assert f"Hostname {c['ip']}" in text
|
||||
|
||||
assert "User root" in text
|
||||
assert "IdentityFile" in text
|
||||
assert "StrictHostKeyChecking accept-new" in text
|
||||
|
||||
|
||||
def test_dns(ix, relay_container):
|
||||
def dig(qname, qtype):
|
||||
ct = ix.get_dns_container()
|
||||
return ct.bash(f"dig @127.0.0.1 {qname} {qtype} +short").strip()
|
||||
|
||||
domain = relay_container["domain"]
|
||||
assert dig(domain, "A") == relay_container["ip"]
|
||||
assert domain in dig(domain, "MX")
|
||||
assert "587" in dig(f"_submission._tcp.{domain}", "SRV")
|
||||
|
||||
|
||||
class TestLxcStatus:
|
||||
def test_cli_lxc_status_help(self, cmdeploy):
|
||||
result = cmdeploy("lxc-status", "--help")
|
||||
assert result.returncode == 0
|
||||
assert "status" in result.stdout.lower()
|
||||
|
||||
def test_shows_containers(self, lxc_setup, capsys):
|
||||
class QuietOut(Out):
|
||||
def red(self, msg, **kw):
|
||||
pass
|
||||
|
||||
def green(self, msg, **kw):
|
||||
pass
|
||||
|
||||
ret = cli.lxc_status_cmd(None, QuietOut())
|
||||
assert ret == 0
|
||||
captured = capsys.readouterr().out
|
||||
assert "ns-localchat" in captured
|
||||
assert "running" in captured
|
||||
|
||||
def test_deploy_freshness(self, ix, monkeypatch):
|
||||
ct = ix.get_container("x")
|
||||
|
||||
monkeypatch.setattr(
|
||||
"cmdeploy.lxc.incus.RelayContainer.deployed_version",
|
||||
lambda _self: "abc123def456",
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
"cmdeploy.lxc.incus.RelayContainer.deployed_domain",
|
||||
lambda _self: ct.domain,
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
"cmdeploy.lxc.cli.get_version_string",
|
||||
lambda: "abc123def456",
|
||||
)
|
||||
assert "IN-SYNC" in cli._deploy_status(ct, "abc123def456", ix)
|
||||
assert "STALE" in cli._deploy_status(ct, "other_hash_here", ix)
|
||||
|
||||
# Hash matches but local has uncommitted changes
|
||||
monkeypatch.setattr(
|
||||
"cmdeploy.lxc.cli.get_version_string",
|
||||
lambda: "abc123def456\ndiff --git a/foo",
|
||||
)
|
||||
assert "DIRTY" in cli._deploy_status(ct, "abc123def456", ix)
|
||||
|
||||
monkeypatch.setattr(
|
||||
"cmdeploy.lxc.incus.RelayContainer.deployed_version",
|
||||
lambda _self: None,
|
||||
)
|
||||
assert "NOT DEPLOYED" in cli._deploy_status(ct, "abc123", ix)
|
||||
@@ -1,120 +0,0 @@
|
||||
import sys
|
||||
|
||||
from cmdeploy.util import Out, collapse, get_git_hash, get_version_string, shell
|
||||
|
||||
|
||||
class TestOut:
|
||||
def test_prefix_default(self, capsys):
|
||||
out = Out()
|
||||
out.print("hello")
|
||||
assert capsys.readouterr().out == "hello\n"
|
||||
|
||||
def test_prefix_custom(self, capsys):
|
||||
out = Out(prefix=">> ")
|
||||
out.print("hello")
|
||||
assert capsys.readouterr().out == ">> hello\n"
|
||||
|
||||
def test_prefix_print_file(self):
|
||||
import io
|
||||
|
||||
buf = io.StringIO()
|
||||
out = Out(prefix=":: ")
|
||||
out.print("msg", file=buf)
|
||||
assert ":: msg" in buf.getvalue()
|
||||
|
||||
def test_new_prefixed_out(self, capsys):
|
||||
parent = Out(prefix="A")
|
||||
child = parent.new_prefixed_out("B")
|
||||
child.print("x")
|
||||
assert capsys.readouterr().out == "ABx\n"
|
||||
# shares section_timings
|
||||
assert child.section_timings is parent.section_timings
|
||||
|
||||
def test_section_no_auto_indent(self, capsys):
|
||||
out = Out(prefix="")
|
||||
with out.section("test"):
|
||||
out.print("inside")
|
||||
captured = capsys.readouterr().out
|
||||
# "inside" should NOT be indented by section()
|
||||
lines = captured.strip().splitlines()
|
||||
inside_line = [l for l in lines if "inside" in l][0]
|
||||
assert inside_line == "inside"
|
||||
|
||||
def test_section_records_timing(self):
|
||||
out = Out()
|
||||
with out.section("s1"):
|
||||
pass
|
||||
assert len(out.section_timings) == 1
|
||||
assert out.section_timings[0][0] == "s1"
|
||||
|
||||
def test_shell_failure_shows_output(self):
|
||||
"""When a shell command fails, its output and exit code are shown."""
|
||||
import subprocess
|
||||
|
||||
result = subprocess.run(
|
||||
[
|
||||
sys.executable,
|
||||
"-c",
|
||||
"from cmdeploy.util import Out; Out(prefix='').shell("
|
||||
"\"echo 'boom on stderr' >&2; exit 42\")",
|
||||
],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
check=False,
|
||||
)
|
||||
# the command's stderr is merged into stdout by Popen
|
||||
assert "boom on stderr" in result.stdout
|
||||
# Out.red() prints the failure notice to stderr
|
||||
assert "exit code 42" in result.stderr
|
||||
|
||||
|
||||
def test_collapse():
|
||||
text = """
|
||||
line 1
|
||||
line 2
|
||||
"""
|
||||
assert collapse(text) == "line 1 line 2"
|
||||
assert collapse(" single line ") == "single line"
|
||||
|
||||
|
||||
def test_git_helpers_no_git(tmp_path):
|
||||
# Not a git repo
|
||||
assert get_git_hash(root=tmp_path) is None
|
||||
assert get_version_string(root=tmp_path) == "unknown"
|
||||
|
||||
|
||||
def test_git_helpers_empty_repo(tmp_path):
|
||||
shell("git init", cwd=tmp_path, check=True)
|
||||
# No commits yet
|
||||
assert get_git_hash(root=tmp_path) is None
|
||||
assert get_version_string(root=tmp_path) == "unknown"
|
||||
|
||||
|
||||
def test_git_helpers_with_commits_and_diffs(tmp_path):
|
||||
shell("git init", cwd=tmp_path, check=True)
|
||||
shell("git config user.email you@example.com", cwd=tmp_path, check=True)
|
||||
shell("git config user.name 'Your Name'", cwd=tmp_path, check=True)
|
||||
|
||||
# First commit
|
||||
path = tmp_path / "file.txt"
|
||||
path.write_text("content")
|
||||
shell("git add file.txt", cwd=tmp_path, check=True)
|
||||
shell("git commit -m initial", cwd=tmp_path, check=True)
|
||||
|
||||
git_hash = get_git_hash(root=tmp_path)
|
||||
assert len(git_hash) >= 7 # usually 40, but git is git
|
||||
assert get_version_string(root=tmp_path) == git_hash
|
||||
|
||||
# Create a diff
|
||||
path.write_text("new content")
|
||||
v = get_version_string(root=tmp_path)
|
||||
assert v.startswith(git_hash + "\n")
|
||||
assert "new content" in v
|
||||
assert not v.endswith("\n")
|
||||
|
||||
# Commit again -> no diff
|
||||
shell("git add file.txt", cwd=tmp_path, check=True)
|
||||
shell("git commit -m second", cwd=tmp_path, check=True)
|
||||
new_hash = get_git_hash(root=tmp_path)
|
||||
assert new_hash != git_hash
|
||||
assert get_version_string(root=tmp_path) == new_hash
|
||||
@@ -1,169 +0,0 @@
|
||||
"""Shared utility functions for cmdeploy."""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import textwrap
|
||||
import time
|
||||
from contextlib import contextmanager
|
||||
from pathlib import Path
|
||||
|
||||
from termcolor import colored
|
||||
|
||||
|
||||
class Out:
|
||||
"""Convenience output printer providing coloring and section formatting."""
|
||||
|
||||
def __init__(self, prefix="", verbosity=0):
|
||||
self.section_timings = []
|
||||
self.prefix = prefix
|
||||
self.sepchar = "\u2501"
|
||||
self.verbosity = verbosity
|
||||
env_width = os.environ.get("_CMDEPLOY_WIDTH")
|
||||
if env_width:
|
||||
self.section_width = int(env_width)
|
||||
else:
|
||||
self.section_width = shutil.get_terminal_size((80, 24)).columns
|
||||
|
||||
def new_prefixed_out(self, newprefix=" "):
|
||||
"""Return a new Out with an extended prefix,
|
||||
sharing section_timings with the parent.
|
||||
"""
|
||||
out = Out(
|
||||
prefix=self.prefix + newprefix,
|
||||
verbosity=self.verbosity,
|
||||
)
|
||||
out.section_timings = self.section_timings
|
||||
return out
|
||||
|
||||
def red(self, msg, file=sys.stderr):
|
||||
print(colored(self.prefix + msg, "red"), file=file, flush=True)
|
||||
|
||||
def green(self, msg, file=sys.stderr):
|
||||
print(colored(self.prefix + msg, "green"), file=file, flush=True)
|
||||
|
||||
def print(self, msg="", **kwargs):
|
||||
"""Print to stdout with automatic flush."""
|
||||
if msg:
|
||||
msg = self.prefix + msg
|
||||
print(msg, flush=True, **kwargs)
|
||||
|
||||
def _format_header(self, title):
|
||||
"""Return a formatted section header string."""
|
||||
width = self.section_width - len(self.prefix)
|
||||
bar = self.sepchar * (width - len(title) - 5)
|
||||
return f"{self.sepchar * 3} {title} {bar}"
|
||||
|
||||
@contextmanager
|
||||
def section(self, title):
|
||||
"""Context manager that prints a section header and records elapsed time."""
|
||||
self.green(self._format_header(title))
|
||||
t0 = time.time()
|
||||
yield
|
||||
elapsed = time.time() - t0
|
||||
self.section_timings.append((title, elapsed))
|
||||
|
||||
def section_line(self, title):
|
||||
"""Print a section header without timing."""
|
||||
self.green(self._format_header(title))
|
||||
|
||||
def shell(self, cmd, quiet=False, **kwargs):
|
||||
"""Print *cmd*, run it, and re-print its output with the current prefix.
|
||||
|
||||
*cmd* is passed through :func:`collapse`, so callers
|
||||
can use triple-quoted f-strings freely.
|
||||
Stdout and stderr are merged, read line-by-line,
|
||||
and each line is printed with ``self.prefix`` prepended.
|
||||
When the command exits non-zero, a red error line is printed.
|
||||
"""
|
||||
cmd = collapse(cmd)
|
||||
if not quiet:
|
||||
self.print(f"$ {cmd}")
|
||||
indent = self.prefix + " "
|
||||
env = kwargs.pop("env", None)
|
||||
if env is None:
|
||||
env = os.environ.copy()
|
||||
env["_CMDEPLOY_WIDTH"] = str(self.section_width - len(indent))
|
||||
proc = subprocess.Popen(
|
||||
cmd,
|
||||
shell=True,
|
||||
text=True,
|
||||
stdin=subprocess.DEVNULL,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
env=env,
|
||||
**kwargs,
|
||||
)
|
||||
for line in proc.stdout:
|
||||
sys.stdout.write(indent + line)
|
||||
sys.stdout.flush()
|
||||
ret = proc.wait()
|
||||
if ret:
|
||||
self.red(f"command failed with exit code {ret}: {cmd}")
|
||||
return ret
|
||||
|
||||
|
||||
def _project_root():
|
||||
"""Return the project root directory."""
|
||||
return Path(__file__).resolve().parent.parent.parent.parent
|
||||
|
||||
|
||||
def collapse(text):
|
||||
"""Dedent, join lines, and strip a (triple-quoted) string.
|
||||
|
||||
Handy for writing shell commands across multiple lines::
|
||||
|
||||
cmd = collapse(f\"""
|
||||
cmdeploy run
|
||||
--config {ct.ini}
|
||||
--ssh-host {ct.domain}
|
||||
\""")
|
||||
"""
|
||||
return textwrap.dedent(text).replace("\n", " ").strip()
|
||||
|
||||
|
||||
def shell(cmd, check=False, **kwargs):
|
||||
"""Run a shell command string with sensible defaults.
|
||||
|
||||
*cmd* is passed through :func:`collapse` first, so callers
|
||||
can use triple-quoted f-strings freely.
|
||||
Captures stdout/stderr by default; pass ``capture_output=False``
|
||||
to stream output to the terminal instead.
|
||||
"""
|
||||
if "capture_output" not in kwargs and "stdout" not in kwargs:
|
||||
kwargs["capture_output"] = True
|
||||
kwargs.setdefault("stdin", subprocess.DEVNULL)
|
||||
return subprocess.run(collapse(cmd), shell=True, text=True, check=check, **kwargs)
|
||||
|
||||
|
||||
def get_git_hash(root=None):
|
||||
"""Return the local HEAD commit hash, or None."""
|
||||
if root is None:
|
||||
root = _project_root()
|
||||
result = shell(
|
||||
"git rev-parse HEAD",
|
||||
cwd=str(root),
|
||||
)
|
||||
if result.returncode == 0:
|
||||
return result.stdout.strip()
|
||||
return None
|
||||
|
||||
|
||||
def get_version_string(root=None):
|
||||
"""Return ``git_hash\\ngit_diff`` for the local working tree.
|
||||
|
||||
Used by :class:`~cmdeploy.deployers.GithashDeployer` to write
|
||||
``/etc/chatmail-version`` and by ``lxc-status`` to compare
|
||||
the deployed state against the local checkout.
|
||||
"""
|
||||
if root is None:
|
||||
root = _project_root()
|
||||
git_hash = get_git_hash(root=root) or "unknown"
|
||||
try:
|
||||
git_diff = shell("git diff", cwd=str(root)).stdout.strip()
|
||||
except Exception:
|
||||
git_diff = ""
|
||||
if git_diff:
|
||||
return f"{git_hash}\n{git_diff}"
|
||||
return git_hash
|
||||
@@ -1,5 +1,4 @@
|
||||
import hashlib
|
||||
import importlib.resources
|
||||
import re
|
||||
import time
|
||||
import traceback
|
||||
@@ -37,7 +36,7 @@ def prepare_template(source):
|
||||
|
||||
|
||||
def get_paths(config) -> (Path, Path, Path):
|
||||
reporoot = importlib.resources.files(__package__).joinpath("../../../").resolve()
|
||||
reporoot = (Path(__file__).resolve() / "../../../../").resolve()
|
||||
www_path = Path(config.www_folder)
|
||||
# if www_folder was not set, use default directory
|
||||
if config.www_folder == "":
|
||||
@@ -133,8 +132,7 @@ def find_merge_conflict(src_dir) -> Path:
|
||||
|
||||
|
||||
def main():
|
||||
path = importlib.resources.files(__package__)
|
||||
reporoot = path.joinpath("../../../").resolve()
|
||||
reporoot = (Path(__file__).resolve() / "../../../../").resolve()
|
||||
inipath = reporoot.joinpath("chatmail.ini")
|
||||
config = read_config(inipath)
|
||||
config.webdev = True
|
||||
|
||||
@@ -4,12 +4,14 @@
|
||||
|
||||
You can use the `make` command and `make html` to build web pages.
|
||||
|
||||
You need a Python environment where the following install was excuted:
|
||||
|
||||
pip install furo sphinx-autobuild
|
||||
You need a Python environment with `sphinx` and other
|
||||
dependencies, you can create it by running `scripts/initenv.sh`
|
||||
from the repository root.
|
||||
|
||||
To develop/change documentation, you can then do:
|
||||
|
||||
. venv/bin/activate
|
||||
cd doc
|
||||
make auto
|
||||
|
||||
A page will open at https://127.0.0.1:8000/ serving the docs and it will
|
||||
|
||||
@@ -15,7 +15,7 @@ author = 'chatmail collective'
|
||||
|
||||
extensions = [
|
||||
#'sphinx.ext.autodoc',
|
||||
#'sphinx.ext.viewcode',
|
||||
#'sphinx.ext.viewdoc',
|
||||
'sphinxcontrib.mermaid',
|
||||
]
|
||||
|
||||
|
||||
@@ -14,8 +14,6 @@ Minimal requirements and prerequisites
|
||||
|
||||
You will need the following:
|
||||
|
||||
- Control over a domain through a DNS provider of your choice.
|
||||
|
||||
- A Debian 12 **deployment server** with reachable SMTP/SUBMISSIONS/IMAPS/HTTPS ports.
|
||||
IPv6 is encouraged if available. Chatmail relay servers only require
|
||||
1GB RAM, one CPU, and perhaps 10GB storage for a few thousand active
|
||||
@@ -28,6 +26,11 @@ You will need the following:
|
||||
(An ed25519 private key is required due to an `upstream bug in
|
||||
paramiko <https://github.com/paramiko/paramiko/issues/2191>`_)
|
||||
|
||||
- Control over a domain through a DNS provider of your choice
|
||||
(there is experimental support for :ref:`DNS-less relays <iponly>`).
|
||||
|
||||
|
||||
.. _setup:
|
||||
|
||||
Setup with ``scripts/cmdeploy``
|
||||
-------------------------------------
|
||||
|
||||
@@ -16,6 +16,7 @@ Contributions and feedback welcome through the https://github.com/chatmail/relay
|
||||
proxy
|
||||
migrate
|
||||
overview
|
||||
lxc
|
||||
reverse_dns
|
||||
related
|
||||
faq
|
||||
iponly
|
||||
|
||||
29
doc/source/iponly.rst
Normal file
29
doc/source/iponly.rst
Normal file
@@ -0,0 +1,29 @@
|
||||
.. _iponly:
|
||||
|
||||
Hosting without DNS records
|
||||
===========================
|
||||
|
||||
.. note::
|
||||
|
||||
This option is experimental and might change without notice.
|
||||
|
||||
In case you don't have a domain,
|
||||
for example in a local network,
|
||||
you can run a chatmail relay with only an IPv4 address as well.
|
||||
|
||||
To deploy a relay without a domain,
|
||||
run ``cmdeploy init`` with only the IPv4 address
|
||||
during the :ref:`installation steps <setup>`,
|
||||
for example ``cmdeploy init 13.12.23.42``.
|
||||
|
||||
Drawbacks
|
||||
---------
|
||||
|
||||
- your transport encryption will only use self-signed TLS certificates,
|
||||
which are vulnerable against MITM attacks.
|
||||
the chatmail core's end-to-end encryption should suffice in most scenarios though.
|
||||
|
||||
- your messages will not be DKIM-signed;
|
||||
experimentally, most chatmail relays accept non-DKIM-signed messages from IPv4-only relays,
|
||||
but some relays might not accept messages from yours.
|
||||
|
||||
@@ -1,275 +0,0 @@
|
||||
Local testing with LXC/Incus
|
||||
============================
|
||||
|
||||
The ``cmdeploy`` tool includes support for running
|
||||
chatmail relays inside local
|
||||
`Incus <https://linuxcontainers.org/incus/>`_ LXC containers.
|
||||
This is meant for development, testing, and CI
|
||||
without requiring a remote server.
|
||||
LXC system containers are lightweight virtual machines
|
||||
that share the host's kernel but run their own init system,
|
||||
package manager, and network stack,
|
||||
so the cmdeploy deployment scripts work pretty much
|
||||
as they would on a real Debian server or cloud VPS.
|
||||
|
||||
Prerequisites
|
||||
-------------
|
||||
|
||||
- Around 4-5 GiB free disk space
|
||||
- `systemd-networkd` for the automagic hostname resolution
|
||||
- No other service occupying Port 53
|
||||
|
||||
Install `Incus <https://linuxcontainers.org/incus/>`_
|
||||
(LXC container manager).
|
||||
See the `official installation guide
|
||||
<https://linuxcontainers.org/incus/docs/main/installing/>`_
|
||||
for full details.
|
||||
|
||||
After installing incus, initialise and grant yourself access::
|
||||
|
||||
sudo incus admin init --minimal
|
||||
sudo usermod -aG incus-admin $USER
|
||||
|
||||
|
||||
.. caution::
|
||||
|
||||
Adding yourself to ``incus-admin`` grants effective root access
|
||||
to the host: any member can mount host directories into a container
|
||||
and manipulate them as root.
|
||||
This is fine for local testing of your own relay branches,
|
||||
but do **not** use it for production setups
|
||||
or for testing untrusted relay branches from others.
|
||||
|
||||
.. warning::
|
||||
|
||||
You **must now log out and back in** (or run ``newgrp incus-admin``)
|
||||
after adding yourself to the group.
|
||||
Without this, all ``cmdeploy lxc-*`` commands
|
||||
will fail with permission errors.
|
||||
|
||||
Verify the installation works by running ``incus list``,
|
||||
which should print an empty table without errors.
|
||||
|
||||
|
||||
Quick start
|
||||
-----------
|
||||
|
||||
::
|
||||
|
||||
cd relay
|
||||
scripts/initenv.sh # bootstrap venv
|
||||
source venv/bin/activate # activate venv
|
||||
cmdeploy lxc-test # create containers, deploy, test
|
||||
|
||||
The ``lxc-test`` command provides an automated way
|
||||
to run the full deployment and test pipeline.
|
||||
It executes several ``cmdeploy`` subcommands in sequential steps.
|
||||
If a step fails, you can copy-paste the printed command
|
||||
and run it manually to debug.
|
||||
No host DNS delegation or ``~/.ssh/config`` changes are needed
|
||||
because ``lxc-test`` passes the required SSH and DNS options directly.
|
||||
|
||||
|
||||
CLI reference
|
||||
--------------
|
||||
|
||||
``lxc-start [--ipv4-only] [--run] [NAME ...]``
|
||||
Create and start containers.
|
||||
Without arguments, creates ``test0-localchat`` and ``ns-localchat`` (DNS).
|
||||
Pass one or more ``NAME`` arguments to create user relay containers instead
|
||||
(e.g. ``cmdeploy lxc-start myrelay``).
|
||||
Use ``--ipv4-only`` to set ``disable_ipv6 = True`` in the generated ``chatmail.ini``,
|
||||
producing an IPv4-only relay.
|
||||
Use ``--run`` to automatically run ``cmdeploy run`` on each container after starting it.
|
||||
Generates ``lxconfigs/ssh-config``.
|
||||
It reuses existing containers and resets DNS zones to minimal records.
|
||||
|
||||
``lxc-stop [--destroy] [--destroy-all] [NAME ...]``
|
||||
Stop relay containers.
|
||||
Without arguments, stops ``test0-localchat`` and ``test1-localchat``.
|
||||
Pass ``NAME`` to stop specific containers.
|
||||
Use ``--destroy`` to also delete the containers and their config files.
|
||||
Use ``--destroy-all`` to additionally destroy
|
||||
the ``ns-localchat`` DNS container **and** remove
|
||||
the cached ``localchat-base`` and ``localchat-relay``
|
||||
images, giving a fully clean slate for the next ``lxc-test``.
|
||||
User containers are **never** destroyed unless named explicitly.
|
||||
|
||||
``lxc-test [--one]``
|
||||
By default creates, deploys, and tests both ``test0`` and ``test1``
|
||||
for dual-domain federation testing (sets ``CHATMAIL_DOMAIN2=_test1.localchat``).
|
||||
test0 runs dual-stack (IPv4 + IPv6) while test1 runs IPv4-only (``disable_ipv6 = True``).
|
||||
Pass ``--one`` to only deploy and test against ``test0``
|
||||
(skips ``test1``, does not set ``CHATMAIL_DOMAIN2``).
|
||||
|
||||
``lxc-status``
|
||||
Show live status of all LXC containers (including the DNS container),
|
||||
deploy freshness (comparing ``/etc/chatmail-version``
|
||||
against local ``git rev-parse HEAD`` and ``git diff``),
|
||||
SSH config inclusion, and host DNS forwarding for ``.localchat``.
|
||||
Reports **IN-SYNC**, **DIRTY** (hash matches but uncommitted changes exist),
|
||||
**STALE** (different commit), or **NOT DEPLOYED**.
|
||||
|
||||
|
||||
Container types
|
||||
-----------------
|
||||
|
||||
**Test relay containers** (``test0-localchat``, ``test1-localchat``)
|
||||
Created automatically by ``lxc-test``.
|
||||
**test0** has IPv4 and IPv6 configured,
|
||||
**test1** is IPv4-only (``disable_ipv6 = True``).
|
||||
|
||||
**User relay containers** (``<name>-localchat``)
|
||||
Created by ``cmdeploy lxc-start <name>``
|
||||
where ``<name>`` does not start with ``test``.
|
||||
These are personal development instances,
|
||||
never touched by ``lxc-stop --destroy`` unless named explicitly.
|
||||
|
||||
**DNS container** (``ns-localchat``)
|
||||
Singleton container running PowerDNS.
|
||||
Created automatically when any relay is started.
|
||||
|
||||
|
||||
.. _lxc-ssh-config:
|
||||
|
||||
SSH configuration
|
||||
-----------------
|
||||
|
||||
``cmdeploy lxc-start`` generates ``lxconfigs/ssh-config``,
|
||||
a standard OpenSSH config file mapping every container name,
|
||||
its domain, and a short alias to the container's IP address::
|
||||
|
||||
Host test0-localchat _test0.localchat _test0
|
||||
Hostname 10.204.0.42
|
||||
User root
|
||||
IdentityFile /path/to/relay/lxconfigs/id_localchat
|
||||
IdentitiesOnly yes
|
||||
StrictHostKeyChecking accept-new
|
||||
UserKnownHostsFile /dev/null
|
||||
LogLevel ERROR
|
||||
|
||||
All ``cmdeploy`` commands (``run``, ``dns``, ``status``, ``test``)
|
||||
accept ``--ssh-config lxconfigs/ssh-config`` to use this file.
|
||||
``lxc-test`` passes it automatically.
|
||||
|
||||
**Using containers from the host shell:**
|
||||
|
||||
To make ``ssh _test0`` work from any terminal, add one line to ``~/.ssh/config``::
|
||||
|
||||
Include /absolute/path/to/relay/lxconfigs/ssh-config
|
||||
|
||||
|
||||
.. _lxc-dns-setup:
|
||||
.. _localchat-tld:
|
||||
|
||||
``.localchat`` DNS and name resolution
|
||||
---------------------------------------
|
||||
|
||||
All LXC-managed chatmail domains use the ``.localchat`` pseudo-TLD
|
||||
(e.g. ``_test0.localchat``, ``_test1.localchat``),
|
||||
a non-delegated suffix that exists only within the local PowerDNS infrastructure.
|
||||
A dedicated DNS container (``ns-localchat``)
|
||||
is created so that local test relays interact
|
||||
with DNS similar to a regular public Internet setup.
|
||||
On first start, ``cmdeploy lxc-start`` creates this container
|
||||
running two `PowerDNS <https://www.powerdns.com/>`_ services:
|
||||
|
||||
* **pdns-server** (authoritative) serves ``.localchat``
|
||||
zones from a local SQLite database.
|
||||
|
||||
* **pdns-recursor** (recursive) listens on the Incus
|
||||
bridge so all containers can use it.
|
||||
Forwards ``.localchat`` queries to the local
|
||||
authoritative server and resolves everything else recursively.
|
||||
|
||||
After the DNS container is up, ``lxc-start`` configures the Incus bridge
|
||||
to advertise its IP via DHCP and disables Incus's own DNS.
|
||||
DNS records are then created in two phases matching the "cmdeploy run" deployment flow:
|
||||
|
||||
1. **``lxc-start``** resets each relay zone to
|
||||
**SOA, NS, and A** records (plus **AAAA** for dual-stack containers).
|
||||
If host DNS resolution is configured, users can
|
||||
afterwards run ``cmdeploy run --config lxconfigs/chatmail-test0.ini
|
||||
--ssh-config lxconfigs/ssh-config --ssh-host _test0.localchat``.
|
||||
LXC subcommands do not depend on host DNS resolution
|
||||
and resolve addresses via ``lxconfigs/ssh-config``.
|
||||
|
||||
2. **``cmdeploy dns --zonefile``** generates a standard
|
||||
BIND-format zone file (MX, TXT/SPF, TXT/DMARC,
|
||||
TXT/MTA-STS, SRV, CNAME, DKIM) and loads it
|
||||
into PowerDNS.
|
||||
|
||||
This two-phase approach prevents premature configuration of mail records
|
||||
before the relay is actually deployed and running.
|
||||
Once ``cmdeploy run`` deploys `Unbound <https://nlnetlabs.nl/projects/unbound/>`_
|
||||
inside a relay container, Unbound has a configuration plugin snippet
|
||||
that forwards all ``.localchat`` queries to the PowerDNS recursor,
|
||||
and lets all other queries go through normal recursive resolution.
|
||||
|
||||
|
||||
State outside the repository
|
||||
-----------------------------
|
||||
|
||||
All configuration generated by lxc subcommands lives in ``lxconfigs/``
|
||||
(git-ignored), including the SSH key pair (``id_localchat``),
|
||||
per-container ``chatmail-*.ini`` files, zone files, and ``ssh-config``.
|
||||
|
||||
The only state *outside* the repository is the Incus containers and images themselves
|
||||
(managed via the ``incus`` CLI, labelled with ``user.localchat-managed=true``).
|
||||
The Incus image store retains the following snapshot images:
|
||||
|
||||
* ``localchat-base``: Debian 12 with openssh-server and Python (built on first run)
|
||||
|
||||
* ``localchat-relay``: fully deployed relay snapshot,
|
||||
cached after the first successful ``cmdeploy run``.
|
||||
Subsequent relay containers launch from this image
|
||||
so the deploy step is mostly no-ops (roughly 3× faster than a fresh deploy).
|
||||
|
||||
|
||||
.. _lxc-tls:
|
||||
|
||||
TLS handling and underscore domains
|
||||
------------------------------------
|
||||
|
||||
Container domains start with ``_`` (e.g. ``_test0.localchat``).
|
||||
As described in :doc:`getting_started` ("Running a relay with self-signed certificates"),
|
||||
underscore domains automatically use self-signed TLS
|
||||
and ``smtp_tls_security_level = encrypt``.
|
||||
This permits cross-relay federation between LXC containers
|
||||
without any external certificate authority.
|
||||
Delta Chat clients connecting to these relays
|
||||
must be configured with
|
||||
``certificateChecks = acceptInvalidCertificates``
|
||||
(the test fixtures handle this automatically).
|
||||
`PR #7926 on chatmail-core <https://github.com/chatmail/core/pull/7926>`_
|
||||
is meant to make this special setting unnecessary for chatmail clients
|
||||
that are connecting to underscore domains.
|
||||
|
||||
|
||||
Known limitations
|
||||
------------------
|
||||
|
||||
The LXC environment differs from a production
|
||||
deployment in several ways:
|
||||
|
||||
**No ACME / Let's Encrypt**:
|
||||
Self-signed TLS only (see :ref:`lxc-tls`);
|
||||
ACME code paths are never exercised locally.
|
||||
|
||||
**No inbound connections from the internet**:
|
||||
Containers sit on a private Incus bridge and are not port-forwarded.
|
||||
Only the host and other containers on the same bridge can reach them.
|
||||
|
||||
**Local federation only**:
|
||||
Cross-relay mail delivery (e.g. test0 → test1) works between containers on the same host,
|
||||
but these relays are invisible to any external mail server.
|
||||
|
||||
**DNS is local only**:
|
||||
The ``.localchat`` pseudo-TLD is not resolvable from the wider internet
|
||||
(see :ref:`lxc-dns-setup`).
|
||||
|
||||
**IPv6 is ULA-only**:
|
||||
Containers receive IPv6 addresses from the ``fd42:...`` ULA range on the Incus bridge.
|
||||
These are not globally routable, but are sufficient for testing IPv6 service binding
|
||||
(Postfix, Dovecot, Nginx) and DNS AAAA records inside the local environment.
|
||||
test1 runs with ``disable_ipv6 = True`` to exercise the IPv4-only deployment path.
|
||||
@@ -102,8 +102,12 @@ short overview of ``chatmaild`` services:
|
||||
Apple/Google/Huawei.
|
||||
|
||||
- `chatmail-expire <https://github.com/chatmail/relay/blob/main/chatmaild/src/chatmaild/expire.py>`_
|
||||
deletes users if they have not logged in for a longer while.
|
||||
The timeframe can be configured in ``chatmail.ini``.
|
||||
deletes old messages, large messages, and entire mailboxes
|
||||
of users who have not logged in for longer than
|
||||
``delete_inactive_users_after`` days.
|
||||
|
||||
- ``chatmail-quota-expire`` is called by Dovecot's ``quota_warning`` mechanism
|
||||
and will automatically remove oldest messages to keep mailboxes well under ``max_mailbox_size``.
|
||||
|
||||
- `lastlogin <https://github.com/chatmail/relay/blob/main/chatmaild/src/chatmaild/lastlogin.py>`_
|
||||
is contacted by Dovecot when a user logs in and stores the date of
|
||||
@@ -149,6 +153,7 @@ Chatmail relay dependency diagram
|
||||
autoconfig.xml --- dovecot;
|
||||
postfix --- |10080|filtermail-outgoing;
|
||||
postfix --- |10081|filtermail-incoming;
|
||||
postfix --- |10083|filtermail-transport;
|
||||
filtermail-outgoing --- |10025 reinject|postfix;
|
||||
filtermail-incoming --- |10026 reinject|postfix;
|
||||
dovecot --- |doveauth.socket|doveauth;
|
||||
@@ -156,6 +161,8 @@ Chatmail relay dependency diagram
|
||||
/home/vmail/.../user"];
|
||||
dovecot --- |lastlogin.socket|lastlogin;
|
||||
dovecot --- chatmail-metadata;
|
||||
dovecot --- |quota-warning|chatmail-quota-expire;
|
||||
chatmail-quota-expire --- maildir;
|
||||
lastlogin --- maildir;
|
||||
doveauth --- maildir;
|
||||
chatmail-expire-daily --- maildir;
|
||||
@@ -289,9 +296,7 @@ ensured by ``filtermail`` proxy.
|
||||
TLS requirements
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
Postfix is configured to require valid TLS by setting
|
||||
`smtp_tls_security_level <https://www.postfix.org/postconf.5.html#smtp_tls_security_level>`_
|
||||
to ``verify``.
|
||||
Filtermail (used for delivery) requires valid TLS.
|
||||
|
||||
You can test it by resolving ``MX`` records of your relay domain and
|
||||
then connecting to MX relays (e.g ``mx.example.org``) with
|
||||
|
||||
64
doc/source/reverse_dns.rst
Normal file
64
doc/source/reverse_dns.rst
Normal file
@@ -0,0 +1,64 @@
|
||||
Configuring reverse DNS
|
||||
=======================
|
||||
|
||||
Some email servers reject emails
|
||||
if they don't pass `FCrDNS`_ check, also known as `iprev`_ check.
|
||||
|
||||
.. _FCrDNS: https://en.wikipedia.org/wiki/Forward-confirmed_reverse_DNS
|
||||
.. _iprev: https://datatracker.ietf.org/doc/html/rfc8601#section-3
|
||||
|
||||
Passing the check requires that the IP address that email is sent from
|
||||
should have a ``PTR`` record pointing to the domain name of the server,
|
||||
and the domain name should have an ``A/AAAA`` record
|
||||
pointing to the IP address.
|
||||
|
||||
Modern email relies on DKIM and SPF for authentication,
|
||||
while iprev check exists for
|
||||
`historical reasons <https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-reverse-mapping-considerations-06#section-2.1>`_.
|
||||
Chatmail relays don't resolve ``PTR`` records,
|
||||
so you can ignore this section if configuring ``PTR`` records
|
||||
is difficult and federation with legacy email servers that don't accept
|
||||
a valid DKIM signature for authentication is not important.
|
||||
|
||||
Multi-homed setups
|
||||
------------------
|
||||
|
||||
If you have a server with multiple IP addresses,
|
||||
also known as multi-homed setup,
|
||||
and don't publish all IP addresses in DNS,
|
||||
you need to make sure you are using
|
||||
the published address when making outgoing connections.
|
||||
|
||||
For example, your server may have a static IP
|
||||
address, and a so-called Floating IP or Virtual IP
|
||||
that can be moved between servers in case of
|
||||
migration or for failover.
|
||||
By using Floating IP you can avoid downtime
|
||||
and keep the IP address reputation
|
||||
for destinations that rely on IP reputation and IP blocklists.
|
||||
In this case you will only publish
|
||||
the Floating IP to DNS and only use the static IP
|
||||
to SSH into the server.
|
||||
|
||||
If you have such a setup, make sure that
|
||||
you not only set ``PTR`` records for the Floating IP,
|
||||
but make outgoing connections using the Floating IP.
|
||||
Otherwise the reverse DNS check will succeed,
|
||||
but the forward check making sure your domain name points
|
||||
to the IP address will fail.
|
||||
Such a setup is indistinguishable from someone
|
||||
setting an IP address ``PTR`` record to a domain they don't own,
|
||||
and as a result the check does not succeed.
|
||||
|
||||
On Linux you can configure source IP address with ``ip route`` command,
|
||||
for example:
|
||||
::
|
||||
|
||||
ip route change default via <default-gateway> dev eth0 src <source-address>
|
||||
|
||||
Make sure to persist the change after verifying it is working.
|
||||
You can check what your outgoing IP address is
|
||||
with ``curl icanhazip.com``.
|
||||
Check both the IPv4 and IPv6 addresses.
|
||||
For IPv4 address use ``curl ipv4.icanhazip.com`` or ``curl -4 icanhazip.com``
|
||||
and similarly for IPv6 if you have it.
|
||||
Reference in New Issue
Block a user