Compare commits


1 Commit

| Author  | SHA1       | Message                           | Date                       |
|---------|------------|-----------------------------------|----------------------------|
| link2xt | 0f89b52d5b | Add init.sh and deploy.sh scripts | 2023-10-13 14:11:13 +00:00 |
190 changed files with 788 additions and 14303 deletions


@@ -1,4 +0,0 @@
Please refer to
[Delta Chat community standards and practices](https://delta.chat/en/community-standards)
which also apply for all chatmail developments.


@@ -1,33 +0,0 @@
---
name: Bug report
about: Report something that isn't working.
title: ''
assignees: ''
---
<!--
Please fill out as much of this form as you can (leaving out stuff that is not applicable is ok).
-->
- Server OS (Operating System) - preferably Debian 12:
- On which OS you run cmdeploy:
- chatmail/relay version: `git rev-parse HEAD`
## Expected behavior
*What did you try to achieve?*
## Actual behavior
*What happened instead?*
### Steps to reproduce the problem:
1.
2.
### Screenshots
### Logs


@@ -1 +0,0 @@
blank_issues_enabled: true


@@ -1,116 +0,0 @@
name: CI
on:
pull_request:
push:
jobs:
tox:
name: isolated chatmaild tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
# Checkout pull request HEAD commit instead of merge commit
# Otherwise `test_deployed_state` will be unhappy.
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: download filtermail
run: curl -L https://github.com/chatmail/filtermail/releases/download/v0.5.2/filtermail-x86_64 -o /usr/local/bin/filtermail && chmod +x /usr/local/bin/filtermail
- name: run chatmaild tests
working-directory: chatmaild
run: pipx run tox
scripts:
name: deploy-chatmail tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: initenv
run: scripts/initenv.sh
- name: append venv/bin to PATH
run: echo venv/bin >>$GITHUB_PATH
- name: run formatting checks
run: cmdeploy fmt -v
- name: run deploy-chatmail offline tests
run: pytest --pyargs cmdeploy
lxc-test:
name: LXC deploy and test
runs-on: ubuntu-24.04
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: install incus
run: |
# zabbly is the official incus community packages source
curl -fsSL https://pkgs.zabbly.com/key.asc \
| sudo gpg --dearmor -o /etc/apt/keyrings/zabbly.gpg
sudo sh -c 'cat <<EOF > /etc/apt/sources.list.d/zabbly-incus-stable.sources
Enabled: yes
Types: deb
URIs: https://pkgs.zabbly.com/incus/stable
Suites: $(. /etc/os-release && echo ${VERSION_CODENAME})
Components: main
Architectures: $(dpkg --print-architecture)
Signed-By: /etc/apt/keyrings/zabbly.gpg
EOF'
sudo apt-get update
sudo apt-get install -y incus
- name: initialise incus
run: |
sudo systemctl stop docker.socket docker || true
sudo iptables -P FORWARD ACCEPT
sudo sysctl -w fs.inotify.max_user_instances=65535
sudo sysctl -w fs.inotify.max_user_watches=65535
sudo incus admin init --minimal
sudo usermod -aG incus-admin "$USER"
- name: initenv
run: scripts/initenv.sh
- name: append venv/bin to PATH
run: echo venv/bin >>$GITHUB_PATH
- name: restore cached images
id: cache-images
uses: actions/cache@v4
with:
path: |
/tmp/localchat-base.tar.gz
/tmp/localchat-ns.tar.gz
/tmp/localchat-test0.tar.gz
/tmp/localchat-test1.tar.gz
lxconfigs/id_localchat*
key: incus-images-${{ runner.os }}-${{ github.ref_name }}
restore-keys: |
incus-images-${{ runner.os }}-${{ github.ref_name }}-
incus-images-${{ runner.os }}-main-
incus-images-${{ runner.os }}-
- name: import cached images
run: |
for alias in localchat-base localchat-ns localchat-test0 localchat-test1; do
if [ -f /tmp/$alias.tar.gz ]; then
sg incus-admin -c "incus image import /tmp/$alias.tar.gz --alias $alias" || true
fi
done
- name: lxc-test
run: sg incus-admin -c 'cmdeploy lxc-test'
- name: export images for cache
if: always()
run: |
for alias in localchat-base localchat-ns localchat-test0 localchat-test1; do
if ! [ -f /tmp/$alias.tar.gz ]; then
sg incus-admin -c "incus image export $alias /tmp/$alias" || true
fi
done


@@ -1,53 +0,0 @@
name: documentation preview
on:
pull_request:
paths:
- 'doc/**'
- 'scripts/build-docs.sh'
- '.github/workflows/docs-preview.yaml'
jobs:
scripts:
name: build
runs-on: ubuntu-latest
environment:
name: 'staging.chatmail.at/doc/relay/'
url: https://staging.chatmail.at/doc/relay/${{ steps.prepare.outputs.prid }}
steps:
- uses: actions/checkout@v4
- name: initenv
run: scripts/initenv.sh
- name: append venv/bin to PATH
run: echo `pwd`/venv/bin >>$GITHUB_PATH
- name: build documentation
working-directory: doc
run: sphinx-build source build
- name: build documentation second time (for TOC)
working-directory: doc
run: sphinx-build source build
- name: Get Pullrequest ID
id: prepare
run: |
export PULLREQUEST_ID=$(echo "${{ github.ref }}" | cut -d "/" -f3)
echo "prid=$PULLREQUEST_ID" >> $GITHUB_OUTPUT
if [ $(expr length "${{ secrets.USERNAME }}") -gt "1" ]; then echo "uploadtoserver=true" >> $GITHUB_OUTPUT; fi
- run: |
echo "baseurl: /${{ steps.prepare.outputs.prid }}" >> _config.yml
- name: Upload preview
run: |
mkdir -p "$HOME/.ssh"
echo "${{ secrets.CHATMAIL_STAGING_SSHKEY }}" > "$HOME/.ssh/key"
chmod 600 "$HOME/.ssh/key"
rsync -rILvh -e "ssh -i $HOME/.ssh/key -o StrictHostKeyChecking=no" $GITHUB_WORKSPACE/doc/build/ "${{ secrets.USERNAME }}@chatmail.at:/var/www/html/staging.chatmail.at/doc/relay/${{ steps.prepare.outputs.prid }}/"
- name: check links
working-directory: doc
run: sphinx-build --builder linkcheck source build


@@ -1,47 +0,0 @@
name: build and upload documentation
on:
push:
branches:
- main
- 'missytake/docs-ci'
paths:
- 'doc/**'
- 'scripts/build-docs.sh'
- '.github/workflows/docs.yaml'
jobs:
scripts:
name: build
runs-on: ubuntu-latest
environment:
name: 'chatmail.at/doc/relay/'
url: https://chatmail.at/doc/relay/
steps:
- uses: actions/checkout@v4
- name: initenv
run: scripts/initenv.sh
- name: append venv/bin to PATH
run: echo `pwd`/venv/bin >>$GITHUB_PATH
- name: build documentation
working-directory: doc
run: sphinx-build source build
- name: build documentation second time (for TOC)
working-directory: doc
run: sphinx-build source build
- name: check links
working-directory: doc
run: sphinx-build --builder linkcheck source build
- name: upload documentation
run: |
mkdir -p "$HOME/.ssh"
echo "${{ secrets.CHATMAIL_STAGING_SSHKEY }}" > "$HOME/.ssh/key"
chmod 600 "$HOME/.ssh/key"
rsync -rILvh -e "ssh -i $HOME/.ssh/key -o StrictHostKeyChecking=no" $GITHUB_WORKSPACE/doc/build/ "${{ secrets.USERNAME }}@chatmail.at:/var/www/html/chatmail.at/doc/relay/"

.gitignore (vendored), 6 changes

@@ -3,10 +3,6 @@ __pycache__/
*.py[cod]
*$py.class
*.swp
*qr-*.png
chatmail*.ini
lxconfigs/
# C extensions
*.so
@@ -163,5 +159,3 @@ cython_debug/
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
chatmail.zone


@@ -1,519 +0,0 @@
# Changelog for chatmail deployment
## 1.9.0 2025-12-18
### Documentation
- Add RELEASE.md and CONTRIBUTING.md
- README update, mention Chatmail Cookbook project
### Bug Fixes
- Expire messages also from IMAP subfolders
- Use absolute path instead of relative path in message expiration script
- Restart Postfix and Dovecot automatically on failure
- acmetool: Use a fixed name and `reconcile` instead of `want`
### Features
- Report DKIM error code in SMTP response
- Remove development notice from the web pages
### Miscellaneous Tasks
- Update the heading in the CHANGELOG.md
- Setup git-cliff
- Run tests against ci-chatmail.testrun.org instead of nine.testrun.org
- Cleanup remaining echobot code, remove echobot user from deployment and passthrough recipients
## 1.8.0 2025-12-12
- Add imap_compress option to chatmail.ini
([#760](https://github.com/chatmail/relay/pull/760))
- Remove echobot from relays
([#753](https://github.com/chatmail/relay/pull/753))
- Fix `cmdeploy webdev`
([#743](https://github.com/chatmail/relay/pull/743))
- Add robots.txt to exclude all web crawlers
([#732](https://github.com/chatmail/relay/pull/732))
- acmetool: accept new Let's Encrypt ToS: https://letsencrypt.org/documents/LE-SA-v1.6-August-18-2025.pdf
([#729](https://github.com/chatmail/relay/pull/729))
- Organized cmdeploy into install, configure, and activate stages
([#695](https://github.com/chatmail/relay/pull/695))
- docs: move readme.md docs to sphinx documentation rendered at https://chatmail.at/doc/relay
([#711](https://github.com/chatmail/relay/pull/711))
- acmetool: replace cronjob with a systemd timer
([#719](https://github.com/chatmail/relay/pull/719))
- remove xstore@testrun.org from default passthrough recipients
([#722](https://github.com/chatmail/relay/pull/722))
- don't deploy the website if there are merge conflicts in the www folder
([#714](https://github.com/chatmail/relay/pull/714))
- acmetool: use ECDSA keys instead of RSA
([#689](https://github.com/chatmail/relay/pull/689))
- Require TLS 1.2 for outgoing SMTP connections
([#685](https://github.com/chatmail/relay/pull/685), [#730](https://github.com/chatmail/relay/pull/730))
- require STARTTLS for incoming port 25 connections
([#684](https://github.com/chatmail/relay/pull/684), [#730](https://github.com/chatmail/relay/pull/730))
- filtermail: run CPU-intensive handle_DATA in a thread pool executor
([#676](https://github.com/chatmail/relay/pull/676))
- don't use the complicated logging module in filtermail to exclude a potential source of errors.
([#674](https://github.com/chatmail/relay/pull/674))
- Specify nginx.conf to only handle `mail_domain`, www, and mta-sts domains
([#636](https://github.com/chatmail/relay/pull/636))
- Setup TURN server
([#621](https://github.com/chatmail/relay/pull/621))
- cmdeploy: make --ssh-host work with localhost
([#659](https://github.com/chatmail/relay/pull/659))
- Update iroh-relay to 0.35.0
([#650](https://github.com/chatmail/relay/pull/650))
- filtermail: accept mails from Protonmail
([#616](https://github.com/chatmail/relay/pull/616))
- Ignore all RCPT TO: parameters
([#651](https://github.com/chatmail/relay/pull/651))
- Increase opendkim DNS Timeout from 5 to 60 seconds
([#672](https://github.com/chatmail/relay/pull/672))
- Add config parameter for Let's Encrypt ACME email
([#663](https://github.com/chatmail/relay/pull/663))
- Use max username length in newemail.py, not min
([#648](https://github.com/chatmail/relay/pull/648))
- Add startup for `fcgiwrap.service` because sometimes it did not start automatically.
([#657](https://github.com/chatmail/relay/pull/657))
- Add `cmdeploy init --force` command for recreating chatmail.ini
([#656](https://github.com/chatmail/relay/pull/656))
- Increase maxproc for reinjecting ports from 10 to 100
([#646](https://github.com/chatmail/relay/pull/646))
- Allow ports 143 and 993 to be used by `dovecot` process
([#639](https://github.com/chatmail/relay/pull/639))
- Add `--skip-dns-check` argument to `cmdeploy run` command, which disables DNS record checking before installation.
([#661](https://github.com/chatmail/relay/pull/661))
- Rework expiry of message files and mailboxes in Python
to only do a single iteration over sometimes millions of messages
instead of doing "find" commands that iterate 9 times over the messages.
Provide an "fsreport" CLI for more fine grained analysis of message files.
([#637](https://github.com/chatmail/relay/pull/637))
## 1.7.0 2025-09-11
- Make www upload path configurable
([#618](https://github.com/chatmail/relay/pull/618))
- Check whether GCC is installed in initenv.sh
([#608](https://github.com/chatmail/relay/pull/608))
- Expire push notification tokens after 90 days
([#583](https://github.com/chatmail/relay/pull/583))
- Use official `mtail` binary instead of `mtail` package
([#581](https://github.com/chatmail/relay/pull/581))
- dovecot: install from download.delta.chat instead of openSUSE Build Service
([#590](https://github.com/chatmail/relay/pull/590))
- Reconfigure Dovecot imap-login service to high-performance mode
([#578](https://github.com/chatmail/relay/pull/578))
- Set timezone to improve dovecot performance
([#584](https://github.com/chatmail/relay/pull/584))
- Increase nginx connection limits
([#576](https://github.com/chatmail/relay/pull/576))
- If `dns-utils` needs to be installed before cmdeploy run, apt update to make sure it works
([#560](https://github.com/chatmail/relay/pull/560))
- filtermail: respect config message size limit
([#572](https://github.com/chatmail/relay/pull/572))
- Don't deploy if one of the ports used for chatmail relay services is occupied by an unexpected process
([#568](https://github.com/chatmail/relay/pull/568))
- Add config value after how many days large files are deleted
([#555](https://github.com/chatmail/relay/pull/555))
- cmdeploy: push relay version to /etc/chatmail-version
([#573](https://github.com/chatmail/relay/pull/573))
- filtermail: allow partial body length in OpenPGP payloads
([#570](https://github.com/chatmail/relay/pull/570))
- chatmaild: allow echobot to receive unencrypted messages by default
([#556](https://github.com/chatmail/relay/pull/556))
## 1.6.0 2025-04-11
- Handle Port-25 connect errors more gracefully (common with VPNs)
([#552](https://github.com/chatmail/relay/pull/552))
- Avoid "acmetool not found" during initial run
([#550](https://github.com/chatmail/relay/pull/550))
- Fix timezone handling such that client/servers do not need to use
same timezone.
([#553](https://github.com/chatmail/relay/pull/553))
- Enforce end-to-end encryption for incoming messages.
New user address mailboxes now get an `enforceE2EEincoming` file
which prohibits incoming cleartext messages from other domains.
An outside MTA trying to submit a cleartext message will
get a "523 Encryption Needed" response, see RFC5248.
If the file does not exist (as is the case for all existing accounts)
incoming cleartext messages are accepted.
([#538](https://github.com/chatmail/server/pull/538))
- Enforce end-to-end encryption between local addresses
([#535](https://github.com/chatmail/server/pull/535))
- unbound: check that port 53 is not occupied by a different process
([#537](https://github.com/chatmail/server/pull/537))
- unbound: use 9.9.9.9 for resolving until unbound is available
([#518](https://github.com/chatmail/relay/pull/518))
- Limit the bind for the HTTPS server on 8443 to 127.0.0.1
([#522](https://github.com/chatmail/server/pull/522))
([#532](https://github.com/chatmail/server/pull/532))
- Send SNI when connecting to outside servers
([#524](https://github.com/chatmail/server/pull/524))
- postfix master.cf: use 127.0.0.1 for consistency
([#544](https://github.com/chatmail/relay/pull/544))
- Pass through `original_content` instead of `content` in filtermail
([#509](https://github.com/chatmail/server/pull/509))
- Document TLS requirements in the readme
([#514](https://github.com/chatmail/server/pull/514))
- Remove cleanup service from submission ports
([#512](https://github.com/chatmail/server/pull/512))
- cmdeploy dovecot: delete big messages after 7 days
([#504](https://github.com/chatmail/server/pull/504))
- mtail: fix getting logs from STDIN
([#502](https://github.com/chatmail/server/pull/502))
- filtermail: don't require exactly 2 lines after openPGP payload
([#497](https://github.com/chatmail/server/pull/497))
- cmdeploy dns: offer alternative DKIM record format for some web interfaces
([#470](https://github.com/chatmail/server/pull/470))
- journald: remove old logs from disk
([#490](https://github.com/chatmail/server/pull/490))
- opendkim: restart once every day to mend RAM leaks
([#498](https://github.com/chatmail/server/pull/498))
- migration guide: let opendkim own the DKIM keys directory
([#468](https://github.com/chatmail/server/pull/468))
- improve secure-join message detection
([#473](https://github.com/chatmail/server/pull/473))
- use old crypt lib in python < 3.11
([#483](https://github.com/chatmail/server/pull/483))
- chatmaild: set umask to 0700 for doveauth + metadata
([#490](https://github.com/chatmail/server/pull/492))
- remove MTA-STS daemon
([#488](https://github.com/chatmail/server/pull/488))
- replace `Subject` with `[...]` for all outgoing mails.
([#481](https://github.com/chatmail/server/pull/481))
- opendkim: use su instead of sudo
([#491](https://github.com/chatmail/server/pull/491))
## 1.5.0 2024-12-20
- cmdeploy dns: always show recommended DNS records
([#463](https://github.com/chatmail/server/pull/463))
- add `--all` to `cmdeploy dns`
([#462](https://github.com/chatmail/server/pull/462))
- fix `_mta-sts` TXT DNS record
([#461](https://github.com/chatmail/server/pull/461))
- deploy `iroh-relay` and also update "realtime relay services" in privacy policy.
([#434](https://github.com/chatmail/server/pull/434))
([#451](https://github.com/chatmail/server/pull/451))
- add guide to migrate chatmail to a new server
([#429](https://github.com/chatmail/server/pull/429))
- disable anvil authentication penalty
([#414](https://github.com/chatmail/server/pull/444))
- increase `request_queue_size` for UNIX sockets to 1000.
([#437](https://github.com/chatmail/server/pull/437))
- add argument to `cmdeploy run` for specifying
a different SSH host than `mail_domain`
([#439](https://github.com/chatmail/server/pull/439))
- query authoritative nameserver to bypass DNS cache
([#424](https://github.com/chatmail/server/pull/424))
- add mtail support (new optional `mtail_address` ini value)
This defines the address on which [`mtail`](https://google.github.io/mtail/)
exposes its metrics collected from the logs.
If you want to collect the metrics with Prometheus,
set up a private network (e.g. a WireGuard interface)
and assign an IP address from this network to the host.
If you do not plan to collect metrics,
keep this setting unset.
([#388](https://github.com/chatmail/server/pull/388))
- fix checking for required DNS records
([#412](https://github.com/chatmail/server/pull/412))
- add support for specifying whole domains for recipient passthrough list
([#408](https://github.com/chatmail/server/pull/408))
- add a paragraph about "account deletion" to info page
([#405](https://github.com/chatmail/server/pull/405))
- avoid nginx listening on ipv6 if v6 is disabled
([#402](https://github.com/chatmail/server/pull/402))
- refactor ssh-based execution to allow organizing remote functions in
modules.
([#396](https://github.com/chatmail/server/pull/396))
- trigger "apt upgrade" during "cmdeploy run"
([#398](https://github.com/chatmail/server/pull/398))
- drop hispanilandia passthrough address
([#401](https://github.com/chatmail/server/pull/401))
- set CAA record flags to 0
- add IMAP capabilities instead of overwriting them
([#413](https://github.com/chatmail/server/pull/413))
- fix OpenPGP payload check
([#435](https://github.com/chatmail/server/pull/435))
- fix Dovecot quota_max_mail_size to use max_message_size config value
([#438](https://github.com/chatmail/server/pull/438))
## 1.4.1 2024-07-31
- fix metadata dictproxy which would confuse transactions
resulting in missed notifications and other issues.
([#393](https://github.com/chatmail/server/pull/393))
([#394](https://github.com/chatmail/server/pull/394))
- add optional "imap_rawlog" config option. If true,
.in/.out files are created in user home dirs
containing the imap protocol messages.
([#389](https://github.com/chatmail/server/pull/389))
## 1.4.0 2024-07-28
- Add `disable_ipv6` config option to chatmail.ini.
Required if the server doesn't have IPv6 connectivity.
([#312](https://github.com/chatmail/server/pull/312))
- allow current K9/Thunderbird-mail releases to send encrypted messages
outside by accepting their localized "encrypted subject" strings.
([#370](https://github.com/chatmail/server/pull/370))
- Migrate and remove sqlite database in favor of password/lastlogin tracking
in a user's maildir.
([#379](https://github.com/chatmail/server/pull/379))
- Require pyinfra V3 installed on the client side,
run `./scripts/initenv.sh` to upgrade locally.
([#378](https://github.com/chatmail/server/pull/378))
- don't hardcode "/home/vmail" paths but rather set them
once in the config object and use them everywhere else,
thereby also improving testability.
([#351](https://github.com/chatmail/server/pull/351))
temporarily introduced obligatory "passdb_path" and "mailboxes_dir"
settings but they were removed/obsoleted in
([#380](https://github.com/chatmail/server/pull/380))
- BREAKING: new required chatmail.ini value 'delete_inactive_users_after = 100'
which removes users from database and mails after 100 days without any login.
([#350](https://github.com/chatmail/server/pull/350))
- Refine DNS checking to distinguish between "required" and "recommended" settings
([#372](https://github.com/chatmail/server/pull/372))
- reload nginx in the acmetool cronjob
([#360](https://github.com/chatmail/server/pull/360))
- remove checking of reverse-DNS PTR records. Chatmail-servers don't
depend on it and even in the wider e-mail system it's not common anymore.
If it's an issue, a chatmail operator can still take care to set reverse DNS properly.
([#348](https://github.com/chatmail/server/pull/348))
- Make DNS-checking faster and more interactive, run it fully during "cmdeploy run",
also introducing a generic mechanism for rapid remote ssh-based python function execution.
([#346](https://github.com/chatmail/server/pull/346))
- Don't fix file ownership of /home/vmail
([#345](https://github.com/chatmail/server/pull/345))
- Support iterating over all users with doveadm commands
([#344](https://github.com/chatmail/server/pull/344))
- Test and fix for attempts to create inadmissible accounts
([#333](https://github.com/chatmail/server/pull/321))
- check that OpenPGP has only PKESK, SKESK and SEIPD packets
([#323](https://github.com/chatmail/server/pull/323),
[#324](https://github.com/chatmail/server/pull/324))
- improve filtermail checks for encrypted messages and drop support for unencrypted MDNs
([#320](https://github.com/chatmail/server/pull/320))
- replace `bash` with `/bin/sh`
([#334](https://github.com/chatmail/server/pull/334))
- Increase number of logged in IMAP sessions to 50000
([#335](https://github.com/chatmail/server/pull/335))
- filtermail: do not allow ASCII armor without actual payload
([#325](https://github.com/chatmail/server/pull/325))
- Remove sieve to enable hardlink deduplication in LMTP
([#343](https://github.com/chatmail/server/pull/343))
- dovecot: enable gzip compression on disk
([#341](https://github.com/chatmail/server/pull/341))
- DKIM-sign Content-Type and oversign all signed headers
([#296](https://github.com/chatmail/server/pull/296))
- Add nonci_accounts metric
([#347](https://github.com/chatmail/server/pull/347))
- doveauth: log when a new account is created
([#349](https://github.com/chatmail/server/pull/349))
- Multiplex HTTPS, IMAP and SMTP on port 443
([#357](https://github.com/chatmail/server/pull/357))
## 1.3.0 - 2024-06-06
- don't check necessary DNS records on cmdeploy init anymore
([#316](https://github.com/chatmail/server/pull/316))
- ensure cron and acl are installed
([#293](https://github.com/chatmail/server/pull/293),
[#310](https://github.com/chatmail/server/pull/310))
- change default for delete_mails_after from 40 to 20 days
([#300](https://github.com/chatmail/server/pull/300))
- save journald logs only to memory and save nginx logs to journald instead of file
([#299](https://github.com/chatmail/server/pull/299))
- fix writing of multiple obs repositories in `/etc/apt/sources.list`
([#290](https://github.com/chatmail/server/pull/290))
- metadata: add support for `/shared/vendor/deltachat/irohrelay`
([#284](https://github.com/chatmail/server/pull/284))
- Emit "XCHATMAIL" capability from IMAP server
([#278](https://github.com/chatmail/server/pull/278))
- Move echobot into `/var/lib/echobot`
([#281](https://github.com/chatmail/server/pull/281))
- Accept Let's Encrypt's new Terms of Services
([#275](https://github.com/chatmail/server/pull/276))
- Reload Dovecot and Postfix when TLS certificate updates
([#271](https://github.com/chatmail/server/pull/271))
- Use forked version of dovecot without hardcoded delays
([#270](https://github.com/chatmail/server/pull/270))
## 1.2.0 - 2024-04-04
- Install dig on the server to resolve DNS records
([#267](https://github.com/chatmail/server/pull/267))
- preserve notification order and exponentially back off with
retries for tokens where we didn't get a successful return
([#265](https://github.com/chatmail/server/pull/263))
- Run chatmail-metadata and doveauth as vmail
([#261](https://github.com/chatmail/server/pull/261))
- Apply systemd restrictions to echobot
([#259](https://github.com/chatmail/server/pull/259))
- re-enable running the CI in pull requests, but not concurrently
([#258](https://github.com/chatmail/server/pull/258))
## 1.1.0 - 2024-03-28
### The changelog starts to record changes from March 15th, 2024
- Move systemd unit templates to cmdeploy package
([#255](https://github.com/chatmail/server/pull/255))
- Persist push tokens and support multiple devices per address
([#254](https://github.com/chatmail/server/pull/254))
- Avoid warning for regular doveauth protocol's hello message.
([#250](https://github.com/chatmail/server/pull/250))
- Fix various tests to pass again with "cmdeploy test".
([#245](https://github.com/chatmail/server/pull/245),
[#242](https://github.com/chatmail/server/pull/242))
- Ensure lets-encrypt certificates are reloaded after renewal
([#244](https://github.com/chatmail/server/pull/244))
- Persist tokens to avoid iOS users losing push notifications when the
chatmail metadata service is restarted (happens regularly during deploys)
([#238](https://github.com/chatmail/server/pull/239))
- Fix failing sieve-script compile errors on incoming messages
([#237](https://github.com/chatmail/server/pull/239))
- Fix quota reporting after expunging of old mails
([#233](https://github.com/chatmail/server/pull/239))


@@ -1,7 +0,0 @@
# Contributing to the chatmail relay
Commit messages follow the [Conventional Commits] notation.
We use [git-cliff] to generate the changelog from commit messages before the release.
[Conventional Commits]: https://www.conventionalcommits.org/
[git-cliff]: https://git-cliff.org/
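As an illustration (the scope and description below are made up), a commit following this convention and a quick preview of the changelog entries git-cliff would generate from unreleased commits might look like:
```
git commit -m "fix(filtermail): reject ASCII armor without an OpenPGP payload"
git cliff --unreleased
```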

LICENSE, 21 changes

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2023, chatmail and delta chat teams
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -1,20 +1,20 @@
# Chat Mail server configuration
# Chatmail relays for end-to-end encrypted email
This package deploys Postfix and Dovecot servers, including OpenDKIM for DKIM signing.
Chatmail relay servers are interoperable Mail Transport Agents (MTAs) designed for:
Postfix uses Dovecot for authentication as described in <https://www.postfix.org/SASL_README.html#server_dovecot>
- **Zero State:** no private data or metadata collected, messages are auto-deleted, low disk usage
## Ports
- **Instant/Realtime:** sub-second message delivery, realtime P2P
streaming, privacy-preserving Push Notifications for Apple, Google, and Huawei;
Postfix listens on ports 25 (smtp) and 587 (submission) and 465 (submissions).
Dovecot listens on ports 143(imap) and 993 (imaps).
- **Security Enforcement**: only strict TLS, DKIM and OpenPGP with minimized metadata accepted
## DNS
- **Reliable Federation and Decentralization:** No spam or IP reputation checks, federating
depends on established IETF standards and protocols.
For DKIM you must add a DNS entry as in /etc/opendkim/selector.txt (where selector is the opendkim_selector configured in the chatmail inventory).
This repository contains everything needed to set up a ready-to-use chatmail relay on an ssh-reachable host.
For getting started and more information, please refer to the web version of this repository's documentation at
[https://chatmail.at/doc/relay](https://chatmail.at/doc/relay)
## Run with pyinfra
```
CHATMAIL_DOMAIN=c1.testrun.org pyinfra --ssh-user root c1.testrun.org deploy.py
```


@@ -1,15 +0,0 @@
# Releasing a new version of chatmail relay
For example, to release version 1.9.0 of chatmail relay, do the following steps.
1. Update the changelog: `git cliff --unreleased --tag 1.9.0 --prepend CHANGELOG.md` or `git cliff -u -t 1.9.0 -p CHANGELOG.md`.
2. Open the changelog in the editor, edit it if required.
3. Commit the changes to the changelog with a commit message `chore(release): prepare for 1.9.0`.
4. Tag the release: `git tag --annotate 1.9.0`.
5. Push the release tag: `git push origin 1.9.0`.
6. Create a GitHub release: `gh release create 1.9.0`.
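Condensed into a single shell session, the same steps might look like the following sketch (assuming version 1.9.0 and a remote named `origin`):
```
git cliff --unreleased --tag 1.9.0 --prepend CHANGELOG.md  # 1. update the changelog
$EDITOR CHANGELOG.md                                       # 2. edit it if required
git commit -am "chore(release): prepare for 1.9.0"         # 3. commit the changelog
git tag --annotate 1.9.0                                   # 4. tag the release
git push origin 1.9.0                                      # 5. push the release tag
gh release create 1.9.0                                    # 6. create a GitHub release
```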


@@ -1,3 +0,0 @@
include src/chatmaild/ini/*.ini.f
include src/chatmaild/ini/*.ini
include src/chatmaild/tests/mail-data/*


@@ -1,5 +0,0 @@
# chatmaild
chatmaild provides Dovecot authentication
(creating Dovecot users on login)
and mail filtering.
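As a rough sketch of how the authentication service is started (the socket path below is a placeholder; the `doveauth` entry point expects a UNIX socket path followed by the path to `chatmail.ini`):
```
doveauth /run/chatmaild/doveauth.socket /usr/local/lib/chatmaild/chatmail.ini
```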


@@ -1,75 +0,0 @@
[build-system]
requires = ["setuptools>=61"]
build-backend = "setuptools.build_meta"
[project]
name = "chatmaild"
version = "0.3"
dependencies = [
"aiosmtpd",
"iniconfig",
"deltachat-rpc-server",
"deltachat-rpc-client",
"filelock",
"requests",
"crypt-r >= 3.13.1 ; python_version >= '3.11'",
]
[tool.setuptools]
include-package-data = true
[tool.setuptools.packages.find]
where = ['src']
[project.scripts]
doveauth = "chatmaild.doveauth:main"
chatmail-metadata = "chatmaild.metadata:main"
chatmail-expire = "chatmaild.expire:main"
chatmail-fsreport = "chatmaild.fsreport:main"
lastlogin = "chatmaild.lastlogin:main"
turnserver = "chatmaild.turnserver:main"
[project.entry-points.pytest11]
"chatmaild.testplugin" = "chatmaild.tests.plugin"
[tool.pytest.ini_options]
addopts = "-v -ra --strict-markers"
log_format = "%(asctime)s %(levelname)s %(message)s"
log_date_format = "%Y-%m-%d %H:%M:%S"
log_level = "INFO"
[tool.ruff]
lint.select = [
"F", # Pyflakes
"I", # isort
"PLC", # Pylint Convention
"PLE", # Pylint Error
"PLW", # Pylint Warning
]
lint.ignore = [
"PLC0415" # import-outside-top-level
]
[tool.tox]
legacy_tox_ini = """
[tox]
isolated_build = true
envlist = lint,py
[testenv:lint]
skipdist = True
skip_install = True
deps =
    ruff
commands =
    ruff format --quiet --diff src/
    ruff check src/
[testenv]
deps = pytest
    pdbpp
    pytest-localserver
    execnet
commands = pytest -v -rsXx {posargs}
"""


@@ -1 +0,0 @@


@@ -1,159 +0,0 @@
import os
from pathlib import Path
import iniconfig
from chatmaild.user import User
def read_config(inipath):
assert Path(inipath).exists(), inipath
cfg = iniconfig.IniConfig(inipath)
params = cfg.sections["params"]
default_config_content = get_default_config_content(params["mail_domain"])
df_params = iniconfig.IniConfig("ini", data=default_config_content)["params"]
new_params = dict(df_params.items())
new_params.update(params)
return Config(inipath, params=new_params)
class Config:
def __init__(self, inipath, params):
self._inipath = inipath
self.mail_domain = params["mail_domain"]
self.max_user_send_per_minute = int(params.get("max_user_send_per_minute", 60))
self.max_user_send_burst_size = int(params.get("max_user_send_burst_size", 10))
self.max_mailbox_size = params["max_mailbox_size"]
self.max_message_size = int(params.get("max_message_size", "31457280"))
self.delete_mails_after = params["delete_mails_after"]
self.delete_large_after = params["delete_large_after"]
self.delete_inactive_users_after = int(params["delete_inactive_users_after"])
self.username_min_length = int(params["username_min_length"])
self.username_max_length = int(params["username_max_length"])
self.password_min_length = int(params["password_min_length"])
self.passthrough_senders = params["passthrough_senders"].split()
self.passthrough_recipients = params["passthrough_recipients"].split()
self.www_folder = params.get("www_folder", "")
self.filtermail_smtp_port = int(params.get("filtermail_smtp_port", "10080"))
self.filtermail_smtp_port_incoming = int(
params.get("filtermail_smtp_port_incoming", "10081")
)
self.postfix_reinject_port = int(params.get("postfix_reinject_port", "10025"))
self.postfix_reinject_port_incoming = int(
params.get("postfix_reinject_port_incoming", "10026")
)
self.mtail_address = params.get("mtail_address")
self.disable_ipv6 = params.get("disable_ipv6", "false").lower() == "true"
self.addr_v4 = os.environ.get("CHATMAIL_ADDR_V4", "")
self.addr_v6 = os.environ.get("CHATMAIL_ADDR_V6", "")
self.acme_email = params.get("acme_email", "")
self.imap_rawlog = params.get("imap_rawlog", "false").lower() == "true"
self.imap_compress = params.get("imap_compress", "false").lower() == "true"
if "iroh_relay" not in params:
self.iroh_relay = "https://" + params["mail_domain"]
self.enable_iroh_relay = True
else:
self.iroh_relay = params["iroh_relay"].strip()
self.enable_iroh_relay = False
self.privacy_postal = params.get("privacy_postal")
self.privacy_mail = params.get("privacy_mail")
self.privacy_pdo = params.get("privacy_pdo")
self.privacy_supervisor = params.get("privacy_supervisor")
# TLS certificate management.
# If tls_external_cert_and_key is set, use externally managed certs.
# Otherwise derived from the domain name:
# - Domains starting with "_" use self-signed certificates
# - All other domains use ACME.
external = params.get("tls_external_cert_and_key", "").strip()
if external:
parts = external.split()
if len(parts) != 2:
raise ValueError(
"tls_external_cert_and_key must have two space-separated"
" paths: CERT_PATH KEY_PATH"
)
self.tls_cert_mode = "external"
self.tls_cert_path, self.tls_key_path = parts
elif self.mail_domain.startswith("_"):
self.tls_cert_mode = "self"
self.tls_cert_path = "/etc/ssl/certs/mailserver.pem"
self.tls_key_path = "/etc/ssl/private/mailserver.key"
else:
self.tls_cert_mode = "acme"
self.tls_cert_path = f"/var/lib/acme/live/{self.mail_domain}/fullchain"
self.tls_key_path = f"/var/lib/acme/live/{self.mail_domain}/privkey"
# deprecated option
mbdir = params.get("mailboxes_dir", f"/home/vmail/mail/{self.mail_domain}")
self.mailboxes_dir = Path(mbdir.strip())
# old unused option (except for first migration from sqlite to maildir store)
self.passdb_path = Path(params.get("passdb_path", "/home/vmail/passdb.sqlite"))
def _getbytefile(self):
return open(self._inipath, "rb")
def get_user(self, addr) -> User:
if not addr or "@" not in addr or "/" in addr:
raise ValueError(f"invalid address {addr!r}")
maildir = self.mailboxes_dir.joinpath(addr)
password_path = maildir.joinpath("password")
return User(maildir, addr, password_path, uid="vmail", gid="vmail")
def write_initial_config(inipath, mail_domain, overrides):
"""Write out default config file, using the specified config value overrides."""
content = get_default_config_content(mail_domain, **overrides)
inipath.write_text(content)
def get_default_config_content(mail_domain, **overrides):
from importlib.resources import files
inidir = files(__package__).joinpath("ini")
source_inipath = inidir.joinpath("chatmail.ini.f")
content = source_inipath.read_text().format(mail_domain=mail_domain)
# apply config overrides
new_lines = []
extra = overrides.copy()
for line in content.split("\n"):
new_line = line.strip()
if new_line and new_line[0] not in "#[":
name, value = map(str.strip, new_line.split("=", maxsplit=1))
value = extra.pop(name, value)
new_line = f"{name} = {value}"
new_lines.append(new_line)
for name, value in extra.items():
new_line = f"{name} = {value}"
new_lines.append(new_line)
content = "\n".join(new_lines)
# apply testrun privacy overrides
if mail_domain.endswith(".testrun.org"):
override_inipath = inidir.joinpath("override-testrun.ini")
privacy = iniconfig.IniConfig(override_inipath)["privacy"]
lines = []
for line in content.split("\n"):
for key, value in privacy.items():
value_lines = value.format(mail_domain=mail_domain).strip().split("\n")
if not line.startswith(f"{key} =") or not value_lines:
continue
if len(value_lines) == 1:
lines.append(f"{key} = {value}")
else:
lines.append(f"{key} =")
for vl in value_lines:
lines.append(f" {vl}")
break
else:
lines.append(line)
content = "\n".join(lines)
return content


@@ -1,98 +0,0 @@
import logging
import os
from socketserver import StreamRequestHandler, ThreadingUnixStreamServer
class DictProxy:
def loop_forever(self, rfile, wfile):
# Transaction storage is local to each handler loop.
# Dovecot reuses transaction IDs across connections,
# sometimes starting a transaction named `1`
# on two different connections to the same proxy.
transactions = {}
while True:
msg = rfile.readline().strip().decode()
if not msg:
break
res = self.handle_dovecot_request(msg, transactions)
if res:
wfile.write(res.encode("ascii"))
wfile.flush()
def handle_dovecot_request(self, msg, transactions):
# see https://doc.dovecot.org/2.3/developer_manual/design/dict_protocol/#dovecot-dict-protocol
short_command = msg[0]
parts = msg[1:].split("\t")
if short_command == "L":
return self.handle_lookup(parts)
elif short_command == "I":
return self.handle_iterate(parts)
elif short_command == "H":
return # no version checking
if short_command not in ("BSC"):
logging.warning(f"unknown dictproxy request: {msg!r}")
return
transaction_id = parts[0]
if short_command == "B":
return self.handle_begin_transaction(transaction_id, parts, transactions)
elif short_command == "C":
return self.handle_commit_transaction(transaction_id, parts, transactions)
elif short_command == "S":
addr = transactions[transaction_id]["addr"]
if not self.handle_set(addr, parts):
transactions[transaction_id]["res"] = "F\n"
logging.error(f"dictproxy-set failed for {addr!r}: {msg!r}")
def handle_lookup(self, parts):
logging.warning(f"lookup ignored: {parts!r}")
return "N\n"
def handle_iterate(self, parts):
# Empty line means ITER_FINISHED.
# If we don't return empty line Dovecot will timeout.
return "\n"
def handle_begin_transaction(self, transaction_id, parts, transactions):
addr = parts[1]
transactions[transaction_id] = dict(addr=addr, res="O\n")
def handle_set(self, addr, parts):
# For documentation on key structure see
# https://github.com/dovecot/core/blob/main/src/lib-storage/mailbox-attribute.h
return False
def handle_commit_transaction(self, transaction_id, parts, transactions):
# return whatever "set" command(s) set as result.
return transactions.pop(transaction_id)["res"]
def serve_forever_from_socket(self, socket):
dictproxy = self
class Handler(StreamRequestHandler):
def handle(self):
try:
dictproxy.loop_forever(self.rfile, self.wfile)
except Exception:
logging.exception("Exception in the handler")
raise
try:
os.unlink(socket)
except FileNotFoundError:
pass
with CustomThreadingUnixStreamServer(socket, Handler) as server:
try:
server.serve_forever()
except KeyboardInterrupt:
pass
class CustomThreadingUnixStreamServer(ThreadingUnixStreamServer):
request_queue_size = 1000


@@ -1,169 +0,0 @@
import json
import logging
import os
import re
import sys
import filelock
try:
import crypt_r
except ImportError:
import crypt as crypt_r
from .config import Config, read_config
from .dictproxy import DictProxy
from .migrate_db import migrate_from_db_to_maildir
NOCREATE_FILE = "/etc/chatmail-nocreate"
VALID_LOCALPART_RE = re.compile(r"^[a-z0-9._-]+$")
def encrypt_password(password: str):
# https://doc.dovecot.org/2.3/configuration_manual/authentication/password_schemes/
passhash = crypt_r.crypt(password, crypt_r.METHOD_SHA512)
return "{SHA512-CRYPT}" + passhash
def is_allowed_to_create(config: Config, user, cleartext_password) -> bool:
"""Return True if user and password are admissable."""
if os.path.exists(NOCREATE_FILE):
logging.warning(f"blocked account creation because {NOCREATE_FILE!r} exists.")
return False
if len(cleartext_password) < config.password_min_length:
logging.warning(
"Password needs to be at least %s characters long",
config.password_min_length,
)
return False
parts = user.split("@")
if len(parts) != 2:
logging.warning(f"user {user!r} is not a proper e-mail address")
return False
localpart, domain = parts
if (
len(localpart) > config.username_max_length
or len(localpart) < config.username_min_length
):
logging.warning(
"localpart %s has to be between %s and %s chars long",
localpart,
config.username_min_length,
config.username_max_length,
)
return False
if not VALID_LOCALPART_RE.match(localpart):
logging.warning("localpart %r contains invalid characters", localpart)
return False
return True
def split_and_unescape(s):
"""Split strings using double quote as a separator and backslash as escape character
into parts."""
out = ""
i = 0
while i < len(s):
c = s[i]
if c == "\\":
# Skip escape character.
i += 1
# This will raise IndexError if there is no character
# after escape character. This is expected
# as this is an invalid input.
out += s[i]
elif c == '"':
# Separator
yield out
out = ""
else:
out += c
i += 1
yield out
class AuthDictProxy(DictProxy):
def __init__(self, config):
super().__init__()
self.config = config
def handle_lookup(self, parts):
# Dovecot <2.3.17 has only one part,
# do not attempt to read any other parts for compatibility.
keyname = parts[0]
namespace, type, args = keyname.split("/", 2)
args = list(split_and_unescape(args))
config = self.config
reply_command = "F"
res = ""
if namespace == "shared":
if type == "userdb":
user = args[0]
if user.endswith(f"@{config.mail_domain}"):
res = self.lookup_userdb(user)
if res:
reply_command = "O"
else:
reply_command = "N"
elif type == "passdb":
user = args[1]
if user.endswith(f"@{config.mail_domain}"):
res = self.lookup_passdb(user, cleartext_password=args[0])
if res:
reply_command = "O"
else:
reply_command = "N"
json_res = json.dumps(res) if res else ""
return f"{reply_command}{json_res}\n"
def handle_iterate(self, parts):
# example: I0\t0\tshared/userdb/
if parts[2] == "shared/userdb/":
result = "".join(
f"Oshared/userdb/{user}\t\n" for user in self.iter_userdb()
)
return f"{result}\n"
def iter_userdb(self) -> list:
"""Get a list of all user addresses."""
return [x for x in os.listdir(self.config.mailboxes_dir) if "@" in x]
def lookup_userdb(self, addr):
return self.config.get_user(addr).get_userdb_dict()
def lookup_passdb(self, addr, cleartext_password):
user = self.config.get_user(addr)
userdata = user.get_userdb_dict()
if userdata:
return userdata
if not is_allowed_to_create(self.config, addr, cleartext_password):
return
lock = filelock.FileLock(str(user.password_path) + ".lock", timeout=5)
with lock:
userdata = user.get_userdb_dict()
if userdata:
return userdata
user.set_password(encrypt_password(cleartext_password))
print(f"Created address: {addr}", file=sys.stderr)
return user.get_userdb_dict()
def main():
socket, cfgpath = sys.argv[1:]
config = read_config(cfgpath)
migrate_from_db_to_maildir(config)
dictproxy = AuthDictProxy(config=config)
dictproxy.serve_forever_from_socket(socket)


@@ -1,206 +0,0 @@
"""
Expire old messages and addresses.
"""
import os
import shutil
import sys
import time
from argparse import ArgumentParser
from collections import namedtuple
from datetime import datetime
from stat import S_ISREG
from chatmaild.config import read_config
FileEntry = namedtuple("FileEntry", ("path", "mtime", "size"))
def iter_mailboxes(basedir, maxnum):
if not os.path.exists(basedir):
print_info(f"no mailboxes found at: {basedir}")
return
for name in os_listdir_if_exists(basedir)[:maxnum]:
if "@" in name:
yield MailboxStat(basedir + "/" + name)
def get_file_entry(path):
"""return a FileEntry or None if the path does not exist or is not a regular file."""
try:
st = os.stat(path)
except FileNotFoundError:
return None
if not S_ISREG(st.st_mode):
return None
return FileEntry(path, st.st_mtime, st.st_size)
def os_listdir_if_exists(path):
"""return a list of names obtained from os.listdir or an empty list if the path does not exist."""
try:
return os.listdir(path)
except FileNotFoundError:
return []
class MailboxStat:
last_login = None
def __init__(self, basedir):
self.basedir = str(basedir)
self.messages = []
self.extrafiles = []
self.scandir(self.basedir)
def scandir(self, folderdir):
for name in os_listdir_if_exists(folderdir):
path = f"{folderdir}/{name}"
if name in ("cur", "new", "tmp"):
for msg_name in os_listdir_if_exists(path):
entry = get_file_entry(f"{path}/{msg_name}")
if entry is not None:
self.messages.append(entry)
elif os.path.isdir(path):
self.scandir(path)
else:
entry = get_file_entry(path)
if entry is not None:
self.extrafiles.append(entry)
if name == "password":
self.last_login = entry.mtime
self.extrafiles.sort(key=lambda x: -x.size)
def print_info(msg):
print(msg, file=sys.stderr)
class Expiry:
def __init__(self, config, dry, now, verbose):
self.config = config
self.dry = dry
self.now = now
self.verbose = verbose
self.del_mboxes = 0
self.all_mboxes = 0
self.del_files = 0
self.all_files = 0
self.start = time.time()
def remove_mailbox(self, mboxdir):
if self.verbose:
print_info(f"removing {mboxdir}")
if not self.dry:
shutil.rmtree(mboxdir)
self.del_mboxes += 1
def remove_file(self, path, mtime=None):
if self.verbose:
if mtime is not None:
date = datetime.fromtimestamp(mtime).strftime("%b %d")
print_info(f"removing {date} {path}")
else:
print_info(f"removing {path}")
if not self.dry:
try:
os.unlink(path)
except FileNotFoundError:
print_info(f"file not found/vanished {path}")
self.del_files += 1
def process_mailbox_stat(self, mbox):
cutoff_without_login = (
self.now - int(self.config.delete_inactive_users_after) * 86400
)
cutoff_mails = self.now - int(self.config.delete_mails_after) * 86400
cutoff_large_mails = self.now - int(self.config.delete_large_after) * 86400
self.all_mboxes += 1
changed = False
if mbox.last_login and mbox.last_login < cutoff_without_login:
self.remove_mailbox(mbox.basedir)
return
mboxname = os.path.basename(mbox.basedir)
if self.verbose:
date = datetime.fromtimestamp(mbox.last_login) if mbox.last_login else None
if date:
print_info(f"checking mailbox {date.strftime('%b %d')} {mboxname}")
else:
print_info(f"checking mailbox (no last_login) {mboxname}")
self.all_files += len(mbox.messages)
for message in mbox.messages:
if message.mtime < cutoff_mails:
self.remove_file(message.path, mtime=message.mtime)
elif message.size > 200000 and message.mtime < cutoff_large_mails:
# we only remove noticed large files (not unnoticed ones in new/)
parts = message.path.split("/")
if len(parts) >= 2 and parts[-2] == "cur":
self.remove_file(message.path, mtime=message.mtime)
else:
continue
changed = True
if changed:
self.remove_file(f"{mbox.basedir}/maildirsize")
def get_summary(self):
return (
f"Removed {self.del_mboxes} out of {self.all_mboxes} mailboxes "
f"and {self.del_files} out of {self.all_files} files in existing mailboxes "
f"in {time.time() - self.start:2.2f} seconds"
)
def main(args=None):
"""Expire mailboxes and messages according to chatmail config"""
parser = ArgumentParser(description=main.__doc__)
ini = "/usr/local/lib/chatmaild/chatmail.ini"
parser.add_argument(
"chatmail_ini",
action="store",
nargs="?",
help=f"path pointing to chatmail.ini file, default: {ini}",
default=ini,
)
parser.add_argument(
"--days", action="store", help="assume date to be days older than now"
)
parser.add_argument(
"--maxnum",
default=None,
action="store",
help="maximum number of mailboxes to iterate on",
)
parser.add_argument(
"-v",
dest="verbose",
action="store_true",
help="print out removed files and mailboxes",
)
parser.add_argument(
"--remove",
dest="remove",
action="store_true",
help="actually remove all expired files and dirs",
)
args = parser.parse_args(args)
config = read_config(args.chatmail_ini)
now = datetime.utcnow().timestamp()
if args.days:
now = now - 86400 * int(args.days)
maxnum = int(args.maxnum) if args.maxnum else None
exp = Expiry(config, dry=not args.remove, now=now, verbose=args.verbose)
for mailbox in iter_mailboxes(str(config.mailboxes_dir), maxnum=maxnum):
exp.process_mailbox_stat(mailbox)
print(exp.get_summary())
if __name__ == "__main__":
main(sys.argv[1:])


@@ -1,44 +0,0 @@
import json
import logging
import os
from contextlib import contextmanager
from random import randint
import filelock
class FileDict:
"""Concurrency-safe multi-reader/single-writer persistent dict."""
def __init__(self, path):
self.path = path
self.lock_path = path.with_name(path.name + ".lock")
@contextmanager
def modify(self):
# the OS will release the lock if the process dies,
# and the contextmanager will otherwise guarantee release
with filelock.FileLock(self.lock_path):
data = self.read()
yield data
write_path = self.path.with_name(self.path.name + ".tmp")
with write_path.open("w") as f:
json.dump(data, f)
os.rename(write_path, self.path)
def read(self):
try:
with self.path.open("r") as f:
return json.load(f)
except FileNotFoundError:
return {}
except Exception:
logging.warning(f"corrupt serialization state at: {self.path!r}")
return {}
def write_bytes_atomic(path, content):
rint = randint(0, 10000000)
tmp = path.with_name(path.name + f".tmp-{rint}")
tmp.write_bytes(content)
os.rename(tmp, path)


@@ -1,287 +0,0 @@
"""
command line tool to analyze mailbox message storage
example invocation:
python -m chatmaild.fsreport /path/to/chatmail.ini
to show storage summaries for all "cur" folders
python -m chatmaild.fsreport /path/to/chatmail.ini --mdir cur
to show storage summaries only for first 1000 mailboxes
python -m chatmaild.fsreport /path/to/chatmail.ini --maxnum 1000
to write Prometheus textfile for node_exporter
python -m chatmaild.fsreport --textfile /var/lib/prometheus/node-exporter/
writes to /var/lib/prometheus/node-exporter/fsreport.prom
to also write legacy metrics.py style output (default: /var/www/html/metrics):
python -m chatmaild.fsreport --textfile /var/lib/prometheus/node-exporter/ --legacy-metrics
"""
import os
import tempfile
from argparse import ArgumentParser
from datetime import datetime
from chatmaild.config import read_config
from chatmaild.expire import iter_mailboxes
DAYSECONDS = 24 * 60 * 60
MONTHSECONDS = DAYSECONDS * 30
def HSize(size: int):
"""Format a size integer as a Human-readable string Kilobyte, Megabyte or Gigabyte"""
if size < 10000:
return f"{size / 1000:5.2f}K"
if size < 1000 * 1000:
return f"{size / 1000:5.0f}K"
if size < 1000 * 1000 * 1000:
return f"{int(size / 1000000):5.0f}M"
return f"{size / 1000000000:5.2f}G"
class Report:
def __init__(self, now, min_login_age, mdir):
self.size_extra = 0
self.size_messages = 0
self.now = now
self.min_login_age = min_login_age
self.mdir = mdir
self.num_ci_logins = self.num_all_logins = 0
self.login_buckets = {x: 0 for x in (1, 10, 30, 40, 80, 100, 150)}
KiB = 1024
MiB = 1024 * KiB
self.message_size_thresholds = (
0,
100 * KiB,
MiB // 2,
1 * MiB,
2 * MiB,
5 * MiB,
10 * MiB,
)
self.message_buckets = {x: 0 for x in self.message_size_thresholds}
self.message_count_buckets = {x: 0 for x in self.message_size_thresholds}
def process_mailbox_stat(self, mailbox):
# categorize login times
last_login = mailbox.last_login
if last_login:
self.num_all_logins += 1
if os.path.basename(mailbox.basedir)[:3] == "ci-":
self.num_ci_logins += 1
else:
for days in self.login_buckets:
if last_login >= self.now - days * DAYSECONDS:
self.login_buckets[days] += 1
cutoff_login_date = self.now - self.min_login_age * DAYSECONDS
if last_login and last_login <= cutoff_login_date:
# categorize message sizes
for size in self.message_buckets:
for msg in mailbox.messages:
if msg.size >= size:
if self.mdir and f"/{self.mdir}/" not in msg.path:
continue
self.message_buckets[size] += msg.size
self.message_count_buckets[size] += 1
self.size_messages += sum(entry.size for entry in mailbox.messages)
self.size_extra += sum(entry.size for entry in mailbox.extrafiles)
def dump_summary(self):
all_messages = self.size_messages
print()
print("## Mailbox storage use analysis")
print(f"Mailbox data total size: {HSize(self.size_extra + all_messages)}")
print(f"Messages total size : {HSize(all_messages)}")
try:
percent = self.size_extra / (self.size_extra + all_messages) * 100
except ZeroDivisionError:
percent = 100
print(f"Extra files : {HSize(self.size_extra)} ({percent:.2f}%)")
print()
if self.min_login_age:
print(f"### Message storage for {self.min_login_age} days old logins")
pref = f"[{self.mdir}] " if self.mdir else ""
for minsize, sumsize in self.message_buckets.items():
count = self.message_count_buckets[minsize]
percent = (sumsize / all_messages * 100) if all_messages else 0
print(
f"{pref}larger than {HSize(minsize)}: {HSize(sumsize)} ({percent:.2f}%), {count} msgs"
)
user_logins = self.num_all_logins - self.num_ci_logins
def p(num):
return f"({num / user_logins * 100:2.2f}%)" if user_logins else "100%"
print()
print(f"## Login stats, from date reference {datetime.fromtimestamp(self.now)}")
print(f"all: {HSize(self.num_all_logins)}")
print(f"non-ci: {HSize(user_logins)}")
print(f"ci: {HSize(self.num_ci_logins)}")
for days, active in self.login_buckets.items():
print(f"last {days:3} days: {HSize(active)} {p(active)}")
def _write_atomic(self, filepath, content):
"""Atomically write content to filepath via tmp+rename."""
dirpath = os.path.dirname(os.path.abspath(filepath))
fd, tmppath = tempfile.mkstemp(dir=dirpath, suffix=".tmp")
try:
with os.fdopen(fd, "w") as f:
f.write(content)
os.chmod(tmppath, 0o644)
os.rename(tmppath, filepath)
except BaseException:
try:
os.unlink(tmppath)
except OSError:
pass
raise
def dump_textfile(self, filepath):
"""Dump metrics in Prometheus exposition format."""
lines = []
lines.append("# HELP chatmail_storage_bytes Mailbox storage in bytes.")
lines.append("# TYPE chatmail_storage_bytes gauge")
lines.append(f'chatmail_storage_bytes{{kind="messages"}} {self.size_messages}')
lines.append(f'chatmail_storage_bytes{{kind="extra"}} {self.size_extra}')
total = self.size_extra + self.size_messages
lines.append(f'chatmail_storage_bytes{{kind="total"}} {total}')
lines.append("# HELP chatmail_messages_bytes Sum of msg bytes >= threshold.")
lines.append("# TYPE chatmail_messages_bytes gauge")
for minsize, sumsize in self.message_buckets.items():
lines.append(f'chatmail_messages_bytes{{min_size="{minsize}"}} {sumsize}')
lines.append("# HELP chatmail_messages_count Number of msgs >= size threshold.")
lines.append("# TYPE chatmail_messages_count gauge")
for minsize, count in self.message_count_buckets.items():
lines.append(f'chatmail_messages_count{{min_size="{minsize}"}} {count}')
lines.append("# HELP chatmail_accounts Number of accounts.")
lines.append("# TYPE chatmail_accounts gauge")
user_logins = self.num_all_logins - self.num_ci_logins
lines.append(f'chatmail_accounts{{kind="all"}} {self.num_all_logins}')
lines.append(f'chatmail_accounts{{kind="ci"}} {self.num_ci_logins}')
lines.append(f'chatmail_accounts{{kind="user"}} {user_logins}')
lines.append(
"# HELP chatmail_accounts_active Non-CI accounts active within N days."
)
lines.append("# TYPE chatmail_accounts_active gauge")
for days, active in self.login_buckets.items():
lines.append(f'chatmail_accounts_active{{days="{days}"}} {active}')
self._write_atomic(filepath, "\n".join(lines) + "\n")
def dump_compat_textfile(self, filepath):
"""Dump legacy metrics.py style metrics."""
user_logins = self.num_all_logins - self.num_ci_logins
lines = [
"# HELP total number of accounts",
"# TYPE accounts gauge",
f"accounts {self.num_all_logins}",
"# HELP number of CI accounts",
"# TYPE ci_accounts gauge",
f"ci_accounts {self.num_ci_logins}",
"# HELP number of non-CI accounts",
"# TYPE nonci_accounts gauge",
f"nonci_accounts {user_logins}",
]
self._write_atomic(filepath, "\n".join(lines) + "\n")
def main(args=None):
"""Report about filesystem storage usage of all mailboxes and messages"""
parser = ArgumentParser(description=main.__doc__)
ini = "/usr/local/lib/chatmaild/chatmail.ini"
parser.add_argument(
"chatmail_ini",
action="store",
nargs="?",
help=f"path pointing to chatmail.ini file, default: {ini}",
default=ini,
)
parser.add_argument(
"--days",
default=0,
action="store",
help="assume date to be DAYS older than now",
)
parser.add_argument(
"--min-login-age",
default=0,
metavar="DAYS",
dest="min_login_age",
action="store",
help="only sum up message size if last login is at least DAYS days old",
)
parser.add_argument(
"--mdir",
metavar="{cur,new,tmp}",
action="store",
help="only consider messages in specified Maildir subdirectory for summary",
)
parser.add_argument(
"--maxnum",
default=None,
action="store",
help="maximum number of mailboxes to iterate on",
)
parser.add_argument(
"--textfile",
metavar="PATH",
default=None,
help="write Prometheus textfile to PATH (directory or file); "
"if PATH is a directory, writes 'fsreport.prom' inside it",
)
parser.add_argument(
"--legacy-metrics",
metavar="FILENAME",
nargs="?",
const="/var/www/html/metrics",
default=None,
help="write legacy metrics.py textfile (default: /var/www/html/metrics)",
)
args = parser.parse_args(args)
config = read_config(args.chatmail_ini)
now = datetime.utcnow().timestamp()
if args.days:
now = now - 86400 * int(args.days)
maxnum = int(args.maxnum) if args.maxnum else None
rep = Report(now=now, min_login_age=int(args.min_login_age), mdir=args.mdir)
for mbox in iter_mailboxes(str(config.mailboxes_dir), maxnum=maxnum):
rep.process_mailbox_stat(mbox)
if args.textfile:
path = args.textfile
if os.path.isdir(path):
path = os.path.join(path, "fsreport.prom")
rep.dump_textfile(path)
if args.legacy_metrics:
rep.dump_compat_textfile(args.legacy_metrics)
if not args.textfile and not args.legacy_metrics:
rep.dump_summary()
if __name__ == "__main__":
main()
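# A minimal usage sketch (the module invocation form and the textfile-collector
# path are assumptions, not taken from this file): run the report against the
# deployed chatmail.ini and write a Prometheus textfile for node_exporter's
# textfile collector.
#
#   python3 -m chatmaild.fsreport /usr/local/lib/chatmaild/chatmail.ini \
#       --textfile /var/lib/prometheus/node-exporter
#
# If --textfile points to a directory, 'fsreport.prom' is written inside it.
# Without --textfile and --legacy-metrics the script prints the human-readable
# summary produced by dump_summary() instead.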

View File

@@ -1,133 +0,0 @@
[params]
# mail domain (MUST be set to fully qualified chat mail domain)
mail_domain = {mail_domain}
#
# If you only do private test deploys, you don't need to modify any settings below
#
#
# Restrictions on user addresses
#
# email sending rate per user and minute
max_user_send_per_minute = 60
# per-user max burst size for sending rate limiting (GCRA bucket capacity)
max_user_send_burst_size = 10
# maximum mailbox size of a chatmail address
max_mailbox_size = 500M
# maximum message size for an e-mail in bytes
max_message_size = 31457280
# days after which mails are unconditionally deleted
delete_mails_after = 20
# days after which large messages (>200k) are unconditionally deleted
delete_large_after = 7
# days after which users without a successful login are deleted (database and mails)
delete_inactive_users_after = 90
# minimum length a username must have
username_min_length = 9
# maximum length a username can have
username_max_length = 9
# minimum length a password must have
password_min_length = 9
# list of chatmail addresses which can send outbound un-encrypted mail
passthrough_senders =
# list of e-mail recipients for which to accept outbound un-encrypted mails
# (space-separated, item may start with "@" to whitelist whole recipient domains)
passthrough_recipients =
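# example (illustrative values only, not defaults):
# passthrough_recipients = privacy@example.org @lists.example.org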
# Use externally managed TLS certificates instead of built-in acmetool.
# Paths refer to files on the deployment server (not the build machine).
# Both files must already exist before running cmdeploy.
# Certificate renewal is your responsibility; changed files are
# picked up automatically by all relay services.
# tls_external_cert_and_key = /path/to/fullchain.pem /path/to/privkey.pem
# path to www directory - documented here: https://chatmail.at/doc/relay/getting_started.html#custom-web-pages
#www_folder = www
#
# Deployment Details
#
# SMTP outgoing filtermail and reinjection
filtermail_smtp_port = 10080
postfix_reinject_port = 10025
# SMTP incoming filtermail and reinjection
filtermail_smtp_port_incoming = 10081
postfix_reinject_port_incoming = 10026
# if set to "True" IPv6 is disabled
disable_ipv6 = False
# Your email address, which will be used by acmetool to manage Let's Encrypt TLS certificates
acme_email =
# Defaults to https://iroh.{{mail_domain}} and running `iroh-relay` on the chatmail
# service.
# If you set it to anything else, the service will be disabled
# and users will be directed to use the given iroh relay URL.
# Set it to empty string if you want users to use their default iroh relay.
# iroh_relay =
# Address on which `mtail` listens,
# e.g. 127.0.0.1 or some private network address like 192.168.10.1.
# You can point Prometheus or some other OpenMetrics-compatible collector to
# http://{{mtail_address}}:3903/metrics
# and display the collected metrics with Grafana.
#
# WARNING: do not expose this service to the public IP address.
#
# `mtail` is not running if this setting is not set.
# mtail_address = 127.0.0.1
#
# Debugging options
#
# set to True if you want to track imap protocol execution
# in per-maildir ".in/.out" files.
# Note that you need to manually clean up these files,
# so use this option with caution on production servers.
imap_rawlog = false
# set to true if you want to enable the IMAP COMPRESS Extension,
# which allows IMAP connections to be efficiently compressed.
# WARNING: Enabling this makes it impossible to hibernate IMAP
# processes which will result in much higher memory/RAM usage.
imap_compress = false
#
# Privacy Policy
#
# postal address of privacy contact
privacy_postal =
# email address of privacy contact
privacy_mail =
# postal address of the privacy data officer
privacy_pdo =
# postal address of the privacy supervisor
privacy_supervisor =

View File

@@ -1,16 +0,0 @@
[privacy]
passthrough_recipients = privacy@testrun.org echo@{mail_domain}
privacy_postal =
Merlinux GmbH, Represented by the managing director H. Krekel,
Reichgrafen Str. 20, 79102 Freiburg, Germany
privacy_mail = privacy@testrun.org
privacy_pdo =
Prof. Dr. Fabian Schmieder, lexICT UG (limited), Ostfeldstr. 49, 30559 Hannover.
You can contact him at *delta-privacy@merlinux.eu* (Keyword: DPO)
privacy_supervisor =
State Commissioner for Data Protection and Freedom of Information of
Baden-Württemberg in 70173 Stuttgart, Germany.

View File

@@ -1,29 +0,0 @@
import sys
from .config import read_config
from .dictproxy import DictProxy
class LastLoginDictProxy(DictProxy):
def __init__(self, config):
super().__init__()
self.config = config
def handle_set(self, addr, parts):
keyname = parts[1].split("/")
value = parts[2] if len(parts) > 2 else ""
if keyname[0] == "shared" and keyname[1] == "last-login":
addr = keyname[2]
timestamp = int(value)
user = self.config.get_user(addr)
user.set_last_login_timestamp(timestamp)
return True
return False
def main():
socket, config_path = sys.argv[1:]
config = read_config(config_path)
dictproxy = LastLoginDictProxy(config=config)
dictproxy.serve_forever_from_socket(socket)
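# Example of a dict-protocol transaction handled above (illustrative transaction
# id and timestamp; the wire format uses TAB separators, shown here as "<TAB>"):
#
#   B1111<TAB>user@example.org
#   S1111<TAB>shared/last-login/user@example.org<TAB>1697380000
#   C1111
#
# The "S" line reaches handle_set(), which stores the timestamp via
# user.set_last_login_timestamp().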

View File

@@ -1,155 +0,0 @@
import logging
import sys
import time
from contextlib import contextmanager
from .config import read_config
from .dictproxy import DictProxy
from .filedict import FileDict
from .notifier import Notifier
from .turnserver import turn_credentials
def _is_valid_token_timestamp(timestamp, now):
    # A token is invalid if it is older than 90 days
    # or if its timestamp lies in the future.
return timestamp > now - 3600 * 24 * 90 and timestamp < now + 60
class Metadata:
    # each SETMETADATA on this key appends to a dictionary of unique device
    # tokens, which only ever get removed if the upstream indicates the token
    # is invalid
DEVICETOKEN_KEY = "devicetoken"
def __init__(self, vmail_dir):
self.vmail_dir = vmail_dir
def get_metadata_dict(self, addr):
return FileDict(self.vmail_dir / addr / "metadata.json")
@contextmanager
def _modify_tokens(self, addr):
with self.get_metadata_dict(addr).modify() as data:
tokens = data.setdefault(self.DEVICETOKEN_KEY, {})
now = int(time.time())
if isinstance(tokens, list):
data[self.DEVICETOKEN_KEY] = tokens = {t: now for t in tokens}
expired_tokens = [
token
for token, timestamp in tokens.items()
if not _is_valid_token_timestamp(tokens[token], now)
]
for expired_token in expired_tokens:
del tokens[expired_token]
yield tokens
def add_token_to_addr(self, addr, token):
with self._modify_tokens(addr) as tokens:
tokens[token] = int(time.time())
def remove_token_from_addr(self, addr, token):
with self._modify_tokens(addr) as tokens:
if token in tokens:
del tokens[token]
def get_tokens_for_addr(self, addr):
mdict = self.get_metadata_dict(addr).read()
tokens = mdict.get(self.DEVICETOKEN_KEY, {})
now = int(time.time())
if isinstance(tokens, dict):
token_list = [
token
for token, timestamp in tokens.items()
if _is_valid_token_timestamp(timestamp, now)
]
if len(token_list) < len(tokens):
# Some tokens have expired, remove them.
with self._modify_tokens(addr) as _tokens:
pass
else:
token_list = []
return token_list
class MetadataDictProxy(DictProxy):
def __init__(self, notifier, metadata, iroh_relay=None, turn_hostname=None):
super().__init__()
self.notifier = notifier
self.metadata = metadata
self.iroh_relay = iroh_relay
self.turn_hostname = turn_hostname
def handle_lookup(self, parts):
# Lpriv/43f5f508a7ea0366dff30200c15250e3/devicetoken\tlkj123poi@c2.testrun.org
keyparts = parts[0].split("/", 2)
if keyparts[0] == "priv":
keyname = keyparts[2]
addr = parts[1]
if keyname == self.metadata.DEVICETOKEN_KEY:
res = " ".join(self.metadata.get_tokens_for_addr(addr))
return f"O{res}\n"
elif keyparts[0] == "shared":
keyname = keyparts[2]
if (
keyname == "vendor/vendor.dovecot/pvt/server/vendor/deltachat/irohrelay"
and self.iroh_relay
):
# Handle `GETMETADATA "" /shared/vendor/deltachat/irohrelay`
return f"O{self.iroh_relay}\n"
elif keyname == "vendor/vendor.dovecot/pvt/server/vendor/deltachat/turn":
try:
res = turn_credentials()
except Exception:
logging.exception("failed to get TURN credentials")
return "N\n"
port = 3478
return f"O{self.turn_hostname}:{port}:{res}\n"
logging.warning(f"lookup ignored: {parts!r}")
return "N\n"
def handle_set(self, addr, parts):
# For documentation on key structure see
# https://github.com/dovecot/core/blob/main/src/lib-storage/mailbox-attribute.h
keyname = parts[1].split("/")
value = parts[2] if len(parts) > 2 else ""
if keyname[0] == "priv" and keyname[2] == self.metadata.DEVICETOKEN_KEY:
self.metadata.add_token_to_addr(addr, value)
return True
elif keyname[0] == "priv" and keyname[2] == "messagenew":
self.notifier.new_message_for_addr(addr, self.metadata)
return True
return False
def main():
socket, config_path = sys.argv[1:]
config = read_config(config_path)
iroh_relay = config.iroh_relay
mail_domain = config.mail_domain
vmail_dir = config.mailboxes_dir
if not vmail_dir.exists():
logging.error("vmail dir does not exist: %r", vmail_dir)
return 1
queue_dir = vmail_dir / "pending_notifications"
queue_dir.mkdir(exist_ok=True)
metadata = Metadata(vmail_dir)
notifier = Notifier(queue_dir)
notifier.start_notification_threads(metadata.remove_token_from_addr)
dictproxy = MetadataDictProxy(
notifier=notifier,
metadata=metadata,
iroh_relay=iroh_relay,
turn_hostname=mail_domain,
)
dictproxy.serve_forever_from_socket(socket)
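# Example of the device-token flow handled above (illustrative transaction id
# and token; TAB separators shown as "<TAB>"):
#
#   Btx00<TAB>user@example.org
#   Stx00<TAB>priv/guid00/devicetoken<TAB>01234
#   Ctx00
#
# stores token "01234" for user@example.org, and a later
#
#   Stx00<TAB>priv/guid00/messagenew
#
# makes handle_set() call notifier.new_message_for_addr(), which queues a push
# notification request for every device token stored for that address.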

View File

@@ -1,63 +0,0 @@
"""
migration code from old sqlite databases into per-maildir "password" files
where mtime reflects and is updated to be the "last-login" time.
"""
import logging
import os
import sqlite3
import sys
from chatmaild.config import read_config
def get_all_rows(path):
assert path.exists()
uri = f"file:{path}?mode=ro"
sqlconn = sqlite3.connect(uri, timeout=60, isolation_level="DEFERRED", uri=True)
cur = sqlconn.cursor()
cur.execute("SELECT * from users")
rows = cur.fetchall()
sqlconn.close()
return rows
def migrate_from_db_to_maildir(config, chunking=10000):
path = config.passdb_path
if not path.exists():
return
all_rows = get_all_rows(path)
# don't transfer special/CI accounts
rows = [row for row in all_rows if row[0][:3] not in ("ci-", "ac_")]
logging.info(f"ignoring {len(all_rows) - len(rows)} CI accounts")
logging.info(f"migrating {len(rows)} sqlite database passwords to user dirs")
for i, row in enumerate(rows):
addr = row[0]
enc_password = row[1]
user = config.get_user(addr)
user.set_password(enc_password)
if len(row) == 3 and row[2]:
timestamp = int(row[2])
user.set_last_login_timestamp(timestamp)
if i > 0 and i % chunking == 0:
logging.info(f"migration-progress: {i} passwords transferred")
logging.info("migration: all passwords migrated")
oldpath = config.passdb_path.with_suffix(config.passdb_path.suffix + ".old")
os.rename(config.passdb_path, oldpath)
for path in config.passdb_path.parent.iterdir():
if path.name.startswith(config.passdb_path.name + "-"):
path.unlink()
logging.info(f"migration: moved database to {oldpath!r}")
if __name__ == "__main__":
config = read_config(sys.argv[1])
logging.basicConfig(level=logging.INFO)
migrate_from_db_to_maildir(config)

View File

@@ -1,51 +0,0 @@
#!/usr/local/lib/chatmaild/venv/bin/python3
"""CGI script for creating new accounts."""
import json
import secrets
import string
from urllib.parse import quote
from chatmaild.config import Config, read_config
CONFIG_PATH = "/usr/local/lib/chatmaild/chatmail.ini"
ALPHANUMERIC = string.ascii_lowercase + string.digits
ALPHANUMERIC_PUNCT = string.ascii_letters + string.digits + string.punctuation
def create_newemail_dict(config: Config):
user = "".join(
secrets.choice(ALPHANUMERIC) for _ in range(config.username_max_length)
)
password = "".join(
secrets.choice(ALPHANUMERIC_PUNCT)
for _ in range(config.password_min_length + 3)
)
return dict(email=f"{user}@{config.mail_domain}", password=f"{password}")
def create_dclogin_url(email, password):
"""Build a dclogin: URL with credentials and self-signed cert acceptance.
Uses ic=3 (AcceptInvalidCertificates) so chatmail clients
can connect to servers with self-signed TLS certificates.
"""
return f"dclogin:{quote(email, safe='@')}?p={quote(password, safe='')}&v=1&ic=3"
def print_new_account():
config = read_config(CONFIG_PATH)
creds = create_newemail_dict(config)
result = dict(email=creds["email"], password=creds["password"])
if config.tls_cert_mode == "self":
result["dclogin_url"] = create_dclogin_url(creds["email"], creds["password"])
print("Content-Type: application/json")
print("")
print(json.dumps(result))
if __name__ == "__main__":
print_new_account()
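# Example CGI response (illustrative values; the local part and password are
# generated randomly with the configured lengths):
#
#   Content-Type: application/json
#
#   {"email": "ab12cd34e@chat.example.org", "password": "..."}
#
# When tls_cert_mode is "self", a "dclogin_url" field is added so clients can
# accept the self-signed certificate.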

View File

@@ -1,171 +0,0 @@
"""
This modules provides notification machinery for transmitting device tokens to
a central notification server which in turn contacts a phone provider's notification server
to trigger Delta Chat apps to retrieve messages and provide instant notifications to users.
The Notifier class arranges the queuing of tokens in separate PriorityQueues
from which NotifyThreads take and transmit them via HTTPS
to the `notifications.delta.chat` service.
The current lack of proper HTTP/2-support in Python leads us
to use multiple threads and connections to the Rust-implemented `notifications.delta.chat`
which itself uses HTTP/2 and thus only a single connection to phone-notification providers.
If a token fails to cause a successful notification
it is moved to a retry-number specific PriorityQueue
which handles all tokens that failed a particular number of times
and which are scheduled for retry using exponential back-off timing.
If a token notification would be scheduled more than DROP_DEADLINE seconds
after its first attempt, it is dropped with a log error.
Note that tokens are opaque to the notification machinery here
and are encrypted foreclosing all ability to distinguish
which device token ultimately goes to which phone-provider notification service,
or to understand the relation of "device tokens" and chatmail addresses.
The meaning and format of tokens is basically a matter of chatmail Core and
the `notification.delta.chat` service.
"""
import logging
import math
import os
import time
from dataclasses import dataclass
from pathlib import Path
from queue import PriorityQueue
from threading import Thread
from uuid import uuid4
import requests
@dataclass
class PersistentQueueItem:
path: Path
addr: str
start_ts: int
token: str
def delete(self):
self.path.unlink(missing_ok=True)
@classmethod
def create(cls, queue_dir, addr, start_ts, token):
queue_id = uuid4().hex
start_ts = int(start_ts)
path = queue_dir.joinpath(queue_id)
tmp_path = path.with_name(path.name + ".tmp")
tmp_path.write_text(f"{addr}\n{start_ts}\n{token}")
os.rename(tmp_path, path)
return cls(path, addr, start_ts, token)
@classmethod
def read_from_path(cls, path):
addr, start_ts, token = path.read_text().split("\n", maxsplit=2)
return cls(path, addr, int(start_ts), token)
def __lt__(self, other):
return self.start_ts < other.start_ts
class Notifier:
URL = "https://notifications.delta.chat/notify"
CONNECTION_TIMEOUT = 60.0 # seconds until http-request is given up
BASE_DELAY = 8.0 # base seconds for exponential back-off delay
DROP_DEADLINE = 5 * 60 * 60 # drop notifications after 5 hours
def __init__(self, queue_dir):
self.queue_dir = queue_dir
max_tries = int(math.log(self.DROP_DEADLINE, self.BASE_DELAY)) + 1
self.retry_queues = [PriorityQueue() for _ in range(max_tries)]
def compute_delay(self, retry_num):
return 0 if retry_num == 0 else pow(self.BASE_DELAY, retry_num)
def new_message_for_addr(self, addr, metadata):
start_ts = int(time.time())
for token in metadata.get_tokens_for_addr(addr):
queue_item = PersistentQueueItem.create(
self.queue_dir, addr, start_ts, token
)
self.queue_for_retry(queue_item)
def requeue_persistent_queue_items(self):
for queue_path in self.queue_dir.iterdir():
if queue_path.name.endswith(".tmp"):
logging.warning(f"removing spurious queue item: {queue_path!r}")
queue_path.unlink()
continue
try:
queue_item = PersistentQueueItem.read_from_path(queue_path)
except ValueError:
logging.warning(f"removing spurious queue item: {queue_path!r}")
queue_path.unlink()
continue
self.queue_for_retry(queue_item)
def queue_for_retry(self, queue_item, retry_num=0):
delay = self.compute_delay(retry_num)
when = int(time.time()) + delay
deadline = queue_item.start_ts + self.DROP_DEADLINE
if retry_num >= len(self.retry_queues) or when > deadline:
queue_item.delete()
logging.error(f"notification exceeded deadline: {queue_item.token!r}")
return
self.retry_queues[retry_num].put((when, queue_item))
def start_notification_threads(self, remove_token_from_addr):
self.requeue_persistent_queue_items()
threads = {}
for retry_num in range(len(self.retry_queues)):
# use 4 threads for first-try tokens and less for subsequent tries
num_threads = 4 if retry_num == 0 else 2
threads[retry_num] = []
for _ in range(num_threads):
thread = NotifyThread(self, retry_num, remove_token_from_addr)
threads[retry_num].append(thread)
thread.start()
return threads
class NotifyThread(Thread):
def __init__(self, notifier, retry_num, remove_token_from_addr):
super().__init__(daemon=True)
self.notifier = notifier
self.retry_num = retry_num
self.remove_token_from_addr = remove_token_from_addr
def stop(self):
self.notifier.retry_queues[self.retry_num].put((None, None))
def run(self):
requests_session = requests.Session()
while self.retry_one(requests_session):
pass
def retry_one(self, requests_session, sleep=time.sleep):
when, queue_item = self.notifier.retry_queues[self.retry_num].get()
if when is None:
return False
wait_time = when - int(time.time())
if wait_time > 0:
sleep(wait_time)
self.perform_request_to_notification_server(requests_session, queue_item)
return True
def perform_request_to_notification_server(self, requests_session, queue_item):
timeout = self.notifier.CONNECTION_TIMEOUT
token = queue_item.token
try:
res = requests_session.post(self.notifier.URL, data=token, timeout=timeout)
except requests.exceptions.RequestException as e:
res = e
else:
if res.status_code in (200, 410):
if res.status_code == 410:
self.remove_token_from_addr(queue_item.addr, token)
queue_item.delete()
return
logging.warning(f"Notification request failed: {res!r}")
self.notifier.queue_for_retry(queue_item, retry_num=self.retry_num + 1)

View File

@@ -1,56 +0,0 @@
From: {from_addr}
To: {to_addr}
Autocrypt-Setup-Message: v1
Subject: Autocrypt Setup Message
Date: Tue, 22 Jan 2019 12:56:29 +0100
Content-type: multipart/mixed; boundary="Y6fyGi9SoGeH8WwRaEdC6bbBcYOedDzrQ"
--Y6fyGi9SoGeH8WwRaEdC6bbBcYOedDzrQ
Content-Type: text/plain
This message contains all information to transfer your Autocrypt
settings along with your secret key securely from your original
device.
To set up your new device for Autocrypt, please follow the
instructions that should be presented by your new device.
You can keep this message and use it as a backup for your secret
key. If you want to do this, you should write down the Setup Code
and store it securely.
--Y6fyGi9SoGeH8WwRaEdC6bbBcYOedDzrQ
Content-Type: application/autocrypt-setup
Content-Disposition: attachment; filename="autocrypt-setup-message.html"
<html><body>
<p>
This is the Autocrypt setup file used to transfer settings and
keys between clients. You can decrypt it using the Setup Code
presented on your old device, and then import the contained key
into your keyring.
</p>
<pre>
-----BEGIN PGP MESSAGE-----
Passphrase-Format: numeric9x4
Passphrase-Begin: 17
jA0EBwMCFAxADoCdzeX/0ukBlqI5+pfpKb751qd/7nLNbkpy3gVcaf1QwRPZYt40
Ynp08UqRQ2g48ZlnzHLSwlTGOPTuv2Jt8ka+pgZ45xzvJSG2gau03xP4VsC271kR
VmCjdb0Y6Rk96mAwfGzrkbaRQ9Z7fIoL866GOv6h9neiVIkp+JYlTV6ISD0ZQJ4Q
I6dOQkB/TWZyVjtiJDOQHdfNWliA6NtqaLq19wlu9L5xXjuNpY95KwR8EJXWe0+o
Y3d2U/KxOAkXKghP2Qg1GtlPVeGC5T4p03TGI6pzKT+kHX6Rrm9wK6sM9aTquMmF
Vok84Jg1DFnwivWC2RILR81rXi7k/+Y6MUbveFgJ9cQduqpxnmD7TjOblYu7M6zp
YGAUxh8DRKlIMn2QsA++DBYQ6ACZvwuY8qTDLkqPDo4WqM313dsMJbyGjDdVE7EM
PESS+RlABETpZXz8g/ycr6DIUNdlbPcmYlsBfHWDOuR2GFFTwmlv5slWS39dJv38
E0eIe1CwdxI801Se7t7dUUS/ZF8wb6GlmxOcqGbF8eko1Z0S64IAm7/h13MRQCxI
geQnHfGYVJ2FOimoCMEKwfa9x++RFTDW0u7spDC2uWvK/1viV8OfRppFhLr/kmKb
18lWXuAz80DAjUDUsVqEq2MvJBJGoCJUEyjuRsLkHYRM5jYk4v50LyyR0Om73nWF
nZBqmqNzdr7Xb9PHHdFhnEc0VvoYbrcM0RVYcEMW3YbmejM891j1d6Iv+/n/qND/
NdebGrfWJMmFLf/iEkzTZ3/v5inW9LpWoRc94ioCjJTaEo8Rib6ARRFaJVIsmNXi
YicFGO98D+zX+a2t9Yz6IpPajVslnOp6ScpmXgts/2XWD7oE+JgxSAqo/dLVsHgP
Ufo=
=pulM
-----END PGP MESSAGE-----
</pre></body></html>
--Y6fyGi9SoGeH8WwRaEdC6bbBcYOedDzrQ--

View File

@@ -1,66 +0,0 @@
From: {from_addr}
To: {to_addr}
Subject: {subject}
Date: Sun, 15 Oct 2023 16:43:21 +0000
Message-ID: <Mr.UVyJWZmkCKM.hGzNc6glBE_@c2.testrun.org>
In-Reply-To: <Mr.MvmCz-GQbi_.6FGRkhDf05c@c2.testrun.org>
References: <Mr.3gckbNy5bch.uK3Hd2Ws6-w@c2.testrun.org>
<Mr.MvmCz-GQbi_.6FGRkhDf05c@c2.testrun.org>
Chat-Version: 1.0
Autocrypt: addr={from_addr}; prefer-encrypt=mutual;
keydata=xjMEZSwWjhYJKwYBBAHaRw8BAQdAQBEhqeJh0GueHB6kF/DUQqYCxARNBVokg/AzT+7LqH
rNFzxiYXJiYXpAYzIudGVzdHJ1bi5vcmc+wosEEBYIADMCGQEFAmUsFo4CGwMECwkIBwYVCAkKCwID
FgIBFiEEFTfUNvVnY3b9F7yHnmme1PfUhX8ACgkQnmme1PfUhX9A4AEAnHWHp49eBCMHK5t66gYPiW
XQuB1mwUjzGfYWB+0RXUoA/0xcQ3FbUNlGKW7Blp6eMFfViv6Mv2d3kNSXACB6nmcMzjgEZSwWjhIK
KwYBBAGXVQEFAQEHQBpY5L2M1XHo0uxf8SX1wNLBp/OVvidoWHQF2Jz+kJsUAwEIB8J4BBgWCAAgBQ
JlLBaOAhsMFiEEFTfUNvVnY3b9F7yHnmme1PfUhX8ACgkQnmme1PfUhX/INgEA37AJaNvruYsJVanP
IXnYw4CKd55UAwl8Zcy+M2diAbkA/0fHHcGV4r78hpbbL1Os52DPOdqYQRauIeJUeG+G6bQO
MIME-Version: 1.0
Content-Type: multipart/encrypted; protocol="application/pgp-encrypted";
boundary="YFrteb74qSXmggbOxZL9dRnhymywAi"
--YFrteb74qSXmggbOxZL9dRnhymywAi
Content-Description: PGP/MIME version identification
Content-Type: application/pgp-encrypted
Version: 1
--YFrteb74qSXmggbOxZL9dRnhymywAi
Content-Description: OpenPGP encrypted message
Content-Disposition: inline; filename="encrypted.asc";
Content-Type: application/octet-stream; name="encrypted.asc"
-----BEGIN PGP MESSAGE-----
wU4DhW3gBZ/VvCYSAQdA8bMs2spwbKdGjVsL1ByPkNrqD7frpB73maeL6I6SzDYg
O5G53tv339RdKq3WRcCtEEvxjHlUx2XNwXzC04BpmfvBTgNfPUyLDzjXnxIBB0Ae
8ymwGvXMCCimHXN0Dg8Ui62KOi03h0UgheoHWovJSCDF4CKre/xtFr3nL7lq/PKI
JsjVNz7/RK9FSXF6WwfONtLCyQGEuVAsB/KXfCBEyfKhaMwGHvhujRidGW5uV1no
lMGl3ODmo29Lgeu2uSE7EpJRZoe6hU6ddmBkqxax61ZtkaFlGFFpdo2K8balNNdz
ZsJ/9mmI9x3oOJ4/l1nhQbUO9ADbs7gJhFdV5Qkp30b5fCI7bU+aoe1ccBbLe/WM
YUty1PqcuQT7XjA+XmYuL261tvW8pBetT+i33/E2d8PzzYt2IuK9qeevyS+yxdwA
kfwejFWzzsUlJaDxs1x4XOxkMgSj+jo+g12dFOb7fyClsAnq23iDb8AuaT/BScAI
+lO+gher69+6LmM7VGHLG5k762J1jTaQCaKt1s8TAWV99Eo4491vL6fyvk3l/Cfg
RXSwiWFgj19Pn0Rq7CD9v22UE2vdUMBTcV4aw79mClk1YQ23jbF0y5DCjPdJ62Zo
tskBgFt3NoWV80jZ76zIBLrrjLwCCll8JjJtFwSkt2GX5RFBsVa4A8IDht9RtEk7
rrHgbSZQfkauEi/mH3/6CDZoLqSHudUZ7d4MaJwun1TkFYGe2ORwGJd4OBj3oGJp
H8YBwCpk///L/fKjX0Gg3M8nrpM4wrRFhPKidAgO/kcm25X4+ZHlVkWBTCt5RWKI
fHh6oLDZCqCfcgMkE1KKmwfIHaUkhq5BPRigwy6i5dh1DM4+1UCLh3dxzVbqE9b9
61NB19nXdRtDA2sOUnj9ve6m/wEPyCb6/zBQZqvCBYb1/AjdXpUrFT+DbpfyxaXN
XfhDVb5mNqNM/IVj0V5fvTc6vOfYbzQtPm10H+FdWWfb+rJRfyC3MA2w2IqstFe3
w3bu2iE6CQvSqRvge+ZqLKt/NqYwOURiUmpuklbl3kPJ97+mfKWoiqk8Iz1VY+bb
NMUC7aoGv+jcoj+WS6PYO8N6BeRVUUB3ZJSf8nzjgxm1/BcM+UD3BPrlhT11ODRs
baifGbprMWwt3dhb8cQgRT8GPdpO1OsDkzL6iikMjLHWWiA99GV6ruiHsIPw6boW
A6/uSOskbDHOROotKmddGTBd0iiHXAoQsJFt1ZjUkt6EHrgWs+GAvrvKpXs1mrz8
uj3GwEFrHS+Xuf2UDgpszYT3hI2cL/kUtGakVR7m7vVMZqXBUbZdGAEb1PZNPwsI
E4aMK02+EVB+tSN4Fzj99N2YD0inVYt+oPjr2tHhUS6aSGBNS/48Ki47DOg4Sxkn
lkOWnEbCD+XTnbDd
=agR5
-----END PGP MESSAGE-----
--YFrteb74qSXmggbOxZL9dRnhymywAi--

View File

@@ -1,25 +0,0 @@
Subject: =?utf-8?q?Message_from_foobar=40c2=2Etestrun=2Eorg?=
Chat-Disposition-Notification-To: foobar@c2.testrun.org
Chat-User-Avatar: 0
From: <{from_addr}>
To: <{to_addr}>
Date: Sun, 15 Oct 2023 16:41:44 +0000
Message-ID: <Mr.3gckbNy5bch.uK3Hd2Ws6-w@c2.testrun.org>
References: <Mr.3gckbNy5bch.uK3Hd2Ws6-w@c2.testrun.org>
Chat-Version: 1.0
Autocrypt: addr={from_addr}; prefer-encrypt=mutual;
keydata=xjMEZSrw3hYJKwYBBAHaRw8BAQdAiEKNQFU28c6qsx4vo/JHdt73RXdjMOmByf/XsGiJ7m
nNFzxmb29iYXJAYzIudGVzdHJ1bi5vcmc+wosEEBYIADMCGQEFAmUq8N4CGwMECwkIBwYVCAkKCwID
FgIBFiEEGil0OvTIa6RngmCLUYNnEa9leJAACgkQUYNnEa9leJCX3gEAhm0MehE5byBBU1avPczr/I
HjNLht7Qf6++mAhlJmtDcA/0C8VYJhsUpmiDjuZaMDWNv4FO2BJG6LH7gSm6n7ClMJzjgEZSrw3hIK
KwYBBAGXVQEFAQEHQAxGG/QW0owCfMp1A+vXEMwgzWcBpNFr58kX2eXuPpM6AwEIB8J4BBgWCAAgBQ
JlKvDeAhsMFiEEGil0OvTIa6RngmCLUYNnEa9leJAACgkQUYNnEa9leJDg1gEAwLf8KDoAAKyYgjyI
vYvO9VEgBni1C4Xx1VjcaEmlDK8BALoFuUCK+enw76TtDcAUKhlhUiM6SDRExkS4Nskp/BcK
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8; format=flowed; delsp=no
-----BEGIN PGP MESSAGE-----
Hi!
-----END PGP MESSAGE-----

View File

@@ -1,44 +0,0 @@
From: {from_addr}
To: {to_addr}
Subject: ...
Date: Sun, 15 Oct 2023 16:43:21 +0000
Message-ID: <Mr.UVyJWZmkCKM.hGzNc6glBE_@c2.testrun.org>
In-Reply-To: <Mr.MvmCz-GQbi_.6FGRkhDf05c@c2.testrun.org>
References: <Mr.3gckbNy5bch.uK3Hd2Ws6-w@c2.testrun.org>
<Mr.MvmCz-GQbi_.6FGRkhDf05c@c2.testrun.org>
Chat-Version: 1.0
Autocrypt: addr={from_addr}; prefer-encrypt=mutual;
keydata=xjMEZSwWjhYJKwYBBAHaRw8BAQdAQBEhqeJh0GueHB6kF/DUQqYCxARNBVokg/AzT+7LqH
rNFzxiYXJiYXpAYzIudGVzdHJ1bi5vcmc+wosEEBYIADMCGQEFAmUsFo4CGwMECwkIBwYVCAkKCwID
FgIBFiEEFTfUNvVnY3b9F7yHnmme1PfUhX8ACgkQnmme1PfUhX9A4AEAnHWHp49eBCMHK5t66gYPiW
XQuB1mwUjzGfYWB+0RXUoA/0xcQ3FbUNlGKW7Blp6eMFfViv6Mv2d3kNSXACB6nmcMzjgEZSwWjhIK
KwYBBAGXVQEFAQEHQBpY5L2M1XHo0uxf8SX1wNLBp/OVvidoWHQF2Jz+kJsUAwEIB8J4BBgWCAAgBQ
JlLBaOAhsMFiEEFTfUNvVnY3b9F7yHnmme1PfUhX8ACgkQnmme1PfUhX/INgEA37AJaNvruYsJVanP
IXnYw4CKd55UAwl8Zcy+M2diAbkA/0fHHcGV4r78hpbbL1Os52DPOdqYQRauIeJUeG+G6bQO
MIME-Version: 1.0
Content-Type: multipart/encrypted; protocol="application/pgp-encrypted";
boundary="YFrteb74qSXmggbOxZL9dRnhymywAi"
--YFrteb74qSXmggbOxZL9dRnhymywAi
Content-Description: PGP/MIME version identification
Content-Type: application/pgp-encrypted
Version: 1
--YFrteb74qSXmggbOxZL9dRnhymywAi
Content-Description: OpenPGP encrypted message
Content-Disposition: inline; filename="encrypted.asc";
Content-Type: application/octet-stream; name="encrypted.asc"
-----BEGIN PGP MESSAGE-----
yxJiAAAAAABIZWxsbyB3b3JsZCE=
=1I/B
-----END PGP MESSAGE-----
--YFrteb74qSXmggbOxZL9dRnhymywAi--

View File

@@ -1,46 +0,0 @@
Date: Fri, 8 Jul 1994 09:21:47 -0400
From: Mail Delivery Subsystem <MAILER-DAEMON@example.org>
Subject: Returned mail: User unknown
To: <owner-ups-mib@CS.UTK.EDU>
Auto-Submitted: auto-replied
MIME-Version: 1.0
Content-Type: multipart/report; report-type=delivery-status;
boundary="JAA13167.773673707/CS.UTK.EDU"
--JAA13167.773673707/CS.UTK.EDU
content-type: text/plain; charset=us-ascii
----- The following addresses had delivery problems -----
<arathib@vnet.ibm.com> (unrecoverable error)
<wsnell@sdcc13.ucsd.edu> (unrecoverable error)
--JAA13167.773673707/CS.UTK.EDU
content-type: message/delivery-status
Reporting-MTA: dns; cs.utk.edu
Original-Recipient: rfc822;arathib@vnet.ibm.com
Final-Recipient: rfc822;arathib@vnet.ibm.com
Action: failed
Status: 5.0.0 (permanent failure)
Diagnostic-Code: smtp;
550 'arathib@vnet.IBM.COM' is not a registered gateway user
Remote-MTA: dns; vnet.ibm.com
Original-Recipient: rfc822;johnh@hpnjld.njd.hp.com
Final-Recipient: rfc822;johnh@hpnjld.njd.hp.com
Action: delayed
Status: 4.0.0 (hpnjld.njd.jp.com: host name lookup failure)
Original-Recipient: rfc822;wsnell@sdcc13.ucsd.edu
Final-Recipient: rfc822;wsnell@sdcc13.ucsd.edu
Action: failed
Status: 5.0.0
Diagnostic-Code: smtp; 550 user unknown
Remote-MTA: dns; sdcc13.ucsd.edu
--JAA13167.773673707/CS.UTK.EDU
content-type: message/rfc822
[original message goes here]
--JAA13167.773673707/CS.UTK.EDU--

View File

@@ -1,33 +0,0 @@
Subject: Message opened
From: <{from_addr}>
To: <{to_addr}>
Date: Sun, 15 Oct 2023 16:43:25 +0000
Message-ID: <Mr.78MWtlV7RAi.goCFzBhCYfy@c2.testrun.org>
Auto-Submitted: auto-replied
Chat-Version: 1.0
MIME-Version: 1.0
Content-Type: multipart/report; report-type=disposition-notification;
boundary="Gl92xgZjOShJ5PGHntqYkoo2OK2Dvi"
--Gl92xgZjOShJ5PGHntqYkoo2OK2Dvi
Content-Type: text/plain; charset=utf-8; format=flowed; delsp=no
The "Hi!" message you sent was displayed on the screen of the recipient.
This is no guarantee the content was read.
--Gl92xgZjOShJ5PGHntqYkoo2OK2Dvi
Content-Type: message/disposition-notification
Reporting-UA: Delta Chat 1.124.1
Original-Recipient: rfc822;barbaz@c2.testrun.org
Final-Recipient: rfc822;barbaz@c2.testrun.org
Original-Message-ID: <Mr.MvmCz-GQbi_.6FGRkhDf05c@c2.testrun.org>
Disposition: manual-action/MDN-sent-automatically; displayed
--Gl92xgZjOShJ5PGHntqYkoo2OK2Dvi--

View File

@@ -1,23 +0,0 @@
Subject: =?utf-8?q?Message_from_foobar=40c2=2Etestrun=2Eorg?=
Chat-Disposition-Notification-To: foobar@c2.testrun.org
Chat-User-Avatar: 0
From: <{from_addr}>
To: <{to_addr}>
Date: Sun, 15 Oct 2023 16:41:44 +0000
Message-ID: <Mr.3gckbNy5bch.uK3Hd2Ws6-w@c2.testrun.org>
References: <Mr.3gckbNy5bch.uK3Hd2Ws6-w@c2.testrun.org>
Chat-Version: 1.0
Autocrypt: addr={from_addr}; prefer-encrypt=mutual;
keydata=xjMEZSrw3hYJKwYBBAHaRw8BAQdAiEKNQFU28c6qsx4vo/JHdt73RXdjMOmByf/XsGiJ7m
nNFzxmb29iYXJAYzIudGVzdHJ1bi5vcmc+wosEEBYIADMCGQEFAmUq8N4CGwMECwkIBwYVCAkKCwID
FgIBFiEEGil0OvTIa6RngmCLUYNnEa9leJAACgkQUYNnEa9leJCX3gEAhm0MehE5byBBU1avPczr/I
HjNLht7Qf6++mAhlJmtDcA/0C8VYJhsUpmiDjuZaMDWNv4FO2BJG6LH7gSm6n7ClMJzjgEZSrw3hIK
KwYBBAGXVQEFAQEHQAxGG/QW0owCfMp1A+vXEMwgzWcBpNFr58kX2eXuPpM6AwEIB8J4BBgWCAAgBQ
JlKvDeAhsMFiEEGil0OvTIa6RngmCLUYNnEa9leJAACgkQUYNnEa9leJDg1gEAwLf8KDoAAKyYgjyI
vYvO9VEgBni1C4Xx1VjcaEmlDK8BALoFuUCK+enw76TtDcAUKhlhUiM6SDRExkS4Nskp/BcK
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8; format=flowed; delsp=no
Hi!

View File

@@ -1,21 +0,0 @@
Subject: Message from {from_addr}
From: <{from_addr}>
To: <{to_addr}>
Date: Sun, 15 Oct 2023 16:43:25 +0000
Message-ID: <Mr.78MWtlV7RAi.goCFzBhCYfy@c2.testrun.org>
Chat-Version: 1.0
Secure-Join: vc-request
Secure-Join-Invitenumber: RANDOM-TOKEN
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="Gl92xgZjOShJ5PGHntqYkoo2OK2Dvi"
--Gl92xgZjOShJ5PGHntqYkoo2OK2Dvi
Content-Type: text/plain; charset=utf-8
Buy viagra!
--Gl92xgZjOShJ5PGHntqYkoo2OK2Dvi--

View File

@@ -1,21 +0,0 @@
Subject: Message from {from_addr}
From: <{from_addr}>
To: <{to_addr}>
Date: Sun, 15 Oct 2023 16:43:25 +0000
Message-ID: <Mr.78MWtlV7RAi.goCFzBhCYfy@c2.testrun.org>
Chat-Version: 1.0
Secure-Join: vc-request
Secure-Join-Invitenumber: RANDOM-TOKEN
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="Gl92xgZjOShJ5PGHntqYkoo2OK2Dvi"
--Gl92xgZjOShJ5PGHntqYkoo2OK2Dvi
Content-Type: text/plain; charset=utf-8
Secure-Join: vc-request
--Gl92xgZjOShJ5PGHntqYkoo2OK2Dvi--

View File

@@ -1,97 +0,0 @@
import importlib.resources
import itertools
import os
import random
from email import policy
from email.parser import BytesParser
from pathlib import Path
import pytest
from chatmaild.config import read_config, write_initial_config
@pytest.fixture
def make_config(tmp_path):
inipath = tmp_path.joinpath("chatmail.ini")
def make_conf(mail_domain, settings=None):
basedir = tmp_path.joinpath(f"vmail/{mail_domain}")
basedir.mkdir(parents=True, exist_ok=True)
overrides = settings.copy() if settings else {}
overrides["mailboxes_dir"] = str(basedir)
write_initial_config(inipath, mail_domain, overrides=overrides)
return read_config(inipath)
return make_conf
@pytest.fixture
def example_config(make_config):
return make_config("chat.example.org")
@pytest.fixture
def maildomain(example_config):
return example_config.mail_domain
@pytest.fixture
def testaddr(maildomain):
return f"user.name@{maildomain}"
@pytest.fixture
def gencreds(maildomain):
count = itertools.count()
next(count)
def gen(domain=None):
domain = domain if domain else maildomain
while 1:
num = next(count)
alphanumeric = "abcdefghijklmnopqrstuvwxyz1234567890"
user = "".join(random.choices(alphanumeric, k=10))
user = f"ac{num}_{user}"[:9]
password = "".join(random.choices(alphanumeric, k=12))
yield f"{user}@{domain}", f"{password}"
return lambda domain=None: next(gen(domain))
@pytest.fixture
def maildata(request):
try:
datadir = importlib.resources.files(__package__).joinpath("mail-data")
except TypeError:
# in python3.9 or lower, the above doesn't work, so we get datadir this way:
datadir = Path(os.getcwd()).joinpath("chatmaild/src/chatmaild/tests/mail-data")
assert datadir.exists(), datadir
def maildata(name, from_addr, to_addr, subject="[...]"):
# Using `.read_bytes().decode()` instead of `.read_text()` to preserve newlines.
data = datadir.joinpath(name).read_bytes().decode()
text = data.format(from_addr=from_addr, to_addr=to_addr, subject=subject)
return BytesParser(policy=policy.SMTP).parsebytes(text.encode())
return maildata
@pytest.fixture
def mockout():
class MockOut:
captured_red = []
captured_green = []
captured_plain = []
def red(self, msg):
self.captured_red.append(msg)
def green(self, msg):
self.captured_green.append(msg)
def __call__(self, msg):
self.captured_plain.append(msg)
return MockOut()

View File

@@ -1,123 +0,0 @@
import pytest
from chatmaild.config import read_config
def test_read_config_basic(example_config):
assert example_config.mail_domain == "chat.example.org"
assert not example_config.privacy_supervisor and not example_config.privacy_mail
assert not example_config.privacy_pdo and not example_config.privacy_postal
inipath = example_config._inipath
inipath.write_text(inipath.read_text().replace("60", "37"))
example_config = read_config(inipath)
assert example_config.max_user_send_per_minute == 37
assert example_config.mail_domain == "chat.example.org"
def test_read_config_basic_using_defaults(tmp_path, maildomain):
inipath = tmp_path.joinpath("chatmail.ini")
inipath.write_text(f"[params]\nmail_domain = {maildomain}")
example_config = read_config(inipath)
assert example_config.max_user_send_per_minute == 60
assert example_config.filtermail_smtp_port_incoming == 10081
def test_read_config_testrun(make_config):
config = make_config("something.testrun.org")
assert config.mail_domain == "something.testrun.org"
assert len(config.privacy_postal.split("\n")) > 1
assert len(config.privacy_supervisor.split("\n")) > 1
assert len(config.privacy_pdo.split("\n")) > 1
assert config.privacy_mail == "privacy@testrun.org"
assert config.filtermail_smtp_port == 10080
assert config.postfix_reinject_port == 10025
assert config.max_user_send_per_minute == 60
assert config.max_mailbox_size == "500M"
assert config.delete_mails_after == "20"
assert config.delete_large_after == "7"
assert config.username_min_length == 9
assert config.username_max_length == 9
assert config.password_min_length == 9
assert "privacy@testrun.org" in config.passthrough_recipients
assert config.passthrough_senders == []
def test_config_userstate_paths(make_config, tmp_path):
config = make_config("something.testrun.org")
mailboxes_dir = config.mailboxes_dir
passdb_path = config.passdb_path
assert mailboxes_dir.name == "something.testrun.org"
assert str(passdb_path) == "/home/vmail/passdb.sqlite"
assert config.mail_domain == "something.testrun.org"
path = config.get_user("user1@something.testrun.org").maildir
assert not path.exists()
assert path == mailboxes_dir.joinpath("user1@something.testrun.org")
with pytest.raises(ValueError):
config.get_user("")
with pytest.raises(ValueError):
config.get_user(None)
with pytest.raises(ValueError):
config.get_user("../some@something.testrun.org").maildir
with pytest.raises(ValueError):
config.get_user("..").maildir
with pytest.raises(ValueError):
config.get_user(".")
def test_config_max_message_size(make_config, tmp_path):
config = make_config("something.testrun.org", dict(max_message_size="10000"))
assert config.max_message_size == 10000
def test_config_tls_default_acme(make_config):
config = make_config("chat.example.org")
assert config.tls_cert_mode == "acme"
assert config.tls_cert_path == "/var/lib/acme/live/chat.example.org/fullchain"
assert config.tls_key_path == "/var/lib/acme/live/chat.example.org/privkey"
def test_config_tls_self(make_config):
config = make_config("_test.example.org")
assert config.tls_cert_mode == "self"
assert config.tls_cert_path == "/etc/ssl/certs/mailserver.pem"
assert config.tls_key_path == "/etc/ssl/private/mailserver.key"
def test_config_tls_external(make_config):
config = make_config(
"chat.example.org",
{
"tls_external_cert_and_key": "/custom/fullchain.pem /custom/privkey.pem",
},
)
assert config.tls_cert_mode == "external"
assert config.tls_cert_path == "/custom/fullchain.pem"
assert config.tls_key_path == "/custom/privkey.pem"
def test_config_tls_external_overrides_underscore(make_config):
config = make_config(
"_test.example.org",
{
"tls_external_cert_and_key": "/certs/fullchain.pem /certs/privkey.pem",
},
)
assert config.tls_cert_mode == "external"
assert config.tls_cert_path == "/certs/fullchain.pem"
assert config.tls_key_path == "/certs/privkey.pem"
def test_config_tls_external_bad_format(make_config):
with pytest.raises(ValueError, match="two space-separated"):
make_config(
"chat.example.org",
{
"tls_external_cert_and_key": "/only/one/path.pem",
},
)

View File

@@ -1,64 +0,0 @@
import time
from chatmaild.doveauth import AuthDictProxy
from chatmaild.expire import main as main_expire
def test_login_timestamps(example_config):
testaddr = "someuser@chat.example.org"
user = example_config.get_user(testaddr)
    # password file needs to be set because its mtime tracks the last-login time
user.set_password("1l2k3j1l2k3j123")
for i in range(10):
user.set_last_login_timestamp(86400 * 4 + i)
assert user.get_last_login_timestamp() == 86400 * 4
def test_delete_inactive_users(example_config):
new = time.time()
old = new - (example_config.delete_inactive_users_after * 86400) - 1
dictproxy = AuthDictProxy(example_config)
def create_user(addr, last_login):
dictproxy.lookup_passdb(addr, "q9mr3faue")
user = example_config.get_user(addr)
user.maildir.joinpath("cur").mkdir()
user.maildir.joinpath("cur", "something").mkdir()
user.set_last_login_timestamp(timestamp=last_login)
# create some stale and some new accounts
to_remove = []
for i in range(150):
addr = f"oldold{i:03}@chat.example.org"
create_user(addr, last_login=old)
to_remove.append(addr)
remain = []
for i in range(5):
addr = f"newnew{i:03}@chat.example.org"
create_user(addr, last_login=new)
remain.append(addr)
# check pre and post-conditions for delete_inactive_users()
for addr in to_remove:
assert example_config.get_user(addr).maildir.exists()
main_expire(
args=[
"--remove",
str(example_config._inipath),
]
)
for p in example_config.mailboxes_dir.iterdir():
assert not p.name.startswith("old")
for addr in to_remove:
assert not example_config.get_user(addr).maildir.exists()
for addr in remain:
userdir = example_config.get_user(addr).maildir
assert userdir.exists()
assert userdir.joinpath("password").read_text()

View File

@@ -1,204 +0,0 @@
import io
import json
import queue
import threading
import traceback
import pytest
import chatmaild.doveauth
from chatmaild.doveauth import (
AuthDictProxy,
is_allowed_to_create,
)
from chatmaild.newemail import create_newemail_dict
@pytest.fixture
def dictproxy(example_config):
return AuthDictProxy(config=example_config)
def test_basic(dictproxy, gencreds):
addr, password = gencreds()
dictproxy.lookup_passdb(addr, password)
data = dictproxy.lookup_userdb(addr)
assert data
data2 = dictproxy.lookup_passdb(addr, password)
assert data == data2
def test_iterate_addresses(dictproxy):
addresses = []
for i in range(10):
addresses.append(f"asdf1234{i}@chat.example.org")
dictproxy.lookup_passdb(addresses[-1], "q9mr3faue")
res = dictproxy.iter_userdb()
assert set(res) == set(addresses)
def test_invalid_username_length(example_config):
config = example_config
config.username_min_length = 6
config.username_max_length = 10
password = create_newemail_dict(config)["password"]
assert not is_allowed_to_create(config, f"a1234@{config.mail_domain}", password)
assert is_allowed_to_create(config, f"012345@{config.mail_domain}", password)
assert is_allowed_to_create(config, f"0123456@{config.mail_domain}", password)
assert is_allowed_to_create(config, f"0123456789@{config.mail_domain}", password)
assert not is_allowed_to_create(
config, f"0123456789x@{config.mail_domain}", password
)
def test_dont_overwrite_password_on_wrong_login(dictproxy):
"""Test that logging in with a different password doesn't create a new user"""
res = dictproxy.lookup_passdb(
"newuser12@chat.example.org", "kajdlkajsldk12l3kj1983"
)
assert res["password"]
res2 = dictproxy.lookup_passdb("newuser12@chat.example.org", "kajdslqwe")
# this function always returns a password hash, which is actually compared by dovecot.
assert res["password"] == res2["password"]
def test_nocreate_file(monkeypatch, tmpdir, dictproxy):
p = tmpdir.join("nocreate")
p.write("")
monkeypatch.setattr(chatmaild.doveauth, "NOCREATE_FILE", str(p))
dictproxy.lookup_passdb("newuser12@chat.example.org", "zequ0Aimuchoodaechik")
assert not dictproxy.lookup_userdb("newuser12@chat.example.org")
def test_handle_dovecot_request(dictproxy):
transactions = {}
# Test that password can contain ", ', \ and /
msg = (
'Lshared/passdb/laksjdlaksjdlak\\\\sjdlk\\"12j\\\'3l1/k2j3123"'
"some42123@chat.example.org\tsome42123@chat.example.org"
)
res = dictproxy.handle_dovecot_request(msg, transactions)
assert res
assert res[0] == "O" and res.endswith("\n")
userdata = json.loads(res[1:].strip())
assert userdata["home"].endswith("chat.example.org/some42123@chat.example.org")
assert userdata["uid"] == userdata["gid"] == "vmail"
assert userdata["password"].startswith("{SHA512-CRYPT}")
def test_handle_dovecot_protocol_hello_is_skipped(example_config, caplog):
dictproxy = AuthDictProxy(config=example_config)
rfile = io.BytesIO(b"H3\t2\t0\t\tauth\n")
wfile = io.BytesIO()
dictproxy.loop_forever(rfile, wfile)
assert wfile.getvalue() == b""
assert not caplog.messages
def test_handle_dovecot_protocol_user_not_exists(example_config):
dictproxy = AuthDictProxy(config=example_config)
rfile = io.BytesIO(
b"H3\t2\t0\t\tauth\nLshared/userdb/foobar@chat.example.org\tfoobar@chat.example.org\n"
)
wfile = io.BytesIO()
dictproxy.loop_forever(rfile, wfile)
assert wfile.getvalue() == b"N\n"
def test_handle_dovecot_protocol_iterate(gencreds, example_config):
dictproxy = AuthDictProxy(config=example_config)
dictproxy.lookup_passdb("asdf00000@chat.example.org", "q9mr3faue")
dictproxy.lookup_passdb("asdf11111@chat.example.org", "q9mr3faue")
rfile = io.BytesIO(b"H3\t2\t0\t\tauth\nI0\t0\tshared/userdb/")
wfile = io.BytesIO()
dictproxy.loop_forever(rfile, wfile)
lines = wfile.getvalue().decode("ascii").split("\n")
assert "Oshared/userdb/asdf00000@chat.example.org\t" in lines
assert "Oshared/userdb/asdf11111@chat.example.org\t" in lines
assert not lines[2]
def test_invalid_localpart_characters(make_config):
"""Test that is_allowed_to_create rejects localparts with invalid characters."""
config = make_config("chat.example.org", {"username_min_length": "3"})
password = "zequ0Aimuchoodaechik"
domain = config.mail_domain
# valid localparts
assert is_allowed_to_create(config, f"abc123@{domain}", password)
assert is_allowed_to_create(config, f"a.b-c_d@{domain}", password)
# uppercase rejected
assert not is_allowed_to_create(config, f"Abc123@{domain}", password)
assert not is_allowed_to_create(config, f"ABCDEFG@{domain}", password)
# spaces and special chars rejected
assert not is_allowed_to_create(config, f"a b cde@{domain}", password)
assert not is_allowed_to_create(config, f"abc+def@{domain}", password)
assert not is_allowed_to_create(config, f"abc!def@{domain}", password)
assert not is_allowed_to_create(config, f"ab@cdef@{domain}", password)
assert not is_allowed_to_create(config, f"abc/def@{domain}", password)
assert not is_allowed_to_create(config, f"abc\\def@{domain}", password)
def test_concurrent_creation_same_account(dictproxy):
"""Test that concurrent creation of the same account doesn't corrupt password."""
addr = "racetest1@chat.example.org"
password = "zequ0Aimuchoodaechik"
num_threads = 10
results = queue.Queue()
def create():
try:
res = dictproxy.lookup_passdb(addr, password)
results.put(("ok", res))
except Exception:
results.put(("err", traceback.format_exc()))
threads = [threading.Thread(target=create, daemon=True) for _ in range(num_threads)]
for t in threads:
t.start()
for t in threads:
t.join(timeout=10)
passwords_seen = set()
for _ in range(num_threads):
status, res = results.get()
if status == "err":
pytest.fail(f"concurrent creation failed\n{res}")
passwords_seen.add(res["password"])
# all threads must see the same password hash
assert len(passwords_seen) == 1
def test_50_concurrent_lookups_different_accounts(gencreds, dictproxy):
num_threads = 50
req_per_thread = 5
results = queue.Queue()
def lookup():
for i in range(req_per_thread):
addr, password = gencreds()
try:
dictproxy.lookup_passdb(addr, password)
except Exception:
results.put(traceback.format_exc())
else:
results.put(None)
threads = []
for i in range(num_threads):
thread = threading.Thread(target=lookup, daemon=True)
threads.append(thread)
print(f"created {num_threads} threads, starting them and waiting for results")
for thread in threads:
thread.start()
for i in range(num_threads * req_per_thread):
res = results.get()
if res is not None:
pytest.fail(f"concurrent lookup failed\n{res}")

View File

@@ -1,198 +0,0 @@
import os
import random
from datetime import datetime
from fnmatch import fnmatch
from pathlib import Path
import pytest
from chatmaild.expire import (
FileEntry,
MailboxStat,
get_file_entry,
iter_mailboxes,
os_listdir_if_exists,
)
from chatmaild.expire import main as expiry_main
from chatmaild.fsreport import main as report_main
def fill_mbox(folderdir):
password = folderdir.joinpath("password")
password.write_text("xxx")
folderdir.joinpath("maildirsize").write_text("xxx")
garbagedir = folderdir.joinpath("garbagedir")
garbagedir.mkdir()
garbagedir.joinpath("bimbum").write_text("hello")
create_new_messages(folderdir, ["cur/msg1"], size=500)
create_new_messages(folderdir, ["new/msg2"], size=600)
def create_new_messages(basedir, relpaths, size=1000, days=0):
now = datetime.utcnow().timestamp()
for relpath in relpaths:
msg_path = Path(basedir).joinpath(relpath)
msg_path.parent.mkdir(parents=True, exist_ok=True)
msg_path.write_text("x" * size)
# accessed now, modified N days ago
os.utime(msg_path, (now, now - days * 86400))
@pytest.fixture
def mbox1(example_config):
mboxdir = example_config.mailboxes_dir.joinpath("mailbox1@example.org")
mboxdir.mkdir()
fill_mbox(mboxdir)
return MailboxStat(mboxdir)
def test_deltachat_folder(example_config):
"""Test old setups that might have a .DeltaChat folder where messages also need to get removed."""
mboxdir = example_config.mailboxes_dir.joinpath("mailbox1@example.org")
mboxdir.mkdir()
mbox2dir = mboxdir.joinpath(".DeltaChat")
mbox2dir.mkdir()
fill_mbox(mbox2dir)
mb = MailboxStat(mboxdir)
assert len(mb.messages) == 2
def test_fileentry_ordering(tmp_path):
    entries = [FileEntry(f"x{i}", size=i + 10, mtime=1000 - i) for i in range(10)]
    expected = list(entries)
    random.shuffle(entries)
    entries.sort(key=lambda x: x.size)
    assert entries == expected
def test_no_mailboxes(tmp_path, capsys):
assert [] == list(iter_mailboxes(str(tmp_path.joinpath("notexists")), maxnum=10))
out, err = capsys.readouterr()
assert "no mailboxes" in err
def test_stats_mailbox(mbox1):
password = Path(mbox1.basedir).joinpath("password")
assert mbox1.last_login == password.stat().st_mtime
assert len(mbox1.messages) == 2
msgs = list(sorted(mbox1.messages, key=lambda x: x.size))
assert len(msgs) == 2
assert msgs[0].size == 500 # cur
assert msgs[1].size == 600 # new
create_new_messages(mbox1.basedir, ["large-extra"], size=1000)
create_new_messages(mbox1.basedir, ["index-something"], size=3)
mbox2 = MailboxStat(mbox1.basedir)
assert len(mbox2.extrafiles) == 5
assert mbox2.extrafiles[0].size == 1000
# cope well with mailbox dirs that have no password (for whatever reason)
Path(mbox1.basedir).joinpath("password").unlink()
mbox3 = MailboxStat(mbox1.basedir)
assert mbox3.last_login is None
def test_report_no_mailboxes(example_config):
args = (str(example_config._inipath),)
report_main(args)
def test_report(mbox1, example_config):
args = (str(example_config._inipath),)
report_main(args)
args = list(args) + "--days 1".split()
report_main(args)
args = list(args) + "--min-login-age 1".split()
report_main(args)
args = list(args) + "--mdir cur".split()
report_main(args)
def test_report_mdir_filters_by_path(mbox1, example_config):
"""Test that Report with mdir='cur' only counts messages in cur/ subdirectory."""
from chatmaild.fsreport import Report
now = datetime.utcnow().timestamp()
# Set password mtime to old enough so min_login_age check passes
password = Path(mbox1.basedir).joinpath("password")
old_time = now - 86400 * 10 # 10 days ago
os.utime(password, (old_time, old_time))
# Reload mailbox with updated mtime
from chatmaild.expire import MailboxStat
mbox = MailboxStat(mbox1.basedir)
# Report without mdir — should count all messages
rep_all = Report(now=now, min_login_age=1, mdir=None)
rep_all.process_mailbox_stat(mbox)
total_all = rep_all.message_buckets[0]
# Report with mdir='cur' — should only count cur/ messages
rep_cur = Report(now=now, min_login_age=1, mdir="cur")
rep_cur.process_mailbox_stat(mbox)
total_cur = rep_cur.message_buckets[0]
# Report with mdir='new' — should only count new/ messages
rep_new = Report(now=now, min_login_age=1, mdir="new")
rep_new.process_mailbox_stat(mbox)
total_new = rep_new.message_buckets[0]
# cur has 500-byte msg, new has 600-byte msg (from fill_mbox)
assert total_cur == 500
assert total_new == 600
assert total_all == 500 + 600
def test_expiry_cli_basic(example_config, mbox1):
args = (str(example_config._inipath),)
expiry_main(args)
def test_expiry_cli_old_files(capsys, example_config, mbox1):
    relpaths_old = ["cur/msg_old1", "cur/msg_old2"]
cutoff_days = int(example_config.delete_mails_after) + 1
create_new_messages(mbox1.basedir, relpaths_old, size=1000, days=cutoff_days)
relpaths_large = ["cur/msg_old_large1", "new/msg_old_large2"]
cutoff_days = int(example_config.delete_large_after) + 1
create_new_messages(
mbox1.basedir, relpaths_large, size=1000 * 300, days=cutoff_days
)
create_new_messages(mbox1.basedir, ["cur/shouldstay"], size=1000 * 300, days=1)
args = str(example_config._inipath), "--remove", "-v"
expiry_main(args)
out, err = capsys.readouterr()
allpaths = relpaths_old + relpaths_large + ["maildirsize"]
for path in allpaths:
for line in err.split("\n"):
if fnmatch(line, f"removing*{path}"):
break
else:
if path != "new/msg_old_large2":
pytest.fail(f"failed to remove {path}\n{err}")
assert "shouldstay" not in err
def test_get_file_entry(tmp_path):
assert get_file_entry(str(tmp_path.joinpath("123123"))) is None
p = tmp_path.joinpath("x")
p.write_text("hello")
entry = get_file_entry(str(p))
assert entry.size == 5
assert entry.mtime
def test_os_listdir_if_exists(tmp_path):
tmp_path.joinpath("x").write_text("hello")
assert len(os_listdir_if_exists(str(tmp_path))) == 1
assert len(os_listdir_if_exists(str(tmp_path.joinpath("123123")))) == 0

View File

@@ -1,39 +0,0 @@
import threading
from chatmaild.filedict import FileDict, write_bytes_atomic
def test_basic(tmp_path):
fdict = FileDict(tmp_path.joinpath("metadata"))
assert fdict.read() == {}
with fdict.modify() as d:
d["devicetoken"] = [1, 2, 3]
d["456"] = 4.2
new = fdict.read()
assert new["devicetoken"] == [1, 2, 3]
assert new["456"] == 4.2
def test_bad_marshal_file(tmp_path, caplog):
fdict1 = FileDict(tmp_path.joinpath("metadata"))
fdict1.path.write_bytes(b"l12k3l12k3l")
assert fdict1.read() == {}
assert "corrupt" in caplog.records[0].msg
def test_write_bytes_atomic_concurrent(tmp_path):
p = tmp_path.joinpath("somefile.ext")
write_bytes_atomic(p, b"hello")
threads = []
for i in range(30):
content = f"hello{i}".encode("ascii")
t = threading.Thread(target=lambda: write_bytes_atomic(p, content))
t.start()
threads.append(t)
for t in threads:
t.join()
assert p.read_text().strip() != "hello"
assert len(list(p.parent.iterdir())) == 1

View File

@@ -1,90 +0,0 @@
import shutil
import smtplib
import subprocess
import sys
import pytest
pytestmark = pytest.mark.skipif(
shutil.which("filtermail") is None,
reason="filtermail binary not found",
)
@pytest.fixture
def smtpserver():
from pytest_localserver import smtp
server = smtp.Server("127.0.0.1")
server.start()
yield server
server.stop()
@pytest.fixture
def make_popen(request):
    def popen(cmdargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kw):
        # honour caller-provided stdout/stderr and any extra Popen kwargs
        p = subprocess.Popen(
            cmdargs,
            stdout=stdout,
            stderr=stderr,
            **kw,
        )
def fin():
p.terminate()
out, err = p.communicate()
print(out.decode("ascii"))
print(err.decode("ascii"), file=sys.stderr)
request.addfinalizer(fin)
return p
return popen
@pytest.mark.parametrize("filtermail_mode", ["outgoing", "incoming"])
def test_one_mail(
make_config, make_popen, smtpserver, maildata, filtermail_mode, monkeypatch
):
monkeypatch.setenv("PYTHONUNBUFFERED", "1")
# DKIM is tested by cmdeploy tests.
monkeypatch.setenv("FILTERMAIL_SKIP_DKIM", "1")
smtp_inject_port = 20025
if filtermail_mode == "outgoing":
settings = dict(
postfix_reinject_port=smtpserver.port,
filtermail_smtp_port=smtp_inject_port,
)
else:
settings = dict(
postfix_reinject_port_incoming=smtpserver.port,
filtermail_smtp_port_incoming=smtp_inject_port,
)
config = make_config("example.org", settings=settings)
path = str(config._inipath)
popen = make_popen(["filtermail", path, filtermail_mode])
line = popen.stderr.readline().strip()
# skip a warning that FILTERMAIL_SKIP_DKIM shouldn't be used in prod
if b"DKIM verification DISABLED!" in line:
line = popen.stderr.readline().strip()
if b"loop" not in line:
print(line.decode("ascii"), file=sys.stderr)
pytest.fail("starting filtermail failed")
addr = f"user1@{config.mail_domain}"
config.get_user(addr).set_password("l1k2j3l1k2j3l")
# send encrypted mail
data = str(maildata("encrypted.eml", from_addr=addr, to_addr=addr))
client = smtplib.SMTP("localhost", smtp_inject_port)
client.sendmail(addr, [addr], data)
assert len(smtpserver.outbox) == 1
# send un-encrypted mail that errors
data = str(maildata("fake-encrypted.eml", from_addr=addr, to_addr=addr))
with pytest.raises(smtplib.SMTPDataError) as e:
client.sendmail(addr, [addr], data)
assert e.value.smtp_code == 523

View File

@@ -1,38 +0,0 @@
import time
from chatmaild.doveauth import AuthDictProxy
from chatmaild.lastlogin import (
LastLoginDictProxy,
)
def test_handle_dovecot_request_last_login(testaddr, example_config):
dictproxy = LastLoginDictProxy(config=example_config)
authproxy = AuthDictProxy(config=example_config)
authproxy.lookup_passdb(testaddr, "1l2k3j1l2k3jl123")
dictproxy_transactions = {}
# Begin transaction
tx = "1111"
msg = f"B{tx}\t{testaddr}"
res = dictproxy.handle_dovecot_request(msg, dictproxy_transactions)
assert not res
assert dictproxy_transactions == {tx: dict(addr=testaddr, res="O\n")}
# set last-login info for user
user = dictproxy.config.get_user(testaddr)
timestamp = int(time.time())
msg = f"S{tx}\tshared/last-login/{testaddr}\t{timestamp}"
res = dictproxy.handle_dovecot_request(msg, dictproxy_transactions)
assert not res
assert len(dictproxy_transactions) == 1
read_timestamp = user.get_last_login_timestamp()
assert read_timestamp == timestamp // 86400 * 86400
# finish transaction
msg = f"C{tx}"
res = dictproxy.handle_dovecot_request(msg, dictproxy_transactions)
assert res == "O\n"
assert len(dictproxy_transactions) == 0

View File

@@ -1,374 +0,0 @@
import io
import time
import pytest
import requests
from chatmaild.metadata import (
Metadata,
MetadataDictProxy,
)
from chatmaild.notifier import (
Notifier,
NotifyThread,
PersistentQueueItem,
)
@pytest.fixture
def notifier(metadata):
queue_dir = metadata.vmail_dir.joinpath("pending_notifications")
queue_dir.mkdir()
return Notifier(queue_dir)
@pytest.fixture
def metadata(tmp_path):
vmail_dir = tmp_path.joinpath("vmaildir")
vmail_dir.mkdir()
return Metadata(vmail_dir)
@pytest.fixture
def dictproxy(notifier, metadata):
return MetadataDictProxy(notifier=notifier, metadata=metadata)
@pytest.fixture
def testaddr2():
return "user2@example.org"
@pytest.fixture
def token():
return "01234"
def get_mocked_requests(statuslist):
class ReqMock:
requests = []
def post(self, url, data, timeout):
self.requests.append((url, data, timeout))
res = statuslist.pop(0)
if isinstance(res, Exception):
raise res
class Result:
status_code = res
return Result()
return ReqMock()
def test_metadata_persistence(tmp_path, testaddr, testaddr2):
metadata1 = Metadata(tmp_path)
metadata2 = Metadata(tmp_path)
assert not metadata1.get_tokens_for_addr(testaddr)
assert not metadata2.get_tokens_for_addr(testaddr)
metadata1.add_token_to_addr(testaddr, "01234")
metadata1.add_token_to_addr(testaddr2, "456")
assert metadata2.get_tokens_for_addr(testaddr) == ["01234"]
assert metadata2.get_tokens_for_addr(testaddr2) == ["456"]
metadata2.remove_token_from_addr(testaddr, "01234")
assert not metadata1.get_tokens_for_addr(testaddr)
assert metadata1.get_tokens_for_addr(testaddr2) == ["456"]
def test_remove_nonexisting(metadata, tmp_path, testaddr):
metadata.add_token_to_addr(testaddr, "123")
metadata.remove_token_from_addr(testaddr, "1l23k1l2k3")
assert metadata.get_tokens_for_addr(testaddr) == ["123"]
def test_notifier_remove_without_set(metadata, testaddr):
metadata.remove_token_from_addr(testaddr, "123")
assert not metadata.get_tokens_for_addr(testaddr)
def test_handle_dovecot_request_lookup_fails(dictproxy, testaddr):
transactions = {}
res = dictproxy.handle_dovecot_request(
f"Lpriv/123/chatmail\t{testaddr}", transactions
)
assert res == "N\n"
def test_handle_dovecot_request_happy_path(dictproxy, testaddr, token):
metadata = dictproxy.metadata
transactions = {}
notifier = dictproxy.notifier
# set device token in a transaction
tx = "1111"
msg = f"B{tx}\t{testaddr}"
res = dictproxy.handle_dovecot_request(msg, transactions)
assert not res and not metadata.get_tokens_for_addr(testaddr)
assert transactions == {tx: dict(addr=testaddr, res="O\n")}
msg = f"S{tx}\tpriv/guid00/devicetoken\t{token}"
res = dictproxy.handle_dovecot_request(msg, transactions)
assert not res
assert len(transactions) == 1
assert metadata.get_tokens_for_addr(testaddr) == [token]
msg = f"C{tx}"
res = dictproxy.handle_dovecot_request(msg, transactions)
assert res == "O\n"
assert len(transactions) == 0
assert metadata.get_tokens_for_addr(testaddr) == [token]
# trigger notification for incoming message
tx2 = "2222"
assert dictproxy.handle_dovecot_request(f"B{tx2}\t{testaddr}", transactions) is None
msg = f"S{tx2}\tpriv/guid00/messagenew"
assert dictproxy.handle_dovecot_request(msg, transactions) is None
queue_item = notifier.retry_queues[0].get()[1]
assert queue_item.token == token
assert dictproxy.handle_dovecot_request(f"C{tx2}", transactions) == "O\n"
assert not transactions
assert queue_item.path.exists()
def test_handle_dovecot_protocol_set_devicetoken(dictproxy):
rfile = io.BytesIO(
b"\n".join(
[
b"HELLO",
b"Btx00\tuser@example.org",
b"Stx00\tpriv/guid00/devicetoken\t01234",
b"Ctx00",
]
)
)
wfile = io.BytesIO()
dictproxy.loop_forever(rfile, wfile)
assert wfile.getvalue() == b"O\n"
assert dictproxy.metadata.get_tokens_for_addr("user@example.org") == ["01234"]
def test_handle_dovecot_protocol_set_get_devicetoken(dictproxy):
rfile = io.BytesIO(
b"\n".join(
[
b"HELLO",
b"Btx00\tuser@example.org",
b"Stx00\tpriv/guid00/devicetoken\t01234",
b"Ctx00",
]
)
)
wfile = io.BytesIO()
dictproxy.loop_forever(rfile, wfile)
assert dictproxy.metadata.get_tokens_for_addr("user@example.org") == ["01234"]
assert wfile.getvalue() == b"O\n"
rfile = io.BytesIO(
b"\n".join([b"HELLO", b"Lpriv/0123/devicetoken\tuser@example.org"])
)
wfile = io.BytesIO()
dictproxy.loop_forever(rfile, wfile)
assert wfile.getvalue() == b"O01234\n"
def test_handle_dovecot_protocol_iterate(dictproxy):
rfile = io.BytesIO(
b"\n".join(
[
b"H",
b"I9\t0\tpriv/5cbe730f146fea6535be0d003dd4fc98/\tci-2dzsrs@nine.testrun.org",
]
)
)
wfile = io.BytesIO()
dictproxy.loop_forever(rfile, wfile)
assert wfile.getvalue() == b"\n"
def test_notifier_thread_deletes_persistent_file(metadata, notifier, testaddr):
reqmock = get_mocked_requests([200])
metadata.add_token_to_addr(testaddr, "01234")
notifier.new_message_for_addr(testaddr, metadata)
NotifyThread(notifier, 0, None).retry_one(reqmock)
url, data, timeout = reqmock.requests[0]
assert data == "01234"
assert metadata.get_tokens_for_addr(testaddr) == ["01234"]
notifier.requeue_persistent_queue_items()
assert notifier.retry_queues[0].qsize() == 0
@pytest.mark.parametrize("status", [requests.exceptions.RequestException(), 404, 500])
def test_notifier_thread_connection_failures(
metadata, notifier, testaddr, status, caplog
):
"""test that tokens keep getting retried until they are given up."""
metadata.add_token_to_addr(testaddr, "01234")
notifier.new_message_for_addr(testaddr, metadata)
notifier.NOTIFICATION_RETRY_DELAY = 5
max_tries = len(notifier.retry_queues)
for i in range(max_tries):
caplog.clear()
reqmock = get_mocked_requests([status])
sleep_calls = []
NotifyThread(notifier, i, None).retry_one(reqmock, sleep=sleep_calls.append)
assert notifier.retry_queues[i].qsize() == 0
assert "request failed" in caplog.records[0].msg
if i > 0:
assert len(sleep_calls) == 1
if i + 1 < max_tries:
assert notifier.retry_queues[i + 1].qsize() == 1
assert len(caplog.records) == 1
else:
assert len(caplog.records) == 2
assert "deadline" in caplog.records[1].msg
notifier.requeue_persistent_queue_items()
assert notifier.retry_queues[0].qsize() == 0
def test_requeue_removes_tmp_files(notifier, metadata, testaddr, caplog):
metadata.add_token_to_addr(testaddr, "01234")
notifier.new_message_for_addr(testaddr, metadata)
p = notifier.queue_dir.joinpath("1203981203.tmp")
p.touch()
notifier2 = notifier.__class__(notifier.queue_dir)
notifier2.requeue_persistent_queue_items()
assert "spurious" in caplog.records[0].msg
assert not p.exists()
assert notifier2.retry_queues[0].qsize() == 1
when, queue_item = notifier2.retry_queues[0].get()
assert when <= int(time.time())
assert queue_item.addr == testaddr
def test_requeue_removes_invalid_files(notifier, metadata, testaddr, caplog):
metadata.add_token_to_addr(testaddr, "01234")
notifier.new_message_for_addr(testaddr, metadata)
# empty/invalid files should be ignored
p = notifier.queue_dir.joinpath("1203981203")
p.touch()
notifier2 = notifier.__class__(notifier.queue_dir)
notifier2.requeue_persistent_queue_items()
assert "spurious" in caplog.records[0].msg
assert not p.exists()
assert notifier2.retry_queues[0].qsize() == 1
when, queue_item = notifier2.retry_queues[0].get()
assert when <= int(time.time())
assert queue_item.addr == testaddr
def test_start_and_stop_notification_threads(notifier, testaddr):
threads = notifier.start_notification_threads(None)
for retry_num, threadlist in threads.items():
for t in threadlist:
t.stop()
t.join()
def test_multi_device_notifier(metadata, notifier, testaddr):
metadata.add_token_to_addr(testaddr, "01234")
metadata.add_token_to_addr(testaddr, "56789")
notifier.new_message_for_addr(testaddr, metadata)
reqmock = get_mocked_requests([200, 200])
NotifyThread(notifier, 0, None).retry_one(reqmock)
NotifyThread(notifier, 0, None).retry_one(reqmock)
assert notifier.retry_queues[0].qsize() == 0
assert notifier.retry_queues[1].qsize() == 0
url, data, timeout = reqmock.requests[0]
assert data == "01234"
url, data, timeout = reqmock.requests[1]
assert data == "56789"
assert metadata.get_tokens_for_addr(testaddr) == ["01234", "56789"]
def test_notifier_thread_run_gone_removes_token(metadata, notifier, testaddr):
metadata.add_token_to_addr(testaddr, "01234")
metadata.add_token_to_addr(testaddr, "45678")
notifier.new_message_for_addr(testaddr, metadata)
reqmock = get_mocked_requests([410, 200])
NotifyThread(notifier, 0, metadata.remove_token_from_addr).retry_one(reqmock)
NotifyThread(notifier, 0, None).retry_one(reqmock)
url, data, timeout = reqmock.requests[0]
assert data == "01234"
url, data, timeout = reqmock.requests[1]
assert data == "45678"
assert metadata.get_tokens_for_addr(testaddr) == ["45678"]
assert notifier.retry_queues[0].qsize() == 0
assert notifier.retry_queues[1].qsize() == 0
def test_persistent_queue_items(tmp_path, testaddr, token):
queue_item = PersistentQueueItem.create(tmp_path, testaddr, 432, token)
assert queue_item.addr == testaddr
assert queue_item.start_ts == 432
assert queue_item.token == token
item2 = PersistentQueueItem.read_from_path(queue_item.path)
assert item2.addr == testaddr
assert item2.start_ts == 432
assert item2.token == token
assert item2 == queue_item
item2.delete()
assert not item2.path.exists()
assert not queue_item < item2 and not item2 < queue_item
def test_turn_credentials_exception_returns_N(notifier, metadata, monkeypatch):
"""Test that turn_credentials() failure returns N\\n instead of crashing."""
import chatmaild.metadata
dictproxy = MetadataDictProxy(
notifier=notifier,
metadata=metadata,
turn_hostname="turn.example.org",
)
def mock_turn_credentials():
raise ConnectionRefusedError("socket not available")
monkeypatch.setattr(chatmaild.metadata, "turn_credentials", mock_turn_credentials)
transactions = {}
res = dictproxy.handle_dovecot_request(
"Lshared/0123/vendor/vendor.dovecot/pvt/server/vendor/deltachat/turn"
"\tuser@example.org",
transactions,
)
assert res == "N\n"
def test_turn_credentials_success(notifier, metadata, monkeypatch):
"""Test that valid turn_credentials() returns TURN URI."""
import chatmaild.metadata
dictproxy = MetadataDictProxy(
notifier=notifier,
metadata=metadata,
turn_hostname="turn.example.org",
)
monkeypatch.setattr(chatmaild.metadata, "turn_credentials", lambda: "user:pass")
transactions = {}
res = dictproxy.handle_dovecot_request(
"Lshared/0123/vendor/vendor.dovecot/pvt/server/vendor/deltachat/turn"
"\tuser@example.org",
transactions,
)
assert res == "Oturn.example.org:3478:user:pass\n"
def test_iroh_relay(dictproxy):
rfile = io.BytesIO(
b"\n".join(
[
b"H",
b"Lshared/0123/vendor/vendor.dovecot/pvt/server/vendor/deltachat/irohrelay\tuser@example.org",
]
)
)
wfile = io.BytesIO()
dictproxy.iroh_relay = "https://example.org/"
dictproxy.loop_forever(rfile, wfile)
assert wfile.getvalue() == b"Ohttps://example.org/\n"

View File

@@ -1,67 +0,0 @@
import sqlite3
from chatmaild.migrate_db import migrate_from_db_to_maildir
def test_migration_not_exists(tmp_path, example_config):
example_config.passdb_path = tmp_path.joinpath("sqlite")
def test_migration(tmp_path, example_config, caplog):
passdb_path = tmp_path.joinpath("passdb.sqlite")
uri = f"file:{passdb_path}?mode=rwc"
sqlconn = sqlite3.connect(uri, timeout=60, uri=True)
sqlconn.execute(
"""
CREATE TABLE users (
addr TEXT PRIMARY KEY,
password TEXT,
last_login INTEGER
)
"""
)
all = {}
for i in range(500):
values = (f"somsom{i:03}@example.org", f"passwo{i:03}", i * 86400)
sqlconn.execute(
"""
INSERT INTO users (addr, password, last_login)
VALUES (?, ?, ?)""",
values,
)
all[values[0]] = values[1:]
for i in range(500):
values = (f"pompom{i:03}@example.org", f"wopass{i:03}", "")
sqlconn.execute(
"""
INSERT INTO users (addr, password, last_login)
VALUES (?, ?, ?)""",
values,
)
all[values[0]] = values[1:]
sqlconn.commit()
sqlconn.close()
assert passdb_path.stat().st_size > 10000
example_config.passdb_path = passdb_path
assert not caplog.records
migrate_from_db_to_maildir(example_config, chunking=500)
assert len(caplog.records) > 3
for path in example_config.mailboxes_dir.iterdir():
if "@" not in path.name:
continue
password, last_login = all.pop(path.name)
user = example_config.get_user(path.name)
if last_login:
assert user.get_last_login_timestamp() == last_login
assert password == user.get_userdb_dict()["password"]
assert not all
assert not example_config.passdb_path.exists()

View File

@@ -1,60 +0,0 @@
import json
import chatmaild
from chatmaild.newemail import (
create_dclogin_url,
create_newemail_dict,
print_new_account,
)
def test_create_newemail_dict(example_config):
ac1 = create_newemail_dict(example_config)
assert "@" in ac1["email"]
assert len(ac1["password"]) >= 10
ac2 = create_newemail_dict(example_config)
assert ac1["email"] != ac2["email"]
assert ac1["password"] != ac2["password"]
def test_create_dclogin_url():
url = create_dclogin_url("user@example.org", "p@ss w+rd")
assert url.startswith("dclogin:")
assert "v=1" in url
assert "ic=3" in url
assert "user@example.org" in url
# password special chars must be encoded
assert "p%40ss" in url
assert "w%2Brd" in url
def test_print_new_account(capsys, monkeypatch, maildomain, tmpdir, example_config):
monkeypatch.setattr(chatmaild.newemail, "CONFIG_PATH", str(example_config._inipath))
print_new_account()
out, err = capsys.readouterr()
lines = out.split("\n")
assert lines[0] == "Content-Type: application/json"
assert not lines[1]
dic = json.loads(lines[2])
assert dic["email"].endswith(f"@{example_config.mail_domain}")
assert len(dic["password"]) >= 10
# default tls_cert=acme should not include dclogin_url
assert "dclogin_url" not in dic
def test_print_new_account_self_signed(capsys, monkeypatch, make_config):
config = make_config("_test.example.org")
monkeypatch.setattr(chatmaild.newemail, "CONFIG_PATH", str(config._inipath))
print_new_account()
out, err = capsys.readouterr()
lines = out.split("\n")
dic = json.loads(lines[2])
assert "dclogin_url" in dic
url = dic["dclogin_url"]
assert url.startswith("dclogin:")
assert "ic=3" in url
assert dic["email"].split("@")[0] in url

View File

@@ -1,73 +0,0 @@
import socket
import threading
import time
from unittest.mock import patch
import pytest
from chatmaild.turnserver import turn_credentials
SOCKET_PATH = "/run/chatmail-turn/turn.socket"
@pytest.fixture
def turn_socket(tmp_path):
"""Create a real Unix socket server at a temp path."""
sock_path = str(tmp_path / "turn.socket")
server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server.bind(sock_path)
server.listen(1)
yield sock_path, server
server.close()
def _call_turn_credentials(sock_path):
"""Call turn_credentials but connect to sock_path instead of hardcoded path."""
original_connect = socket.socket.connect
def patched_connect(self, address):
if address == SOCKET_PATH:
address = sock_path
return original_connect(self, address)
with patch.object(socket.socket, "connect", patched_connect):
return turn_credentials()
def test_turn_credentials_timeout(turn_socket):
"""Server accepts but never responds — must raise socket.timeout."""
sock_path, server = turn_socket
def accept_and_hang():
conn, _ = server.accept()
time.sleep(30)
conn.close()
t = threading.Thread(target=accept_and_hang, daemon=True)
t.start()
with pytest.raises(socket.timeout):
_call_turn_credentials(sock_path)
def test_turn_credentials_connection_refused(tmp_path):
"""Socket file doesn't exist — must raise ConnectionRefusedError or FileNotFoundError."""
missing = str(tmp_path / "nonexistent.socket")
with pytest.raises((ConnectionRefusedError, FileNotFoundError)):
_call_turn_credentials(missing)
def test_turn_credentials_success(turn_socket):
"""Server responds with credentials — must return stripped string."""
sock_path, server = turn_socket
def respond():
conn, _ = server.accept()
conn.sendall(b"testuser:testpass\n")
conn.close()
t = threading.Thread(target=respond, daemon=True)
t.start()
result = _call_turn_credentials(sock_path)
assert result == "testuser:testpass"

View File

@@ -1,56 +0,0 @@
def test_login_timestamp(testaddr, example_config):
user = example_config.get_user(testaddr)
user.set_password("someeqkjwelkqwjleqwe")
user.set_last_login_timestamp(100000)
assert user.get_last_login_timestamp() == 86400
user.set_last_login_timestamp(200000)
assert user.get_last_login_timestamp() == 86400 * 2
def test_get_user_dict_not_set(testaddr, example_config, caplog):
user = example_config.get_user(testaddr)
assert not caplog.records
assert user.get_userdb_dict() == {}
assert len(caplog.records) == 0
user.set_password("")
assert user.get_userdb_dict() == {}
assert len(caplog.records) == 1
def test_get_user_dict(make_config, tmp_path):
config = make_config("something.testrun.org")
addr = "user1@something.org"
user = config.get_user(addr)
enc_password = "l1k2j31lk2j3l1k23j123"
user.set_password(enc_password)
data = user.get_userdb_dict()
assert addr in str(data["home"])
assert data["uid"] == "vmail"
assert data["gid"] == "vmail"
assert data["password"] == enc_password
def test_no_mailboxes_dir(testaddr, example_config, tmp_path):
p = tmp_path.joinpath("a", "mailboxes")
example_config.mailboxes_dir = p
user = example_config.get_user(testaddr)
user.set_password("someeqkjwelkqwjleqwe")
user.set_last_login_timestamp(100000)
assert user.get_last_login_timestamp() == 86400
def test_set_get_cleartext_flag(testaddr, example_config, tmp_path):
p = tmp_path.joinpath("a", "mailboxes")
example_config.mailboxes_dir = p
user = example_config.get_user(testaddr)
user.set_password("someeqkjwelkqwjleqwe")
user.set_last_login_timestamp(100000)
assert user.get_last_login_timestamp() == 86400
assert not user.is_incoming_cleartext_ok()
user.allow_incoming_cleartext()
assert user.is_incoming_cleartext_ok()

View File

@@ -1,10 +0,0 @@
#!/usr/bin/env python3
import socket
def turn_credentials() -> str:
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as client_socket:
client_socket.settimeout(5)
client_socket.connect("/run/chatmail-turn/turn.socket")
with client_socket.makefile("rb") as file:
return file.readline().decode("utf-8").strip()

View File

@@ -1,82 +0,0 @@
import logging
import os
from chatmaild.filedict import write_bytes_atomic
def get_daytimestamp(timestamp) -> int:
return int(timestamp) // 86400 * 86400
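# Worked example: get_daytimestamp(100000) == 86400 and get_daytimestamp(200000) == 172800,
# i.e. timestamps are floored to the start of the day (86400-second buckets),
# matching the assertions in the login-timestamp tests.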
class User:
def __init__(self, maildir, addr, password_path, uid, gid):
self.maildir = maildir
self.addr = addr
self.password_path = password_path
self.enforce_E2EE_path = maildir.joinpath("enforceE2EEincoming")
self.uid = uid
self.gid = gid
@property
def can_track(self):
return "@" in self.addr
def get_userdb_dict(self):
"""Return a non-empty dovecot 'userdb' style dict
if the user has an existing non-empty password"""
try:
pw = self.password_path.read_text()
except FileNotFoundError:
return {}
if not pw:
logging.error(f"password is empty for: {self.addr}")
return {}
home = str(self.maildir)
return dict(addr=self.addr, home=home, uid=self.uid, gid=self.gid, password=pw)
def is_incoming_cleartext_ok(self):
return not self.enforce_E2EE_path.exists()
def allow_incoming_cleartext(self):
if self.enforce_E2EE_path.exists():
self.enforce_E2EE_path.unlink()
def set_password(self, enc_password):
"""Set the specified password for this user.
This method can be called concurrently
but there is no guarantee which of the password-set calls will win.
"""
self.maildir.mkdir(exist_ok=True, parents=True)
password = enc_password.encode("ascii")
try:
write_bytes_atomic(self.password_path, password)
except PermissionError:
logging.error(f"could not write password for: {self.addr}")
raise
self.enforce_E2EE_path.touch()
def set_last_login_timestamp(self, timestamp):
"""Track login time with daily granularity
to minimize touching files and to minimize metadata leakage."""
if not self.can_track:
return
try:
mtime = int(os.stat(self.password_path).st_mtime)
except FileNotFoundError:
logging.error(f"Can not get last login timestamp for {self.addr}")
return
timestamp = get_daytimestamp(timestamp)
if mtime != timestamp:
os.utime(self.password_path, (timestamp, timestamp))
def get_last_login_timestamp(self):
if self.can_track:
try:
return int(self.password_path.stat().st_mtime)
except FileNotFoundError:
pass

View File

@@ -1,94 +0,0 @@
# git-cliff ~ configuration file
# https://git-cliff.org/docs/configuration
[changelog]
# A Tera template to be rendered for each release in the changelog.
# See https://keats.github.io/tera/docs/#introduction
body = """
{% if version %}\
## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
{% else %}\
## [unreleased]
{% endif %}\
{% for group, commits in commits | group_by(attribute="group") %}
### {{ group | striptags | trim | upper_first }}
{% for commit in commits %}
- {% if commit.scope %}*({{ commit.scope }})* {% endif %}\
{% if commit.breaking %}[**breaking**] {% endif %}\
{{ commit.message | upper_first }}\
{% endfor %}
{% endfor %}
"""
# Remove leading and trailing whitespaces from the changelog's body.
trim = true
# Render body even when there are no releases to process.
render_always = true
# An array of regex based postprocessors to modify the changelog.
postprocessors = [
# Replace the placeholder <REPO> with a URL.
#{ pattern = '<REPO>', replace = "https://github.com/orhun/git-cliff" },
]
# render body even when there are no releases to process
# render_always = true
# output file path
# output = "test.md"
[git]
# Parse commits according to the conventional commits specification.
# See https://www.conventionalcommits.org
conventional_commits = true
# Exclude commits that do not match the conventional commits specification.
filter_unconventional = true
# Require all commits to be conventional.
# Takes precedence over filter_unconventional.
require_conventional = false
# Split commits on newlines, treating each line as an individual commit.
split_commits = false
# An array of regex based parsers to modify commit messages prior to further processing.
commit_preprocessors = [
# Replace issue numbers with link templates to be updated in `changelog.postprocessors`.
#{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](<REPO>/issues/${2}))"},
# Check spelling of the commit message using https://github.com/crate-ci/typos.
# If the spelling is incorrect, it will be fixed automatically.
#{ pattern = '.*', replace_command = 'typos --write-changes -' },
]
# Prevent commits that are breaking from being excluded by commit parsers.
protect_breaking_commits = false
# An array of regex based parsers for extracting data from the commit message.
# Assigns commits to groups.
# Optionally sets the commit's scope and can decide to exclude commits from further processing.
commit_parsers = [
{ message = "^feat", group = "Features" },
{ message = "^fix", group = "Bug Fixes" },
{ message = "^docs", group = "Documentation" },
{ message = "^perf", group = "Performance" },
{ message = "^refactor", group = "Refactor" },
{ message = "^style", group = "Styling" },
{ message = "^test", group = "Testing" },
{ message = "^chore\\(release\\): prepare for", skip = true },
{ message = "^chore\\(deps.*\\)", skip = true },
{ message = "^chore\\(pr\\)", skip = true },
{ message = "^chore\\(pull\\)", skip = true },
{ message = "^chore|^ci", group = "Miscellaneous Tasks" },
{ body = ".*security", group = "Security" },
{ message = "^revert", group = "Revert" },
{ message = ".*", group = "Other" },
]
# Exclude commits that are not matched by any commit parser.
filter_commits = false
# Fail on a commit that is not matched by any commit parser.
fail_on_unmatched_commit = false
# An array of link parsers for extracting external references, and turning them into URLs, using regex.
link_parsers = []
# Include only the tags that belong to the current branch.
use_branch_tags = false
# Order releases topologically instead of chronologically.
topo_order = false
# Order commits topologically instead of chronologically.
topo_order_commits = true
# Order of commits in each group/release within the changelog.
# Allowed values: newest, oldest
sort_commits = "oldest"
# Process submodules commits
recurse_submodules = false

View File

@@ -1,47 +0,0 @@
[build-system]
requires = ["setuptools>=68"]
build-backend = "setuptools.build_meta"
[project]
name = "cmdeploy"
version = "0.2"
dependencies = [
"pyinfra>=3",
"pillow",
"qrcode",
"markdown",
"pytest",
"setuptools>=68",
"termcolor",
"build",
"tox",
"ruff",
"pytest",
"pytest-xdist",
"execnet",
"imap_tools",
"deltachat-rpc-client",
]
[project.scripts]
cmdeploy = "cmdeploy.cmdeploy:main"
[project.entry-points.pytest11]
"chatmaild.testplugin" = "chatmaild.tests.plugin"
"cmdeploy.testplugin" = "cmdeploy.tests.plugin"
[tool.pytest.ini_options]
addopts = "-v -ra --strict-markers"
[tool.ruff]
lint.select = [
"F", # Pyflakes
"I", # isort
"PLC", # Pylint Convention
"PLE", # Pylint Error
"PLW", # Pylint Warning
]
lint.ignore = [
"PLC0415" # import-outside-top-level
]

View File

@@ -1,141 +0,0 @@
import importlib.resources
from pyinfra.operations import apt, files, server, systemd
from ..basedeploy import Deployer
class AcmetoolDeployer(Deployer):
def __init__(self, email, domains):
self.domains = domains
self.email = email
self.need_restart_redirector = False
self.need_restart_reconcile_service = False
self.need_restart_reconcile_timer = False
def install(self):
apt.packages(
name="Install acmetool",
packages=["acmetool"],
)
files.file(
name="Remove old acmetool cronjob, it is replaced with systemd timer.",
path="/etc/cron.d/acmetool",
present=False,
)
files.put(
name="Install acmetool hook.",
src=importlib.resources.files(__package__)
.joinpath("acmetool.hook")
.open("rb"),
dest="/etc/acme/hooks/nginx",
user="root",
group="root",
mode="755",
)
files.file(
name="Remove acmetool hook from the wrong location where it was previously installed.",
path="/usr/lib/acme/hooks/nginx",
present=False,
)
def configure(self):
files.template(
src=importlib.resources.files(__package__).joinpath(
"response-file.yaml.j2"
),
dest="/var/lib/acme/conf/responses",
user="root",
group="root",
mode="644",
email=self.email,
)
files.template(
src=importlib.resources.files(__package__).joinpath("target.yaml.j2"),
dest="/var/lib/acme/conf/target",
user="root",
group="root",
mode="644",
)
server.shell(
name=f"Remove old acmetool desired files for {self.domains[0]}",
commands=[f"rm -f /var/lib/acme/desired/{self.domains[0]}-*"],
)
files.template(
src=importlib.resources.files(__package__).joinpath("desired.yaml.j2"),
dest=f"/var/lib/acme/desired/{self.domains[0]}", # 0 is mailhost TLD
user="root",
group="root",
mode="644",
domains=self.domains,
)
service_file = files.put(
src=importlib.resources.files(__package__).joinpath(
"acmetool-redirector.service"
),
dest="/etc/systemd/system/acmetool-redirector.service",
user="root",
group="root",
mode="644",
)
self.need_restart_redirector = service_file.changed
reconcile_service_file = files.put(
src=importlib.resources.files(__package__).joinpath(
"acmetool-reconcile.service"
),
dest="/etc/systemd/system/acmetool-reconcile.service",
user="root",
group="root",
mode="644",
)
self.need_restart_reconcile_service = reconcile_service_file.changed
reconcile_timer_file = files.put(
src=importlib.resources.files(__package__).joinpath(
"acmetool-reconcile.timer"
),
dest="/etc/systemd/system/acmetool-reconcile.timer",
user="root",
group="root",
mode="644",
)
self.need_restart_reconcile_timer = reconcile_timer_file.changed
def activate(self):
systemd.service(
name="Setup acmetool-redirector service",
service="acmetool-redirector.service",
running=True,
enabled=True,
restarted=self.need_restart_redirector,
)
self.need_restart_redirector = False
systemd.service(
name="Setup acmetool-reconcile service",
service="acmetool-reconcile.service",
running=False,
enabled=False,
daemon_reload=self.need_restart_reconcile_service,
)
self.need_restart_reconcile_service = False
systemd.service(
name="Setup acmetool-reconcile timer",
service="acmetool-reconcile.timer",
running=True,
enabled=True,
daemon_reload=self.need_restart_reconcile_timer,
)
self.need_restart_reconcile_timer = False
server.shell(
name=f"Reconcile certificates for: {', '.join(self.domains)}",
commands=["acmetool --batch --xlog.severity=debug reconcile"],
)

View File

@@ -1,8 +0,0 @@
[Unit]
Description=Renew TLS certificates with acmetool
After=network.target
[Service]
Type=oneshot
ExecStart=/usr/bin/acmetool --batch reconcile

View File

@@ -1,8 +0,0 @@
[Unit]
Description=Renew TLS certificates with acmetool
[Timer]
OnCalendar=*-*-* 16:20:00
[Install]
WantedBy=timers.target

View File

@@ -1,6 +0,0 @@
satisfy:
names:
{%- for domain in domains %}
- {{ domain }}
{%- endfor %}

View File

@@ -1,2 +0,0 @@
"acme-enter-email": "{{ email }}"
"acme-agreement:https://letsencrypt.org/documents/LE-SA-v1.6-August-18-2025.pdf": true

View File

@@ -1,8 +0,0 @@
request:
provider: https://acme-v02.api.letsencrypt.org/directory
key:
type: ecdsa
ecdsa-curve: nistp256
challenge:
webroot-paths:
- /var/www/html/.well-known/acme-challenge

View File

@@ -1,116 +0,0 @@
import importlib.resources
import io
import os
from pyinfra.operations import files, server, systemd
def has_systemd():
"""Returns False during Docker image builds or any other non-systemd environment."""
return os.path.isdir("/run/systemd/system")
def get_resource(arg, pkg=__package__):
return importlib.resources.files(pkg).joinpath(arg)
def configure_remote_units(mail_domain, units) -> None:
remote_base_dir = "/usr/local/lib/chatmaild"
remote_venv_dir = f"{remote_base_dir}/venv"
remote_chatmail_inipath = f"{remote_base_dir}/chatmail.ini"
root_owned = dict(user="root", group="root", mode="644")
# install systemd units
for fn in units:
params = dict(
execpath=f"{remote_venv_dir}/bin/{fn}",
config_path=remote_chatmail_inipath,
remote_venv_dir=remote_venv_dir,
mail_domain=mail_domain,
)
basename = fn if "." in fn else f"{fn}.service"
source_path = get_resource(f"service/{basename}.f")
content = source_path.read_text().format(**params).encode()
files.put(
name=f"Upload {basename}",
src=io.BytesIO(content),
dest=f"/etc/systemd/system/{basename}",
**root_owned,
)
def activate_remote_units(units) -> None:
# activate systemd units
for fn in units:
basename = fn if "." in fn else f"{fn}.service"
if fn == "chatmail-expire" or fn == "chatmail-fsreport":
# don't auto-start but let the corresponding timer trigger execution
enabled = False
else:
enabled = True
systemd.service(
name=f"Setup {basename}",
service=basename,
running=enabled,
enabled=enabled,
restarted=enabled,
daemon_reload=True,
)
class Deployment:
def install(self, deployer):
# optional 'required_users' contains a list of (user, group, secondary-group-list) tuples.
# If the group is None, no group is created corresponding to that user.
# If the secondary group list is not None, all listed groups are created as well.
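# Illustrative example (this mirrors what ChatmailDeployer declares further below):
#   required_users = [("vmail", "vmail", None), ("iroh", None, None)]
# i.e. a vmail user with a vmail group, and an iroh user without a dedicated group.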
required_users = getattr(deployer, "required_users", [])
for user, group, groups in required_users:
if group is not None:
server.group(
name="Create {} group".format(group), group=group, system=True
)
if groups is not None:
for group2 in groups:
server.group(
name="Create {} group".format(group2), group=group2, system=True
)
server.user(
name="Create {} user".format(user),
user=user,
group=group,
groups=groups,
system=True,
)
deployer.install()
def configure(self, deployer):
deployer.configure()
def activate(self, deployer):
deployer.activate()
def perform_stages(self, deployers):
default_stages = "install,configure,activate"
stages = os.getenv("CMDEPLOY_STAGES", default_stages).split(",")
for stage in stages:
for deployer in deployers:
getattr(self, stage)(deployer)
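# Illustrative usage of the CMDEPLOY_STAGES override read above (assuming the
# variable is present in the environment of the pyinfra run), e.g. to run only
# the configure and activate stages of every deployer:
#   CMDEPLOY_STAGES=configure,activate cmdeploy run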
class Deployer:
need_restart = False
def install(self):
pass
def configure(self):
pass
def activate(self):
pass

View File

@@ -1,506 +0,0 @@
"""
Provides the `cmdeploy` entry point function,
along with command line option and subcommand parsing.
"""
import argparse
import importlib.resources
import os
import pathlib
import shutil
import subprocess
import sys
import time
from contextlib import contextmanager
from pathlib import Path
import pyinfra
from chatmaild.config import read_config, write_initial_config
from packaging import version
from termcolor import colored
from . import dns, remote
from .lxc.cli import ( # noqa: F401
lxc_start_cmd,
lxc_start_cmd_options,
lxc_status_cmd,
lxc_status_cmd_options,
lxc_stop_cmd,
lxc_stop_cmd_options,
lxc_test_cmd,
lxc_test_cmd_options,
)
from .sshexec import (
LocalExec,
SSHExec,
resolve_host_from_ssh_config,
resolve_key_from_ssh_config,
)
from .util import build_chatmaild_sdist
from .www import main as webdev_main
#
# cmdeploy sub commands and options
#
def init_cmd_options(parser):
parser.add_argument(
"chatmail_domain",
action="store",
help="fully qualified DNS domain name for your chatmail instance",
)
parser.add_argument(
"--force",
dest="recreate_ini",
action="store_true",
help="force reacreate ini file",
)
def init_cmd(args, out):
"""Initialize chatmail config file."""
mail_domain = args.chatmail_domain
inipath = args.inipath
if args.inipath.exists():
if not args.recreate_ini:
print(f"[WARNING] Path exists, not modifying: {inipath}")
return 1
else:
print(
f"[WARNING] Force argument was provided, deleting config file: {inipath}"
)
inipath.unlink()
write_initial_config(inipath, mail_domain, overrides={})
out.green(f"created config file for {mail_domain} in {inipath}")
def run_cmd_options(parser):
parser.add_argument(
"--dry-run",
dest="dry_run",
action="store_true",
help="don't actually modify the server",
)
parser.add_argument(
"--disable-mail",
dest="disable_mail",
action="store_true",
help="install/upgrade the server, but disable postfix & dovecot for now",
)
parser.add_argument(
"--website-only",
action="store_true",
help="only update/deploy the website, skipping full server upgrade/deployment, useful when you only changed/updated the web pages and don't need to re-run a full server upgrade",
)
parser.add_argument(
"--skip-dns-check",
dest="dns_check_disabled",
action="store_true",
help="disable checks nslookup for dns",
)
add_ssh_host_option(parser)
add_ssh_config_option(parser)
def run_cmd(args, out):
"""Deploy chatmail services on the remote server."""
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain
sshexec = get_sshexec(ssh_host, ssh_config=args.ssh_config)
require_iroh = args.config.enable_iroh_relay
strict_tls = args.config.tls_cert_mode == "acme"
if not args.dns_check_disabled:
remote_data = dns.get_initial_remote_data(sshexec, args.config.mail_domain)
if not dns.check_initial_remote_data(
remote_data, strict_tls=strict_tls, print=out.red
):
return 1
env = os.environ.copy()
env["CHATMAIL_INI"] = args.inipath
env["CHATMAIL_WEBSITE_ONLY"] = "True" if args.website_only else ""
env["CHATMAIL_DISABLE_MAIL"] = "True" if args.disable_mail else ""
env["CHATMAIL_REQUIRE_IROH"] = "True" if require_iroh else ""
if not args.website_only:
build_chatmaild_sdist()
if not args.dns_check_disabled:
env["CHATMAIL_ADDR_V4"] = remote_data.get("A") or ""
env["CHATMAIL_ADDR_V6"] = remote_data.get("AAAA") or ""
deploy_path = importlib.resources.files(__package__).joinpath("run.py").resolve()
pyinf = "pyinfra --dry" if args.dry_run else "pyinfra"
cmd = f"{pyinf} --ssh-user root {ssh_host} {deploy_path} -y"
ssh_config = args.ssh_config
if ssh_config:
ssh_config = str(Path(ssh_config).resolve())
# Use pyinfra's native SSH data keys to configure the connection directly
# rather than relying on paramiko config parsing (see also sshexec.py)
ip = resolve_host_from_ssh_config(ssh_host, ssh_config)
key = resolve_key_from_ssh_config(ssh_host, ssh_config)
data_args = f"--data ssh_hostname={ip} --data ssh_known_hosts_file=/dev/null"
if key:
data_args += f" --data ssh_key={key}"
cmd = f"{pyinf} --ssh-user root {ssh_host} {deploy_path} -y {data_args}"
if ssh_host in ["localhost", "@docker"]:
if ssh_host == "@docker":
env["CHATMAIL_NOPORTCHECK"] = "True"
env["CHATMAIL_NOSYSCTL"] = "True"
cmd = f"{pyinf} @local {deploy_path} -y"
if version.parse(pyinfra.__version__) < version.parse("3"):
out.red("Please re-run scripts/initenv.sh to update pyinfra to version 3.")
return 1
try:
out.check_call(cmd, env=env)
if args.website_only:
out.green("Website deployment completed.")
elif (
not args.dns_check_disabled
and strict_tls
and not remote_data["acme_account_url"]
):
out.red("Deploy completed but letsencrypt not configured")
out.red("Run 'cmdeploy run' again")
else:
out.green("Deploy completed, call `cmdeploy dns` next.")
return 0
except subprocess.CalledProcessError:
out.red("Deploy failed")
return 1
def dns_cmd_options(parser):
parser.add_argument(
"--zonefile",
dest="zonefile",
type=pathlib.Path,
default=None,
help="write DNS records in standard BIND format to the given file",
)
add_ssh_host_option(parser)
add_ssh_config_option(parser)
def dns_cmd(args, out):
"""Check DNS entries and optionally generate dns zone file."""
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain
sshexec = get_sshexec(ssh_host, verbose=args.verbose, ssh_config=args.ssh_config)
tls_cert_mode = args.config.tls_cert_mode
strict_tls = tls_cert_mode == "acme"
remote_data = dns.get_initial_remote_data(sshexec, args.config.mail_domain)
if not dns.check_initial_remote_data(remote_data, strict_tls=strict_tls):
return 1
if strict_tls and not remote_data["acme_account_url"]:
out.red("could not get letsencrypt account url, please run 'cmdeploy run'")
return 1
if not remote_data["dkim_entry"]:
out.red("could not determine dkim_entry, please run 'cmdeploy run'")
return 1
remote_data["strict_tls"] = strict_tls
zonefile = dns.get_filled_zone_file(remote_data)
if args.zonefile:
args.zonefile.write_text(zonefile)
out.green(f"DNS records successfully written to: {args.zonefile}")
return 0
retcode = dns.check_full_zone(
sshexec, remote_data=remote_data, zonefile=zonefile, out=out
)
return retcode
def status_cmd_options(parser):
add_ssh_host_option(parser)
add_ssh_config_option(parser)
def status_cmd(args, out):
"""Display status for online chatmail instance."""
ssh_host = args.ssh_host if args.ssh_host else args.config.mail_domain
sshexec = get_sshexec(ssh_host, verbose=args.verbose, ssh_config=args.ssh_config)
out.green(f"chatmail domain: {args.config.mail_domain}")
if args.config.privacy_mail:
out.green("privacy settings: present")
else:
out.red("no privacy settings")
for line in sshexec(remote.rshell.get_systemd_running):
print(line)
def test_cmd_options(parser):
parser.add_argument(
"--slow",
dest="slow",
action="store_true",
help="also run slow tests",
)
add_ssh_host_option(parser)
add_ssh_config_option(parser)
def test_cmd(args, out):
"""Run local and online tests for chatmail deployment."""
env = os.environ.copy()
env["CHATMAIL_INI"] = str(args.inipath.resolve())
pytest_path = shutil.which("pytest")
pytest_args = [
pytest_path,
"cmdeploy/src/",
"-n4",
"-rs",
"-x",
"-v",
"--durations=5",
]
if args.slow:
pytest_args.append("--slow")
if args.ssh_host:
pytest_args.extend(["--ssh-host", args.ssh_host])
if args.ssh_config:
pytest_args.extend(["--ssh-config", str(Path(args.ssh_config).resolve())])
ret = out.run_ret(pytest_args, env=env)
return ret
def fmt_cmd_options(parser):
parser.add_argument(
"--check",
"-c",
action="store_true",
help="only check but don't fix problems",
)
def fmt_cmd(args, out):
"""Run formattting fixes on all chatmail source code."""
chatmaild_dir = importlib.resources.files("chatmaild").resolve()
cmdeploy_dir = chatmaild_dir.joinpath(
"..", "..", "..", "cmdeploy", "src", "cmdeploy"
).resolve()
sources = [str(chatmaild_dir), str(cmdeploy_dir)]
format_args = [shutil.which("ruff"), "format"]
check_args = [shutil.which("ruff"), "check"]
if args.check:
format_args.append("--diff")
else:
check_args.append("--fix")
if not args.verbose:
check_args.append("--quiet")
format_args.append("--quiet")
format_args.extend(sources)
check_args.extend(sources)
out.check_call(" ".join(format_args), quiet=not args.verbose)
out.check_call(" ".join(check_args), quiet=not args.verbose)
def bench_cmd(args, out):
"""Run benchmarks against an online chatmail instance."""
args = ["pytest", "--pyargs", "cmdeploy.tests.online.benchmark", "-vrx"]
cmdstring = " ".join(args)
out.green(f"[$ {cmdstring}]")
subprocess.check_call(args)
def webdev_cmd(args, out):
"""Run local web development loop for static web pages."""
webdev_main()
#
# Parsing command line options and starting commands
#
class Out:
"""Convenience output printer providing coloring and section formatting."""
SECTION_WIDTH = 72
def __init__(self):
self.section_timings = []
def red(self, msg, file=sys.stderr):
print(colored(msg, "red"), file=file, flush=True)
def green(self, msg, file=sys.stderr):
print(colored(msg, "green"), file=file, flush=True)
def print(self, msg="", **kwargs):
"""Print to stdout with automatic flush."""
print(msg, flush=True, **kwargs)
@contextmanager
def section(self, title):
"""Context manager that prints a section header and records elapsed time."""
bar = "\u2501" * (self.SECTION_WIDTH - len(title) - 5)
self.green(f"\u2501\u2501\u2501 {title} {bar}")
t0 = time.time()
yield
elapsed = time.time() - t0
self.section_timings.append((title, elapsed))
self.print(f"{'':>{self.SECTION_WIDTH - 10}}({elapsed:.1f}s)")
self.print()
def section_line(self, title):
"""Print a section header without timing."""
bar = "\u2501" * (self.SECTION_WIDTH - len(title) - 5)
self.green(f"\u2501\u2501\u2501 {title} {bar}")
self.print()
def __call__(self, msg, red=False, green=False, file=sys.stdout):
color = "red" if red else ("green" if green else None)
print(colored(msg, color), file=file, flush=True)
def check_call(self, arg, env=None, quiet=False):
if not quiet:
self(f"[$ {arg}]", file=sys.stderr)
return subprocess.check_call(arg, shell=True, env=env)
def run_ret(self, args, env=None, quiet=False):
if not quiet:
cmdstring = " ".join(args)
self(f"[$ {cmdstring}]", file=sys.stderr)
proc = subprocess.run(args, env=env, check=False)
return proc.returncode
def add_ssh_host_option(parser):
parser.add_argument(
"--ssh-host",
dest="ssh_host",
help="Run commands on 'localhost', via '@docker', or on a specific SSH host "
"instead of chatmail.ini's mail_domain.",
)
def add_ssh_config_option(parser):
parser.add_argument(
"--ssh-config",
dest="ssh_config",
type=Path,
default=None,
help="Path to an SSH config file (e.g. lxconfigs/ssh-config).",
)
def add_config_option(parser):
parser.add_argument(
"--config",
dest="inipath",
action="store",
default=Path(os.environ.get("CHATMAIL_INI", "chatmail.ini")),
type=Path,
help="path to the chatmail.ini file",
)
parser.add_argument(
"--verbose",
"-v",
dest="verbose",
action="store_true",
default=False,
help="provide verbose logging",
)
def add_subcommand(subparsers, func, add_config=True):
name = func.__name__
assert name.endswith("_cmd")
name = name[:-4].replace("_", "-")
doc = func.__doc__.strip()
help = doc.split("\n")[0].strip(".")
p = subparsers.add_parser(name, description=doc, help=help)
p.set_defaults(func=func)
if add_config:
add_config_option(p)
return p
description = """
Set up your chatmail server configuration and
deploy it via SSH to your remote location.
"""
def get_parser():
"""Return an ArgumentParser for the 'cmdeploy' CLI"""
parser = argparse.ArgumentParser(description=description.strip())
parser.set_defaults(func=None, inipath=None)
subparsers = parser.add_subparsers(title="subcommands")
# find all subcommands in the module namespace
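# e.g. a function named `status_cmd` becomes the `status` subcommand, and a
# matching `status_cmd_options(parser)` (if defined) registers its options;
# `lxc_test_cmd` likewise becomes `lxc-test`.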
glob = globals()
for name, func in glob.items():
if name.endswith("_cmd"):
needs_config = not name.startswith("lxc_")
subparser = add_subcommand(subparsers, func, add_config=needs_config)
addopts = glob.get(name + "_options")
if addopts is not None:
addopts(subparser)
return parser
def get_sshexec(ssh_host: str, verbose=True, ssh_config=None):
if ssh_host in ["localhost", "@local"]:
return LocalExec(verbose, docker=False)
elif ssh_host == "@docker":
return LocalExec(verbose, docker=True)
if verbose:
print(f"[ssh] login to {ssh_host}")
return SSHExec(ssh_host, verbose=verbose, ssh_config=ssh_config)
def main(args=None):
"""Provide main entry point for 'cmdeploy' CLI invocation."""
parser = get_parser()
args = parser.parse_args(args=args)
if args.func is None:
return parser.parse_args(["-h"])
out = Out()
kwargs = {}
if args.inipath is not None and args.func.__name__ not in ("init_cmd", "fmt_cmd"):
if not args.inipath.exists():
out.red(f"expecting {args.inipath} to exist, run init first?")
raise SystemExit(1)
try:
args.config = read_config(args.inipath)
except Exception as ex:
out.red(ex)
raise SystemExit(1)
try:
res = args.func(args, out, **kwargs)
if res is None:
res = 0
return res
except KeyboardInterrupt:
out.red("KeyboardInterrupt")
sys.exit(130)
if __name__ == "__main__":
main()

Binary file not shown.

View File

@@ -1,647 +0,0 @@
"""
Chat Mail pyinfra deploy.
"""
import os
from io import BytesIO, StringIO
from pathlib import Path
from chatmaild.config import read_config
from pyinfra import facts, host, logger
from pyinfra.api import FactBase
from pyinfra.facts import hardware
from pyinfra.facts.files import Sha256File
from pyinfra.facts.systemd import SystemdEnabled
from pyinfra.operations import apt, files, pip, server, systemd
from cmdeploy.cmdeploy import Out
from cmdeploy.util import get_chatmaild_sdist, get_version_string
from .acmetool import AcmetoolDeployer
from .basedeploy import (
Deployer,
Deployment,
activate_remote_units,
configure_remote_units,
get_resource,
has_systemd,
)
from .dovecot.deployer import DovecotDeployer
from .external.deployer import ExternalTlsDeployer
from .filtermail.deployer import FiltermailDeployer
from .mtail.deployer import MtailDeployer
from .nginx.deployer import NginxDeployer
from .opendkim.deployer import OpendkimDeployer
from .postfix.deployer import PostfixDeployer
from .selfsigned.deployer import SelfSignedTlsDeployer
from .www import build_webpages, find_merge_conflict, get_paths
class Port(FactBase):
"""
Returns the process occupying a port.
"""
def command(self, port: int) -> str:
return (
"ss -lptn 'src :%d' | awk 'NR>1 {print $6,$7}' | sed 's/users:((\"//;s/\".*//'"
% (port,)
)
def process(self, output: list[str]) -> str:
return output[0]
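# Illustrative: for a listener on the queried port, `ss -lptn` prints a line ending
# in something like `users:(("nginx",pid=1234,fd=6))`; the awk/sed pipeline above
# reduces that to the bare process name, e.g. "nginx".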
def remove_legacy_artifacts():
if not has_systemd():
return
# disable legacy doveauth-dictproxy.service
if host.get_fact(SystemdEnabled).get("doveauth-dictproxy.service"):
systemd.service(
name="Disable legacy doveauth-dictproxy.service",
service="doveauth-dictproxy.service",
running=False,
enabled=False,
)
def _install_remote_venv_with_chatmaild() -> None:
remove_legacy_artifacts()
dist_file = get_chatmaild_sdist()
remote_base_dir = "/usr/local/lib/chatmaild"
remote_dist_file = f"{remote_base_dir}/dist/{dist_file.name}"
remote_venv_dir = f"{remote_base_dir}/venv"
root_owned = dict(user="root", group="root", mode="644")
apt.packages(
name="apt install python3-virtualenv",
packages=["python3-virtualenv"],
)
files.put(
name="Upload chatmaild source package",
src=dist_file.open("rb"),
dest=remote_dist_file,
create_remote_dir=True,
**root_owned,
)
pip.virtualenv(
name=f"chatmaild virtualenv {remote_venv_dir}",
path=remote_venv_dir,
always_copy=True,
)
apt.packages(
name="install gcc and headers to build crypt_r source package",
packages=["gcc", "python3-dev"],
)
server.shell(
name=f"forced pip-install {dist_file.name}",
commands=[
f"{remote_venv_dir}/bin/pip install --force-reinstall {remote_dist_file}"
],
)
def _configure_remote_venv_with_chatmaild(config) -> None:
remote_base_dir = "/usr/local/lib/chatmaild"
remote_chatmail_inipath = f"{remote_base_dir}/chatmail.ini"
root_owned = dict(user="root", group="root", mode="644")
files.put(
name=f"Upload {remote_chatmail_inipath}",
src=config._getbytefile(),
dest=remote_chatmail_inipath,
**root_owned,
)
files.file(
path="/etc/cron.d/chatmail-metrics",
present=False,
)
files.file(
path="/var/www/html/metrics",
present=False,
)
class UnboundDeployer(Deployer):
def __init__(self, config):
self.config = config
self.need_restart = False
def install(self):
# Run local DNS resolver `unbound`.
# `resolvconf` takes care of setting up /etc/resolv.conf
# to use 127.0.0.1 as the resolver.
#
# On an IPv4-only system, if unbound is started but not
# configured, it causes subsequent steps to fail to resolve hosts.
# Here, we use policy-rc.d to prevent unbound from starting up
# on initial install. Later, we will configure it and start it.
#
# For documentation about policy-rc.d, see:
# https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
#
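# (Illustrative: a minimal policy-rc.d that denies all service starts is simply
#  a shell script exiting with status 101, which the spec above defines as
#  "action forbidden"; the file actually shipped is get_resource("policy-rc.d").)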
files.put(
src=get_resource("policy-rc.d"),
dest="/usr/sbin/policy-rc.d",
user="root",
group="root",
mode="755",
)
apt.packages(
name="Install unbound",
packages=["unbound", "unbound-anchor", "dnsutils"],
)
files.file("/usr/sbin/policy-rc.d", present=False)
def configure(self):
server.shell(
name="Generate root keys for validating DNSSEC",
commands=[
"unbound-anchor -a /var/lib/unbound/root.key || true",
],
)
if self.config.disable_ipv6:
files.directory(
path="/etc/unbound/unbound.conf.d",
present=True,
user="root",
group="root",
mode="755",
)
conf = files.put(
src=get_resource("unbound/unbound.conf.j2"),
dest="/etc/unbound/unbound.conf.d/chatmail.conf",
user="root",
group="root",
mode="644",
)
else:
conf = files.file(
path="/etc/unbound/unbound.conf.d/chatmail.conf",
present=False,
)
self.need_restart |= conf.changed
def activate(self):
server.shell(
name="Generate root keys for validating DNSSEC",
commands=[
"systemctl reset-failed unbound.service",
],
)
systemd.service(
name="Start and enable unbound",
service="unbound.service",
running=True,
enabled=True,
restarted=self.need_restart,
)
class MtastsDeployer(Deployer):
def configure(self):
# Remove configuration.
files.file("/etc/mta-sts-daemon.yml", present=False)
files.directory("/usr/local/lib/postfix-mta-sts-resolver", present=False)
files.file("/etc/systemd/system/mta-sts-daemon.service", present=False)
def activate(self):
systemd.service(
name="Stop MTA-STS daemon",
service="mta-sts-daemon.service",
daemon_reload=True,
running=False,
enabled=False,
)
class WebsiteDeployer(Deployer):
def __init__(self, config):
self.config = config
def install(self):
files.directory(
name="Ensure /var/www exists",
path="/var/www",
user="root",
group="root",
mode="755",
present=True,
)
def configure(self):
www_path, src_dir, build_dir = get_paths(self.config)
# if www_folder was set to a non-existing folder, skip upload
if not www_path.is_dir():
logger.warning("Building web pages is disabled in chatmail.ini, skipping")
elif (path := find_merge_conflict(src_dir)) is not None:
logger.warning(
f"Merge conflict found in {path}, skipping website deployment. Fix merge conflict if you want to upload your web page."
)
else:
# if www_folder is a hugo page, build it
if build_dir:
www_path = build_webpages(src_dir, build_dir, self.config)
if www_path is None:
logger.warning("Web page build failed, skipping website deployment")
return
# if it is not a hugo page, upload it as is.
# pyinfra's files.rsync (experimental) causes problems with ssh-config based setups;
# the stable files.sync is sufficient here.
files.sync(
src=str(www_path),
dest="/var/www/html",
user="www-data",
group="www-data",
delete=True,
)
class LegacyRemoveDeployer(Deployer):
def install(self):
apt.packages(name="Remove rspamd", packages="rspamd", present=False)
# remove historic expunge script
# which is now implemented through a systemd timer (chatmail-expire)
files.file(
path="/etc/cron.d/expunge",
present=False,
)
# Remove OBS repository key that is no longer used.
files.file("/etc/apt/keyrings/obs-home-deltachat.gpg", present=False)
files.line(
name="Remove DeltaChat OBS home repository from sources.list",
path="/etc/apt/sources.list",
line="deb [signed-by=/etc/apt/keyrings/obs-home-deltachat.gpg] https://download.opensuse.org/repositories/home:/deltachat/Debian_12/ ./",
escape_regex_characters=True,
present=False,
)
# prior relay versions used filelogging
files.directory(
name="Ensure old logs on disk are deleted",
path="/var/log/journal/",
present=False,
)
# remove echobot if it is still running
if has_systemd() and host.get_fact(SystemdEnabled).get("echobot.service"):
systemd.service(
name="Disable echobot.service",
service="echobot.service",
running=False,
enabled=False,
)
def check_config(config):
mail_domain = config.mail_domain
if mail_domain != "testrun.org" and not mail_domain.endswith(".testrun.org"):
blocked_words = "merlinux schmieder testrun.org".split()
for key in config.__dict__:
value = config.__dict__[key]
if key.startswith("privacy") and any(
x in str(value) for x in blocked_words
):
raise ValueError(
f"please set your own privacy contacts/addresses in {config._inipath}"
)
return config
class TurnDeployer(Deployer):
def __init__(self, mail_domain):
self.mail_domain = mail_domain
self.units = ["turnserver"]
def install(self):
(url, sha256sum) = {
"x86_64": (
"https://github.com/chatmail/chatmail-turn/releases/download/v0.4/chatmail-turn-x86_64-linux",
"1ec1f5c50122165e858a5a91bcba9037a28aa8cb8b64b8db570aa457c6141a8a",
),
"aarch64": (
"https://github.com/chatmail/chatmail-turn/releases/download/v0.4/chatmail-turn-aarch64-linux",
"0fb3e792419494e21ecad536464929dba706bb2c88884ed8f1788141d26fc756",
),
}[host.get_fact(facts.server.Arch)]
existing_sha256sum = host.get_fact(Sha256File, "/usr/local/bin/chatmail-turn")
if existing_sha256sum != sha256sum:
server.shell(
name="Download chatmail-turn",
commands=[
f"(curl -L {url} >/usr/local/bin/chatmail-turn.new && (echo '{sha256sum} /usr/local/bin/chatmail-turn.new' | sha256sum -c) && mv /usr/local/bin/chatmail-turn.new /usr/local/bin/chatmail-turn)",
"chmod 755 /usr/local/bin/chatmail-turn",
],
)
def configure(self):
configure_remote_units(self.mail_domain, self.units)
def activate(self):
activate_remote_units(self.units)
class IrohDeployer(Deployer):
def __init__(self, enable_iroh_relay):
self.enable_iroh_relay = enable_iroh_relay
def install(self):
(url, sha256sum) = {
"x86_64": (
"https://github.com/n0-computer/iroh/releases/download/v0.35.0/iroh-relay-v0.35.0-x86_64-unknown-linux-musl.tar.gz",
"45c81199dbd70f8c4c30fef7f3b9727ca6e3cea8f2831333eeaf8aa71bf0fac1",
),
"aarch64": (
"https://github.com/n0-computer/iroh/releases/download/v0.35.0/iroh-relay-v0.35.0-aarch64-unknown-linux-musl.tar.gz",
"f8ef27631fac213b3ef668d02acd5b3e215292746a3fc71d90c63115446008b1",
),
}[host.get_fact(facts.server.Arch)]
existing_sha256sum = host.get_fact(Sha256File, "/usr/local/bin/iroh-relay")
if existing_sha256sum != sha256sum:
server.shell(
name="Download iroh-relay",
commands=[
f"(curl -L {url} | gunzip | tar -x -f - ./iroh-relay -O >/usr/local/bin/iroh-relay.new && (echo '{sha256sum} /usr/local/bin/iroh-relay.new' | sha256sum -c) && mv /usr/local/bin/iroh-relay.new /usr/local/bin/iroh-relay)",
"chmod 755 /usr/local/bin/iroh-relay",
],
)
self.need_restart = True
def configure(self):
systemd_unit = files.put(
name="Upload iroh-relay systemd unit",
src=get_resource("iroh-relay.service"),
dest="/etc/systemd/system/iroh-relay.service",
user="root",
group="root",
mode="644",
)
self.need_restart |= systemd_unit.changed
iroh_config = files.put(
name="Upload iroh-relay config",
src=get_resource("iroh-relay.toml"),
dest="/etc/iroh-relay.toml",
user="root",
group="root",
mode="644",
)
self.need_restart |= iroh_config.changed
def activate(self):
systemd.service(
name="Start and enable iroh-relay",
service="iroh-relay.service",
running=True,
enabled=self.enable_iroh_relay,
restarted=self.need_restart,
)
self.need_restart = False
class JournaldDeployer(Deployer):
def configure(self):
journald_conf = files.put(
name="Configure journald",
src=get_resource("journald.conf"),
dest="/etc/systemd/journald.conf",
user="root",
group="root",
mode="644",
)
self.need_restart = journald_conf.changed
def activate(self):
systemd.service(
name="Start and enable journald",
service="systemd-journald.service",
running=True,
enabled=True,
restarted=self.need_restart,
)
self.need_restart = False
class ChatmailVenvDeployer(Deployer):
def __init__(self, config):
self.config = config
self.units = (
"chatmail-metadata",
"lastlogin",
"chatmail-expire",
"chatmail-expire.timer",
"chatmail-fsreport",
"chatmail-fsreport.timer",
)
def install(self):
_install_remote_venv_with_chatmaild()
def configure(self):
_configure_remote_venv_with_chatmaild(self.config)
configure_remote_units(self.config.mail_domain, self.units)
def activate(self):
activate_remote_units(self.units)
class ChatmailDeployer(Deployer):
required_users = [
("vmail", "vmail", None),
("iroh", None, None),
]
def __init__(self, mail_domain):
self.mail_domain = mail_domain
def install(self):
files.put(
name="Disable installing recommended packages globally",
src=BytesIO(b'APT::Install-Recommends "0";\n'),
dest="/etc/apt/apt.conf.d/99no-recommends",
user="root",
group="root",
mode="644",
)
apt.update(name="apt update", cache_time=24 * 3600)
apt.upgrade(name="upgrade apt packages", auto_remove=True)
apt.packages(
name="Install curl",
packages=["curl"],
)
apt.packages(
name="Install rsync",
packages=["rsync"],
)
apt.packages(
name="Ensure cron is installed",
packages=["cron"],
)
def configure(self):
# This file is used by auth proxy.
# https://wiki.debian.org/EtcMailName
server.shell(
name="Setup /etc/mailname",
commands=[
f"echo {self.mail_domain} >/etc/mailname; chmod 644 /etc/mailname"
],
)
class FcgiwrapDeployer(Deployer):
def install(self):
apt.packages(
name="Install fcgiwrap",
packages=["fcgiwrap"],
)
def activate(self):
systemd.service(
name="Start and enable fcgiwrap",
service="fcgiwrap.service",
running=True,
enabled=True,
)
class GithashDeployer(Deployer):
def activate(self):
files.put(
name="Upload chatmail relay git commit hash",
src=StringIO(get_version_string()),
dest="/etc/chatmail-version",
mode="700",
)
def get_tls_deployer(config, mail_domain):
"""Select the appropriate TLS deployer based on config."""
tls_domains = [mail_domain, f"mta-sts.{mail_domain}", f"www.{mail_domain}"]
if config.tls_cert_mode == "acme":
return AcmetoolDeployer(config.acme_email, tls_domains)
elif config.tls_cert_mode == "self":
return SelfSignedTlsDeployer(mail_domain)
elif config.tls_cert_mode == "external":
return ExternalTlsDeployer(config.tls_cert_path, config.tls_key_path)
else:
raise ValueError(f"Unknown tls_cert_mode: {config.tls_cert_mode}")
def deploy_chatmail(config_path: Path, disable_mail: bool, website_only: bool) -> None:
"""Deploy a chat-mail instance.
:param config_path: path to chatmail.ini
:param disable_mail: whether to disable postfix & dovecot
:param website_only: if True, only deploy the website
"""
config = read_config(config_path)
check_config(config)
mail_domain = config.mail_domain
if website_only:
Deployment().perform_stages([WebsiteDeployer(config)])
return
if host.get_fact(Port, port=53) != "unbound":
files.line(
name="Add 9.9.9.9 to resolv.conf",
path="/etc/resolv.conf",
# Guard against resolv.conf missing a trailing newline (SolusVM bug).
line="\nnameserver 9.9.9.9",
)
# Check if mtail_address interface is available (if configured)
if config.mtail_address and config.mtail_address not in (
"127.0.0.1",
"::1",
"localhost",
):
ipv4_addrs = host.get_fact(hardware.Ipv4Addrs)
all_addresses = [addr for addrs in ipv4_addrs.values() for addr in addrs]
if config.mtail_address not in all_addresses:
Out().red(
f"Deploy failed: mtail_address {config.mtail_address} is not available (VPN up?).\n"
)
exit(1)
if not os.environ.get("CHATMAIL_NOPORTCHECK"):
port_services = [
(["master", "smtpd"], 25),
("unbound", 53),
]
if config.tls_cert_mode == "acme":
port_services.append(("acmetool", 402))
port_services += [
(["imap-login", "dovecot"], 143),
# acmetool previously listened on port 80,
# so don't complain during an upgrade that moved it to port 402
# and handed port 80 over to nginx.
(["acmetool", "nginx"], 80),
("nginx", 443),
(["master", "smtpd"], 465),
(["master", "smtpd"], 587),
(["imap-login", "dovecot"], 993),
("iroh-relay", 3340),
("mtail", 3903),
("stats", 3904),
("nginx", 8443),
(["master", "smtpd"], config.postfix_reinject_port),
(["master", "smtpd"], config.postfix_reinject_port_incoming),
("filtermail", config.filtermail_smtp_port),
("filtermail", config.filtermail_smtp_port_incoming),
]
for service, port in port_services:
print(f"Checking if port {port} is available for {service}...")
running_service = host.get_fact(Port, port=port)
services = [service] if isinstance(service, str) else service
if running_service:
if running_service not in services:
Out().red(
f"Deploy failed: port {port} is occupied by: {running_service}"
)
exit(1)
tls_deployer = get_tls_deployer(config, mail_domain)
all_deployers = [
ChatmailDeployer(mail_domain),
LegacyRemoveDeployer(),
FiltermailDeployer(),
JournaldDeployer(),
UnboundDeployer(config),
TurnDeployer(mail_domain),
IrohDeployer(config.enable_iroh_relay),
tls_deployer,
WebsiteDeployer(config),
ChatmailVenvDeployer(config),
MtastsDeployer(),
OpendkimDeployer(mail_domain),
# Dovecot should be started before Postfix
# because it creates authentication socket
# required by Postfix.
DovecotDeployer(config, disable_mail),
PostfixDeployer(config, disable_mail),
FcgiwrapDeployer(),
NginxDeployer(config),
MtailDeployer(config.mtail_address),
GithashDeployer(),
]
Deployment().perform_stages(all_deployers)

View File

@@ -1,109 +0,0 @@
import datetime
from . import remote
def parse_zone_records(text):
"""Yield ``(name, ttl, rtype, rdata)`` from standard BIND-format text.
Skips comment lines (starting with ``;``) and blank lines.
Each record line must have the format ``name TTL IN type rdata``.
"""
for raw_line in text.splitlines():
line = raw_line.strip()
if not line or line.startswith(";"):
continue
try:
name, ttl, _in, rtype, rdata = line.split(None, 4)
except ValueError:
raise ValueError(f"Bad zone record line: {line!r}") from None
name = name.rstrip(".")
yield name, ttl, rtype.upper(), rdata
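# Example (illustrative): one BIND-style line parses into a normalized tuple,
# with the trailing dot stripped from the record name:
#
#   >>> list(parse_zone_records("example.org. 3600 IN MX 10 example.org."))
#   [('example.org', '3600', 'MX', '10 example.org.')]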
def get_initial_remote_data(sshexec, mail_domain):
return sshexec.logged(
call=remote.rdns.perform_initial_checks, kwargs=dict(mail_domain=mail_domain)
)
def check_initial_remote_data(remote_data, *, strict_tls=True, print=print):
mail_domain = remote_data["mail_domain"]
if not remote_data["A"] and not remote_data["AAAA"]:
print(f"Missing A and/or AAAA DNS records for {mail_domain}!")
elif strict_tls and remote_data["MTA_STS"] != f"{mail_domain}.":
print("Missing MTA-STS CNAME record:")
print(f"mta-sts.{mail_domain}. CNAME {mail_domain}.")
elif strict_tls and remote_data["WWW"] != f"{mail_domain}.":
print("Missing www CNAME record:")
print(f"www.{mail_domain}. CNAME {mail_domain}.")
else:
return remote_data
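# Sketch of the remote_data dict this check expects (values are hypothetical;
# the key names follow the lookups above):
#
#   {
#       "mail_domain": "example.org",
#       "A": "198.51.100.5",
#       "AAAA": "2001:db8::5",
#       "MTA_STS": "example.org.",  # target of the mta-sts CNAME
#       "WWW": "example.org.",      # target of the www CNAME
#   }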
def get_filled_zone_file(remote_data):
sts_id = remote_data.get("sts_id")
if not sts_id:
remote_data["sts_id"] = datetime.datetime.now().strftime("%Y%m%d%H%M")
d = remote_data["mail_domain"]
lines = ["; Required DNS entries"]
if remote_data.get("A"):
lines.append(f"{d}. 3600 IN A {remote_data['A']}")
if remote_data.get("AAAA"):
lines.append(f"{d}. 3600 IN AAAA {remote_data['AAAA']}")
lines.append(f"{d}. 3600 IN MX 10 {d}.")
if remote_data.get("strict_tls"):
lines.append(
f'_mta-sts.{d}. 3600 IN TXT "v=STSv1; id={remote_data["sts_id"]}"'
)
lines.append(f"mta-sts.{d}. 3600 IN CNAME {d}.")
lines.append(f"www.{d}. 3600 IN CNAME {d}.")
lines.append(remote_data["dkim_entry"])
lines.append("")
lines.append("; Recommended DNS entries")
lines.append(f'{d}. 3600 IN TXT "v=spf1 a ~all"')
lines.append(f'_dmarc.{d}. 3600 IN TXT "v=DMARC1;p=reject;adkim=s;aspf=s"')
if remote_data.get("acme_account_url"):
lines.append(
f"{d}. 3600 IN CAA 0 issue"
f' "letsencrypt.org;accounturi={remote_data["acme_account_url"]}"'
)
lines.append(f'_adsp._domainkey.{d}. 3600 IN TXT "dkim=discardable"')
lines.append(f"_submission._tcp.{d}. 3600 IN SRV 0 1 587 {d}.")
lines.append(f"_submissions._tcp.{d}. 3600 IN SRV 0 1 465 {d}.")
lines.append(f"_imap._tcp.{d}. 3600 IN SRV 0 1 143 {d}.")
lines.append(f"_imaps._tcp.{d}. 3600 IN SRV 0 1 993 {d}.")
lines.append("")
return "\n".join(lines)
def check_full_zone(sshexec, remote_data, out, zonefile) -> int:
"""Check existing DNS records, optionally write them to zone file
and return (exitcode, remote_data) tuple."""
required_diff, recommended_diff = sshexec.logged(
remote.rdns.check_zonefile,
kwargs=dict(zonefile=zonefile, verbose=False),
)
returncode = 0
if required_diff:
out.red("Please set required DNS entries at your DNS provider:\n")
for line in required_diff:
out(line)
out("")
returncode = 1
if remote_data.get("dkim_entry") in required_diff:
out(
"If the DKIM entry above does not work with your DNS provider, you can try this one:\n"
)
out(remote_data.get("web_dkim_entry") + "\n")
if recommended_diff:
out("WARNING: these recommended DNS entries are not set:\n")
for line in recommended_diff:
out(line)
if not (recommended_diff or required_diff):
out.green("Great! All your DNS entries are verified and correct.")
return returncode

View File

@@ -1,12 +0,0 @@
uri = proxy:/run/doveauth/doveauth.socket:auth
iterate_disable = no
iterate_prefix = userdb/
default_pass_scheme = plain
# %E escapes characters " (double quote), ' (single quote) and \ (backslash) with \ (backslash).
# See <https://doc.dovecot.org/2.3/configuration_manual/config_file/config_variables/#modifiers>
# for documentation.
#
# We escape user-provided input and use double quote as a separator.
password_key = passdb/%Ew"%Eu
user_key = userdb/%Eu
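# Illustration (hypothetical credentials): for a login as alice@example.org
# with password "secret", the dict lookups built from the keys above would be
#   passdb/secret"alice@example.org
#   userdb/alice@example.org
# (%w expands to the supplied password, %u to the username, both %E-escaped).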

View File

@@ -1,172 +0,0 @@
import os
import urllib.request
from chatmaild.config import Config
from pyinfra import host
from pyinfra.facts.server import Arch, Sysctl
from pyinfra.facts.systemd import SystemdEnabled
from pyinfra.operations import apt, files, server, systemd
from cmdeploy.basedeploy import (
Deployer,
activate_remote_units,
configure_remote_units,
get_resource,
has_systemd,
)
class DovecotDeployer(Deployer):
daemon_reload = False
def __init__(self, config, disable_mail):
self.config = config
self.disable_mail = disable_mail
self.units = ["doveauth"]
def install(self):
arch = host.get_fact(Arch)
if has_systemd() and "dovecot.service" in host.get_fact(SystemdEnabled):
return # already installed and running
_install_dovecot_package("core", arch)
_install_dovecot_package("imapd", arch)
_install_dovecot_package("lmtpd", arch)
def configure(self):
configure_remote_units(self.config.mail_domain, self.units)
self.need_restart, self.daemon_reload = _configure_dovecot(self.config)
def activate(self):
activate_remote_units(self.units)
restart = False if self.disable_mail else self.need_restart
systemd.service(
name="Disable dovecot for now"
if self.disable_mail
else "Start and enable Dovecot",
service="dovecot.service",
running=False if self.disable_mail else True,
enabled=False if self.disable_mail else True,
restarted=restart,
daemon_reload=self.daemon_reload,
)
self.need_restart = False
def _pick_url(primary, fallback):
try:
req = urllib.request.Request(primary, method="HEAD")
urllib.request.urlopen(req, timeout=10)
return primary
except Exception:
return fallback
def _install_dovecot_package(package: str, arch: str):
arch = "amd64" if arch == "x86_64" else arch
arch = "arm64" if arch == "aarch64" else arch
primary_url = f"https://download.delta.chat/dovecot/dovecot-{package}_2.3.21%2Bdfsg1-3_{arch}.deb"
fallback_url = f"https://github.com/chatmail/dovecot/releases/download/upstream%2F2.3.21%2Bdfsg1/dovecot-{package}_2.3.21%2Bdfsg1-3_{arch}.deb"
url = _pick_url(primary_url, fallback_url)
deb_filename = "/root/" + url.split("/")[-1]
match (package, arch):
case ("core", "amd64"):
sha256 = "dd060706f52a306fa863d874717210b9fe10536c824afe1790eec247ded5b27d"
case ("core", "arm64"):
sha256 = "e7548e8a82929722e973629ecc40fcfa886894cef3db88f23535149e7f730dc9"
case ("imapd", "amd64"):
sha256 = "8d8dc6fc00bbb6cdb25d345844f41ce2f1c53f764b79a838eb2a03103eebfa86"
case ("imapd", "arm64"):
sha256 = "178fa877ddd5df9930e8308b518f4b07df10e759050725f8217a0c1fb3fd707f"
case ("lmtpd", "amd64"):
sha256 = "2f69ba5e35363de50962d42cccbfe4ed8495265044e244007d7ccddad77513ab"
case ("lmtpd", "arm64"):
sha256 = "89f52fb36524f5877a177dff4a713ba771fd3f91f22ed0af7238d495e143b38f"
case _:
apt.packages(packages=[f"dovecot-{package}"])
return
files.download(
name=f"Download dovecot-{package}",
src=url,
dest=deb_filename,
sha256sum=sha256,
cache_time=60 * 60 * 24 * 365 * 10, # never redownload the package
)
apt.deb(name=f"Install dovecot-{package}", src=deb_filename)
def _configure_dovecot(config: Config, debug: bool = False) -> tuple[bool, bool]:
"""Configures Dovecot IMAP server."""
need_restart = False
daemon_reload = False
main_config = files.template(
src=get_resource("dovecot/dovecot.conf.j2"),
dest="/etc/dovecot/dovecot.conf",
user="root",
group="root",
mode="644",
config=config,
debug=debug,
disable_ipv6=config.disable_ipv6,
)
need_restart |= main_config.changed
auth_config = files.put(
src=get_resource("dovecot/auth.conf"),
dest="/etc/dovecot/auth.conf",
user="root",
group="root",
mode="644",
)
need_restart |= auth_config.changed
lua_push_notification_script = files.put(
src=get_resource("dovecot/push_notification.lua"),
dest="/etc/dovecot/push_notification.lua",
user="root",
group="root",
mode="644",
)
need_restart |= lua_push_notification_script.changed
# as per https://doc.dovecot.org/2.3/configuration_manual/os/
# it is recommended to set the following inotify limits
if not os.environ.get("CHATMAIL_NOSYSCTL"):
for name in ("max_user_instances", "max_user_watches"):
key = f"fs.inotify.{name}"
if host.get_fact(Sysctl)[key] > 65535:
# Skip updating limits if they are already sufficient
# (enables running in incus containers where sysctl is read-only)
continue
server.sysctl(
name=f"Change {key}",
key=key,
value=65535,
persist=True,
)
timezone_env = files.line(
name="Set TZ environment variable",
path="/etc/environment",
line="TZ=:/etc/localtime",
)
need_restart |= timezone_env.changed
restart_conf = files.put(
name="dovecot: restart automatically on failure",
src=get_resource("service/10_restart.conf"),
dest="/etc/systemd/system/dovecot.service.d/10_restart.conf",
)
daemon_reload |= restart_conf.changed
# Validate dovecot configuration before restart
if need_restart:
server.shell(
name="Validate dovecot configuration",
commands=["doveconf -n >/dev/null"],
)
return need_restart, daemon_reload

View File

@@ -1,434 +0,0 @@
## Dovecot configuration file
{% if disable_ipv6 %}
listen = 0.0.0.0
{% endif %}
protocols = imap lmtp
auth_mechanisms = plain
{% if debug == true %}
auth_verbose = yes
auth_debug = yes
auth_debug_passwords = yes
auth_verbose_passwords = plain
auth_cache_size = 100M
mail_debug = yes
{% endif %}
# Prevent warnings similar to:
# config: Warning: service auth { client_limit=1000 } is lower than required under max. load (10200). Counted for protocol services with service_count != 1: service lmtp { process_limit=100 } + service imap-urlauth-login { process_limit=100 } + service imap-login { process_limit=10000 }
# config: Warning: service anvil { client_limit=1000 } is lower than required under max. load (10103). Counted with: service imap-urlauth-login { process_limit=100 } + service imap-login { process_limit=10000 } + service auth { process_limit=1 }
# master: Warning: service(stats): client_limit (1000) reached, client connections are being dropped
default_client_limit = 20000
# Increase number of logged in IMAP connections.
# Each connection is handled by a separate `imap` process.
# `imap` process should have `client_limit=1` as described in
# <https://doc.dovecot.org/2.3/configuration_manual/service_configuration/#service-limits>
# so each logged in IMAP session will need its own `imap` process.
#
# If this limit is reached,
# users will fail to LOGIN as `imap-login` process
# will accept them logging in but fail to transfer logged in
# connection to `imap` process until someone logs out and
# the following warning will be logged:
# Warning: service(imap): process_limit (1024) reached, client connections are being dropped
service imap {
process_limit = 50000
}
mail_server_admin = mailto:root@{{ config.mail_domain }}
mail_server_comment = Chatmail server
# `zlib` enables compressing messages stored in the maildir.
# See
# <https://doc.dovecot.org/2.3/configuration_manual/zlib_plugin/>
# for documentation.
#
# quota plugin documentation:
# <https://doc.dovecot.org/2.3/configuration_manual/quota_plugin/>
mail_plugins = zlib quota
imap_capability = +XDELTAPUSH XCHATMAIL
# Authentication for system users.
passdb {
driver = dict
args = /etc/dovecot/auth.conf
}
userdb {
driver = dict
args = /etc/dovecot/auth.conf
}
##
## Mailbox locations and namespaces
##
# Mailboxes are stored in the "mail" directory of the vmail user home.
mail_location = maildir:{{ config.mailboxes_dir }}/%u
# index/cache files are not very useful for chatmail relay operations
# but it's not clear how to disable them completely.
# According to https://doc.dovecot.org/2.3/settings/advanced/#core_setting-mail_cache_max_size
# if the cache file becomes larger than the specified size, it is truncated by dovecot
mail_cache_max_size = 500K
namespace inbox {
inbox = yes
mailbox Drafts {
special_use = \Drafts
}
mailbox Junk {
special_use = \Junk
}
mailbox Trash {
special_use = \Trash
}
# For \Sent mailboxes there are two widely used names. We'll mark both of
# them as \Sent. The user typically deletes one of them if duplicates are created.
mailbox Sent {
special_use = \Sent
}
mailbox "Sent Messages" {
special_use = \Sent
}
}
mail_uid = vmail
mail_gid = vmail
mail_privileged_group = vmail
##
## Mail processes
##
# Pass all IMAP METADATA requests to the server implementing Dovecot's dict protocol.
mail_attribute_dict = proxy:/run/chatmail-metadata/metadata.socket:metadata
# `imap_zlib` enables IMAP COMPRESS (RFC 4978).
# <https://datatracker.ietf.org/doc/html/rfc4978.html>
protocol imap {
mail_plugins = $mail_plugins imap_quota last_login {% if config.imap_compress %}imap_zlib{% endif %}
imap_metadata = yes
}
plugin {
last_login_dict = proxy:/run/chatmail-lastlogin/lastlogin.socket:lastlogin
#last_login_key = last-login/%u # default
last_login_precision = s
}
protocol lmtp {
# notify plugin is a dependency of push_notification plugin:
# <https://doc.dovecot.org/2.3/settings/plugin/notify-plugin/>
#
# push_notification plugin documentation:
# <https://doc.dovecot.org/2.3/configuration_manual/push_notification/>
#
# mail_lua and push_notification_lua are needed for Lua push notification handler.
# <https://doc.dovecot.org/2.3/configuration_manual/push_notification/#configuration>
mail_plugins = $mail_plugins mail_lua notify push_notification push_notification_lua
}
plugin {
zlib_save = gz
}
plugin {
imap_compress_deflate_level = 6
}
plugin {
# for now we define static quota-rules for all users
quota = maildir:User quota
quota_rule = *:storage={{ config.max_mailbox_size }}
quota_max_mail_size={{ config.max_message_size }}
quota_grace = 0
# quota_over_flag_value = TRUE
}
# push_notification configuration
plugin {
# <https://doc.dovecot.org/2.3/configuration_manual/push_notification/#lua-lua>
push_notification_driver = lua:file=/etc/dovecot/push_notification.lua
}
service lmtp {
user=vmail
unix_listener /var/spool/postfix/private/dovecot-lmtp {
group = postfix
mode = 0600
user = postfix
}
}
lmtp_add_received_header = no
service auth {
unix_listener /var/spool/postfix/private/auth {
mode = 0660
user = postfix
group = postfix
}
}
service auth-worker {
# Default is root.
# Drop privileges we don't need.
user = vmail
}
service imap-login {
# High-performance mode as described in
# <https://doc.dovecot.org/2.3/admin_manual/login_processes/#high-performance-mode>
#
# So-called high-security mode described in
# <https://doc.dovecot.org/2.3/admin_manual/login_processes/#high-security-mode>
# and enabled by default with `service_count = 1` starts one process per connection
# and has problems logging in thousands of users after Dovecot restart.
service_count = 0
# Increase virtual memory size limit.
# Since imap-login processes handle TLS connections
# even after logging users in
# and many connections are handled by each process,
# memory size limit should be increased.
#
# Otherwise the whole process eventually dies
# with an error similar to
# imap-login: Fatal: master: service(imap-login):
# child 1422951 returned error 83
# (Out of memory (service imap-login { vsz_limit=256 MB },
# you may need to increase it)
# and takes down all its TLS connections at once.
vsz_limit = 1G
# Avoid startup latency for new connections.
#
# Should be set to at least the number of CPU cores
# according to the documentation.
process_min_avail = 10
}
service anvil {
# We are disabling anvil penalty on failed login attempts
# because it can only detect brute forcing by IP address,
# not by username. As the correct IP address is not handed
# to dovecot anyway, it is more of a hindrance than a help.
# See <https://www.dovecot.org/list/dovecot/2012-May/135485.html> for details.
unix_listener anvil-auth-penalty {
mode = 0
}
}
ssl = required
ssl_cert = <{{ config.tls_cert_path }}
ssl_key = <{{ config.tls_key_path }}
ssl_dh = </usr/share/dovecot/dh.pem
ssl_min_protocol = TLSv1.3
ssl_prefer_server_ciphers = yes
{% if config.imap_rawlog %}
service postlogin {
executable = script-login -d rawlog
unix_listener postlogin {
}
}
service imap {
executable = imap postlogin
}
protocol imap {
#rawlog_dir = /tmp/rawlog/%u
# Put .in and .out imap protocol logging files into per-user homedir
# You can use a command like this to combine into one protocol stream:
# sort -sn <(sed 's/ / C: /' *.in) <(sed 's/ / S: /' *.out)
rawlog_dir = %h
}
{% endif %}
{% if not config.imap_compress %}
# Hibernate IDLE users to save memory and CPU resources
# NOTE: this will have no effect if imap_zlib plugin is used
imap_hibernate_timeout = 30s
service imap {
# Note that this change will allow any process running as
# $default_internal_user (dovecot) to access mails as any other user.
# This may be insecure in some installations, which is why this isn't
# done by default.
unix_listener imap-master {
user = $default_internal_user
}
}
# The following is the default already in v2.3.1+:
service imap {
extra_groups = $default_internal_group
}
service imap-hibernate {
unix_listener imap-hibernate {
mode = 0660
group = $default_internal_group
}
}
{% endif %}
{% if config.mtail_address %}
#
# Dovecot Statistics
#
# OpenMetrics endpoint at http://{{- config.mtail_address}}:3904/metrics
service stats {
inet_listener http {
port = 3904
address = {{- config.mtail_address}}
}
}
# IMAP Command Metrics
# - Bytes in/out for compression efficiency analysis
# - Lock wait time for contention debugging
# - Grouped by command name and reply state
metric imap_command {
filter = event=imap_command_finished
fields = bytes_in bytes_out lock_wait_usecs running_usecs
group_by = cmd_name tagged_reply_state
}
# Duration buckets for latency histograms (base 10: 10us, 100us, 1ms, 10ms, 100ms, 1s, 10s, 100s)
metric imap_command_duration {
filter = event=imap_command_finished
group_by = cmd_name duration:exponential:1:8:10
}
# Slow command outliers (>1 second = 1000000 usecs)
# Useful for alerting without high cardinality
metric imap_command_slow {
filter = event=imap_command_finished AND duration>1000000 AND NOT cmd_name=IDLE
group_by = cmd_name
}
# IDLE-specific Metrics
metric imap_idle {
filter = event=imap_command_finished AND cmd_name=IDLE
fields = bytes_in bytes_out running_usecs
group_by = tagged_reply_state
}
metric imap_idle_duration {
filter = event=imap_command_finished AND cmd_name=IDLE
# Base 10: 100ms to 27h (covers short wakeups to long idle sessions)
group_by = duration:exponential:5:11:10
}
metric imap_idle_commands {
filter = event=imap_command_finished AND cmd_name=IDLE
group_by = tagged_reply_state
}
metric imap_idle_failed {
filter = event=imap_command_finished AND cmd_name=IDLE AND NOT tagged_reply_state=OK
}
# Hibernation Metrics (requires imap_hibernate_timeout)
metric imap_hibernated {
filter = event=imap_client_hibernated
}
metric imap_hibernated_failed {
filter = event=imap_client_hibernated AND error=*
}
metric imap_unhibernated {
filter = event=imap_client_unhibernated
fields = hibernation_usecs
}
metric imap_unhibernated_reason {
filter = event=imap_client_unhibernated
group_by = reason
fields = hibernation_usecs
}
metric imap_unhibernated_reason_sleep {
filter = event=imap_client_unhibernated
group_by = reason hibernation_usecs:exponential:4:8:10
}
metric imap_unhibernated_failed {
filter = event=imap_client_unhibernated AND error=*
}
# Hibernation duration buckets (how long clients stayed hibernated)
# Base 10: 100ms to 27h
metric imap_hibernation_duration {
filter = event=imap_client_unhibernated
group_by = reason duration:exponential:5:11:10
}
# Authentication / Login Metrics
metric auth_request {
filter = event=auth_request_finished
group_by = success
}
metric auth_request_duration {
filter = event=auth_request_finished
group_by = success duration:exponential:2:6:10
}
metric auth_failed {
filter = event=auth_request_finished AND success=no
}
# Passdb cache effectiveness
metric auth_passdb {
filter = event=auth_passdb_request_finished
group_by = result cache
}
# Master login (post-auth userdb lookup)
metric auth_master_login {
filter = event=auth_master_client_login_finished
}
metric auth_master_login_failed {
filter = event=auth_master_client_login_finished AND error=*
}
# Mail Delivery (LMTP) - affects IDLE wakeup latency
metric mail_delivery {
filter = event=mail_delivery_finished
}
metric mail_delivery_duration {
filter = event=mail_delivery_finished
group_by = duration:exponential:3:7:10
}
metric mail_delivery_failed {
filter = event=mail_delivery_finished AND error=*
}
# Connection Events
metric client_connected {
filter = event=client_connection_connected AND category="service:imap"
}
metric client_disconnected {
filter = event=client_connection_disconnected AND category="service:imap"
fields = bytes_in bytes_out
}
{% endif %}

View File

@@ -1,19 +0,0 @@
function dovecot_lua_notify_begin_txn(user)
return user
end
function dovecot_lua_notify_event_message_new(user, event)
local mbox = user:mailbox(event.mailbox)
mbox:sync()
if user.username ~= event.from_address then
-- Incoming message
-- Notify METADATA server about new message.
mbox:metadata_set("/private/messagenew", "")
end
mbox:free()
end
function dovecot_lua_notify_end_txn(ctx, success)
end

View File

@@ -1,67 +0,0 @@
import io
from pyinfra import host
from pyinfra.facts.files import File
from pyinfra.operations import files, systemd
from cmdeploy.basedeploy import Deployer, get_resource
class ExternalTlsDeployer(Deployer):
"""Expects TLS certificates to be managed on the server.
Validates that the configured certificate and key files
exist on the remote host. Installs a systemd path unit
that watches the certificate file and automatically
restarts/reloads affected services when it changes.
"""
def __init__(self, cert_path, key_path):
self.cert_path = cert_path
self.key_path = key_path
def configure(self):
# Verify cert and key exist on the remote host using pyinfra facts.
for path in (self.cert_path, self.key_path):
info = host.get_fact(File, path=path)
if info is None:
raise Exception(f"External TLS file not found on server: {path}")
# Deploy the .path unit (templated with the cert path).
# pkg=__package__ is required here because the resource files
# live in cmdeploy.external, not the default cmdeploy package.
source = get_resource("tls-cert-reload.path.f", pkg=__package__)
content = source.read_text().format(cert_path=self.cert_path).encode()
path_unit = files.put(
name="Upload tls-cert-reload.path",
src=io.BytesIO(content),
dest="/etc/systemd/system/tls-cert-reload.path",
user="root",
group="root",
mode="644",
)
service_unit = files.put(
name="Upload tls-cert-reload.service",
src=get_resource("tls-cert-reload.service", pkg=__package__),
dest="/etc/systemd/system/tls-cert-reload.service",
user="root",
group="root",
mode="644",
)
if path_unit.changed or service_unit.changed:
self.need_restart = True
def activate(self):
systemd.service(
name="Enable tls-cert-reload path watcher",
service="tls-cert-reload.path",
running=True,
enabled=True,
restarted=self.need_restart,
daemon_reload=self.need_restart,
)
# No explicit reload needed here: dovecot/nginx read the cert
# on startup, and the .path watcher handles live changes.

View File

@@ -1,15 +0,0 @@
# Watch the TLS certificate file for changes.
# When the cert is updated (e.g. renewed by an external process),
# this triggers tls-cert-reload.service to reload the affected services.
#
# NOTE: changes to the certificates are not detected if they cross bind-mount boundaries.
# In that case, trigger the reload manually after cert renewal:
# systemctl start tls-cert-reload.service
[Unit]
Description=Watch TLS certificate for changes
[Path]
PathChanged={cert_path}
[Install]
WantedBy=multi-user.target

View File

@@ -1,15 +0,0 @@
# Reload services that cache the TLS certificate.
#
# dovecot: caches the cert at startup; reload re-reads SSL certs
# without dropping existing connections.
# nginx: caches the cert at startup; reload gracefully picks up
# the new cert for new connections.
# postfix: reads the cert fresh on each TLS handshake,
# does NOT need a reload/restart.
[Unit]
Description=Reload TLS services after certificate change
[Service]
Type=oneshot
ExecStart=/bin/systemctl try-reload-or-restart dovecot
ExecStart=/bin/systemctl try-reload-or-restart nginx

View File

@@ -1,52 +0,0 @@
from pyinfra import facts, host
from pyinfra.operations import files, systemd
from cmdeploy.basedeploy import Deployer, get_resource
class FiltermailDeployer(Deployer):
services = ["filtermail", "filtermail-incoming"]
bin_path = "/usr/local/bin/filtermail"
config_path = "/usr/local/lib/chatmaild/chatmail.ini"
def __init__(self):
self.need_restart = False
def install(self):
arch = host.get_fact(facts.server.Arch)
url = f"https://github.com/chatmail/filtermail/releases/download/v0.5.2/filtermail-{arch}"
sha256sum = {
"x86_64": "ce24ca0075aa445510291d775fb3aea8f4411818c7b885ae51a0fe18c5f789ce",
"aarch64": "c5d783eefa5332db3d97a0e6a23917d72849e3eb45da3d16ce908a9b4e5a797d",
}[arch]
self.need_restart |= files.download(
name="Download filtermail",
src=url,
sha256sum=sha256sum,
dest=self.bin_path,
mode="755",
).changed
def configure(self):
for service in self.services:
self.need_restart |= files.template(
src=get_resource(f"filtermail/{service}.service.j2"),
dest=f"/etc/systemd/system/{service}.service",
user="root",
group="root",
mode="644",
bin_path=self.bin_path,
config_path=self.config_path,
).changed
def activate(self):
for service in self.services:
systemd.service(
name=f"Start and enable {service}",
service=f"{service}.service",
running=True,
enabled=True,
restarted=self.need_restart,
daemon_reload=True,
)
self.need_restart = False

View File

@@ -1,11 +0,0 @@
[Unit]
Description=Incoming Chatmail Postfix before queue filter
[Service]
ExecStart={{ bin_path }} {{ config_path }} incoming
Restart=always
RestartSec=30
User=vmail
[Install]
WantedBy=multi-user.target

View File

@@ -1,11 +0,0 @@
[Unit]
Description=Outgoing Chatmail Postfix before queue filter
[Service]
ExecStart={{ bin_path }} {{ config_path }} outgoing
Restart=always
RestartSec=30
User=vmail
[Install]
WantedBy=multi-user.target

View File

@@ -1,88 +0,0 @@
import importlib
import io
import os
import qrcode
from PIL import Image, ImageDraw, ImageFont
def gen_qr_png_data(maildomain):
url = f"DCACCOUNT:https://{maildomain}/new"
image = gen_qr(maildomain, url)
temp = io.BytesIO()
image.save(temp, format="png")
temp.seek(0)
return temp
def gen_qr(maildomain, url):
# taken and modified from
# https://github.com/deltachat/mailadm/blob/master/src/mailadm/gen_qr.py
# info = f"{maildomain} invite code"
info = ""
# load QR code
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_H,
box_size=1,
border=1,
)
qr.add_data(url)
qr.make(fit=True)
qr_img = qr.make_image(fill_color="black", back_color="white")
# paint all elements
ttf_path = str(
importlib.resources.files(__package__).joinpath("data/opensans-regular.ttf")
)
logo_red_path = str(
importlib.resources.files(__package__).joinpath("data/delta-chat-bw.png")
)
assert os.path.exists(ttf_path), ttf_path
font_size = 16
font = ImageFont.truetype(font=ttf_path, size=font_size)
num_lines = ((info).count("\n") + 1) if info else 0
size = width = 384
qr_padding = 6
text_height = font_size * num_lines
height = size + text_height
image = Image.new("RGBA", (width, height), "white")
qr_final_size = width - (qr_padding * 2)
if num_lines:
draw = ImageDraw.Draw(image)
# draw text
if hasattr(font, "getsize"):
info_pos = (width - font.getsize(info.strip())[0]) // 2
else:
info_pos = (width - font.getbbox(info.strip())[3]) // 2
draw.multiline_text(
(info_pos, size - qr_padding // 2),
info,
font=font,
fill="black",
align="right",
)
# paste QR code
image.paste(
qr_img.resize((qr_final_size, qr_final_size), resample=Image.NEAREST),
(qr_padding, qr_padding),
)
# background delta logo
logo2_img = Image.open(logo_red_path)
logo2_width = int(size / 6)
logo2 = logo2_img.resize((logo2_width, logo2_width), resample=Image.NEAREST)
pos = int((size / 2) - (logo2_width / 2))
image.paste(logo2, (pos, pos), mask=logo2)
return image
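# Usage sketch: gen_qr_png_data("example.org") encodes the invite URL
# "DCACCOUNT:https://example.org/new" into a 384-pixel QR image with the
# Delta Chat logo pasted in the center and returns it as an in-memory PNG
# (a BytesIO rewound to position 0).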

View File

@@ -1,12 +0,0 @@
[Unit]
Description=Iroh relay
[Service]
ExecStart=/usr/local/bin/iroh-relay --config-path /etc/iroh-relay.toml
Restart=on-failure
RestartSec=5s
User=iroh
Group=iroh
[Install]
WantedBy=multi-user.target

View File

@@ -1,11 +0,0 @@
enable_relay = true
http_bind_addr = "[::]:3340"
# Disable built-in STUN server in iroh-relay 0.35
# as we deploy our own TURN server instead.
# STUN server is going to be removed in iroh-relay 1.0
# and this line can be removed after upgrade.
enable_stun = false
enable_metrics = false
metrics_bind_addr = "127.0.0.1:9092"

View File

@@ -1,3 +0,0 @@
[Journal]
MaxRetentionSec=3d
Storage=volatile

View File

@@ -1,558 +0,0 @@
"""lxc-start/stop/status/test subcommands for testing with local containers."""
import os
import subprocess
import threading
import time
from ..util import (
collapse,
get_git_hash,
get_version_string,
shell,
)
from .incus import Incus, RelayContainer
RELAY_NAMES = ("test0", "test1")
# -------------------------------------------------------------------
# lxc-start
# -------------------------------------------------------------------
def lxc_start_cmd_options(parser):
_add_name_args(
parser,
help_text="User relay name(s) to create (default: test0).",
)
parser.add_argument(
"--ipv4-only",
dest="ipv4_only",
action="store_true",
help="Create an IPv4-only container.",
)
parser.add_argument(
"--run",
action="store_true",
help="Run 'cmdeploy run' on each container after starting it.",
)
def lxc_start_cmd(args, out):
"""Create/Ensure and start LXC relay and DNS containers."""
ix = Incus()
out.green("Ensuring DNS container (ns-localchat) ...")
dns_ct = ix.get_dns_container()
dns_ct.ensure()
if not ix.find_dns_image():
with out.section("LXC: publishing DNS image"):
dns_ct.publish_as_dns_image()
out.print(f" DNS container IP: {dns_ct.ipv4}")
names = args.names if args.names else RELAY_NAMES
relays = list(ix.get_container(n) for n in names)
for ct in relays:
out.green(f"Ensuring container {ct.name!r} ({ct.domain}) ...")
ct.ensure()
ip = ct.ipv4
out.print(" Configuring container hostname ...")
ct.configure_hosts(ip)
out.print(f" Writing {ct.ini.name} ...")
ct.write_ini(disable_ipv6=args.ipv4_only)
out.print(f" Config: {ct.ini}")
if args.ipv4_only:
ct.disable_ipv6()
ipv6 = None
else:
output = ct.bash(
"ip -6 addr show scope global -deprecated"
" | grep -oP '(?<=inet6 )[^/]+'",
check=False,
)
ipv6 = output.strip() if output else None
out.print(f" {_format_addrs(ip, ipv6)}")
out.green(f" Container {ct.name!r} ready: {ct.domain} -> {ip}")
out.print()
# Reset DNS zones only for the containers we just started
started_cnames = {ct.name for ct in relays}
managed = ix.list_managed()
started = [c for c in managed if c["name"] in started_cnames]
if started:
out.print(
f"Resetting DNS zones for {len(started)}"
" domain(s) (A + AAAA records) ..."
)
dns_ct.reset_dns_records(dns_ct.ipv4, started)
for ct in relays:
if ct.name in started_cnames:
out.print(f" Configuring and testing DNS in {ct.name} ...")
ct.configure_dns(dns_ct.ipv4)
if not ct.check_dns():
out.red(
f" DNS check failed for {ct.name}"
": cannot resolve external hosts"
)
return 1
# Generate the unified SSH config
out.green("Writing ssh-config ...")
ssh_cfg = ix.write_ssh_config()
out.print(f" {ssh_cfg}")
# Verify SSH via the generated config
for ct in relays:
out.print(f" Verifying SSH to {ct.name} via ssh-config ...")
if ct.verify_ssh(ssh_cfg):
out.print(f" SSH OK: ssh -F lxconfigs/ssh-config {ct.domain}")
else:
out.red(f" WARNING: SSH verification failed for {ct.name}")
# Print integration suggestions
ssh_cfg = ix.ssh_config_path
if not ix.check_ssh_include():
out.green(
"\n (Optional) To use containers from any SSH client, add to ~/.ssh/config:"
)
out.green(f" Include {ssh_cfg}")
# Optionally run cmdeploy run on each relay
if args.run:
for ct in relays:
with out.section(f"cmdeploy run: {ct.sname} ({ct.domain})"):
ret = _run_cmdeploy("run", ct, ix, out, extra=["--skip-dns-check"])
if ret:
out.red(f"Deploy to {ct.sname} failed (exit {ret})")
return ret
# -------------------------------------------------------------------
# lxc-stop
# -------------------------------------------------------------------
def lxc_stop_cmd_options(parser):
parser.add_argument(
"--destroy",
action="store_true",
help="Delete containers and their config files after stopping.",
)
parser.add_argument(
"--destroy-all",
dest="destroy_all",
action="store_true",
help="Like --destroy, but also remove the ns-localchat DNS container.",
)
_add_name_args(
parser,
help_text="Container name(s) to stop (default: test0 + test1).",
)
def lxc_stop_cmd(args, out):
"""Stop (and optionally destroy) local LXC relay containers."""
ix = Incus()
names = args.names or RELAY_NAMES
destroy = args.destroy or args.destroy_all
for ct in map(ix.get_container, names):
if destroy:
out.green(f"Destroying container {ct.name!r} ...")
ct.destroy()
if hasattr(ct, "image_alias"):
out.green(f" Deleting cached image {ct.image_alias!r} ...")
ix.run(["image", "delete", ct.image_alias], check=False)
else:
out.green(f"Stopping container {ct.name!r} ...")
ct.stop(force=True)
if args.destroy_all:
dns_ct = ix.get_dns_container()
out.green(f"Destroying DNS container {dns_ct.name!r} ...")
dns_ct.destroy()
ix.delete_images()
if destroy:
ix.write_ssh_config()
out.green("LXC containers destroyed.")
else:
out.green("LXC containers stopped.")
# -------------------------------------------------------------------
# lxc-test
# -------------------------------------------------------------------
def lxc_test_cmd_options(parser):
parser.add_argument(
"--one",
action="store_true",
help="Only deploy and test against test0 (skip test1).",
)
def lxc_test_cmd(args, out):
"""Run full LXC pipeline: start, deploy, DNS, zone files, and tests.
All commands run directly on the host using
``--ssh-config lxconfigs/ssh-config`` for SSH access.
"""
ix = Incus()
t_total = time.time()
relay_names = list(RELAY_NAMES)
if args.one:
relay_names = relay_names[:1]
local_hash = get_git_hash()
# Per-relay: start containers, then deploy in parallel.
ipv4_only_flags = {RELAY_NAMES[0]: False, RELAY_NAMES[1]: True}
# Phase 1: start all containers (sequential, fast)
for ct in map(ix.get_container, relay_names):
name = ct.sname
ipv4_only = ipv4_only_flags.get(name, False)
label = "IPv4-only" if ipv4_only else "dual-stack"
with out.section(f"LXC: lxc-start {name} ({label})"):
args.names = [name]
args.ipv4_only = ipv4_only
args.run = False
ret = lxc_start_cmd(args, out)
if ret:
return ret
# Phase 2: deploy all relays in parallel
to_deploy = []
for ct in map(ix.get_container, relay_names):
status = _deploy_status(ct, local_hash, ix)
if "IN-SYNC" in status:
out.section_line(f"cmdeploy run: {ct.sname}: {status}, skipping")
else:
to_deploy.append(ct)
if to_deploy:
with out.section("cmdeploy run (parallel)"):
ret = _run_cmdeploy_parallel(
"run", to_deploy, ix, out, extra=["--skip-dns-check"]
)
if ret:
return ret
# Phase 3: publish images (sequential, fast)
for ct in map(ix.get_container, relay_names):
if ct.publish_image():
out.section_line(f"LXC: published {ct.sname} image")
else:
out.section_line(
f"LXC: publish {ct.sname} image: skipped, cached",
)
for ct in map(ix.get_container, relay_names):
with out.section(f"cmdeploy dns: {ct.sname} ({ct.domain})"):
ret = _run_cmdeploy("dns", ct, ix, out, extra=["--zonefile", str(ct.zone)])
if ret:
out.red(f"DNS for {ct.sname} failed (exit {ret})")
return ret
with out.section("LXC: PowerDNS zone update"):
dns_ct = ix.get_dns_container()
for ct in map(ix.get_container, relay_names):
if ct.zone.exists():
zone_data = ct.zone.read_text()
out.print(f" Loading {ct.zone} into PowerDNS ...")
dns_ct.set_dns_records(zone_data)
# Run tests in both directions when two relays are available.
test_pairs = [(0, 1), (1, 0)] if len(relay_names) > 1 else [(0,)]
for pair in test_pairs:
first = ix.get_container(relay_names[pair[0]])
label = first.sname
env = None
if len(pair) > 1:
second = ix.get_container(relay_names[pair[1]])
label = f"{first.sname} \u2194 {second.sname}"
env = os.environ.copy()
env["CHATMAIL_DOMAIN2"] = second.domain
with out.section(f"cmdeploy test: {label}"):
ret = _run_cmdeploy("test", first, ix, out, **({"env": env} if env else {}))
if ret:
out.red(f"Tests failed (exit {ret})")
return ret
elapsed = time.time() - t_total
out.section_line(f"lxc-test complete ({elapsed:.1f}s)")
if out.section_timings:
out.print("Section timings:")
for name, secs in out.section_timings:
out.print(f" {name:.<50s} {secs:5.1f}s")
out.print(f" {'total':.<50s} {elapsed:5.1f}s")
out.section_timings.clear()
return 0
# -------------------------------------------------------------------
# lxc-status
# -------------------------------------------------------------------
def lxc_status_cmd_options(parser):
pass
def lxc_status_cmd(args, out):
"""Show status of local LXC chatmail containers."""
ix = Incus()
containers = ix.list_managed()
if not containers:
out.red("No LXC containers found. Run 'cmdeploy lxc-start' first.")
return 1
local_hash = get_git_hash()
# Get storage pool path for display
storage_path = None
data = ix.run_json(["storage", "show", "default"], check=False)
if data:
storage_path = data.get("config", {}).get("source")
if storage_path:
out.green(f"Containers: ({storage_path})")
else:
out.green("Containers:")
dns_ip = None
for c in containers:
_print_container_status(out, c, ix, local_hash)
if c["name"] == ix.get_dns_container().name:
dns_ip = c["ip"]
_print_ssh_status(out, ix)
_print_dns_forwarding_status(out, dns_ip)
return 0
def _print_container_status(out, c, ix, local_hash):
"""Print name/status, domain/IPs, and RAM for one container."""
cname = c["name"]
is_running = c.get("status") == "Running"
ct = ix.get_container(cname)
# First line: name + running/STOPPED + deploy status
if not is_running:
tag = "STOPPED"
elif not isinstance(ct, RelayContainer):
tag = "running"
else:
tag = f"running {_deploy_status(ct, local_hash, ix)}"
out.print(f" {cname:20s} {tag}")
# Second line: domain, IPv4, IPv6
domain = c.get("domain", "")
ip = c.get("ip") or "?"
ipv6 = c.get("ipv6")
out.print(f" {domain:20s} {_format_addrs(ip, ipv6)}")
# Third line: RAM (RSS), config
indent = " " * 21
try:
used, total = ct.rss_mib()
except Exception:
ram_str = "RSS ?"
else:
ram_str = f"RSS {used}/{total} MiB ({used * 100 // total}%)"
if isinstance(ct, RelayContainer):
detail = f"{ram_str}, config: {os.path.relpath(ct.ini)}"
else:
detail = ram_str
out.print(f" {indent}{detail}")
out.print()
def _print_ssh_status(out, ix):
"""Print SSH integration status."""
out.print()
ssh_cfg = ix.ssh_config_path
if ix.check_ssh_include():
out.green("SSH: ~/.ssh/config includes lxconfigs/ssh-config ✓")
else:
out.red("SSH: ~/.ssh/config does NOT include lxconfigs/ssh-config")
out.print(" Add to ~/.ssh/config:")
out.print(f" Include {ssh_cfg}")
def _print_dns_forwarding_status(out, dns_ip):
"""Print host DNS forwarding status for .localchat."""
if not dns_ip:
out.red("DNS: ns-localchat container not found")
return
try:
rv = shell("resolvectl status incusbr0", timeout=5)
dns_ok = dns_ip in rv.stdout and "localchat" in rv.stdout
except (FileNotFoundError, subprocess.TimeoutExpired, OSError):
dns_ok = None
if dns_ok is True:
out.green(f"DNS: .localchat forwarding to {dns_ip}")
elif dns_ok is False:
out.red("DNS: .localchat forwarding NOT configured")
out.print(" Run:")
out.print(f" sudo resolvectl dns incusbr0 {dns_ip}")
out.print(" sudo resolvectl domain incusbr0 ~localchat")
else:
out.print(" DNS: .localchat forwarding status UNKNOWN")
# -------------------------------------------------------------------
# Internal helpers
# -------------------------------------------------------------------
def _format_addrs(ip, ipv6=None):
parts = [f"IPv4 {ip}"]
if ipv6:
parts.append(f"IPv6 {ipv6}")
return ", ".join(parts)
def _deploy_status(ct, local_hash, ix):
"""Return a human-readable deploy status string.
Compares the full deployed version (hash + diff) against
the local state built by :func:`~cmdeploy.util.get_version_string`.
"""
deployed = ct.deployed_version()
if deployed is None:
return "NOT DEPLOYED"
# A container launched from the relay image has the same
# git hash but a different domain - always redeploy.
deployed_domain = ct.deployed_domain()
if deployed_domain and deployed_domain != ct.domain:
return f"DOMAIN-MISMATCH (deployed: {deployed_domain})"
deployed_lines = deployed.splitlines()
deployed_hash = deployed_lines[0] if deployed_lines else ""
short = deployed_hash[:12]
if not local_hash:
return f"UNKNOWN (deployed: {short})"
local_short = local_hash[:12]
if deployed_hash != local_hash:
return f"STALE (deployed: {short}, local: {local_short})"
# Hash matches - check for uncommitted diffs
local_version = get_version_string()
if deployed != local_version:
return f"DIRTY ({local_short}, undeployed changes)"
return f"IN-SYNC ({short})"
def _add_name_args(parser, help_text=None):
"""Add optional positional NAME arguments."""
parser.add_argument(
"names",
nargs="*",
metavar="NAME",
help=help_text or "Relay name(s) to operate on.",
)
def _build_cmdeploy_cmd(subcmd, ct, ix, extra=None):
"""Build the ``cmdeploy <subcmd>`` command string."""
extra_str = " ".join(extra) if extra else ""
return collapse(f"""\
cmdeploy {subcmd}
--config {ct.ini}
--ssh-config {ix.ssh_config_path}
--ssh-host {ct.domain}
{extra_str}
""")
def _run_cmdeploy(subcmd, ct, ix, out, extra=None, **kwargs):
"""Run ``cmdeploy <subcmd>`` with standard --config/--ssh flags.
*ct* is a Container (uses ``ct.ini`` and ``ct.domain``).
Returns the subprocess exit code.
"""
cmd = _build_cmdeploy_cmd(subcmd, ct, ix, extra=extra)
if "cwd" not in kwargs:
kwargs["cwd"] = str(ix.project_root)
out.print(f" [$ {cmd}]")
return shell(cmd, capture_output=False, **kwargs).returncode
# Number of tail lines to print on failure.
_FAIL_CONTEXT_LINES = 40
def _run_cmdeploy_parallel(subcmd, containers, ix, out, extra=None):
"""Run ``cmdeploy <subcmd>`` for every container in parallel.
Output is captured and filtered: only lines containing
``"Start operation"`` are printed (prefixed with the relay
short-name). On failure the last *_FAIL_CONTEXT_LINES*
lines of that process's output are shown.
"""
procs = [] # list of (container, Popen, collected_lines)
cwd = str(ix.project_root)
for ct in containers:
cmd = _build_cmdeploy_cmd(subcmd, ct, ix, extra=extra)
out.print(f" [{ct.sname}] $ {cmd}")
proc = subprocess.Popen(
cmd,
shell=True,
text=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=cwd,
)
procs.append((ct, proc, []))
def _reader(ct, proc, lines):
prefix = f" [{ct.sname}]"
for raw in proc.stdout:
line = raw.rstrip("\n")
lines.append(line)
if "Starting operation" in line:
out.print(f"{prefix} {line}")
threads = []
for ct, proc, lines in procs:
t = threading.Thread(
target=_reader,
args=(ct, proc, lines),
daemon=True,
)
t.start()
threads.append(t)
for t in threads:
t.join()
for _, proc, _ in procs:
proc.wait()
# Check results
first_failure = 0
for ct, proc, lines in procs:
if proc.returncode:
out.red(f"Deploy to {ct.sname} failed " f"(exit {proc.returncode})")
tail = lines[-_FAIL_CONTEXT_LINES:]
for tl in tail:
out.print(f" [{ct.sname}] {tl}")
if not first_failure:
first_failure = proc.returncode
return first_failure

View File

@@ -1,754 +0,0 @@
"""Core Incus operations for local chatmail LXC containers."""
import json
import subprocess
import textwrap
import time
from pathlib import Path
from ..util import shell
LABEL_KEY = "user.localchat-managed"
SSH_KEY_NAME = "id_localchat"
DOMAIN_SUFFIX = ".localchat"
UPSTREAM_IMAGE = "images:debian/12"
BASE_IMAGE_ALIAS = "localchat-base"
BASE_SETUP_NAME = "localchat-base-setup"
DNS_IMAGE_ALIAS = "localchat-ns"
DNS_CONTAINER_NAME = "ns-localchat"
DNS_DOMAIN = "ns.localchat"
BRIDGE_IPV4 = "10.200.200.1/24"
DNS_IP = "10.200.200.2"
RELAY_IPS = {
"test0": "10.200.200.10",
"test1": "10.200.200.11",
"test2": "10.200.200.12",
}
def _extract_ip(net_data, family="inet"):
"""Extract the first global-scope IP of *family* from network state data.
*net_data* is the ``state.network`` dict from ``incus list --format=json``.
*family* is ``"inet"`` for IPv4 or ``"inet6"`` for IPv6.
Returns the address string, or None.
"""
for iface_name, iface in net_data.items():
if iface_name == "lo":
continue
for addr in iface.get("addresses", []):
if addr["family"] == family and addr["scope"] == "global":
return addr["address"]
return None
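# Example (minimal, hypothetical network state):
#
#   net = {"eth0": {"addresses": [
#       {"family": "inet", "scope": "global", "address": "10.200.200.10"},
#       {"family": "inet6", "scope": "link", "address": "fe80::1"},
#   ]}}
#   _extract_ip(net, "inet")   # -> "10.200.200.10"
#   _extract_ip(net, "inet6")  # -> None (only link-scope IPv6 present)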
class Incus:
"""Gateway for all Incus container operations.
Instantiated once per CLI command and passed around so that
all modules share a single entry point for Incus interactions.
"""
def __init__(self):
self.project_root = Path(__file__).resolve().parent.parent.parent.parent.parent
self.lxconfigs_dir = self.project_root / "lxconfigs"
self.lxconfigs_dir.mkdir(exist_ok=True)
self.ssh_key_path = self.lxconfigs_dir / SSH_KEY_NAME
if not self.ssh_key_path.exists():
shell(
f"ssh-keygen -t ed25519 -f {self.ssh_key_path} -N '' -C localchat",
check=True,
)
self.ssh_config_path = self.lxconfigs_dir / "ssh-config"
def write_ssh_config(self):
"""Write ``lxconfigs/ssh-config`` mapping all containers to their IPs.
Each Host block maps the container name, the domain name, and the
short relay name (e.g. ``_test0``) to the container's IP, using the
shared localchat SSH key. Returns the path to the file.
"""
containers = self.list_managed()
key_path = self.ssh_key_path
lines = ["# Auto-generated by cmdeploy lxc-start - do not edit\n"]
for c in containers:
hosts = [c["name"]]
domain = c.get("domain", "")
if domain and domain != c["name"]:
hosts.append(domain)
short = domain.split(".")[0]
if short and short not in hosts:
hosts.append(short)
lines.append(f"\nHost {' '.join(hosts)}\n")
lines.append(f" Hostname {c['ip']}\n")
lines.append(" User root\n")
lines.append(f" IdentityFile {key_path}\n")
lines.append(" IdentitiesOnly yes\n")
lines.append(" StrictHostKeyChecking accept-new\n")
lines.append(" UserKnownHostsFile /dev/null\n")
lines.append(" LogLevel ERROR\n")
path = self.ssh_config_path
path.write_text("".join(lines))
return path
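# Illustration: for a hypothetical running relay "test0-localchat" with domain
# "_test0.localchat" and IPv4 10.200.200.10, the generated block is roughly:
#
#   Host test0-localchat _test0.localchat _test0
#       Hostname 10.200.200.10
#       User root
#       IdentityFile <repo>/lxconfigs/id_localchat
#       IdentitiesOnly yes
#       StrictHostKeyChecking accept-new
#       UserKnownHostsFile /dev/null
#       LogLevel ERROR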
def check_ssh_include(self):
"""Check if the user's ~/.ssh/config already includes our ssh-config."""
user_ssh_config = Path.home() / ".ssh" / "config"
if not user_ssh_config.exists():
return False
lines = filter(None, map(str.strip, user_ssh_config.open("r")))
return f"Include {self.ssh_config_path}" in lines
def run(self, args, check=True, capture=True, input=None):
"""Run an incus command."""
cmd = ["incus"] + list(args)
kwargs = dict(check=check, text=True, input=input)
if capture:
kwargs["capture_output"] = True
else:
kwargs["stdout"] = None
kwargs["stderr"] = None
return subprocess.run(cmd, **kwargs) # noqa: PLW1510
def run_json(self, args, check=True):
"""Run an incus command with ``--format=json``.
Returns the parsed JSON on success.
When *check* is True raises ``subprocess.CalledProcessError``
on non-zero exit; when False returns *None* instead.
"""
result = self.run(
list(args) + ["--format=json"],
check=check,
)
if result.returncode != 0:
return None
return json.loads(result.stdout)
def run_output(self, args, check=True):
"""Run an incus command and return its stripped stdout.
When *check* is False, returns *None* on non-zero exit
instead of raising.
"""
result = self.run(args, check=check)
if result.returncode != 0:
return None
return result.stdout.strip()
def _find_image(self, alias):
"""Return *alias* if an image with that alias exists, else None."""
images = self.run_json(["image", "list"], check=False) or []
for img in images:
for a in img.get("aliases", []):
if a.get("name") == alias:
return alias
return None
def find_dns_image(self):
"""Return the DNS image alias if it exists, else None."""
return self._find_image(DNS_IMAGE_ALIAS)
def delete_images(self):
"""Delete all cached localchat images."""
for alias in (DNS_IMAGE_ALIAS, BASE_IMAGE_ALIAS):
self.run(["image", "delete", alias], check=False)
for name in RELAY_IPS:
self.run(["image", "delete", f"localchat-{name}"], check=False)
def list_managed(self):
"""Return list of dicts with name, ip, ipv6, domain, status, memory_usage."""
containers = []
for ct in self.run_json(["list"]):
config = ct.get("config", {})
if config.get(LABEL_KEY) != "true":
continue
name = ct["name"]
state = ct.get("state", {})
net = state.get("network") or {}
containers.append(
{
"name": name,
"ip": _extract_ip(net, "inet"),
"ipv6": _extract_ip(net, "inet6"),
"domain": config.get(
"user.localchat-domain", f"{name}{DOMAIN_SUFFIX}"
),
"status": ct.get("status", "Unknown"),
"memory_usage": state.get("memory", {}).get("usage", 0),
}
)
return containers
def ensure_base_image(self):
"""Build and cache a base image with openssh and the SSH key.
The image is published as a local incus image with alias
'localchat-base'. Subsequent container launches use this
image instead of the upstream Debian 12, skipping the
slow apt-get install step.
Returns the image alias.
"""
if self._find_image(BASE_IMAGE_ALIAS):
return BASE_IMAGE_ALIAS
print(" Building base image (one-time setup) ...")
self.run(["delete", BASE_SETUP_NAME, "--force"], check=False)
self.run(["image", "delete", BASE_IMAGE_ALIAS], check=False)
self.run(
["launch", UPSTREAM_IMAGE, BASE_SETUP_NAME, "-c", "limits.memory=512MiB"]
)
ct = Container(self, BASE_SETUP_NAME, memory="512MiB")
ct.wait_ready()
key_path = self.ssh_key_path
pub_key = key_path.with_suffix(".pub").read_text().strip()
print(" ── apt-get install (base image) ──")
ct.bash(
f"""\
systemctl disable --now systemd-resolved 2>/dev/null || true
rm -f /etc/resolv.conf
echo 'nameserver 9.9.9.9' > /etc/resolv.conf
while fuser /var/lib/apt/lists/lock >/dev/null 2>&1 ; do
echo "Waiting for other apt-get instance to finish..."
sleep 5
done
apt-get -o DPkg::Lock::Timeout=60 update
DEBIAN_FRONTEND=noninteractive apt-get install -y openssh-server python3
systemctl enable ssh
apt-get clean
mkdir -p /root/.ssh
chmod 700 /root/.ssh
echo '{pub_key}' > /root/.ssh/authorized_keys
chmod 600 /root/.ssh/authorized_keys
""",
capture=False,
)
print(" ── base image install done ──")
self.run(["stop", BASE_SETUP_NAME])
self.run(["publish", BASE_SETUP_NAME, f"--alias={BASE_IMAGE_ALIAS}"])
self.run(["delete", BASE_SETUP_NAME, "--force"])
print(f" Base image '{BASE_IMAGE_ALIAS}' ready.")
return BASE_IMAGE_ALIAS
def ensure_bridge(self):
"""Ensure incusbr0 exists and uses our fixed IPv4 subnet."""
bridge = self.run_json(["network", "show", "incusbr0"], check=False)
if bridge and bridge.get("config", {}).get("ipv4.address") == BRIDGE_IPV4:
return
print(f" Configuring incusbr0 with static subnet {BRIDGE_IPV4} ...")
if not bridge:
self.run(["network", "create", "incusbr0"], check=False)
self.run(
[
"network",
"set",
"incusbr0",
f"ipv4.address={BRIDGE_IPV4}",
"ipv4.nat=true",
"ipv6.address=none",
"dns.mode=none",
]
)
def get_container(self, name):
"""Return a container handle for the given name.
Accepts both short relay names (``test0``) and full Incus
container names (``test0-localchat``). Returns
``DNSContainer`` for the DNS container and
``RelayContainer`` for everything else.
"""
if name == DNS_CONTAINER_NAME:
return DNSContainer(self)
return RelayContainer(self, name.removesuffix("-localchat"))
def get_dns_container(self):
"""Return a DNSContainer handle."""
return DNSContainer(self)
class Container:
"""Lightweight handle for an Incus container.
Carries the container *name* and provides convenience methods
for running commands, managing lifecycle, and extracting state
so callers don't repeat the name everywhere.
"""
def __init__(self, incus, name, domain=None, memory="200MiB", ipv4=None):
self.incus = incus
self.name = name
self.domain = domain or f"{name}{DOMAIN_SUFFIX}"
self.memory = memory
self.ipv4 = ipv4
self.ipv6 = None
def bash(self, script, check=True, capture=True):
"""Returns stdout from executing ``bash -ec <script>`` inside this container.
*script* is dedented and stripped so callers can use triple-quoted strings.
When *check* is False, returns *None* on non-zero exit instead of raising.
When *capture* is False, output streams to the terminal and None is returned.
"""
cmd = ["exec", self.name, "--", "bash", "-ec", textwrap.dedent(script).strip()]
if not capture:
self.incus.run(cmd, check=check, capture=False)
return None
return self.incus.run_output(cmd, check=check)
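# Usage sketch:
#   ct.bash("echo hello")                     # -> "hello" (stripped stdout)
#   ct.bash("false", check=False)             # -> None instead of raising
#   ct.bash("apt-get update", capture=False)  # streams output, returns None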
def run_cmd(self, *args, check=True):
"""Return stdout from running a command directly in the container (no shell).
When *check* is False, returns *None* on non-zero exit instead of raising.
"""
return self.incus.run_output(
["exec", self.name, "--", *args],
check=check,
)
def start(self):
self.incus.run(["start", self.name])
def stop(self, force=False):
cmd = ["stop", self.name]
if force:
cmd.append("--force")
self.incus.run(cmd, check=False)
def launch(self, image=None):
"""Launch from the specified image, or the base image if None."""
self.incus.ensure_bridge()
if image is None:
image = self.incus.ensure_base_image()
cfg = []
cfg += ("-c", f"{LABEL_KEY}=true")
cfg += ("-c", f"user.localchat-domain={self.domain}")
cfg += ("-c", f"limits.memory={self.memory}")
self.incus.run(["init", image, self.name, *cfg])
if self.ipv4:
self.incus.run(
[
"config",
"device",
"override",
self.name,
"eth0",
f"ipv4.address={self.ipv4}",
]
)
self.incus.run(["start", self.name])
return image
def ensure(self):
"""Create/start this container from the cached base image.
On first call, builds the base image (~30s).
Subsequent containers launch in ~2s from the cached image.
Returns ``self`` for chaining.
"""
data = self.incus.run_json(["list", self.name], check=False) or []
existing = [c for c in data if c["name"] == self.name]
image = None
if existing:
status = existing[0]["status"]
if status != "Running":
print(f" Starting stopped {self.name} container ...")
self.start()
else:
print(f" {self.name} already running")
else:
image = self.launch()
self.wait_ready()
if image:
print(f" Ensured {self.name} (launched from {image!r} image)")
return self
def destroy(self):
"""Stop, delete, and clean up config files."""
self.stop(force=True)
self.incus.run(["delete", self.name, "--force"], check=False)
def push_file_content(self, dest_path, content):
"""Write *content* to *dest_path* inside the container.
*content* is dedented and stripped so callers can use
indented triple-quoted strings.
"""
content = textwrap.dedent(content).strip() + "\n"
self.incus.run(
["file", "push", "-", f"{self.name}{dest_path}"],
input=content,
)
self.bash(f"chmod 644 {dest_path}")
def wait_ready(self, timeout=60):
"""Wait until the container is running with an IPv4 address.
Sets ``self.ipv4`` and ``self.ipv6`` (may be *None*),
or raises ``TimeoutError``.
"""
deadline = time.time() + timeout
while time.time() < deadline:
data = self.incus.run_json(
["list", self.name],
check=False,
)
if data and data[0].get("status") == "Running":
net = data[0].get("state", {}).get("network", {})
self.ipv4 = _extract_ip(net, "inet")
self.ipv6 = _extract_ip(net, "inet6")
if self.ipv4:
return
time.sleep(1)
raise TimeoutError(
f"Container {self.name!r} did not become ready within {timeout}s"
)
def rss_mib(self):
"""Return ``(used, total)`` memory from container (or None if unobtainable)."""
output = self.bash("free -m", check=False)
if output:
for line in output.splitlines():
if line.startswith("Mem:"):
parts = line.split()
return int(parts[2]), int(parts[1])
class RelayContainer(Container):
"""Container handle for a chatmail relay.
Accepts the short relay name (e.g. ``test0``) and derives
the Incus container name and mail domain automatically.
"""
def __init__(self, incus, name):
super().__init__(
incus,
f"{name}-localchat",
domain=f"_{name}{DOMAIN_SUFFIX}",
memory="500MiB",
ipv4=RELAY_IPS.get(name),
)
self.sname = name
self.image_alias = f"localchat-{name}"
self.ini = incus.lxconfigs_dir / f"chatmail-{name}.ini"
self.zone = incus.lxconfigs_dir / f"{name}.zone"
def launch(self):
"""Launch from a cached per-relay image if available, else from base."""
cached = self.incus._find_image(self.image_alias)
if cached:
print(f" Using cached image {cached!r}")
else:
print(" No cached image, building from base")
image = super().launch(image=cached)
self.bash("rm -f /etc/chatmail-version")
return image
def destroy(self):
"""Stop, delete, and clean up config files."""
super().destroy()
if self.ini.exists():
self.ini.unlink()
def disable_ipv6(self):
"""Disable IPv6 inside the container via sysctl."""
self.bash("""\
sysctl -w net.ipv6.conf.all.disable_ipv6=1
sysctl -w net.ipv6.conf.default.disable_ipv6=1
mkdir -p /etc/sysctl.d
printf 'net.ipv6.conf.all.disable_ipv6=1\\nnet.ipv6.conf.default.disable_ipv6=1\\n' > /etc/sysctl.d/99-disable-ipv6.conf
""")
def configure_hosts(self, ip):
"""Set hostname and /etc/hosts inside the container."""
self.bash(f"""
echo '{self.name}' > /etc/hostname
hostname {self.name}
sed -i '/ {self.domain}$/d' /etc/hosts
echo '{ip} {self.name} {self.domain}' >> /etc/hosts
""")
def publish_image(self):
"""Publish this container as a reusable per-relay image.
Returns True if an image was published,
False if a cached image already existed.
"""
if self.incus._find_image(self.image_alias):
return False
self.bash("apt-get clean && rm -rf /var/lib/apt/lists/*")
print(f" Publishing {self.name!r} as {self.image_alias!r} image ...")
self.incus.run(
["publish", self.name, f"--alias={self.image_alias}", "--force"],
capture=False,
)
self.wait_ready()
print(f" Image {self.image_alias!r} ready.")
return True
def deployed_version(self):
"""Read /etc/chatmail-version, or None if absent."""
return self.bash("cat /etc/chatmail-version", check=False)
def deployed_domain(self):
"""Read the domain deployed on the container (postfix myhostname)."""
return self.bash(
"postconf -h myhostname 2>/dev/null",
check=False,
)
def verify_ssh(self, ssh_config):
"""Verify SSH connectivity to this container."""
cmd = f"ssh -F {ssh_config} -o ConnectTimeout=10 root@{self.domain} hostname"
return shell(cmd, timeout=15).returncode == 0
def configure_dns(self, dns_ip):
"""Point this container's resolver at *dns_ip*.
Disables systemd-resolved to free port 53 and writes
a static /etc/resolv.conf. Also configures unbound
(if present) to forward .localchat queries.
"""
self.bash(f"""\
systemctl disable --now systemd-resolved 2>/dev/null || true
rm -f /etc/resolv.conf
echo 'nameserver {dns_ip}' > /etc/resolv.conf
mkdir -p /etc/unbound/unbound.conf.d
printf 'server:\\n domain-insecure: "localchat"\\n\\nforward-zone:\\n name: "localchat"\\n forward-addr: {dns_ip}\\n' > /etc/unbound/unbound.conf.d/localchat-forward.conf
systemctl restart unbound 2>/dev/null || true
""")
def check_dns(self, retries=5, delay=2):
"""Verify that external DNS resolution works inside the container."""
for i in range(retries):
result = self.bash(
"getent hosts pypi.org",
check=False,
)
if result:
return True
if i < retries - 1:
time.sleep(delay)
return False
def write_ini(self, disable_ipv6=False):
"""Generate a chatmail.ini config file in lxconfigs/."""
from chatmaild.config import write_initial_config
overrides = {
"max_user_send_per_minute": 600,
"max_user_send_burst_size": 100,
"mtail_address": "127.0.0.1",
}
if disable_ipv6:
overrides["disable_ipv6"] = "True"
write_initial_config(self.ini, self.domain, overrides)
return self.ini
class DNSContainer(Container):
"""Specialised container handle for the PowerDNS name server."""
def __init__(self, incus):
super().__init__(
incus, DNS_CONTAINER_NAME, domain=DNS_DOMAIN, memory="256MiB", ipv4=DNS_IP
)
def launch(self):
"""Launch from cached DNS image if available, else from base image."""
cached = self.incus._find_image(DNS_IMAGE_ALIAS)
if cached:
print(f" Using cached image {cached!r}")
else:
print(" No cached image, building from base")
return super().launch(image=cached)
def publish_as_dns_image(self):
"""Publish this container as a reusable DNS image."""
if self.incus._find_image(DNS_IMAGE_ALIAS):
return
self.bash("apt-get clean && rm -rf /var/lib/apt/lists/*")
print(f" Publishing {self.name!r} as {DNS_IMAGE_ALIAS!r} image ...")
self.incus.run(
["publish", self.name, f"--alias={DNS_IMAGE_ALIAS}", "--force"],
capture=False,
)
self.wait_ready()
print(f" DNS image {DNS_IMAGE_ALIAS!r} ready.")
def pdnsutil(self, *args, check=True):
"""Run ``pdnsutil <args>`` inside the DNS container."""
return self.run_cmd("pdnsutil", *args, check=check)
def replace_rrset(self, zone, name, rtype, ttl, rdata):
"""Shortcut for ``pdnsutil replace-rrset``."""
self.pdnsutil("replace-rrset", zone, name, rtype, ttl, rdata)
def restart_services(self):
"""Restart pdns and pdns-recursor."""
self.bash("""\
systemctl restart pdns
systemctl restart pdns-recursor || true
""")
def ensure(self):
"""Create the DNS container with PowerDNS if needed.
Calls ``super().ensure()`` to create/start the container
and set up SSH, then installs PowerDNS and configures
the Incus bridge to use this container as DNS.
"""
super().ensure()
self._install_powerdns()
self.incus.run(
["network", "set", "incusbr0", "dns.mode=none"],
check=False,
)
self.incus.run(
["network", "set", "incusbr0", f"raw.dnsmasq=dhcp-option=6,{self.ipv4}"],
check=False,
)
def _install_powerdns(self):
"""Install and configure PowerDNS if not already present."""
if self.run_cmd("which", "pdns_server", check=False) is not None:
return
self.bash("""\
systemctl disable --now systemd-resolved 2>/dev/null || true
rm -f /etc/resolv.conf
echo 'nameserver 9.9.9.9' > /etc/resolv.conf
apt-get -o DPkg::Lock::Timeout=60 update
DEBIAN_FRONTEND=noninteractive apt-get install -y \
pdns-server pdns-backend-sqlite3 sqlite3 pdns-recursor dnsutils
systemctl stop pdns pdns-recursor || true
mkdir -p /var/lib/powerdns
sqlite3 /var/lib/powerdns/pdns.sqlite3 \
</usr/share/doc/pdns-backend-sqlite3/schema.sqlite3.sql
chown -R pdns:pdns /var/lib/powerdns
""")
self.push_file_content(
"/etc/powerdns/pdns.conf",
"""\
launch=gsqlite3
gsqlite3-database=/var/lib/powerdns/pdns.sqlite3
local-address=127.0.0.1
local-port=5353
""",
)
self.push_file_content(
"/etc/powerdns/recursor.conf",
"""\
local-address=0.0.0.0
local-port=53
forward-zones=localchat=127.0.0.1:5353
forward-zones-recurse=.=9.9.9.9;149.112.112.112
allow-from=0.0.0.0/0
dont-query=
dnssec=off
""",
)
self.bash("""\
systemctl start pdns
systemctl start pdns-recursor
echo 'nameserver 127.0.0.1' > /etc/resolv.conf
""")
def reset_dns_records(self, dns_ip, domains):
"""Create DNS zones with initial A records via pdnsutil.
Only sets SOA, NS, and A records as the minimal set
needed for SSH connectivity. Full records (MX, TXT, SRV,
CNAME, DKIM) are added later by ``cmdeploy dns``.
Args:
dns_ip: IP of the DNS container
domains: list of dicts with 'name', 'domain', 'ip'
"""
for d in domains:
domain = d["domain"]
ip = d["ip"]
print(f" {domain} -> {ip}")
# Delete and recreate zone fresh (removes stale records)
self.pdnsutil("delete-zone", domain, check=False)
self.pdnsutil("create-zone", domain, f"ns.{domain}")
serial = str(int(time.time()))
soa = f"ns.{domain} hostmaster.{domain} {serial} 3600 900 604800 300"
self.replace_rrset(domain, ".", "SOA", "3600", soa)
self.replace_rrset(domain, ".", "NS", "3600", f"ns.{domain}.")
self.replace_rrset(domain, ".", "A", "3600", ip)
self.replace_rrset(domain, "ns", "A", "3600", dns_ip)
# AAAA (domain -> container IPv6, if available)
ipv6 = d.get("ipv6")
if ipv6:
self.replace_rrset(domain, ".", "AAAA", "3600", ipv6)
print(f" zone reset: SOA, NS, A, AAAA ({ip}, {ipv6})")
else:
# Remove any stale AAAA record
self.pdnsutil("delete-rrset", domain, ".", "AAAA", check=False)
print(f" zone reset: SOA, NS, A ({ip}, IPv4-only)")
self.restart_services()
def set_dns_records(self, text):
"""Add or overwrite DNS records from standard BIND format.
Uses ``cmdeploy.dns.parse_zone_records`` to parse.
Zones are created automatically from the record names.
"""
from ..dns import parse_zone_records
zones_seen = set()
for name, ttl, rtype, rdata in parse_zone_records(text):
# Derive zone from name: find top-level .localchat domain
name_parts = name.split(".")
zone = name # fallback
for i in range(len(name_parts) - 1):
if name_parts[i + 1 :] == ["localchat"]:
zone = ".".join(name_parts[i:])
break
# Create zone if first time seeing it
if zone not in zones_seen:
self.pdnsutil(
"create-zone",
zone,
f"ns.{zone}",
check=False,
)
zones_seen.add(zone)
# Figure out the record name relative to zone
if name == zone:
relative = "."
elif name.endswith(f".{zone}"):
relative = name[: -(len(zone) + 1)]
else:
relative = name
self.replace_rrset(zone, relative, rtype, ttl, rdata)
if zones_seen:
self.restart_services()
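The handles above are meant to be driven from the localchat tooling. As a rough, hypothetical usage sketch (not part of this file; it assumes an already constructed ``Incus`` controller like the one whose methods appear earlier, and uses ``test0`` as a placeholder relay name):
# Hypothetical usage sketch of the handles defined above.
incus = Incus()  # construction details are defined elsewhere in this module
# Bring up the PowerDNS container first so relays can resolve *.localchat names.
dns = incus.get_dns_container()
dns.ensure()
# Create a relay container, point its resolver at the DNS container,
# and check that external DNS resolution works inside it.
relay = incus.get_container("test0")
relay.ensure()
relay.configure_dns(dns.ipv4)
assert relay.check_dns(), "external DNS resolution failed inside the relay"
# Minimal zone (SOA/NS/A) so cmdeploy can reach the relay via its domain name;
# full records are added later by `cmdeploy dns`.
dns.reset_dns_records(dns.ipv4, [
    {"name": relay.sname, "domain": relay.domain, "ip": relay.ipv4},
])
# Generate the chatmail.ini that cmdeploy uses for this relay.
print("chatmail config written to", relay.write_ini())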

View File

@@ -1,80 +0,0 @@
counter delivered_mail
/saved mail to INBOX$/ {
delivered_mail++
}
counter quota_exceeded
/Quota exceeded \(mailbox for user is full\)$/ {
quota_exceeded++
}
# Essentially the number of outgoing messages.
counter dkim_signed
/DKIM-Signature field added/ {
dkim_signed++
}
counter created_accounts
counter created_ci_accounts
counter created_nonci_accounts
/: Created address: (?P<addr>.*)$/ {
created_accounts++
$addr =~ /ci-/ {
created_ci_accounts++
} else {
created_nonci_accounts++
}
}
counter postfix_timeouts
/timeout after DATA/ {
postfix_timeouts++
}
counter postfix_noqueue
/postfix\/.*NOQUEUE/ {
postfix_noqueue++
}
counter warning_count
/warning/ {
warning_count++
}
counter filtered_outgoing_mail_count
counter outgoing_encrypted_mail_count
/Outgoing: Filtering encrypted mail\./ {
outgoing_encrypted_mail_count++
filtered_outgoing_mail_count++
}
counter outgoing_unencrypted_mail_count
/Outgoing: Filtering unencrypted mail\./ {
outgoing_unencrypted_mail_count++
filtered_outgoing_mail_count++
}
counter filtered_incoming_mail_count
counter incoming_encrypted_mail_count
/Incoming: Filtering encrypted mail\./ {
incoming_encrypted_mail_count++
filtered_incoming_mail_count++
}
counter incoming_unencrypted_mail_count
/Incoming: Filtering unencrypted mail\./ {
incoming_unencrypted_mail_count++
filtered_incoming_mail_count++
}
counter rejected_unencrypted_mail_count
/Rejected unencrypted mail/ {
rejected_unencrypted_mail_count++
}
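mtail exposes these counters over HTTP in Prometheus text format; the systemd unit shown further below starts it on port 3903. A minimal sketch for spot-checking the exported values, assuming the conventional /metrics endpoint on localhost:
# Hypothetical check: fetch mtail's metrics page and print the counters
# defined in the program above. Host, port, and /metrics path are assumptions.
from urllib.request import urlopen

with urlopen("http://127.0.0.1:3903/metrics", timeout=5) as resp:
    metrics = resp.read().decode()

for line in metrics.splitlines():
    if line.startswith(("delivered_mail", "dkim_signed", "created_accounts")):
        print(line)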

View File

@@ -1,68 +0,0 @@
from pyinfra import facts, host
from pyinfra.operations import apt, files, server, systemd
from cmdeploy.basedeploy import (
Deployer,
get_resource,
)
class MtailDeployer(Deployer):
def __init__(self, mtail_address):
self.mtail_address = mtail_address
def install(self):
# Uninstall the distribution mtail package so a static binary can be installed instead.
apt.packages(name="Uninstall mtail", packages=["mtail"], present=False)
(url, sha256sum) = {
"x86_64": (
"https://github.com/google/mtail/releases/download/v3.0.8/mtail_3.0.8_linux_amd64.tar.gz",
"123c2ee5f48c3eff12ebccee38befd2233d715da736000ccde49e3d5607724e4",
),
"aarch64": (
"https://github.com/google/mtail/releases/download/v3.0.8/mtail_3.0.8_linux_arm64.tar.gz",
"aa04811c0929b6754408676de520e050c45dddeb3401881888a092c9aea89cae",
),
}[host.get_fact(facts.server.Arch)]
server.shell(
name="Download mtail",
commands=[
f"(echo '{sha256sum} /usr/local/bin/mtail' | sha256sum -c) || (curl -L {url} | gunzip | tar -x -f - mtail -O >/usr/local/bin/mtail.new && mv /usr/local/bin/mtail.new /usr/local/bin/mtail)",
"chmod 755 /usr/local/bin/mtail",
],
)
def configure(self):
# Using our own systemd unit instead of `/usr/lib/systemd/system/mtail.service`.
# This allows reading from journalctl instead of log files.
files.template(
src=get_resource("mtail/mtail.service.j2"),
dest="/etc/systemd/system/mtail.service",
user="root",
group="root",
mode="644",
address=self.mtail_address or "127.0.0.1",
port=3903,
)
mtail_conf = files.put(
name="Mtail configuration",
src=get_resource("mtail/delivered_mail.mtail"),
dest="/etc/mtail/delivered_mail.mtail",
user="root",
group="root",
mode="644",
)
self.need_restart = mtail_conf.changed
def activate(self):
systemd.service(
name="Start and enable mtail",
service="mtail.service",
running=bool(self.mtail_address),
enabled=bool(self.mtail_address),
restarted=self.need_restart,
)
self.need_restart = False

View File

@@ -1,10 +0,0 @@
[Unit]
Description=mtail
[Service]
Type=simple
ExecStart=/bin/sh -c "journalctl -f -o short-iso -n 0 | /usr/local/bin/mtail --address={{ address }} --port={{ port }} --progs /etc/mtail --logtostderr --logs -"
Restart=on-failure
[Install]
WantedBy=multi-user.target

View File

@@ -1,51 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<clientConfig version="1.1">
<emailProvider id="{{ config.mail_domain }}">
<domain>{{ config.mail_domain }}</domain>
<displayName>{{ config.mail_domain }} chatmail</displayName>
<displayShortName>{{ config.mail_domain }}</displayShortName>
<incomingServer type="imap">
<hostname>{{ config.mail_domain }}</hostname>
<port>993</port>
<socketType>SSL</socketType>
<authentication>password-cleartext</authentication>
<username>%EMAILADDRESS%</username>
</incomingServer>
<incomingServer type="imap">
<hostname>{{ config.mail_domain }}</hostname>
<port>143</port>
<socketType>STARTTLS</socketType>
<authentication>password-cleartext</authentication>
<username>%EMAILADDRESS%</username>
</incomingServer>
<incomingServer type="imap">
<hostname>{{ config.mail_domain }}</hostname>
<port>443</port>
<socketType>SSL</socketType>
<authentication>password-cleartext</authentication>
<username>%EMAILADDRESS%</username>
</incomingServer>
<outgoingServer type="smtp">
<hostname>{{ config.mail_domain }}</hostname>
<port>465</port>
<socketType>SSL</socketType>
<authentication>password-cleartext</authentication>
<username>%EMAILADDRESS%</username>
</outgoingServer>
<outgoingServer type="smtp">
<hostname>{{ config.mail_domain }}</hostname>
<port>587</port>
<socketType>STARTTLS</socketType>
<authentication>password-cleartext</authentication>
<username>%EMAILADDRESS%</username>
</outgoingServer>
<outgoingServer type="smtp">
<hostname>{{ config.mail_domain }}</hostname>
<port>443</port>
<socketType>SSL</socketType>
<authentication>password-cleartext</authentication>
<username>%EMAILADDRESS%</username>
</outgoingServer>
</emailProvider>
</clientConfig>
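nginx serves this template at /.well-known/autoconfig/mail/config-v1.1.xml (see the nginx deployer below). A rough sketch for fetching and inspecting the rendered document, with chat.example.org as a placeholder mail domain:
# Sketch: list the advertised IMAP/SMTP endpoints from the autoconfig document.
import xml.etree.ElementTree as ET
from urllib.request import urlopen

url = "https://chat.example.org/.well-known/autoconfig/mail/config-v1.1.xml"
with urlopen(url, timeout=10) as resp:
    root = ET.fromstring(resp.read())

for server in root.iter():
    if server.tag in ("incomingServer", "outgoingServer"):
        print(server.get("type"), server.findtext("hostname"),
              server.findtext("port"), server.findtext("socketType"))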

View File

@@ -1,117 +0,0 @@
from chatmaild.config import Config
from pyinfra.operations import apt, files, systemd
from cmdeploy.basedeploy import (
Deployer,
get_resource,
)
class NginxDeployer(Deployer):
def __init__(self, config):
self.config = config
def install(self):
#
# If we allow nginx to start up on install, it will grab port
# 80, which will then block acmetool from listening on that port.
# That in turn prevents obtaining certificates, which then causes
# an error when we try to start nginx on the custom config
# that leaves port 80 open but also requires certificates to
# be present. To avoid getting into that interlocking mess,
# we use policy-rc.d to prevent nginx from starting up when it
# is installed.
#
# This approach avoids performing any explicit systemd
# operations during the install stage (as opposed to letting
# nginx start and then forcing it to stop), so the install
# stage can also run in non-systemd environments such as a
# container image build.
#
# For documentation about policy-rc.d, see:
# https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
#
files.put(
src=get_resource("policy-rc.d"),
dest="/usr/sbin/policy-rc.d",
user="root",
group="root",
mode="755",
)
apt.packages(
name="Install nginx",
packages=["nginx", "libnginx-mod-stream"],
)
files.file("/usr/sbin/policy-rc.d", present=False)
def configure(self):
self.need_restart = _configure_nginx(self.config)
def activate(self):
systemd.service(
name="Start and enable nginx",
service="nginx.service",
running=True,
enabled=True,
restarted=self.need_restart,
)
self.need_restart = False
def _configure_nginx(config: Config, debug: bool = False) -> bool:
"""Configures nginx HTTP server."""
need_restart = False
main_config = files.template(
src=get_resource("nginx/nginx.conf.j2"),
dest="/etc/nginx/nginx.conf",
user="root",
group="root",
mode="644",
config=config,
disable_ipv6=config.disable_ipv6,
)
need_restart |= main_config.changed
autoconfig = files.template(
src=get_resource("nginx/autoconfig.xml.j2"),
dest="/var/www/html/.well-known/autoconfig/mail/config-v1.1.xml",
user="root",
group="root",
mode="644",
config=config,
)
need_restart |= autoconfig.changed
mta_sts_config = files.template(
src=get_resource("nginx/mta-sts.txt.j2"),
dest="/var/www/html/.well-known/mta-sts.txt",
user="root",
group="root",
mode="644",
config=config,
)
need_restart |= mta_sts_config.changed
# install CGI newemail script
#
cgi_dir = "/usr/lib/cgi-bin"
files.directory(
name=f"Ensure {cgi_dir} exists",
path=cgi_dir,
user="root",
group="root",
)
files.put(
name="Upload cgi newemail.py script",
src=get_resource("newemail.py", pkg="chatmaild").open("rb"),
dest=f"{cgi_dir}/newemail.py",
user="root",
group="root",
mode="755",
)
return need_restart

View File

@@ -1,4 +0,0 @@
version: STSv1
mode: enforce
mx: {{ config.mail_domain }}
max_age: 2419200
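Per RFC 8461, sending MTAs fetch this policy from the mta-sts subdomain; the nginx config below serves it at /.well-known/mta-sts.txt. A small, hypothetical check (placeholder domain):
# Sketch: fetch and parse the published MTA-STS policy.
from urllib.request import urlopen

url = "https://mta-sts.chat.example.org/.well-known/mta-sts.txt"
with urlopen(url, timeout=10) as resp:
    policy = dict(
        line.split(":", 1)
        for line in resp.read().decode().splitlines()
        if ":" in line
    )

print("mode:", policy["mode"].strip(), "mx:", policy["mx"].strip())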

View File

@@ -1,165 +0,0 @@
load_module modules/ngx_stream_module.so;
user www-data;
worker_processes auto;
# Increase the number of connections
# that a worker process can open
# to avoid errors such as
# accept4() failed (24: Too many open files)
# and
# socket() failed (24: Too many open files) while connecting to upstream
# in the logs.
# <https://nginx.org/en/docs/ngx_core_module.html#worker_rlimit_nofile>
worker_rlimit_nofile 2048;
pid /run/nginx.pid;
error_log syslog:server=unix:/dev/log,facility=local3;
events {
# Increase to avoid errors such as
# 768 worker_connections are not enough while connecting to upstream
# in the logs.
# <https://nginx.org/en/docs/ngx_core_module.html#worker_connections>
worker_connections 2048;
# multi_accept on;
}
stream {
map $ssl_preread_alpn_protocols $proxy {
default 127.0.0.1:8443;
~\bsmtp\b 127.0.0.1:465;
~\bimap\b 127.0.0.1:993;
}
server {
listen 443;
{% if not disable_ipv6 %}
listen [::]:443;
{% endif %}
proxy_pass $proxy;
ssl_preread on;
}
}
http {
{% if config.tls_cert_mode == "self" %}
limit_req_zone $binary_remote_addr zone=newaccount:10m rate=2r/s;
{% endif %}
sendfile on;
tcp_nopush on;
# Do not emit nginx version on error pages.
server_tokens off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
ssl_certificate {{ config.tls_cert_path }};
ssl_certificate_key {{ config.tls_key_path }};
gzip on;
server {
listen 127.0.0.1:8443 ssl default_server;
root /var/www/html;
index index.html index.htm;
server_name {{ config.mail_domain }} www.{{ config.mail_domain }} mta-sts.{{ config.mail_domain }};
access_log syslog:server=unix:/dev/log,facility=local7;
location / {
# First attempt to serve request as file, then
# as directory, then fall back to displaying a 404.
try_files $uri $uri/ =404;
}
location /new {
{% if config.tls_cert_mode != "self" %}
if ($request_method = GET) {
# Redirect to Delta Chat,
# which will in turn do a POST request.
return 301 dcaccount:https://{{ config.mail_domain }}/new;
}
{% else %}
limit_req zone=newaccount burst=5 nodelay;
{% endif %}
fastcgi_pass unix:/run/fcgiwrap.socket;
include /etc/nginx/fastcgi_params;
fastcgi_param SCRIPT_FILENAME /usr/lib/cgi-bin/newemail.py;
}
# Old URL for compatibility with e.g. printed QR codes.
#
# Copy-paste instead of redirect to /new
# because Delta Chat core does not follow redirects.
#
# Redirects are only for browsers.
location /cgi-bin/newemail.py {
{% if config.tls_cert_mode != "self" %}
if ($request_method = GET) {
return 301 dcaccount:https://{{ config.mail_domain }}/new;
}
{% endif %}
fastcgi_pass unix:/run/fcgiwrap.socket;
include /etc/nginx/fastcgi_params;
fastcgi_param SCRIPT_FILENAME /usr/lib/cgi-bin/newemail.py;
}
# Proxy to iroh-relay service.
location /relay {
proxy_pass http://127.0.0.1:3340;
proxy_http_version 1.1;
# Upgrade header is normally set to "iroh derp http" or "websocket".
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
location /relay/probe {
proxy_pass http://127.0.0.1:3340;
proxy_http_version 1.1;
}
location /generate_204 {
proxy_pass http://127.0.0.1:3340;
proxy_http_version 1.1;
}
}
# Redirect www. to non-www
server {
listen 127.0.0.1:8443 ssl;
server_name www.{{ config.mail_domain }};
return 301 $scheme://{{ config.mail_domain }}$request_uri;
access_log syslog:server=unix:/dev/log,facility=local7;
}
server {
listen 80;
{% if not disable_ipv6 %}
listen [::]:80;
{% endif %}
{% if config.tls_cert_mode == "acme" %}
location /.well-known/acme-challenge/ {
proxy_pass http://acmetool;
}
{% endif %}
return 301 https://$host$request_uri;
}
{% if config.tls_cert_mode == "acme" %}
upstream acmetool {
server 127.0.0.1:402;
}
{% endif %}
}
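The stream block at the top of this file multiplexes port 443 by the client's ALPN offer: ``smtp`` goes to 127.0.0.1:465, ``imap`` to 127.0.0.1:993, and everything else to the local HTTPS server on 8443. A minimal, hypothetical probe of that routing from a client (placeholder domain; note that routing depends only on the ALPN list the client sends, the backend may or may not negotiate ALPN back):
# Sketch: connect to port 443 advertising ALPN "imap" and read the greeting
# of whatever service nginx proxies the TLS stream to (expected: Dovecot on 993).
import socket
import ssl

ctx = ssl.create_default_context()
ctx.set_alpn_protocols(["imap"])

with socket.create_connection(("chat.example.org", 443), timeout=10) as sock:
    with ctx.wrap_socket(sock, server_hostname="chat.example.org") as tls:
        print("negotiated ALPN:", tls.selected_alpn_protocol())
        print(tls.recv(256).decode(errors="replace"))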

View File

@@ -1 +0,0 @@
{{ config.opendkim_selector }}._domainkey.{{ config.domain_name }} {{ config.domain_name }}:{{ config.opendkim_selector }}:/etc/dkimkeys/{{ config.opendkim_selector }}.private

View File

@@ -1 +0,0 @@
*@{{ config.domain_name }} {{ config.opendkim_selector }}._domainkey.{{ config.domain_name }}

View File

@@ -1,124 +0,0 @@
"""
Installs OpenDKIM
"""
from pyinfra import host
from pyinfra.facts.files import File
from pyinfra.operations import apt, files, server, systemd
from cmdeploy.basedeploy import Deployer, get_resource
class OpendkimDeployer(Deployer):
required_users = [("opendkim", None, ["opendkim"])]
def __init__(self, mail_domain):
self.mail_domain = mail_domain
def install(self):
apt.packages(
name="apt install opendkim opendkim-tools",
packages=["opendkim", "opendkim-tools"],
)
def configure(self):
"""Configure OpenDKIM."""
domain = self.mail_domain
dkim_selector = "opendkim"
need_restart = False
main_config = files.template(
src=get_resource("opendkim/opendkim.conf"),
dest="/etc/opendkim.conf",
user="root",
group="root",
mode="644",
config={"domain_name": domain, "opendkim_selector": dkim_selector},
)
need_restart |= main_config.changed
screen_script = files.file(
path="/etc/opendkim/screen.lua",
present=False,
)
need_restart |= screen_script.changed
final_script = files.file(
path="/etc/opendkim/final.lua",
present=False,
)
need_restart |= final_script.changed
files.directory(
name="Add opendkim directory to /etc",
path="/etc/opendkim",
user="opendkim",
group="opendkim",
mode="750",
present=True,
)
keytable = files.template(
src=get_resource("opendkim/KeyTable"),
dest="/etc/dkimkeys/KeyTable",
user="opendkim",
group="opendkim",
mode="644",
config={"domain_name": domain, "opendkim_selector": dkim_selector},
)
need_restart |= keytable.changed
signing_table = files.template(
src=get_resource("opendkim/SigningTable"),
dest="/etc/dkimkeys/SigningTable",
user="opendkim",
group="opendkim",
mode="644",
config={"domain_name": domain, "opendkim_selector": dkim_selector},
)
need_restart |= signing_table.changed
files.directory(
name="Add opendkim socket directory to /var/spool/postfix",
path="/var/spool/postfix/opendkim",
user="opendkim",
group="opendkim",
mode="750",
present=True,
)
if not host.get_fact(File, f"/etc/dkimkeys/{dkim_selector}.private"):
server.shell(
name="Generate OpenDKIM domain keys",
commands=[
f"/usr/sbin/opendkim-genkey -D /etc/dkimkeys -d {domain} -s {dkim_selector}"
],
_use_su_login=True,
_su_user="opendkim",
)
service_file = files.put(
name="Configure opendkim to restart once a day",
src=get_resource("opendkim/systemd.conf"),
dest="/etc/systemd/system/opendkim.service.d/10-prevent-memory-leak.conf",
)
need_restart |= service_file.changed
files.file(
name="chown opendkim: /etc/dkimkeys/opendkim.private",
path="/etc/dkimkeys/opendkim.private",
user="opendkim",
group="opendkim",
)
self.need_restart = need_restart
def activate(self):
systemd.service(
name="Start and enable OpenDKIM",
service="opendkim.service",
running=True,
enabled=True,
daemon_reload=self.need_restart,
restarted=self.need_restart,
)
self.need_restart = False

View File

@@ -1,67 +0,0 @@
# OpenDKIM configuration.
Syslog yes
SyslogSuccess yes
#LogWhy no
# Common signing and verification parameters. In Debian, the "From" header is
# oversigned, because it is often the identity key used by reputation systems
# and thus somewhat security sensitive.
Canonicalization relaxed/simple
OversignHeaders From
On-BadSignature reject
On-KeyNotFound reject
On-NoSignature reject
DNSTimeout 60
# Signing domain, selector, and key (required). For example, perform signing
# for domain "example.com" with selector "2020" (2020._domainkey.example.com),
# using the private key stored in /etc/dkimkeys/example.private. More granular
# setup options can be found in /usr/share/doc/opendkim/README.opendkim.
Domain {{ config.domain_name }}
Selector {{ config.opendkim_selector }}
KeyFile /etc/dkimkeys/{{ config.opendkim_selector }}.private
KeyTable /etc/dkimkeys/KeyTable
SigningTable refile:/etc/dkimkeys/SigningTable
# Sign Autocrypt header in addition to the default specified in RFC 6376.
#
# Default list is here:
# <https://github.com/trusteddomainproject/OpenDKIM/blob/5c539587561785a66c1f67f720f2fb741f320785/libopendkim/dkim.c#L221-L245>
SignHeaders *,+autocrypt,+content-type
# Prevent addition of a second Content-Type header
# and of other important headers that should not be added
# after the message has been signed.
# See
# <https://www.zone.eu/blog/2024/05/17/bimi-and-dmarc-cant-save-you/>
# and RFC 6376 (page 41) for reference.
#
# We don't use "l=" body length so the problem described in RFC 6376
# is not applicable, but adding e.g. a second "From" header
# or second "Autocrypt" header is better prevented in any case.
#
# Default is empty.
OversignHeaders from,reply-to,subject,date,to,cc,resent-date,resent-from,resent-sender,resent-to,resent-cc,in-reply-to,references,list-id,list-help,list-unsubscribe,list-subscribe,list-post,list-owner,list-archive,autocrypt
# In Debian, opendkim runs as user "opendkim". A umask of 007 is required when
# using a local socket with MTAs that access the socket as a non-privileged
# user (for example, Postfix). You may need to add user "postfix" to group
# "opendkim" in that case.
UserID opendkim
UMask 007
Socket local:/var/spool/postfix/opendkim/opendkim.sock
PidFile /run/opendkim/opendkim.pid
# The trust anchor enables DNSSEC. In Debian, the trust anchor file is provided
# by the package dns-root-data.
TrustAnchorFile /usr/share/dns/root.key
# Sign messages when `-o milter_macro_daemon_name=ORIGINATING` is set.
MTA ORIGINATING
# No hosts are treated as internal; the ORIGINATING daemon name should be set explicitly.
InternalHosts -

View File

@@ -1,3 +0,0 @@
[Service]
Restart=always
RuntimeMaxSec=1d

View File

@@ -1,3 +0,0 @@
#!/bin/sh
echo "All runlevel operations denied by policy" >&2
exit 101

View File

@@ -1,119 +0,0 @@
from pyinfra.operations import apt, files, server, systemd
from cmdeploy.basedeploy import Deployer, get_resource
class PostfixDeployer(Deployer):
required_users = [("postfix", None, ["opendkim"])]
daemon_reload = False
def __init__(self, config, disable_mail):
self.config = config
self.disable_mail = disable_mail
def install(self):
apt.packages(
name="Install Postfix",
packages="postfix",
)
def configure(self):
config = self.config
need_restart = False
main_config = files.template(
src=get_resource("postfix/main.cf.j2"),
dest="/etc/postfix/main.cf",
user="root",
group="root",
mode="644",
config=config,
disable_ipv6=config.disable_ipv6,
)
need_restart |= main_config.changed
master_config = files.template(
src=get_resource("postfix/master.cf.j2"),
dest="/etc/postfix/master.cf",
user="root",
group="root",
mode="644",
debug=False,
config=config,
)
need_restart |= master_config.changed
header_cleanup = files.put(
src=get_resource("postfix/submission_header_cleanup"),
dest="/etc/postfix/submission_header_cleanup",
user="root",
group="root",
mode="644",
)
need_restart |= header_cleanup.changed
lmtp_header_cleanup = files.put(
src=get_resource("postfix/lmtp_header_cleanup"),
dest="/etc/postfix/lmtp_header_cleanup",
user="root",
group="root",
mode="644",
)
need_restart |= lmtp_header_cleanup.changed
tls_policy_map = files.put(
name="Upload SMTP TLS Policy that accepts self-signed certificates for IP-only hosts",
src=get_resource("postfix/smtp_tls_policy_map"),
dest="/etc/postfix/smtp_tls_policy_map",
user="root",
group="root",
mode="644",
)
need_restart |= tls_policy_map.changed
if tls_policy_map.changed:
server.shell(
commands=["postmap /etc/postfix/smtp_tls_policy_map"],
)
# Login map that 1:1 maps email address to login.
login_map = files.put(
src=get_resource("postfix/login_map"),
dest="/etc/postfix/login_map",
user="root",
group="root",
mode="644",
)
need_restart |= login_map.changed
restart_conf = files.put(
name="postfix: restart automatically on failure",
src=get_resource("service/10_restart.conf"),
dest="/etc/systemd/system/postfix@.service.d/10_restart.conf",
)
self.daemon_reload = restart_conf.changed
# Validate postfix configuration before restart
if need_restart:
server.shell(
name="Validate postfix configuration",
# Extract stderr and quit with error if non-zero
commands=[
"""bash -c 'w=$(postconf 2>&1 >/dev/null); [[ -z "$w" ]] || { echo "$w"; false; }'"""
],
)
self.need_restart = need_restart
def activate(self):
restart = False if self.disable_mail else self.need_restart
systemd.service(
name="disable postfix for now"
if self.disable_mail
else "Start and enable Postfix",
service="postfix.service",
running=False if self.disable_mail else True,
enabled=False if self.disable_mail else True,
restarted=restart,
daemon_reload=self.daemon_reload,
)
self.need_restart = False

View File

@@ -1,3 +0,0 @@
/^DKIM-Signature:/ IGNORE
/^Authentication-Results:/ IGNORE
/^Received:/ IGNORE

View File

@@ -1 +0,0 @@
/^(.*)$/ ${1}

View File

@@ -1,104 +0,0 @@
myorigin = {{ config.mail_domain }}
smtpd_banner = $myhostname ESMTP $mail_name (Debian/GNU)
biff = no
# appending .domain is the MUA's job.
append_dot_mydomain = no
# Uncomment the next line to generate "delayed mail" warnings
#delay_warning_time = 4h
readme_directory = no
# See http://www.postfix.org/COMPATIBILITY_README.html
compatibility_level = 3.6
# TLS parameters
smtpd_tls_cert_file={{ config.tls_cert_path }}
smtpd_tls_key_file={{ config.tls_key_path }}
smtpd_tls_security_level=may
smtp_tls_CApath=/etc/ssl/certs
smtp_tls_security_level={{ "verify" if config.tls_cert_mode == "acme" else "encrypt" }}
# Send SNI extension when connecting to other servers.
# <https://www.postfix.org/postconf.5.html#smtp_tls_servername>
smtp_tls_servername = hostname
smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache
smtp_tls_policy_maps = regexp:/etc/postfix/smtp_tls_policy_map
smtp_tls_protocols = >=TLSv1.2
smtp_tls_mandatory_protocols = >=TLSv1.2
# Disable anonymous cipher suites
# and known insecure algorithms.
#
# Disabling anonymous ciphers
# does not generally improve security
# because clients that want to verify the certificate
# will not select them anyway,
# but it makes the cipher suite list shorter and keeps security scanners happy.
# See <https://www.postfix.org/TLS_README.html> for discussion.
#
# Only ancient insecure ciphers should be disabled here,
# as MTA clients that do not support a more secure cipher
# likely do not support MTA-STS either and would
# otherwise fall back to a plaintext connection.
smtpd_tls_exclude_ciphers = aNULL, RC4, MD5, DES
# Override client's preference order.
# <https://www.postfix.org/postconf.5.html#tls_preempt_cipherlist>
#
# This is mostly to ensure that cipher suites with forward secrecy
# are preferred over cipher suites without forward secrecy.
# See <https://www.postfix.org/FORWARD_SECRECY_README.html#server_fs>.
tls_preempt_cipherlist = yes
smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination
myhostname = {{ config.mail_domain }}
alias_maps = hash:/etc/aliases
alias_database = hash:/etc/aliases
# Postfix does not deliver mail for any domain by itself.
# Primary domain is listed in `virtual_mailbox_domains` instead
# and handed over to Dovecot.
mydestination =
relayhost =
{% if disable_ipv6 %}
mynetworks = 127.0.0.0/8
{% else %}
mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
{% endif %}
{% if config.addr_v4 %}
smtp_bind_address = {{ config.addr_v4 }}
{% endif %}
{% if config.addr_v6 %}
smtp_bind_address6 = {{ config.addr_v6 }}
{% endif %}
{% if config.addr_v4 or config.addr_v6 %}
smtp_bind_address_enforce = yes
{% endif %}
mailbox_size_limit = 0
message_size_limit = {{config.max_message_size}}
recipient_delimiter = +
inet_interfaces = all
{% if disable_ipv6 %}
inet_protocols = ipv4
{% else %}
inet_protocols = all
{% endif %}
virtual_transport = lmtp:unix:private/dovecot-lmtp
virtual_mailbox_domains = {{ config.mail_domain }}
lmtp_header_checks = regexp:/etc/postfix/lmtp_header_cleanup
mua_client_restrictions = permit_sasl_authenticated, reject
mua_sender_restrictions = reject_sender_login_mismatch, permit_sasl_authenticated, reject
mua_helo_restrictions = permit_mynetworks, reject_invalid_helo_hostname, reject_non_fqdn_helo_hostname, permit
# 1:1 map MAIL FROM to SASL login name.
smtpd_sender_login_maps = regexp:/etc/postfix/login_map
# Do not look up SMTP client hostnames, to reduce delays
# and avoid unnecessary DNS requests.
smtpd_peername_lookup = no
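The submission/smtps listeners themselves come from master.cf.j2, which is not shown in this diff. Assuming the usual implicit-TLS listener on port 465 (it is also advertised in the autoconfig template above), a quick hypothetical connectivity check could look like this:
# Sketch: verify that the relay answers SMTP over implicit TLS on port 465.
# "chat.example.org" is a placeholder domain.
import smtplib

with smtplib.SMTP_SSL("chat.example.org", 465, timeout=10) as smtp:
    code, features = smtp.ehlo()
    print(code, features.decode())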

Some files were not shown because too many files have changed in this diff.