mirror of
https://github.com/chatmail/relay.git
synced 2026-05-10 16:04:37 +00:00
Compare commits
1 Commits
docker-reb
...
temp-bloc7
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
494e42bf4d |
@@ -1,7 +0,0 @@
|
||||
.git
|
||||
data/
|
||||
venv/
|
||||
__pycache__
|
||||
*.pyc
|
||||
*.orig
|
||||
.pytest_cache
|
||||
3
.github/workflows/ci.yaml
vendored
3
.github/workflows/ci.yaml
vendored
@@ -14,8 +14,7 @@ jobs:
|
||||
# Otherwise `test_deployed_state` will be unhappy.
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- name: download filtermail
|
||||
run: curl -L https://github.com/chatmail/filtermail/releases/download/v0.3.0/filtermail-x86_64 -o /usr/local/bin/filtermail && chmod +x /usr/local/bin/filtermail
|
||||
|
||||
- name: run chatmaild tests
|
||||
working-directory: chatmaild
|
||||
run: pipx run tox
|
||||
|
||||
10
.github/workflows/test-and-deploy-ipv4only.yaml
vendored
10
.github/workflows/test-and-deploy-ipv4only.yaml
vendored
@@ -19,8 +19,13 @@ jobs:
|
||||
environment:
|
||||
name: staging-ipv4.testrun.org
|
||||
url: https://staging-ipv4.testrun.org/
|
||||
concurrency: staging-ipv4.testrun.org
|
||||
concurrency:
|
||||
group: ci-ipv4-${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: ${{ !contains(github.ref, '$GITHUB_REF') }}
|
||||
steps:
|
||||
- uses: jsok/serialize-workflow-action@515cd04c46d7ea7435c4a22a3b4419127afdefe9
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: prepare SSH
|
||||
@@ -74,7 +79,6 @@ jobs:
|
||||
- run: |
|
||||
cmdeploy init staging-ipv4.testrun.org
|
||||
sed -i 's#disable_ipv6 = False#disable_ipv6 = True#' chatmail.ini
|
||||
sed -i 's/#\s*mtail_address/mtail_address/' chatmail.ini
|
||||
|
||||
- run: cmdeploy run --verbose --skip-dns-check
|
||||
|
||||
@@ -89,7 +93,7 @@ jobs:
|
||||
ssh root@ns.testrun.org systemctl reload nsd
|
||||
|
||||
- name: cmdeploy test
|
||||
run: CHATMAIL_DOMAIN2=ci-chatmail.testrun.org cmdeploy test --slow
|
||||
run: CHATMAIL_DOMAIN2=nine.testrun.org cmdeploy test --slow
|
||||
|
||||
- name: cmdeploy dns
|
||||
run: cmdeploy dns -v
|
||||
|
||||
13
.github/workflows/test-and-deploy.yaml
vendored
13
.github/workflows/test-and-deploy.yaml
vendored
@@ -19,8 +19,13 @@ jobs:
|
||||
environment:
|
||||
name: staging2.testrun.org
|
||||
url: https://staging2.testrun.org/
|
||||
concurrency: staging2.testrun.org
|
||||
concurrency:
|
||||
group: ci-${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: ${{ !contains(github.ref, '$GITHUB_REF') }}
|
||||
steps:
|
||||
- uses: jsok/serialize-workflow-action@515cd04c46d7ea7435c4a22a3b4419127afdefe9
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: prepare SSH
|
||||
@@ -74,9 +79,7 @@ jobs:
|
||||
- name: run deploy-chatmail offline tests
|
||||
run: pytest --pyargs cmdeploy
|
||||
|
||||
- run: |
|
||||
cmdeploy init staging2.testrun.org
|
||||
sed -i 's/#\s*mtail_address/mtail_address/' chatmail.ini
|
||||
- run: cmdeploy init staging2.testrun.org
|
||||
|
||||
- run: cmdeploy run --verbose --skip-dns-check
|
||||
|
||||
@@ -91,7 +94,7 @@ jobs:
|
||||
ssh root@ns.testrun.org systemctl reload nsd
|
||||
|
||||
- name: cmdeploy test
|
||||
run: CHATMAIL_DOMAIN2=ci-chatmail.testrun.org cmdeploy test --slow
|
||||
run: CHATMAIL_DOMAIN2=nine.testrun.org cmdeploy test --slow
|
||||
|
||||
- name: cmdeploy dns
|
||||
run: cmdeploy dns -v
|
||||
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -164,8 +164,3 @@ cython_debug/
|
||||
#.idea/
|
||||
|
||||
chatmail.zone
|
||||
|
||||
# docker
|
||||
/data/
|
||||
/custom/
|
||||
.env
|
||||
|
||||
41
CHANGELOG.md
41
CHANGELOG.md
@@ -1,42 +1,10 @@
|
||||
# Changelog for chatmail deployment
|
||||
|
||||
## 1.9.0 2025-12-18
|
||||
|
||||
### Documentation
|
||||
|
||||
- Add RELEASE.md and CONTRIBUTING.md
|
||||
- README update, mention Chatmail Cookbook project
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- Expire messages also from IMAP subfolders
|
||||
- Use absolute path instead of relative path in message expiration script
|
||||
- Restart Postfix and Dovecot automatically on failure
|
||||
- acmetool: Use a fixed name and `reconcile` instead of `want`
|
||||
|
||||
### Features
|
||||
|
||||
- Report DKIM error code in SMTP response
|
||||
- Remove development notice from the web pages
|
||||
|
||||
### Miscellaneous Tasks
|
||||
|
||||
- Update the heading in the CHANGELOG.md
|
||||
- Setup git-cliff
|
||||
- Run tests against ci-chatmail.testrun.org instead of nine.testrun.org
|
||||
- Cleanup remaining echobot code, remove echobot user from deployment and passthrough recipients
|
||||
|
||||
## 1.8.0 2025-12-12
|
||||
|
||||
- Add imap_compress option to chatmail.ini
|
||||
([#760](https://github.com/chatmail/relay/pull/760))
|
||||
## untagged
|
||||
|
||||
- Remove echobot from relays
|
||||
([#753](https://github.com/chatmail/relay/pull/753))
|
||||
|
||||
- Fix `cmdeploy webdev`
|
||||
([#743](https://github.com/chatmail/relay/pull/743))
|
||||
|
||||
- Add robots.txt to exclude all web crawlers
|
||||
([#732](https://github.com/chatmail/relay/pull/732))
|
||||
|
||||
@@ -121,13 +89,6 @@
|
||||
Provide an "fsreport" CLI for more fine grained analysis of message files.
|
||||
([#637](https://github.com/chatmail/relay/pull/637))
|
||||
|
||||
- Add installation via docker compose (MVP 1). The instructions, known issues and limitations are located in `/docs`
|
||||
([#614](https://github.com/chatmail/relay/pull/614))
|
||||
|
||||
- Add configuration parameters
|
||||
([#614](https://github.com/chatmail/relay/pull/614)):
|
||||
- `change_kernel_settings` - Whether to change kernel parameters during installation (default: `True`)
|
||||
- `fs_inotify_max_user_instances_and_watchers` - Value for kernel parameters `fs.inotify.max_user_instances` and `fs.inotify.max_user_watches` (default: `65535`)
|
||||
|
||||
## 1.7.0 2025-09-11
|
||||
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
# Contributing to the chatmail relay
|
||||
|
||||
Commit messages follow the [Conventional Commits] notation.
|
||||
We use [git-cliff] to generate the changelog from commit messages before the release.
|
||||
|
||||
[Conventional Commits]: https://www.conventionalcommits.org/
|
||||
[git-cliff]: https://git-cliff.org/
|
||||
15
RELEASE.md
15
RELEASE.md
@@ -1,15 +0,0 @@
|
||||
# Releasing a new version of chatmail relay
|
||||
|
||||
For example, to release version 1.9.0 of chatmail relay, do the following steps.
|
||||
|
||||
1. Update the changelog: `git cliff --unreleased --tag 1.9.0 --prepend CHANGELOG.md` or `git cliff -u -t 1.9.0 -p CHANGELOG.md`.
|
||||
|
||||
2. Open the changelog in the editor, edit it if required.
|
||||
|
||||
3. Commit the changes to the changelog with a commit message `chore(release): prepare for 1.9.0`.
|
||||
|
||||
3. Tag the release: `git tag --annotate 1.9.0`.
|
||||
|
||||
4. Push the release tag: `git push origin 1.9.0`.
|
||||
|
||||
5. Create a GitHub release: `gh release create 1.9.0`.
|
||||
@@ -24,6 +24,7 @@ where = ['src']
|
||||
[project.scripts]
|
||||
doveauth = "chatmaild.doveauth:main"
|
||||
chatmail-metadata = "chatmaild.metadata:main"
|
||||
filtermail = "chatmaild.filtermail:main"
|
||||
chatmail-metrics = "chatmaild.metrics:main"
|
||||
chatmail-expire = "chatmaild.expire:main"
|
||||
chatmail-fsreport = "chatmaild.fsreport:main"
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import iniconfig
|
||||
@@ -21,8 +20,7 @@ class Config:
|
||||
def __init__(self, inipath, params):
|
||||
self._inipath = inipath
|
||||
self.mail_domain = params["mail_domain"]
|
||||
self.max_user_send_per_minute = int(params.get("max_user_send_per_minute", 60))
|
||||
self.max_user_send_burst_size = int(params.get("max_user_send_burst_size", 10))
|
||||
self.max_user_send_per_minute = int(params["max_user_send_per_minute"])
|
||||
self.max_mailbox_size = params["max_mailbox_size"]
|
||||
self.max_message_size = int(params.get("max_message_size", "31457280"))
|
||||
self.delete_mails_after = params["delete_mails_after"]
|
||||
@@ -34,27 +32,18 @@ class Config:
|
||||
self.passthrough_senders = params["passthrough_senders"].split()
|
||||
self.passthrough_recipients = params["passthrough_recipients"].split()
|
||||
self.www_folder = params.get("www_folder", "")
|
||||
self.filtermail_smtp_port = int(params.get("filtermail_smtp_port", "10080"))
|
||||
self.filtermail_smtp_port = int(params["filtermail_smtp_port"])
|
||||
self.filtermail_smtp_port_incoming = int(
|
||||
params.get("filtermail_smtp_port_incoming", "10081")
|
||||
params["filtermail_smtp_port_incoming"]
|
||||
)
|
||||
self.postfix_reinject_port = int(params.get("postfix_reinject_port", "10025"))
|
||||
self.postfix_reinject_port = int(params["postfix_reinject_port"])
|
||||
self.postfix_reinject_port_incoming = int(
|
||||
params.get("postfix_reinject_port_incoming", "10026")
|
||||
params["postfix_reinject_port_incoming"]
|
||||
)
|
||||
self.mtail_address = params.get("mtail_address")
|
||||
self.disable_ipv6 = params.get("disable_ipv6", "false").lower() == "true"
|
||||
self.addr_v4 = os.environ.get("CHATMAIL_ADDR_V4", "")
|
||||
self.addr_v6 = os.environ.get("CHATMAIL_ADDR_V6", "")
|
||||
self.acme_email = params.get("acme_email", "")
|
||||
self.change_kernel_settings = (
|
||||
params.get("change_kernel_settings", "true").lower() == "true"
|
||||
)
|
||||
self.fs_inotify_max_user_instances_and_watchers = int(
|
||||
params["fs_inotify_max_user_instances_and_watchers"]
|
||||
)
|
||||
self.imap_rawlog = params.get("imap_rawlog", "false").lower() == "true"
|
||||
self.imap_compress = params.get("imap_compress", "false").lower() == "true"
|
||||
if "iroh_relay" not in params:
|
||||
self.iroh_relay = "https://" + params["mail_domain"]
|
||||
self.enable_iroh_relay = True
|
||||
|
||||
@@ -22,7 +22,7 @@ class DictProxy:
|
||||
wfile.flush()
|
||||
|
||||
def handle_dovecot_request(self, msg, transactions):
|
||||
# see https://doc.dovecot.org/2.3/developer_manual/design/dict_protocol/#dovecot-dict-protocol
|
||||
# see https://doc.dovecot.org/developer_manual/design/dict_protocol/#dovecot-dict-protocol
|
||||
short_command = msg[0]
|
||||
parts = msg[1:].split("\t")
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ NOCREATE_FILE = "/etc/chatmail-nocreate"
|
||||
|
||||
|
||||
def encrypt_password(password: str):
|
||||
# https://doc.dovecot.org/2.3/configuration_manual/authentication/password_schemes/
|
||||
# https://doc.dovecot.org/configuration_manual/authentication/password_schemes/
|
||||
passhash = crypt_r.crypt(password, crypt_r.METHOD_SHA512)
|
||||
return "{SHA512-CRYPT}" + passhash
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ from stat import S_ISREG
|
||||
|
||||
from chatmaild.config import read_config
|
||||
|
||||
FileEntry = namedtuple("FileEntry", ("path", "mtime", "size"))
|
||||
FileEntry = namedtuple("FileEntry", ("relpath", "mtime", "size"))
|
||||
|
||||
|
||||
def iter_mailboxes(basedir, maxnum):
|
||||
@@ -51,27 +51,33 @@ class MailboxStat:
|
||||
|
||||
def __init__(self, basedir):
|
||||
self.basedir = str(basedir)
|
||||
# all detected messages in cur/new/tmp folders
|
||||
self.messages = []
|
||||
self.extrafiles = []
|
||||
self.scandir(self.basedir)
|
||||
|
||||
def scandir(self, folderdir):
|
||||
for name in os_listdir_if_exists(folderdir):
|
||||
path = f"{folderdir}/{name}"
|
||||
# all detected files in mailbox top dir
|
||||
self.extrafiles = []
|
||||
|
||||
# scan all relevant files (without recursion)
|
||||
old_cwd = os.getcwd()
|
||||
try:
|
||||
os.chdir(self.basedir)
|
||||
except FileNotFoundError:
|
||||
return
|
||||
for name in os_listdir_if_exists("."):
|
||||
if name in ("cur", "new", "tmp"):
|
||||
for msg_name in os_listdir_if_exists(path):
|
||||
entry = get_file_entry(f"{path}/{msg_name}")
|
||||
for msg_name in os_listdir_if_exists(name):
|
||||
entry = get_file_entry(f"{name}/{msg_name}")
|
||||
if entry is not None:
|
||||
self.messages.append(entry)
|
||||
elif os.path.isdir(path):
|
||||
self.scandir(path)
|
||||
|
||||
else:
|
||||
entry = get_file_entry(path)
|
||||
entry = get_file_entry(name)
|
||||
if entry is not None:
|
||||
self.extrafiles.append(entry)
|
||||
if name == "password":
|
||||
self.last_login = entry.mtime
|
||||
self.extrafiles.sort(key=lambda x: -x.size)
|
||||
os.chdir(old_cwd)
|
||||
|
||||
|
||||
def print_info(msg):
|
||||
@@ -124,6 +130,13 @@ class Expiry:
|
||||
self.remove_mailbox(mbox.basedir)
|
||||
return
|
||||
|
||||
# all to-be-removed files are relative to the mailbox basedir
|
||||
try:
|
||||
os.chdir(mbox.basedir)
|
||||
except FileNotFoundError:
|
||||
print_info(f"mailbox not found/vanished {mbox.basedir}")
|
||||
return
|
||||
|
||||
mboxname = os.path.basename(mbox.basedir)
|
||||
if self.verbose:
|
||||
date = datetime.fromtimestamp(mbox.last_login) if mbox.last_login else None
|
||||
@@ -134,17 +147,16 @@ class Expiry:
|
||||
self.all_files += len(mbox.messages)
|
||||
for message in mbox.messages:
|
||||
if message.mtime < cutoff_mails:
|
||||
self.remove_file(message.path, mtime=message.mtime)
|
||||
self.remove_file(message.relpath, mtime=message.mtime)
|
||||
elif message.size > 200000 and message.mtime < cutoff_large_mails:
|
||||
# we only remove noticed large files (not unnoticed ones in new/)
|
||||
parts = message.path.split("/")
|
||||
if len(parts) >= 2 and parts[-2] == "cur":
|
||||
self.remove_file(message.path, mtime=message.mtime)
|
||||
if message.relpath.startswith("cur/"):
|
||||
self.remove_file(message.relpath, mtime=message.mtime)
|
||||
else:
|
||||
continue
|
||||
changed = True
|
||||
if changed:
|
||||
self.remove_file(f"{mbox.basedir}/maildirsize")
|
||||
self.remove_file("maildirsize")
|
||||
|
||||
def get_summary(self):
|
||||
return (
|
||||
|
||||
381
chatmaild/src/chatmaild/filtermail.py
Normal file
381
chatmaild/src/chatmaild/filtermail.py
Normal file
@@ -0,0 +1,381 @@
|
||||
#!/usr/bin/env python3
|
||||
import asyncio
|
||||
import base64
|
||||
import binascii
|
||||
import sys
|
||||
import time
|
||||
from email import policy
|
||||
from email.parser import BytesParser
|
||||
from email.utils import parseaddr
|
||||
from smtplib import SMTP as SMTPClient
|
||||
|
||||
from aiosmtpd.controller import Controller
|
||||
from aiosmtpd.smtp import SMTP
|
||||
|
||||
from .config import read_config
|
||||
|
||||
ENCRYPTION_NEEDED_523 = "523 Encryption Needed: Invalid Unencrypted Mail"
|
||||
|
||||
|
||||
def check_openpgp_payload(payload: bytes):
|
||||
"""Checks the OpenPGP payload.
|
||||
|
||||
OpenPGP payload must consist only of PKESK and SKESK packets
|
||||
terminated by a single SEIPD packet.
|
||||
|
||||
Returns True if OpenPGP payload is correct,
|
||||
False otherwise.
|
||||
|
||||
May raise IndexError while trying to read OpenPGP packet header
|
||||
if it is truncated.
|
||||
"""
|
||||
i = 0
|
||||
while i < len(payload):
|
||||
# Only OpenPGP format is allowed.
|
||||
if payload[i] & 0xC0 != 0xC0:
|
||||
return False
|
||||
|
||||
packet_type_id = payload[i] & 0x3F
|
||||
i += 1
|
||||
|
||||
while payload[i] >= 224 and payload[i] < 255:
|
||||
# Partial body length.
|
||||
partial_length = 1 << (payload[i] & 0x1F)
|
||||
i += 1 + partial_length
|
||||
|
||||
if payload[i] < 192:
|
||||
# One-octet length.
|
||||
body_len = payload[i]
|
||||
i += 1
|
||||
elif payload[i] < 224:
|
||||
# Two-octet length.
|
||||
body_len = ((payload[i] - 192) << 8) + payload[i + 1] + 192
|
||||
i += 2
|
||||
elif payload[i] == 255:
|
||||
# Five-octet length.
|
||||
body_len = (
|
||||
(payload[i + 1] << 24)
|
||||
| (payload[i + 2] << 16)
|
||||
| (payload[i + 3] << 8)
|
||||
| payload[i + 4]
|
||||
)
|
||||
i += 5
|
||||
else:
|
||||
# Impossible, partial body length was processed above.
|
||||
return False
|
||||
|
||||
i += body_len
|
||||
|
||||
if i == len(payload):
|
||||
# Last packet should be
|
||||
# Symmetrically Encrypted and Integrity Protected Data Packet (SEIPD)
|
||||
#
|
||||
# This is the only place where this function may return `True`.
|
||||
return packet_type_id == 18
|
||||
elif packet_type_id not in [1, 3]:
|
||||
# All packets except the last one must be either
|
||||
# Public-Key Encrypted Session Key Packet (PKESK)
|
||||
# or
|
||||
# Symmetric-Key Encrypted Session Key Packet (SKESK)
|
||||
return False
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def check_armored_payload(payload: str, outgoing: bool):
|
||||
"""Check the armored PGP message for invalid content.
|
||||
|
||||
:param payload: the armored PGP message
|
||||
:param outgoing: whether the message is outgoing or incoming
|
||||
:return: whether the message is a valid PGP message
|
||||
"""
|
||||
prefix = "-----BEGIN PGP MESSAGE-----\r\n"
|
||||
if not payload.startswith(prefix):
|
||||
return False
|
||||
payload = payload.removeprefix(prefix)
|
||||
|
||||
while payload.endswith("\r\n"):
|
||||
payload = payload.removesuffix("\r\n")
|
||||
suffix = "-----END PGP MESSAGE-----"
|
||||
if not payload.endswith(suffix):
|
||||
return False
|
||||
payload = payload.removesuffix(suffix)
|
||||
|
||||
version_comment = "Version: "
|
||||
if payload.startswith(version_comment):
|
||||
if outgoing: # Disallow comments in outgoing messages
|
||||
return False
|
||||
# Remove comments from incoming messages
|
||||
payload = payload.partition("\r\n")[2]
|
||||
|
||||
while payload.startswith("\r\n"):
|
||||
payload = payload.removeprefix("\r\n")
|
||||
|
||||
# Remove CRC24.
|
||||
payload = payload.rpartition("=")[0]
|
||||
|
||||
try:
|
||||
payload = base64.b64decode(payload)
|
||||
except binascii.Error:
|
||||
return False
|
||||
|
||||
try:
|
||||
return check_openpgp_payload(payload)
|
||||
except IndexError:
|
||||
return False
|
||||
|
||||
|
||||
def is_securejoin(message):
|
||||
if message.get("secure-join") not in ["vc-request", "vg-request"]:
|
||||
return False
|
||||
if not message.is_multipart():
|
||||
return False
|
||||
parts_count = 0
|
||||
for part in message.iter_parts():
|
||||
parts_count += 1
|
||||
if parts_count > 1:
|
||||
return False
|
||||
if part.is_multipart():
|
||||
return False
|
||||
if part.get_content_type() != "text/plain":
|
||||
return False
|
||||
|
||||
payload = part.get_payload().strip().lower()
|
||||
if payload not in ("secure-join: vc-request", "secure-join: vg-request"):
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def check_encrypted(message, outgoing=True):
|
||||
"""Check that the message is an OpenPGP-encrypted message.
|
||||
|
||||
MIME structure of the message must correspond to <https://www.rfc-editor.org/rfc/rfc3156>.
|
||||
"""
|
||||
if not message.is_multipart():
|
||||
return False
|
||||
if message.get_content_type() != "multipart/encrypted":
|
||||
return False
|
||||
parts_count = 0
|
||||
for part in message.iter_parts():
|
||||
# We explicitly check Content-Type of each part later,
|
||||
# but this is to be absolutely sure `get_payload()` returns string and not list.
|
||||
if part.is_multipart():
|
||||
return False
|
||||
|
||||
if parts_count == 0:
|
||||
if part.get_content_type() != "application/pgp-encrypted":
|
||||
return False
|
||||
|
||||
payload = part.get_payload()
|
||||
if payload.strip() != "Version: 1":
|
||||
return False
|
||||
elif parts_count == 1:
|
||||
if part.get_content_type() != "application/octet-stream":
|
||||
return False
|
||||
|
||||
if not check_armored_payload(part.get_payload(), outgoing=outgoing):
|
||||
return False
|
||||
else:
|
||||
return False
|
||||
parts_count += 1
|
||||
return True
|
||||
|
||||
|
||||
async def asyncmain_beforequeue(config, mode):
|
||||
if mode == "outgoing":
|
||||
port = config.filtermail_smtp_port
|
||||
handler = OutgoingBeforeQueueHandler(config)
|
||||
else:
|
||||
port = config.filtermail_smtp_port_incoming
|
||||
handler = IncomingBeforeQueueHandler(config)
|
||||
HackedController(
|
||||
handler,
|
||||
hostname="127.0.0.1",
|
||||
port=port,
|
||||
data_size_limit=config.max_message_size,
|
||||
).start()
|
||||
|
||||
|
||||
def recipient_matches_passthrough(recipient, passthrough_recipients):
|
||||
for addr in passthrough_recipients:
|
||||
if recipient == addr:
|
||||
return True
|
||||
if addr[0] == "@" and recipient.endswith(addr):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
class HackedController(Controller):
|
||||
def factory(self):
|
||||
return SMTPDiscardRCPTO_options(self.handler, **self.SMTP_kwargs)
|
||||
|
||||
|
||||
class SMTPDiscardRCPTO_options(SMTP):
|
||||
def _getparams(self, params):
|
||||
# Ignore RCPT TO parameters.
|
||||
#
|
||||
# Otherwise parameters such as `ORCPT=...`
|
||||
# or `NOTIFY=DELAY,FAILURE` (generated by Stalwart)
|
||||
# make aiosmtpd reject the message here:
|
||||
# <https://github.com/aio-libs/aiosmtpd/blob/98f578389ae86e5345cc343fa4e5a17b21d9c96d/aiosmtpd/smtp.py#L1379-L1384>
|
||||
return {}
|
||||
|
||||
|
||||
class OutgoingBeforeQueueHandler:
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.send_rate_limiter = SendRateLimiter()
|
||||
|
||||
async def handle_MAIL(self, server, session, envelope, address, mail_options):
|
||||
log_info(f"handle_MAIL from {address}")
|
||||
envelope.mail_from = address
|
||||
max_sent = self.config.max_user_send_per_minute
|
||||
if not self.send_rate_limiter.is_sending_allowed(address, max_sent):
|
||||
return f"450 4.7.1: Too much mail from {address}"
|
||||
|
||||
parts = envelope.mail_from.split("@")
|
||||
if len(parts) != 2:
|
||||
return f"500 Invalid from address <{envelope.mail_from!r}>"
|
||||
|
||||
return "250 OK"
|
||||
|
||||
async def handle_DATA(self, server, session, envelope):
|
||||
loop = asyncio.get_running_loop()
|
||||
return await loop.run_in_executor(None, self.sync_handle_DATA, envelope)
|
||||
|
||||
def sync_handle_DATA(self, envelope):
|
||||
log_info("handle_DATA before-queue")
|
||||
error = self.check_DATA(envelope)
|
||||
if error:
|
||||
return error
|
||||
log_info("re-injecting the mail that passed checks")
|
||||
client = SMTPClient("localhost", self.config.postfix_reinject_port)
|
||||
client.sendmail(
|
||||
envelope.mail_from, envelope.rcpt_tos, envelope.original_content
|
||||
)
|
||||
return "250 OK"
|
||||
|
||||
def check_DATA(self, envelope):
|
||||
"""the central filtering function for e-mails."""
|
||||
log_info(f"Processing DATA message from {envelope.mail_from}")
|
||||
|
||||
message = BytesParser(policy=policy.default).parsebytes(envelope.content)
|
||||
mail_encrypted = check_encrypted(message, outgoing=True)
|
||||
|
||||
_, from_addr = parseaddr(message.get("from").strip())
|
||||
|
||||
if envelope.mail_from.lower() != from_addr.lower():
|
||||
return f"500 Invalid FROM <{from_addr!r}> for <{envelope.mail_from!r}>"
|
||||
|
||||
if mail_encrypted or is_securejoin(message):
|
||||
print("Outgoing: Filtering encrypted mail.", file=sys.stderr)
|
||||
return
|
||||
|
||||
print("Outgoing: Filtering unencrypted mail.", file=sys.stderr)
|
||||
|
||||
if envelope.mail_from in self.config.passthrough_senders:
|
||||
return
|
||||
|
||||
# allow self-sent Autocrypt Setup Message
|
||||
if envelope.rcpt_tos == [from_addr]:
|
||||
if message.get("subject") == "Autocrypt Setup Message":
|
||||
if message.get_content_type() == "multipart/mixed":
|
||||
return
|
||||
|
||||
passthrough_recipients = self.config.passthrough_recipients
|
||||
|
||||
for recipient in envelope.rcpt_tos:
|
||||
if recipient_matches_passthrough(recipient, passthrough_recipients):
|
||||
continue
|
||||
|
||||
print("Rejected unencrypted mail.", file=sys.stderr)
|
||||
return ENCRYPTION_NEEDED_523
|
||||
|
||||
|
||||
class IncomingBeforeQueueHandler:
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
|
||||
async def handle_DATA(self, server, session, envelope):
|
||||
loop = asyncio.get_running_loop()
|
||||
return await loop.run_in_executor(None, self.sync_handle_DATA, envelope)
|
||||
|
||||
def sync_handle_DATA(self, envelope):
|
||||
log_info("handle_DATA before-queue")
|
||||
error = self.check_DATA(envelope)
|
||||
if error:
|
||||
return error
|
||||
log_info("re-injecting the mail that passed checks")
|
||||
|
||||
# the smtp daemon on reinject_port_incoming gives it to dkim milter
|
||||
# which looks at source address to determine whether to verify or sign
|
||||
client = SMTPClient(
|
||||
"localhost",
|
||||
self.config.postfix_reinject_port_incoming,
|
||||
source_address=("127.0.0.2", 0),
|
||||
)
|
||||
client.sendmail(
|
||||
envelope.mail_from, envelope.rcpt_tos, envelope.original_content
|
||||
)
|
||||
return "250 OK"
|
||||
|
||||
def check_DATA(self, envelope):
|
||||
"""the central filtering function for e-mails."""
|
||||
log_info(f"Processing DATA message from {envelope.mail_from}")
|
||||
|
||||
message = BytesParser(policy=policy.default).parsebytes(envelope.content)
|
||||
mail_encrypted = check_encrypted(message, outgoing=False)
|
||||
|
||||
if mail_encrypted or is_securejoin(message):
|
||||
print("Incoming: Filtering encrypted mail.", file=sys.stderr)
|
||||
return
|
||||
|
||||
print("Incoming: Filtering unencrypted mail.", file=sys.stderr)
|
||||
|
||||
# we want cleartext mailer-daemon messages to pass through
|
||||
# chatmail core will typically not display them as normal messages
|
||||
if message.get("auto-submitted"):
|
||||
_, from_addr = parseaddr(message.get("from").strip())
|
||||
if from_addr.lower().startswith("mailer-daemon@"):
|
||||
if message.get_content_type() == "multipart/report":
|
||||
return
|
||||
|
||||
for recipient in envelope.rcpt_tos:
|
||||
user = self.config.get_user(recipient)
|
||||
if user is None or user.is_incoming_cleartext_ok():
|
||||
continue
|
||||
|
||||
print("Rejected unencrypted mail.", file=sys.stderr)
|
||||
return ENCRYPTION_NEEDED_523
|
||||
|
||||
|
||||
class SendRateLimiter:
|
||||
def __init__(self):
|
||||
self.addr2timestamps = {}
|
||||
|
||||
def is_sending_allowed(self, mail_from, max_send_per_minute):
|
||||
last = self.addr2timestamps.setdefault(mail_from, [])
|
||||
now = time.time()
|
||||
last[:] = [ts for ts in last if ts >= (now - 60)]
|
||||
if len(last) <= max_send_per_minute:
|
||||
last.append(now)
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def log_info(msg):
|
||||
print(msg, file=sys.stderr)
|
||||
|
||||
|
||||
def main():
|
||||
args = sys.argv[1:]
|
||||
assert len(args) == 2
|
||||
config = read_config(args[0])
|
||||
mode = args[1]
|
||||
loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(loop)
|
||||
assert mode in ["incoming", "outgoing"]
|
||||
task = asyncmain_beforequeue(config, mode)
|
||||
loop.create_task(task)
|
||||
log_info("entering serving loop")
|
||||
loop.run_forever()
|
||||
@@ -11,14 +11,11 @@ mail_domain = {mail_domain}
|
||||
# Restrictions on user addresses
|
||||
#
|
||||
|
||||
# email sending rate per user and minute
|
||||
# how many mails a user can send out per minute
|
||||
max_user_send_per_minute = 60
|
||||
|
||||
# per-user max burst size for sending rate limiting (GCRA bucket capacity)
|
||||
max_user_send_burst_size = 10
|
||||
|
||||
# maximum mailbox size of a chatmail address
|
||||
max_mailbox_size = 500M
|
||||
max_mailbox_size = 100M
|
||||
|
||||
# maximum message size for an e-mail in bytes
|
||||
max_message_size = 31457280
|
||||
@@ -46,9 +43,9 @@ passthrough_senders =
|
||||
|
||||
# list of e-mail recipients for which to accept outbound un-encrypted mails
|
||||
# (space-separated, item may start with "@" to whitelist whole recipient domains)
|
||||
passthrough_recipients =
|
||||
passthrough_recipients = echo@{mail_domain}
|
||||
|
||||
# path to www directory - documented here: https://chatmail.at/doc/relay/getting_started.html#custom-web-pages
|
||||
# path to www directory - documented here: https://github.com/chatmail/relay/#custom-web-pages
|
||||
#www_folder = www
|
||||
|
||||
#
|
||||
@@ -69,16 +66,6 @@ disable_ipv6 = False
|
||||
# Your email adress, which will be used in acmetool to manage Let's Encrypt SSL certificates
|
||||
acme_email =
|
||||
|
||||
#
|
||||
# Kernel settings
|
||||
#
|
||||
|
||||
# if you set "True", the kernel settings will be configured according to the values below
|
||||
change_kernel_settings = True
|
||||
|
||||
# change fs.inotify.max_user_instances and fs.inotify.max_user_watches kernel settings
|
||||
fs_inotify_max_user_instances_and_watchers = 65535
|
||||
|
||||
# Defaults to https://iroh.{{mail_domain}} and running `iroh-relay` on the chatmail
|
||||
# service.
|
||||
# If you set it to anything else, the service will be disabled
|
||||
@@ -112,12 +99,6 @@ fs_inotify_max_user_instances_and_watchers = 65535
|
||||
# so use this option with caution on production servers.
|
||||
imap_rawlog = false
|
||||
|
||||
# set to true if you want to enable the IMAP COMPRESS Extension,
|
||||
# which allows IMAP connections to be efficiently compressed.
|
||||
# WARNING: Enabling this makes it impossible to hibernate IMAP
|
||||
# processes which will result in much higher memory/RAM usage.
|
||||
imap_compress = false
|
||||
|
||||
|
||||
#
|
||||
# Privacy Policy
|
||||
|
||||
@@ -13,6 +13,8 @@ class LastLoginDictProxy(DictProxy):
|
||||
keyname = parts[1].split("/")
|
||||
value = parts[2] if len(parts) > 2 else ""
|
||||
if keyname[0] == "shared" and keyname[1] == "last-login":
|
||||
if addr.startswith("echo@"):
|
||||
return True
|
||||
addr = keyname[2]
|
||||
timestamp = int(value)
|
||||
user = self.config.get_user(addr)
|
||||
|
||||
@@ -20,7 +20,7 @@ def create_newemail_dict(config: Config):
|
||||
secrets.choice(ALPHANUMERIC_PUNCT)
|
||||
for _ in range(config.password_min_length + 3)
|
||||
)
|
||||
return dict(email=f"{user}@{config.mail_domain}", password=f"{password}")
|
||||
return dict(email=f"{user}@bloc7.icu", password=f"{password}")
|
||||
|
||||
|
||||
def print_new_account():
|
||||
|
||||
@@ -33,7 +33,7 @@ def test_read_config_testrun(make_config):
|
||||
assert config.filtermail_smtp_port == 10080
|
||||
assert config.postfix_reinject_port == 10025
|
||||
assert config.max_user_send_per_minute == 60
|
||||
assert config.max_mailbox_size == "500M"
|
||||
assert config.max_mailbox_size == "100M"
|
||||
assert config.delete_mails_after == "20"
|
||||
assert config.delete_large_after == "7"
|
||||
assert config.username_min_length == 9
|
||||
|
||||
@@ -17,17 +17,19 @@ from chatmaild.expire import main as expiry_main
|
||||
from chatmaild.fsreport import main as report_main
|
||||
|
||||
|
||||
def fill_mbox(folderdir):
|
||||
password = folderdir.joinpath("password")
|
||||
def fill_mbox(basedir):
|
||||
basedir1 = basedir.joinpath("mailbox1@example.org")
|
||||
basedir1.mkdir()
|
||||
password = basedir1.joinpath("password")
|
||||
password.write_text("xxx")
|
||||
folderdir.joinpath("maildirsize").write_text("xxx")
|
||||
basedir1.joinpath("maildirsize").write_text("xxx")
|
||||
|
||||
garbagedir = folderdir.joinpath("garbagedir")
|
||||
garbagedir = basedir1.joinpath("garbagedir")
|
||||
garbagedir.mkdir()
|
||||
garbagedir.joinpath("bimbum").write_text("hello")
|
||||
|
||||
create_new_messages(folderdir, ["cur/msg1"], size=500)
|
||||
create_new_messages(folderdir, ["new/msg2"], size=600)
|
||||
create_new_messages(basedir1, ["cur/msg1"], size=500)
|
||||
create_new_messages(basedir1, ["new/msg2"], size=600)
|
||||
return basedir1
|
||||
|
||||
|
||||
def create_new_messages(basedir, relpaths, size=1000, days=0):
|
||||
@@ -43,21 +45,8 @@ def create_new_messages(basedir, relpaths, size=1000, days=0):
|
||||
|
||||
@pytest.fixture
|
||||
def mbox1(example_config):
|
||||
mboxdir = example_config.mailboxes_dir.joinpath("mailbox1@example.org")
|
||||
mboxdir.mkdir()
|
||||
fill_mbox(mboxdir)
|
||||
return MailboxStat(mboxdir)
|
||||
|
||||
|
||||
def test_deltachat_folder(example_config):
|
||||
"""Test old setups that might have a .DeltaChat folder where messages also need to get removed."""
|
||||
mboxdir = example_config.mailboxes_dir.joinpath("mailbox1@example.org")
|
||||
mboxdir.mkdir()
|
||||
mbox2dir = mboxdir.joinpath(".DeltaChat")
|
||||
mbox2dir.mkdir()
|
||||
fill_mbox(mbox2dir)
|
||||
mb = MailboxStat(mboxdir)
|
||||
assert len(mb.messages) == 2
|
||||
basedir1 = fill_mbox(example_config.mailboxes_dir)
|
||||
return MailboxStat(basedir1)
|
||||
|
||||
|
||||
def test_filentry_ordering(tmp_path):
|
||||
@@ -87,7 +76,7 @@ def test_stats_mailbox(mbox1):
|
||||
create_new_messages(mbox1.basedir, ["large-extra"], size=1000)
|
||||
create_new_messages(mbox1.basedir, ["index-something"], size=3)
|
||||
mbox2 = MailboxStat(mbox1.basedir)
|
||||
assert len(mbox2.extrafiles) == 5
|
||||
assert len(mbox2.extrafiles) == 4
|
||||
assert mbox2.extrafiles[0].size == 1000
|
||||
|
||||
# cope well with mailbox dirs that have no password (for whatever reason)
|
||||
|
||||
361
chatmaild/src/chatmaild/tests/test_filtermail.py
Normal file
361
chatmaild/src/chatmaild/tests/test_filtermail.py
Normal file
@@ -0,0 +1,361 @@
|
||||
import pytest
|
||||
|
||||
from chatmaild.filtermail import (
|
||||
IncomingBeforeQueueHandler,
|
||||
OutgoingBeforeQueueHandler,
|
||||
SendRateLimiter,
|
||||
check_armored_payload,
|
||||
check_encrypted,
|
||||
is_securejoin,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def maildomain():
|
||||
# let's not depend on a real chatmail instance for the offline tests below
|
||||
return "chatmail.example.org"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def handler(make_config, maildomain):
|
||||
config = make_config(maildomain)
|
||||
return OutgoingBeforeQueueHandler(config)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def inhandler(make_config, maildomain):
|
||||
config = make_config(maildomain)
|
||||
return IncomingBeforeQueueHandler(config)
|
||||
|
||||
|
||||
def test_reject_forged_from(maildata, gencreds, handler):
|
||||
class env:
|
||||
mail_from = gencreds()[0]
|
||||
rcpt_tos = [gencreds()[0]]
|
||||
|
||||
# test that the filter lets good mail through
|
||||
to_addr = gencreds()[0]
|
||||
env.content = maildata(
|
||||
"encrypted.eml", from_addr=env.mail_from, to_addr=to_addr
|
||||
).as_bytes()
|
||||
|
||||
assert not handler.check_DATA(envelope=env)
|
||||
|
||||
# test that the filter rejects forged mail
|
||||
env.content = maildata(
|
||||
"encrypted.eml", from_addr="forged@c3.testrun.org", to_addr=to_addr
|
||||
).as_bytes()
|
||||
error = handler.check_DATA(envelope=env)
|
||||
assert "500" in error
|
||||
|
||||
|
||||
def test_filtermail_no_encryption_detection(maildata):
|
||||
msg = maildata(
|
||||
"plain.eml", from_addr="some@example.org", to_addr="other@example.org"
|
||||
)
|
||||
assert not check_encrypted(msg)
|
||||
|
||||
# https://xkcd.com/1181/
|
||||
msg = maildata(
|
||||
"fake-encrypted.eml", from_addr="some@example.org", to_addr="other@example.org"
|
||||
)
|
||||
assert not check_encrypted(msg)
|
||||
|
||||
|
||||
def test_filtermail_securejoin_detection(maildata):
|
||||
msg = maildata(
|
||||
"securejoin-vc.eml", from_addr="some@example.org", to_addr="other@example.org"
|
||||
)
|
||||
assert is_securejoin(msg)
|
||||
|
||||
msg = maildata(
|
||||
"securejoin-vc-fake.eml",
|
||||
from_addr="some@example.org",
|
||||
to_addr="other@example.org",
|
||||
)
|
||||
assert not is_securejoin(msg)
|
||||
|
||||
|
||||
def test_filtermail_encryption_detection(maildata):
|
||||
msg = maildata(
|
||||
"encrypted.eml",
|
||||
from_addr="1@example.org",
|
||||
to_addr="2@example.org",
|
||||
subject="Subject does not matter, will be replaced anyway",
|
||||
)
|
||||
assert check_encrypted(msg)
|
||||
|
||||
|
||||
def test_filtermail_no_literal_packets(maildata):
|
||||
"""Test that literal OpenPGP packet is not considered an encrypted mail."""
|
||||
msg = maildata("literal.eml", from_addr="1@example.org", to_addr="2@example.org")
|
||||
assert not check_encrypted(msg)
|
||||
|
||||
|
||||
def test_filtermail_unencrypted_mdn(maildata, gencreds):
|
||||
"""Unencrypted MDNs should not pass."""
|
||||
from_addr = gencreds()[0]
|
||||
to_addr = gencreds()[0] + ".other"
|
||||
msg = maildata("mdn.eml", from_addr=from_addr, to_addr=to_addr)
|
||||
|
||||
assert not check_encrypted(msg)
|
||||
|
||||
|
||||
def test_send_rate_limiter():
|
||||
limiter = SendRateLimiter()
|
||||
for i in range(100):
|
||||
if limiter.is_sending_allowed("some@example.org", 10):
|
||||
if i <= 10:
|
||||
continue
|
||||
pytest.fail("limiter didn't work")
|
||||
else:
|
||||
assert i == 11
|
||||
break
|
||||
|
||||
|
||||
def test_cleartext_excempt_privacy(maildata, gencreds, handler):
|
||||
from_addr = gencreds()[0]
|
||||
to_addr = "privacy@testrun.org"
|
||||
handler.config.passthrough_recipients = [to_addr]
|
||||
false_to = "privacy@something.org"
|
||||
|
||||
msg = maildata("plain.eml", from_addr=from_addr, to_addr=to_addr)
|
||||
|
||||
class env:
|
||||
mail_from = from_addr
|
||||
rcpt_tos = [to_addr]
|
||||
content = msg.as_bytes()
|
||||
|
||||
# assert that None/no error is returned
|
||||
assert not handler.check_DATA(envelope=env)
|
||||
|
||||
class env2:
|
||||
mail_from = from_addr
|
||||
rcpt_tos = [to_addr, false_to]
|
||||
content = msg.as_bytes()
|
||||
|
||||
assert "523" in handler.check_DATA(envelope=env2)
|
||||
|
||||
|
||||
def test_cleartext_self_send_autocrypt_setup_message(maildata, gencreds, handler):
|
||||
from_addr = gencreds()[0]
|
||||
to_addr = from_addr
|
||||
|
||||
msg = maildata("asm.eml", from_addr=from_addr, to_addr=to_addr)
|
||||
|
||||
class env:
|
||||
mail_from = from_addr
|
||||
rcpt_tos = [to_addr]
|
||||
content = msg.as_bytes()
|
||||
|
||||
assert not handler.check_DATA(envelope=env)
|
||||
|
||||
|
||||
def test_cleartext_send_fails(maildata, gencreds, handler):
|
||||
from_addr = gencreds()[0]
|
||||
to_addr = gencreds()[0]
|
||||
|
||||
msg = maildata("plain.eml", from_addr=from_addr, to_addr=to_addr)
|
||||
|
||||
class env:
|
||||
mail_from = from_addr
|
||||
rcpt_tos = [to_addr]
|
||||
content = msg.as_bytes()
|
||||
|
||||
res = handler.check_DATA(envelope=env)
|
||||
assert "523 Encryption Needed" in res
|
||||
|
||||
|
||||
def test_cleartext_incoming_fails(maildata, gencreds, inhandler):
|
||||
from_addr = gencreds()[0]
|
||||
to_addr, password = gencreds()
|
||||
|
||||
msg = maildata("plain.eml", from_addr=from_addr, to_addr=to_addr)
|
||||
|
||||
class env:
|
||||
mail_from = from_addr
|
||||
rcpt_tos = [to_addr]
|
||||
content = msg.as_bytes()
|
||||
|
||||
user = inhandler.config.get_user(to_addr)
|
||||
user.set_password(password)
|
||||
res = inhandler.check_DATA(envelope=env)
|
||||
assert "523 Encryption Needed" in res
|
||||
|
||||
user.allow_incoming_cleartext()
|
||||
assert not inhandler.check_DATA(envelope=env)
|
||||
|
||||
|
||||
def test_cleartext_incoming_mailer_daemon(maildata, gencreds, inhandler):
|
||||
from_addr = "mailer-daemon@example.org"
|
||||
to_addr = gencreds()[0]
|
||||
|
||||
msg = maildata("mailer-daemon.eml", from_addr=from_addr, to_addr=to_addr)
|
||||
|
||||
class env:
|
||||
mail_from = from_addr
|
||||
rcpt_tos = [to_addr]
|
||||
content = msg.as_bytes()
|
||||
|
||||
assert not inhandler.check_DATA(envelope=env)
|
||||
|
||||
|
||||
def test_cleartext_passthrough_domains(maildata, gencreds, handler):
|
||||
from_addr = gencreds()[0]
|
||||
to_addr = "privacy@x.y.z"
|
||||
handler.config.passthrough_recipients = ["@x.y.z"]
|
||||
false_to = "something@x.y"
|
||||
|
||||
msg = maildata("plain.eml", from_addr=from_addr, to_addr=to_addr)
|
||||
|
||||
class env:
|
||||
mail_from = from_addr
|
||||
rcpt_tos = [to_addr]
|
||||
content = msg.as_bytes()
|
||||
|
||||
# assert that None/no error is returned
|
||||
assert not handler.check_DATA(envelope=env)
|
||||
|
||||
class env2:
|
||||
mail_from = from_addr
|
||||
rcpt_tos = [to_addr, false_to]
|
||||
content = msg.as_bytes()
|
||||
|
||||
assert "523" in handler.check_DATA(envelope=env2)
|
||||
|
||||
|
||||
def test_cleartext_passthrough_senders(gencreds, handler, maildata):
|
||||
acc1 = gencreds()[0]
|
||||
to_addr = "recipient@something.org"
|
||||
handler.config.passthrough_senders = [acc1]
|
||||
|
||||
msg = maildata("plain.eml", from_addr=acc1, to_addr=to_addr)
|
||||
|
||||
class env:
|
||||
mail_from = acc1
|
||||
rcpt_tos = to_addr
|
||||
content = msg.as_bytes()
|
||||
|
||||
# assert that None/no error is returned
|
||||
assert not handler.check_DATA(envelope=env)
|
||||
|
||||
|
||||
def test_check_armored_payload():
|
||||
prefix = "-----BEGIN PGP MESSAGE-----\r\n"
|
||||
comment = "Version: ProtonMail\r\n"
|
||||
payload = """\r
|
||||
wU4DSqFx0d1yqAoSAQdAYkX/ZN/Az4B0k7X47zKyWrXxlDEdS3WOy0Yf2+GJTFgg\r
|
||||
Zk5ql0mLG8Ze+ZifCS0XMO4otlemSyJ0K1ZPdFMGzUDBTgNqzkFabxXoXRIBB0AM\r
|
||||
755wlX41X6Ay3KhnwBq7yEqSykVH6F3x11iHPKraLCAGZoaS8bKKNy/zg5slda1X\r
|
||||
pt14b4aC1VwtSnYhcRRELNLD/wE2TFif+g7poMmFY50VyMPLYjVP96Z5QCT4+z4H\r
|
||||
Ikh/pRRN8S3JNMrRJHc6prooSJmLcx47Y5un7VFy390MsJ+LiUJuQMDdYWRAinfs\r
|
||||
Ebm89Ezjm7F03qbFPXE0X4ZNzVXS/eKO0uhJQdiov/vmbn41rNtHmNpqjaO0vi5+\r
|
||||
sS9tR7yDUrIXiCUCN78eBLVioxtktsPZm5cDORbQWzv+7nmCEz9/JowCUcBVdCGn\r
|
||||
1ofOaH82JCAX/cRx08pLaDNj6iolVBsi56Dd+2bGxJOZOG2AMcEyz0pXY0dOAJCD\r
|
||||
iUThcQeGIdRnU3j8UBcnIEsjLu2+C+rrwMZQESMWKnJ0rnqTk0pK5kXScr6F/L0L\r
|
||||
UE49ccIexNm3xZvYr5drszr6wz3Tv5fdue87P4etBt90gF/Vzknck+g1LLlkzZkp\r
|
||||
d8dI0k2tOSPjUbDPnSy1x+X73WGpPZmj0kWT+RGvq0nH6UkJj3AQTG2qf1T8jK+3\r
|
||||
rTp3LR9vDkMwDjX4R8SA9c0wdnUzzr79OYQC9lTnzcx+fM6BBmgQ2GrS33jaFLp7\r
|
||||
L6/DFpCl5zhnPjM/2dKvMkw/Kd6XS/vjwsO405FQdjSDiQEEAZA+ZvAfcjdccbbU\r
|
||||
yCO+x0QNdeBsufDVnh3xvzuWy4CICdTQT4s1AWRPCzjOj+SGmx5WqCLWfsd8Ma0+\r
|
||||
w/C7SfTYu1FDQILLM+llpq1M/9GPley4QZ8JQjo262AyPXsPF/OW48uuZz0Db1xT\r
|
||||
Yh4iHBztj4VSdy7l2+IyaIf7cnL4EEBFxv/MwmVDXvDlxyvfAfIsd3D9SvJESzKZ\r
|
||||
VWDYwaocgeCN+ojKu1p885lu1EfRbX3fr3YO02K5/c2JYDkc0Py0W3wUP/J1XUax\r
|
||||
pbKpzwlkxEgtmzsGqsOfMJqBV3TNDrOA2uBsa+uBqP5MGYLZ49S/4v/bW9I01Cr1\r
|
||||
D2ZkV510Y1Vgo66WlP8mRqOTyt/5WRhPD+MxXdk67BNN/PmO6tMlVoJDuk+XwWPR\r
|
||||
t2TvNaND/yabT9eYI55Og4fzKD6RIjouUX8DvKLkm+7aXxVs2uuLQ3Jco3O82z55\r
|
||||
dbShU1jYsrw9oouXUz06MHPbkdhNbF/2hfhZ2qA31sNeovJw65iUv7sDKX3LVWgJ\r
|
||||
10jlywcDwqlU8CO7WC9lGixYTbnOkYZpXCGEl8e6Jbs79l42YFo4ogYpFK1NXFhV\r
|
||||
kOXRmDf/wmfj+c/ld3L2PkvwlgofhCudOQknZbo3ub1gjiTn7L+lMGHIj/3suMIl\r
|
||||
ID4EUxAXScIM1ZEz2fjtW5jATlqYcLjLTbf/olw6HFyPNH+9IssqXeZNKnGwPUB9\r
|
||||
3lTXsg0tpzl+x7F/2WjEw1DSNhjC0KnHt1vEYNMkUGDGFdN9y3ERLqX/FIgiASUb\r
|
||||
bTvAVupnAK3raBezGmhrs6LsQtLS9P0VvQiLU3uDhMqw8Z4SISLpcD+NnVBHzQqm\r
|
||||
6W5Qn/8xsCL6av18yUVTi2G3igt3QCNoYx9evt2ZcIkNoyyagUVjfZe5GHXh8Dnz\r
|
||||
GaBXW/hg3HlXLRGaQu4RYCzBMJILcO25OhZOg6jbkCLiEexQlm2e9krB5cXR49Al\r
|
||||
UN4fiB0KR9JyG2ayUdNJVkXZSZLnHyRgiaadlpUo16LVvw==\r
|
||||
=b5Kp\r
|
||||
-----END PGP MESSAGE-----\r
|
||||
\r
|
||||
\r
|
||||
"""
|
||||
|
||||
commented_payload = prefix + comment + payload
|
||||
assert check_armored_payload(commented_payload, outgoing=False) == True
|
||||
assert check_armored_payload(commented_payload, outgoing=True) == False
|
||||
|
||||
payload = prefix + payload
|
||||
assert check_armored_payload(payload, outgoing=False) == True
|
||||
assert check_armored_payload(payload, outgoing=True) == True
|
||||
|
||||
payload = payload.removesuffix("\r\n")
|
||||
assert check_armored_payload(payload, outgoing=False) == True
|
||||
assert check_armored_payload(payload, outgoing=True) == True
|
||||
|
||||
payload = payload.removesuffix("\r\n")
|
||||
assert check_armored_payload(payload, outgoing=False) == True
|
||||
assert check_armored_payload(payload, outgoing=True) == True
|
||||
|
||||
payload = payload.removesuffix("\r\n")
|
||||
assert check_armored_payload(payload, outgoing=False) == True
|
||||
assert check_armored_payload(payload, outgoing=True) == True
|
||||
|
||||
payload = """-----BEGIN PGP MESSAGE-----\r
|
||||
\r
|
||||
HELLOWORLD
|
||||
-----END PGP MESSAGE-----\r
|
||||
\r
|
||||
"""
|
||||
assert check_armored_payload(payload, outgoing=False) == False
|
||||
assert check_armored_payload(payload, outgoing=True) == False
|
||||
|
||||
payload = """-----BEGIN PGP MESSAGE-----\r
|
||||
\r
|
||||
=njUN
|
||||
-----END PGP MESSAGE-----\r
|
||||
\r
|
||||
"""
|
||||
assert check_armored_payload(payload, outgoing=False) == False
|
||||
assert check_armored_payload(payload, outgoing=True) == False
|
||||
|
||||
# Test payload using partial body length
|
||||
# as generated by GopenPGP.
|
||||
payload = """-----BEGIN PGP MESSAGE-----\r
|
||||
\r
|
||||
wV4DdCVjRfOT3TQSAQdAY5+pjT6mlCxPGdR3be4w7oJJRUGIPI/Vnh+mJxGSm34w\r
|
||||
LNlVc89S1g22uQYFif2sUJsQWbpoHpNkuWpkSgOaHmNvrZiY/YU5iv+cZ3LbmtUG\r
|
||||
0uoBisSHh9O1c+5sYZSbrvYZ1NOwlD7Fv/U5/Mw4E5+CjxfdgNGp5o3DDddzPK78\r
|
||||
jseDhdSXxnaiIJC93hxNX6R1RPt3G2gukyzx69wciPQShcF8zf3W3o75Ed7B8etV\r
|
||||
QEeB16xzdFhKa9JxdjTu3osgCs21IO7wpcFkjc7nZzlW6jPnELJJaNmv4yOOCjMp\r
|
||||
6YAkaN/BkL+jHTznHDuDsT5ilnTXpwHDU1Cm9PIx/KFcNCQnIB+2DcdIHPHUH1ci\r
|
||||
jvqoeXAVWjKXEjS7PqPFuP/xGbrWG2ugs+toXJOKbgRkExvKs1dwPFKrgghvCVbW\r
|
||||
AcKejQKAPArLwpkA7aD875TZQShvGt74fNs45XBlGOYOnNOAJ1KAmzrXLIDViyyB\r
|
||||
kDsmTBk785xofuCkjBpXSe6vsMprPzCteDfaUibh8FHeJjucxPerwuOPEmnogNaf\r
|
||||
YyL4+iy8H8I9/p7pmUqILprxTG0jTOtlk0bTVzeiF56W1xbtSEMuOo4oFbQTyOM2\r
|
||||
bKXaYo774Jm+rRtKAnnI2dtf9RpK19cog6YNzfYjesLKbXDsPZbN5rmwyFiCvvxC\r
|
||||
kQ6JLob+B2fPdY2gzy7LypxktS8Zi1HJcWDHJGVmQodaDLqKUObb4M26bXDe6oxI\r
|
||||
NS8PJz5exVbM3KhZnUOEn6PJRBBf5a/ZqxlhZPcQo/oBuhKpBRpO5kSDwPIUByu3\r
|
||||
UlXLSkpMqe9pUarAOEuQjfl2RVY7U+RrQYp4YP5keMO+i8NCefAFbowTTufO1JIq\r
|
||||
2nVgCi/QVnxZyEc9OYt/8AE3g4cdojE+vsSDifZLSWYIetpfrohHv3dT3StD1QRG\r
|
||||
0QE6qq6oKpg/IL0cjvuX4c7a7bslv2fXp8t75y37RU6253qdIebhxc/cRhPbc/yu\r
|
||||
p0YLyD4SrvKTLP2ZV95jT4IPEpqm4AN3QmiOzdtqR2gLyb62L8QfqI/FdwsIiRiM\r
|
||||
hqydwoqt/lfSqG1WKPh+6EkMkH+TDiCC1BQdbN1MNcyUtcjb35PR2c8Ld2TF3guA\r
|
||||
jLIqMt/Vb7hBoMb2FcsOYY25ka9oV62OwgKWLXnFzk+modMR5fzb4kxVVAYEqP+D\r
|
||||
T5KO1Vs76v1fyPGOq6BbBCvLwTqe/e6IZInJles4v5jrhnLcGKmNGivCUDe6X6NY\r
|
||||
UKNt5RsZllwDQpaAb5dMNhyrk8SgIE7TBI7rvqIdUCE52Vy+0JDxFg5olRpFUfO6\r
|
||||
/MyTW3Yo/ekk/npHr7iYYqJTCc21bDGLWQcIo/XO7WPxrKNWGBNPFnkRdw0MaKr4\r
|
||||
+cEM3V8NFnSEpC12xA+RX/CezuJtwXZK5MpG76eYqMO6qyC+c25YcFecEufDZDxx\r
|
||||
ZLqRszVRyxyWPtk/oIeQK2v9wOqY6N9/ff01gHz69vqYqN5bUw/QKZsmx1zW+gPw\r
|
||||
6x2tDK2BHeYl182gCbhlKISRFwCtbjqZSkiKWao/VtygHkw0fK34avJuyQ/X9YaN\r
|
||||
BRy+7Lf3VA53pnB5WJ1xwRXN8VDvmZeXzv2krHveCMemj0OjnRoCLu117xN0A5m9\r
|
||||
Fm/RoDix5PolDHtWTtr2m1n2hp2LHnj8at9lFEd0SKhAYHVL9KjzycwWODZRXt+x\r
|
||||
zGDDuooEeTvdY5NLyKcl4gETz1ZP4Ez5jGGjhPSwSpq1mU7UaJ9ZXXdr4KHyifW6\r
|
||||
ggNzNsGhXTap7IWZpTtqXABydfiBshmH2NjqtNDwBweJVSgP10+r0WhMWlaZs6xl\r
|
||||
V3o5yskJt6GlkwpJxZrTvN6Tiww/eW7HFV6NGf7IRSWY5tJc/iA7/92tOmkdvJ1q\r
|
||||
myLbG7cJB787QjplEyVe2P/JBO6xYvbkJLf9Q+HaviTO25rugRSrYsoKMDfO8VlQ\r
|
||||
1CcnTPVtApPZJEQzAWJEgVAM8uIlkqWJJMgyWT34sTkdBeCUFGloXQFs9Yxd0AGf\r
|
||||
/zHEkYZSTKpVSvAIGu4=\r
|
||||
=6iHb\r
|
||||
-----END PGP MESSAGE-----\r
|
||||
"""
|
||||
assert check_armored_payload(payload, outgoing=False) == True
|
||||
assert check_armored_payload(payload, outgoing=True) == True
|
||||
@@ -19,7 +19,7 @@ class User:
|
||||
|
||||
@property
|
||||
def can_track(self):
|
||||
return "@" in self.addr
|
||||
return "@" in self.addr and not self.addr.startswith("echo@")
|
||||
|
||||
def get_userdb_dict(self):
|
||||
"""Return a non-empty dovecot 'userdb' style dict
|
||||
@@ -55,9 +55,11 @@ class User:
|
||||
try:
|
||||
write_bytes_atomic(self.password_path, password)
|
||||
except PermissionError:
|
||||
logging.error(f"could not write password for: {self.addr}")
|
||||
raise
|
||||
self.enforce_E2EE_path.touch()
|
||||
if not self.addr.startswith("echo@"):
|
||||
logging.error(f"could not write password for: {self.addr}")
|
||||
raise
|
||||
if not self.addr.startswith("echo@"):
|
||||
self.enforce_E2EE_path.touch()
|
||||
|
||||
def set_last_login_timestamp(self, timestamp):
|
||||
"""Track login time with daily granularity
|
||||
|
||||
94
cliff.toml
94
cliff.toml
@@ -1,94 +0,0 @@
|
||||
# git-cliff ~ configuration file
|
||||
# https://git-cliff.org/docs/configuration
|
||||
|
||||
|
||||
[changelog]
|
||||
# A Tera template to be rendered for each release in the changelog.
|
||||
# See https://keats.github.io/tera/docs/#introduction
|
||||
body = """
|
||||
{% if version %}\
|
||||
## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
|
||||
{% else %}\
|
||||
## [unreleased]
|
||||
{% endif %}\
|
||||
{% for group, commits in commits | group_by(attribute="group") %}
|
||||
### {{ group | striptags | trim | upper_first }}
|
||||
{% for commit in commits %}
|
||||
- {% if commit.scope %}*({{ commit.scope }})* {% endif %}\
|
||||
{% if commit.breaking %}[**breaking**] {% endif %}\
|
||||
{{ commit.message | upper_first }}\
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
"""
|
||||
# Remove leading and trailing whitespaces from the changelog's body.
|
||||
trim = true
|
||||
# Render body even when there are no releases to process.
|
||||
render_always = true
|
||||
# An array of regex based postprocessors to modify the changelog.
|
||||
postprocessors = [
|
||||
# Replace the placeholder <REPO> with a URL.
|
||||
#{ pattern = '<REPO>', replace = "https://github.com/orhun/git-cliff" },
|
||||
]
|
||||
# render body even when there are no releases to process
|
||||
# render_always = true
|
||||
# output file path
|
||||
# output = "test.md"
|
||||
|
||||
[git]
|
||||
# Parse commits according to the conventional commits specification.
|
||||
# See https://www.conventionalcommits.org
|
||||
conventional_commits = true
|
||||
# Exclude commits that do not match the conventional commits specification.
|
||||
filter_unconventional = true
|
||||
# Require all commits to be conventional.
|
||||
# Takes precedence over filter_unconventional.
|
||||
require_conventional = false
|
||||
# Split commits on newlines, treating each line as an individual commit.
|
||||
split_commits = false
|
||||
# An array of regex based parsers to modify commit messages prior to further processing.
|
||||
commit_preprocessors = [
|
||||
# Replace issue numbers with link templates to be updated in `changelog.postprocessors`.
|
||||
#{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](<REPO>/issues/${2}))"},
|
||||
# Check spelling of the commit message using https://github.com/crate-ci/typos.
|
||||
# If the spelling is incorrect, it will be fixed automatically.
|
||||
#{ pattern = '.*', replace_command = 'typos --write-changes -' },
|
||||
]
|
||||
# Prevent commits that are breaking from being excluded by commit parsers.
|
||||
protect_breaking_commits = false
|
||||
# An array of regex based parsers for extracting data from the commit message.
|
||||
# Assigns commits to groups.
|
||||
# Optionally sets the commit's scope and can decide to exclude commits from further processing.
|
||||
commit_parsers = [
|
||||
{ message = "^feat", group = "Features" },
|
||||
{ message = "^fix", group = "Bug Fixes" },
|
||||
{ message = "^docs", group = "Documentation" },
|
||||
{ message = "^perf", group = "Performance" },
|
||||
{ message = "^refactor", group = "Refactor" },
|
||||
{ message = "^style", group = "Styling" },
|
||||
{ message = "^test", group = "Testing" },
|
||||
{ message = "^chore\\(release\\): prepare for", skip = true },
|
||||
{ message = "^chore\\(deps.*\\)", skip = true },
|
||||
{ message = "^chore\\(pr\\)", skip = true },
|
||||
{ message = "^chore\\(pull\\)", skip = true },
|
||||
{ message = "^chore|^ci", group = "Miscellaneous Tasks" },
|
||||
{ body = ".*security", group = "Security" },
|
||||
{ message = "^revert", group = "Revert" },
|
||||
{ message = ".*", group = "Other" },
|
||||
]
|
||||
# Exclude commits that are not matched by any commit parser.
|
||||
filter_commits = false
|
||||
# Fail on a commit that is not matched by any commit parser.
|
||||
fail_on_unmatched_commit = false
|
||||
# An array of link parsers for extracting external references, and turning them into URLs, using regex.
|
||||
link_parsers = []
|
||||
# Include only the tags that belong to the current branch.
|
||||
use_branch_tags = false
|
||||
# Order releases topologically instead of chronologically.
|
||||
topo_order = false
|
||||
# Order commits topologically instead of chronologically.
|
||||
topo_order_commits = true
|
||||
# Order of commits in each group/release within the changelog.
|
||||
# Allowed values: newest, oldest
|
||||
sort_commits = "oldest"
|
||||
# Process submodules commits
|
||||
recurse_submodules = false
|
||||
@@ -61,19 +61,6 @@ class AcmetoolDeployer(Deployer):
|
||||
mode="644",
|
||||
)
|
||||
|
||||
server.shell(
|
||||
name=f"Remove old acmetool desired files for {self.domains[0]}",
|
||||
commands=[f"rm -f /var/lib/acme/desired/{self.domains[0]}-*"],
|
||||
)
|
||||
files.template(
|
||||
src=importlib.resources.files(__package__).joinpath("desired.yaml.j2"),
|
||||
dest=f"/var/lib/acme/desired/{self.domains[0]}", # 0 is mailhost TLD
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
domains=self.domains,
|
||||
)
|
||||
|
||||
service_file = files.put(
|
||||
src=importlib.resources.files(__package__).joinpath(
|
||||
"acmetool-redirector.service"
|
||||
@@ -136,6 +123,6 @@ class AcmetoolDeployer(Deployer):
|
||||
self.need_restart_reconcile_timer = False
|
||||
|
||||
server.shell(
|
||||
name=f"Reconcile certificates for: {', '.join(self.domains)}",
|
||||
commands=["acmetool --batch --xlog.severity=debug reconcile"],
|
||||
name=f"Request certificate for: {', '.join(self.domains)}",
|
||||
commands=[f"acmetool want --xlog.severity=debug {' '.join(self.domains)}"],
|
||||
)
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
satisfy:
|
||||
names:
|
||||
{%- for domain in domains %}
|
||||
- {{ domain }}
|
||||
{%- endfor %}
|
||||
|
||||
@@ -5,11 +5,6 @@ import os
|
||||
from pyinfra.operations import files, server, systemd
|
||||
|
||||
|
||||
def has_systemd():
|
||||
"""Returns False during Docker image builds or any other non-systemd environment."""
|
||||
return os.path.isdir("/run/systemd/system")
|
||||
|
||||
|
||||
def get_resource(arg, pkg=__package__):
|
||||
return importlib.resources.files(pkg).joinpath(arg)
|
||||
|
||||
@@ -22,8 +17,9 @@ def configure_remote_units(mail_domain, units) -> None:
|
||||
|
||||
# install systemd units
|
||||
for fn in units:
|
||||
execpath = fn if fn != "filtermail-incoming" else "filtermail"
|
||||
params = dict(
|
||||
execpath=f"{remote_venv_dir}/bin/{fn}",
|
||||
execpath=f"{remote_venv_dir}/bin/{execpath}",
|
||||
config_path=remote_chatmail_inipath,
|
||||
remote_venv_dir=remote_venv_dir,
|
||||
mail_domain=mail_domain,
|
||||
|
||||
@@ -71,11 +71,6 @@ def run_cmd_options(parser):
|
||||
action="store_true",
|
||||
help="install/upgrade the server, but disable postfix & dovecot for now",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--website-only",
|
||||
action="store_true",
|
||||
help="only update/deploy the website, skipping full server upgrade/deployment, useful when you only changed/updated the web pages and don't need to re-run a full server upgrade",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--skip-dns-check",
|
||||
dest="dns_check_disabled",
|
||||
@@ -98,19 +93,13 @@ def run_cmd(args, out):
|
||||
|
||||
env = os.environ.copy()
|
||||
env["CHATMAIL_INI"] = args.inipath
|
||||
env["CHATMAIL_WEBSITE_ONLY"] = "True" if args.website_only else ""
|
||||
env["CHATMAIL_DISABLE_MAIL"] = "True" if args.disable_mail else ""
|
||||
env["CHATMAIL_REQUIRE_IROH"] = "True" if require_iroh else ""
|
||||
if not args.dns_check_disabled:
|
||||
env["CHATMAIL_ADDR_V4"] = remote_data.get("A") or ""
|
||||
env["CHATMAIL_ADDR_V6"] = remote_data.get("AAAA") or ""
|
||||
deploy_path = importlib.resources.files(__package__).joinpath("run.py").resolve()
|
||||
pyinf = "pyinfra --dry" if args.dry_run else "pyinfra"
|
||||
|
||||
cmd = f"{pyinf} --ssh-user root {ssh_host} {deploy_path} -y"
|
||||
if ssh_host in ["localhost", "@docker"]:
|
||||
if ssh_host == "@docker":
|
||||
env["CHATMAIL_DOCKER"] = "True"
|
||||
cmd = f"{pyinf} @local {deploy_path} -y"
|
||||
|
||||
if version.parse(pyinfra.__version__) < version.parse("3"):
|
||||
@@ -119,14 +108,9 @@ def run_cmd(args, out):
|
||||
|
||||
try:
|
||||
retcode = out.check_call(cmd, env=env)
|
||||
if args.website_only:
|
||||
if retcode == 0:
|
||||
out.green("Website deployment completed.")
|
||||
else:
|
||||
out.red("Website deployment failed.")
|
||||
elif retcode == 0:
|
||||
if retcode == 0:
|
||||
out.green("Deploy completed, call `cmdeploy dns` next.")
|
||||
elif not args.dns_check_disabled and not remote_data["acme_account_url"]:
|
||||
elif not remote_data["acme_account_url"]:
|
||||
out.red("Deploy completed but letsencrypt not configured")
|
||||
out.red("Run 'cmdeploy run' again")
|
||||
retcode = 0
|
||||
|
||||
@@ -10,7 +10,6 @@ from pathlib import Path
|
||||
|
||||
from chatmaild.config import read_config
|
||||
from pyinfra import facts, host, logger
|
||||
from pyinfra.facts import hardware
|
||||
from pyinfra.api import FactBase
|
||||
from pyinfra.facts.files import Sha256File
|
||||
from pyinfra.facts.systemd import SystemdEnabled
|
||||
@@ -25,10 +24,8 @@ from .basedeploy import (
|
||||
activate_remote_units,
|
||||
configure_remote_units,
|
||||
get_resource,
|
||||
has_systemd,
|
||||
)
|
||||
from .dovecot.deployer import DovecotDeployer
|
||||
from .filtermail.deployer import FiltermailDeployer
|
||||
from .mtail.deployer import MtailDeployer
|
||||
from .nginx.deployer import NginxDeployer
|
||||
from .opendkim.deployer import OpendkimDeployer
|
||||
@@ -38,7 +35,7 @@ from .www import build_webpages, find_merge_conflict, get_paths
|
||||
|
||||
class Port(FactBase):
|
||||
"""
|
||||
Returns the process occupying a port.
|
||||
Returns the process occuping a port.
|
||||
"""
|
||||
|
||||
def command(self, port: int) -> str:
|
||||
@@ -66,8 +63,6 @@ def _build_chatmaild(dist_dir) -> None:
|
||||
|
||||
|
||||
def remove_legacy_artifacts():
|
||||
if not has_systemd():
|
||||
return
|
||||
# disable legacy doveauth-dictproxy.service
|
||||
if host.get_fact(SystemdEnabled).get("doveauth-dictproxy.service"):
|
||||
systemd.service(
|
||||
@@ -145,10 +140,6 @@ def _configure_remote_venv_with_chatmaild(config) -> None:
|
||||
|
||||
|
||||
class UnboundDeployer(Deployer):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.need_restart = False
|
||||
|
||||
def install(self):
|
||||
# Run local DNS resolver `unbound`.
|
||||
# `resolvconf` takes care of setting up /etc/resolv.conf
|
||||
@@ -185,27 +176,6 @@ class UnboundDeployer(Deployer):
|
||||
"unbound-anchor -a /var/lib/unbound/root.key || true",
|
||||
],
|
||||
)
|
||||
if self.config.disable_ipv6:
|
||||
files.directory(
|
||||
path="/etc/unbound/unbound.conf.d",
|
||||
present=True,
|
||||
user="root",
|
||||
group="root",
|
||||
mode="755",
|
||||
)
|
||||
conf = files.put(
|
||||
src=get_resource("unbound/unbound.conf.j2"),
|
||||
dest="/etc/unbound/unbound.conf.d/chatmail.conf",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
else:
|
||||
conf = files.file(
|
||||
path="/etc/unbound/unbound.conf.d/chatmail.conf",
|
||||
present=False,
|
||||
)
|
||||
self.need_restart |= conf.changed
|
||||
|
||||
def activate(self):
|
||||
server.shell(
|
||||
@@ -220,7 +190,6 @@ class UnboundDeployer(Deployer):
|
||||
service="unbound.service",
|
||||
running=True,
|
||||
enabled=True,
|
||||
restarted=self.need_restart,
|
||||
)
|
||||
|
||||
|
||||
@@ -302,7 +271,7 @@ class LegacyRemoveDeployer(Deployer):
|
||||
present=False,
|
||||
)
|
||||
# remove echobot if it is still running
|
||||
if has_systemd() and host.get_fact(SystemdEnabled).get("echobot.service"):
|
||||
if host.get_fact(SystemdEnabled).get("echobot.service"):
|
||||
systemd.service(
|
||||
name="Disable echobot.service",
|
||||
service="echobot.service",
|
||||
@@ -447,6 +416,8 @@ class ChatmailVenvDeployer(Deployer):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.units = (
|
||||
"filtermail",
|
||||
"filtermail-incoming",
|
||||
"chatmail-metadata",
|
||||
"lastlogin",
|
||||
"chatmail-expire",
|
||||
@@ -469,6 +440,7 @@ class ChatmailVenvDeployer(Deployer):
|
||||
class ChatmailDeployer(Deployer):
|
||||
required_users = [
|
||||
("vmail", "vmail", None),
|
||||
("echobot", None, None),
|
||||
("iroh", None, None),
|
||||
]
|
||||
|
||||
@@ -531,83 +503,63 @@ class GithashDeployer(Deployer):
|
||||
except Exception:
|
||||
git_diff = ""
|
||||
files.put(
|
||||
name="Upload chatmail relay git commit hash",
|
||||
name="Upload chatmail relay git commiit hash",
|
||||
src=StringIO(git_hash + git_diff),
|
||||
dest="/etc/chatmail-version",
|
||||
mode="700",
|
||||
)
|
||||
|
||||
|
||||
def deploy_chatmail(config_path: Path, disable_mail: bool, website_only: bool, docker: bool) -> None:
|
||||
def deploy_chatmail(config_path: Path, disable_mail: bool) -> None:
|
||||
"""Deploy a chat-mail instance.
|
||||
|
||||
:param config_path: path to chatmail.ini
|
||||
:param disable_mail: whether to disable postfix & dovecot
|
||||
:param website_only: if True, only deploy the website
|
||||
:param docker: whether it is running in a docker container
|
||||
"""
|
||||
config = read_config(config_path)
|
||||
check_config(config)
|
||||
mail_domain = config.mail_domain
|
||||
|
||||
if website_only:
|
||||
Deployment().perform_stages([WebsiteDeployer(config)])
|
||||
return
|
||||
|
||||
if host.get_fact(Port, port=53) != "unbound":
|
||||
files.line(
|
||||
name="Add 9.9.9.9 to resolv.conf",
|
||||
path="/etc/resolv.conf",
|
||||
# Guard against resolv.conf missing a trailing newline (SolusVM bug).
|
||||
line="\nnameserver 9.9.9.9",
|
||||
line="nameserver 9.9.9.9",
|
||||
)
|
||||
|
||||
# Check if mtail_address interface is available (if configured)
|
||||
if config.mtail_address and config.mtail_address not in ('127.0.0.1', '::1', 'localhost'):
|
||||
ipv4_addrs = host.get_fact(hardware.Ipv4Addrs)
|
||||
all_addresses = [addr for addrs in ipv4_addrs.values() for addr in addrs]
|
||||
if config.mtail_address not in all_addresses:
|
||||
Out().red(f"Deploy failed: mtail_address {config.mtail_address} is not available (VPN up?).\n")
|
||||
exit(1)
|
||||
|
||||
if not docker:
|
||||
port_services = [
|
||||
(["master", "smtpd"], 25),
|
||||
("unbound", 53),
|
||||
("acmetool", 80),
|
||||
(["imap-login", "dovecot"], 143),
|
||||
("nginx", 443),
|
||||
(["master", "smtpd"], 465),
|
||||
(["master", "smtpd"], 587),
|
||||
(["imap-login", "dovecot"], 993),
|
||||
("iroh-relay", 3340),
|
||||
("mtail", 3903),
|
||||
("stats", 3904),
|
||||
("nginx", 8443),
|
||||
(["master", "smtpd"], config.postfix_reinject_port),
|
||||
(["master", "smtpd"], config.postfix_reinject_port_incoming),
|
||||
("filtermail", config.filtermail_smtp_port),
|
||||
("filtermail", config.filtermail_smtp_port_incoming),
|
||||
]
|
||||
for service, port in port_services:
|
||||
print(f"Checking if port {port} is available for {service}...")
|
||||
running_service = host.get_fact(Port, port=port)
|
||||
services = [service] if isinstance(service, str) else service
|
||||
if running_service:
|
||||
if running_service not in services:
|
||||
Out().red(
|
||||
f"Deploy failed: port {port} is occupied by: {running_service}"
|
||||
)
|
||||
exit(1)
|
||||
port_services = [
|
||||
(["master", "smtpd"], 25),
|
||||
("unbound", 53),
|
||||
("acmetool", 80),
|
||||
(["imap-login", "dovecot"], 143),
|
||||
("nginx", 443),
|
||||
(["master", "smtpd"], 465),
|
||||
(["master", "smtpd"], 587),
|
||||
(["imap-login", "dovecot"], 993),
|
||||
("iroh-relay", 3340),
|
||||
("nginx", 8443),
|
||||
(["master", "smtpd"], config.postfix_reinject_port),
|
||||
(["master", "smtpd"], config.postfix_reinject_port_incoming),
|
||||
("filtermail", config.filtermail_smtp_port),
|
||||
("filtermail", config.filtermail_smtp_port_incoming),
|
||||
]
|
||||
for service, port in port_services:
|
||||
print(f"Checking if port {port} is available for {service}...")
|
||||
running_service = host.get_fact(Port, port=port)
|
||||
if running_service:
|
||||
if running_service not in service:
|
||||
Out().red(
|
||||
f"Deploy failed: port {port} is occupied by: {running_service}"
|
||||
)
|
||||
exit(1)
|
||||
|
||||
tls_domains = [mail_domain, f"mta-sts.{mail_domain}", f"www.{mail_domain}"]
|
||||
|
||||
all_deployers = [
|
||||
ChatmailDeployer(mail_domain),
|
||||
LegacyRemoveDeployer(),
|
||||
FiltermailDeployer(),
|
||||
JournaldDeployer(),
|
||||
UnboundDeployer(config),
|
||||
UnboundDeployer(),
|
||||
TurnDeployer(mail_domain),
|
||||
IrohDeployer(config.enable_iroh_relay),
|
||||
AcmetoolDeployer(config.acme_email, tls_domains),
|
||||
|
||||
@@ -4,7 +4,7 @@ iterate_prefix = userdb/
|
||||
|
||||
default_pass_scheme = plain
|
||||
# %E escapes characters " (double quote), ' (single quote) and \ (backslash) with \ (backslash).
|
||||
# See <https://doc.dovecot.org/2.3/configuration_manual/config_file/config_variables/#modifiers>
|
||||
# See <https://doc.dovecot.org/configuration_manual/config_file/config_variables/#modifiers>
|
||||
# for documentation.
|
||||
#
|
||||
# We escape user-provided input and use double quote as a separator.
|
||||
|
||||
@@ -9,13 +9,10 @@ from cmdeploy.basedeploy import (
|
||||
activate_remote_units,
|
||||
configure_remote_units,
|
||||
get_resource,
|
||||
has_systemd,
|
||||
)
|
||||
|
||||
|
||||
class DovecotDeployer(Deployer):
|
||||
daemon_reload = False
|
||||
|
||||
def __init__(self, config, disable_mail):
|
||||
self.config = config
|
||||
self.disable_mail = disable_mail
|
||||
@@ -23,15 +20,14 @@ class DovecotDeployer(Deployer):
|
||||
|
||||
def install(self):
|
||||
arch = host.get_fact(Arch)
|
||||
if has_systemd() and "dovecot.service" in host.get_fact(SystemdEnabled):
|
||||
return # already installed and running
|
||||
_install_dovecot_package("core", arch)
|
||||
_install_dovecot_package("imapd", arch)
|
||||
_install_dovecot_package("lmtpd", arch)
|
||||
if not "dovecot.service" in host.get_fact(SystemdEnabled):
|
||||
_install_dovecot_package("core", arch)
|
||||
_install_dovecot_package("imapd", arch)
|
||||
_install_dovecot_package("lmtpd", arch)
|
||||
|
||||
def configure(self):
|
||||
configure_remote_units(self.config.mail_domain, self.units)
|
||||
self.need_restart, self.daemon_reload = _configure_dovecot(self.config)
|
||||
self.need_restart = _configure_dovecot(self.config)
|
||||
|
||||
def activate(self):
|
||||
activate_remote_units(self.units)
|
||||
@@ -39,12 +35,13 @@ class DovecotDeployer(Deployer):
|
||||
restart = False if self.disable_mail else self.need_restart
|
||||
|
||||
systemd.service(
|
||||
name="Disable dovecot for now" if self.disable_mail else "Start and enable Dovecot",
|
||||
name="disable dovecot for now"
|
||||
if self.disable_mail
|
||||
else "Start and enable Dovecot",
|
||||
service="dovecot.service",
|
||||
running=False if self.disable_mail else True,
|
||||
enabled=False if self.disable_mail else True,
|
||||
restarted=restart,
|
||||
daemon_reload=self.daemon_reload,
|
||||
)
|
||||
self.need_restart = False
|
||||
|
||||
@@ -83,10 +80,9 @@ def _install_dovecot_package(package: str, arch: str):
|
||||
apt.deb(name=f"Install dovecot-{package}", src=deb_filename)
|
||||
|
||||
|
||||
def _configure_dovecot(config: Config, debug: bool = False) -> (bool, bool):
|
||||
def _configure_dovecot(config: Config, debug: bool = False) -> bool:
|
||||
"""Configures Dovecot IMAP server."""
|
||||
need_restart = False
|
||||
daemon_reload = False
|
||||
|
||||
main_config = files.template(
|
||||
src=get_resource("dovecot/dovecot.conf.j2"),
|
||||
@@ -116,21 +112,20 @@ def _configure_dovecot(config: Config, debug: bool = False) -> (bool, bool):
|
||||
)
|
||||
need_restart |= lua_push_notification_script.changed
|
||||
|
||||
# as per https://doc.dovecot.org/2.3/configuration_manual/os/
|
||||
# as per https://doc.dovecot.org/configuration_manual/os/
|
||||
# it is recommended to set the following inotify limits
|
||||
if config.change_kernel_settings:
|
||||
for name in ("max_user_instances", "max_user_watches"):
|
||||
key = f"fs.inotify.{name}"
|
||||
if host.get_fact(Sysctl)[key] > 65535:
|
||||
# Skip updating limits if already sufficient
|
||||
# (enables running in incus containers where sysctl readonly)
|
||||
continue
|
||||
server.sysctl(
|
||||
name=f"Change {key}",
|
||||
key=key,
|
||||
value=65535,
|
||||
persist=True,
|
||||
)
|
||||
for name in ("max_user_instances", "max_user_watches"):
|
||||
key = f"fs.inotify.{name}"
|
||||
if host.get_fact(Sysctl)[key] > 65535:
|
||||
# Skip updating limits if already sufficient
|
||||
# (enables running in incus containers where sysctl readonly)
|
||||
continue
|
||||
server.sysctl(
|
||||
name=f"Change {key}",
|
||||
key=key,
|
||||
value=65535,
|
||||
persist=True,
|
||||
)
|
||||
|
||||
timezone_env = files.line(
|
||||
name="Set TZ environment variable",
|
||||
@@ -139,18 +134,4 @@ def _configure_dovecot(config: Config, debug: bool = False) -> (bool, bool):
|
||||
)
|
||||
need_restart |= timezone_env.changed
|
||||
|
||||
restart_conf = files.put(
|
||||
name="dovecot: restart automatically on failure",
|
||||
src=get_resource("service/10_restart.conf"),
|
||||
dest="/etc/systemd/system/dovecot.service.d/10_restart.conf",
|
||||
)
|
||||
daemon_reload |= restart_conf.changed
|
||||
|
||||
# Validate dovecot configuration before restart
|
||||
if need_restart:
|
||||
server.shell(
|
||||
name="Validate dovecot configuration",
|
||||
commands=["doveconf -n >/dev/null"],
|
||||
)
|
||||
|
||||
return need_restart, daemon_reload
|
||||
return need_restart
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
## Dovecot configuration file
|
||||
|
||||
{% if disable_ipv6 %}
|
||||
listen = 0.0.0.0
|
||||
listen = *
|
||||
{% endif %}
|
||||
|
||||
protocols = imap lmtp
|
||||
@@ -26,7 +26,7 @@ default_client_limit = 20000
|
||||
# Increase number of logged in IMAP connections.
|
||||
# Each connection is handled by a separate `imap` process.
|
||||
# `imap` process should have `client_limit=1` as described in
|
||||
# <https://doc.dovecot.org/2.3/configuration_manual/service_configuration/#service-limits>
|
||||
# <https://doc.dovecot.org/configuration_manual/service_configuration/#service-limits>
|
||||
# so each logged in IMAP session will need its own `imap` process.
|
||||
#
|
||||
# If this limit is reached,
|
||||
@@ -44,11 +44,11 @@ mail_server_comment = Chatmail server
|
||||
|
||||
# `zlib` enables compressing messages stored in the maildir.
|
||||
# See
|
||||
# <https://doc.dovecot.org/2.3/configuration_manual/zlib_plugin/>
|
||||
# <https://doc.dovecot.org/configuration_manual/zlib_plugin/>
|
||||
# for documentation.
|
||||
#
|
||||
# quota plugin documentation:
|
||||
# <https://doc.dovecot.org/2.3/configuration_manual/quota_plugin/>
|
||||
# <https://doc.dovecot.org/configuration_manual/quota_plugin/>
|
||||
mail_plugins = zlib quota
|
||||
|
||||
imap_capability = +XDELTAPUSH XCHATMAIL
|
||||
@@ -113,7 +113,7 @@ mail_attribute_dict = proxy:/run/chatmail-metadata/metadata.socket:metadata
|
||||
# `imap_zlib` enables IMAP COMPRESS (RFC 4978).
|
||||
# <https://datatracker.ietf.org/doc/html/rfc4978.html>
|
||||
protocol imap {
|
||||
mail_plugins = $mail_plugins imap_quota last_login {% if config.imap_compress %}imap_zlib{% endif %}
|
||||
mail_plugins = $mail_plugins imap_zlib imap_quota last_login
|
||||
imap_metadata = yes
|
||||
}
|
||||
|
||||
@@ -125,13 +125,13 @@ plugin {
|
||||
|
||||
protocol lmtp {
|
||||
# notify plugin is a dependency of push_notification plugin:
|
||||
# <https://doc.dovecot.org/2.3/settings/plugin/notify-plugin/>
|
||||
# <https://doc.dovecot.org/settings/plugin/notify-plugin/>
|
||||
#
|
||||
# push_notification plugin documentation:
|
||||
# <https://doc.dovecot.org/2.3/configuration_manual/push_notification/>
|
||||
# <https://doc.dovecot.org/configuration_manual/push_notification/>
|
||||
#
|
||||
# mail_lua and push_notification_lua are needed for Lua push notification handler.
|
||||
# <https://doc.dovecot.org/2.3/configuration_manual/push_notification/#configuration>
|
||||
# <https://doc.dovecot.org/configuration_manual/push_notification/#configuration>
|
||||
mail_plugins = $mail_plugins mail_lua notify push_notification push_notification_lua
|
||||
}
|
||||
|
||||
@@ -154,7 +154,7 @@ plugin {
|
||||
|
||||
# push_notification configuration
|
||||
plugin {
|
||||
# <https://doc.dovecot.org/2.3/configuration_manual/push_notification/#lua-lua>
|
||||
# <https://doc.dovecot.org/configuration_manual/push_notification/#lua-lua>
|
||||
push_notification_driver = lua:file=/etc/dovecot/push_notification.lua
|
||||
}
|
||||
|
||||
@@ -168,8 +168,6 @@ service lmtp {
|
||||
}
|
||||
}
|
||||
|
||||
lmtp_add_received_header = no
|
||||
|
||||
service auth {
|
||||
unix_listener /var/spool/postfix/private/auth {
|
||||
mode = 0660
|
||||
@@ -254,181 +252,3 @@ protocol imap {
|
||||
rawlog_dir = %h
|
||||
}
|
||||
{% endif %}
|
||||
|
||||
{% if not config.imap_compress %}
|
||||
# Hibernate IDLE users to save memory and CPU resources
|
||||
# NOTE: this will have no effect if imap_zlib plugin is used
|
||||
imap_hibernate_timeout = 30s
|
||||
service imap {
|
||||
# Note that this change will allow any process running as
|
||||
# $default_internal_user (dovecot) to access mails as any other user.
|
||||
# This may be insecure in some installations, which is why this isn't
|
||||
# done by default.
|
||||
unix_listener imap-master {
|
||||
user = $default_internal_user
|
||||
}
|
||||
}
|
||||
# The following is the default already in v2.3.1+:
|
||||
service imap {
|
||||
extra_groups = $default_internal_group
|
||||
}
|
||||
service imap-hibernate {
|
||||
unix_listener imap-hibernate {
|
||||
mode = 0660
|
||||
group = $default_internal_group
|
||||
}
|
||||
}
|
||||
{% endif %}
|
||||
|
||||
{% if config.mtail_address %}
|
||||
#
|
||||
# Dovecot Statistics
|
||||
#
|
||||
# OpenMetrics endpoint at http://{{- config.mtail_address}}:3904/metrics
|
||||
service stats {
|
||||
inet_listener http {
|
||||
port = 3904
|
||||
address = {{- config.mtail_address}}
|
||||
}
|
||||
}
|
||||
|
||||
# IMAP Command Metrics
|
||||
# - Bytes in/out for compression efficiency analysis
|
||||
# - Lock wait time for contention debugging
|
||||
# - Grouped by command name and reply state
|
||||
metric imap_command {
|
||||
filter = event=imap_command_finished
|
||||
fields = bytes_in bytes_out lock_wait_usecs running_usecs
|
||||
group_by = cmd_name tagged_reply_state
|
||||
}
|
||||
|
||||
# Duration buckets for latency histograms (base 10: 10us, 100us, 1ms, 10ms, 100ms, 1s, 10s, 100s)
|
||||
metric imap_command_duration {
|
||||
filter = event=imap_command_finished
|
||||
group_by = cmd_name duration:exponential:1:8:10
|
||||
}
|
||||
|
||||
# Slow command outliers (>1 second = 1000000 usecs)
|
||||
# Useful for alerting without high cardinality
|
||||
metric imap_command_slow {
|
||||
filter = event=imap_command_finished AND duration>1000000 AND NOT cmd_name=IDLE
|
||||
group_by = cmd_name
|
||||
}
|
||||
|
||||
# IDLE-specific Metrics
|
||||
|
||||
metric imap_idle {
|
||||
filter = event=imap_command_finished AND cmd_name=IDLE
|
||||
fields = bytes_in bytes_out running_usecs
|
||||
group_by = tagged_reply_state
|
||||
}
|
||||
|
||||
metric imap_idle_duration {
|
||||
filter = event=imap_command_finished AND cmd_name=IDLE
|
||||
# Base 10: 100ms to 27h (covers short wakeups to long idle sessions)
|
||||
group_by = duration:exponential:5:11:10
|
||||
}
|
||||
|
||||
metric imap_idle_commands {
|
||||
filter = event=imap_command_finished AND cmd_name=IDLE
|
||||
group_by = tagged_reply_state
|
||||
}
|
||||
|
||||
metric imap_idle_failed {
|
||||
filter = event=imap_command_finished AND cmd_name=IDLE AND NOT tagged_reply_state=OK
|
||||
}
|
||||
|
||||
# Hibernation Metrics (requires imap_hibernate_timeout)
|
||||
|
||||
metric imap_hibernated {
|
||||
filter = event=imap_client_hibernated
|
||||
}
|
||||
|
||||
metric imap_hibernated_failed {
|
||||
filter = event=imap_client_hibernated AND error=*
|
||||
}
|
||||
|
||||
metric imap_unhibernated {
|
||||
filter = event=imap_client_unhibernated
|
||||
fields = hibernation_usecs
|
||||
}
|
||||
|
||||
metric imap_unhibernated_reason {
|
||||
filter = event=imap_client_unhibernated
|
||||
group_by = reason
|
||||
fields = hibernation_usecs
|
||||
}
|
||||
|
||||
metric imap_unhibernated_reason_sleep {
|
||||
filter = event=imap_client_unhibernated
|
||||
group_by = reason hibernation_usecs:exponential:4:8:10
|
||||
}
|
||||
|
||||
metric imap_unhibernated_failed {
|
||||
filter = event=imap_client_unhibernated AND error=*
|
||||
}
|
||||
|
||||
# Hibernation duration buckets (how long clients stayed hibernated)
|
||||
# Base 10: 100ms to 27h
|
||||
metric imap_hibernation_duration {
|
||||
filter = event=imap_client_unhibernated
|
||||
group_by = reason duration:exponential:5:11:10
|
||||
}
|
||||
|
||||
# Authentication / Login Metrics
|
||||
|
||||
metric auth_request {
|
||||
filter = event=auth_request_finished
|
||||
group_by = success
|
||||
}
|
||||
|
||||
metric auth_request_duration {
|
||||
filter = event=auth_request_finished
|
||||
group_by = success duration:exponential:2:6:10
|
||||
}
|
||||
|
||||
metric auth_failed {
|
||||
filter = event=auth_request_finished AND success=no
|
||||
}
|
||||
|
||||
# Passdb cache effectiveness
|
||||
metric auth_passdb {
|
||||
filter = event=auth_passdb_request_finished
|
||||
group_by = result cache
|
||||
}
|
||||
|
||||
# Master login (post-auth userdb lookup)
|
||||
metric auth_master_login {
|
||||
filter = event=auth_master_client_login_finished
|
||||
}
|
||||
|
||||
metric auth_master_login_failed {
|
||||
filter = event=auth_master_client_login_finished AND error=*
|
||||
}
|
||||
|
||||
# Mail Delivery (LMTP) - affects IDLE wakeup latency
|
||||
|
||||
metric mail_delivery {
|
||||
filter = event=mail_delivery_finished
|
||||
}
|
||||
|
||||
metric mail_delivery_duration {
|
||||
filter = event=mail_delivery_finished
|
||||
group_by = duration:exponential:3:7:10
|
||||
}
|
||||
|
||||
metric mail_delivery_failed {
|
||||
filter = event=mail_delivery_finished AND error=*
|
||||
}
|
||||
|
||||
# Connection Events
|
||||
|
||||
metric client_connected {
|
||||
filter = event=client_connection_connected AND category="service:imap"
|
||||
}
|
||||
|
||||
metric client_disconnected {
|
||||
filter = event=client_connection_disconnected AND category="service:imap"
|
||||
fields = bytes_in bytes_out
|
||||
}
|
||||
{% endif %}
|
||||
|
||||
@@ -1,52 +0,0 @@
|
||||
from pyinfra import facts, host
|
||||
from pyinfra.operations import files, systemd
|
||||
|
||||
from cmdeploy.basedeploy import Deployer, get_resource
|
||||
|
||||
|
||||
class FiltermailDeployer(Deployer):
|
||||
services = ["filtermail", "filtermail-incoming"]
|
||||
bin_path = "/usr/local/bin/filtermail"
|
||||
config_path = "/usr/local/lib/chatmaild/chatmail.ini"
|
||||
|
||||
def __init__(self):
|
||||
self.need_restart = False
|
||||
|
||||
def install(self):
|
||||
arch = host.get_fact(facts.server.Arch)
|
||||
url = f"https://github.com/chatmail/filtermail/releases/download/v0.3.0/filtermail-{arch}"
|
||||
sha256sum = {
|
||||
"x86_64": "f14a31323ae2dad3b59d3fdafcde507521da2f951a9478cd1f2fe2b4463df71d",
|
||||
"aarch64": "933770d75046c4fd7084ce8d43f905f8748333426ad839154f0fc654755ef09f",
|
||||
}[arch]
|
||||
self.need_restart |= files.download(
|
||||
name="Download filtermail",
|
||||
src=url,
|
||||
sha256sum=sha256sum,
|
||||
dest=self.bin_path,
|
||||
mode="755",
|
||||
).changed
|
||||
|
||||
def configure(self):
|
||||
for service in self.services:
|
||||
self.need_restart |= files.template(
|
||||
src=get_resource(f"filtermail/{service}.service.j2"),
|
||||
dest=f"/etc/systemd/system/{service}.service",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
bin_path=self.bin_path,
|
||||
config_path=self.config_path,
|
||||
).changed
|
||||
|
||||
def activate(self):
|
||||
for service in self.services:
|
||||
systemd.service(
|
||||
name=f"Start and enable {service}",
|
||||
service=f"{service}.service",
|
||||
running=True,
|
||||
enabled=True,
|
||||
restarted=self.need_restart,
|
||||
daemon_reload=True,
|
||||
)
|
||||
self.need_restart = False
|
||||
@@ -44,37 +44,21 @@ counter warning_count
|
||||
}
|
||||
|
||||
|
||||
counter filtered_outgoing_mail_count
|
||||
counter filtered_mail_count
|
||||
|
||||
counter outgoing_encrypted_mail_count
|
||||
/Outgoing: Filtering encrypted mail\./ {
|
||||
outgoing_encrypted_mail_count++
|
||||
filtered_outgoing_mail_count++
|
||||
counter encrypted_mail_count
|
||||
/Filtering encrypted mail\./ {
|
||||
encrypted_mail_count++
|
||||
filtered_mail_count++
|
||||
}
|
||||
|
||||
counter outgoing_unencrypted_mail_count
|
||||
/Outgoing: Filtering unencrypted mail\./ {
|
||||
outgoing_unencrypted_mail_count++
|
||||
filtered_outgoing_mail_count++
|
||||
counter unencrypted_mail_count
|
||||
/Filtering unencrypted mail\./ {
|
||||
unencrypted_mail_count++
|
||||
filtered_mail_count++
|
||||
}
|
||||
|
||||
|
||||
counter filtered_incoming_mail_count
|
||||
|
||||
counter incoming_encrypted_mail_count
|
||||
/Incoming: Filtering encrypted mail\./ {
|
||||
incoming_encrypted_mail_count++
|
||||
filtered_incoming_mail_count++
|
||||
}
|
||||
|
||||
counter incoming_unencrypted_mail_count
|
||||
/Incoming: Filtering unencrypted mail\./ {
|
||||
incoming_unencrypted_mail_count++
|
||||
filtered_incoming_mail_count++
|
||||
}
|
||||
|
||||
|
||||
counter rejected_unencrypted_mail_count
|
||||
/Rejected unencrypted mail/ {
|
||||
/Rejected unencrypted mail\./ {
|
||||
rejected_unencrypted_mail_count++
|
||||
}
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
mtaname = odkim.get_mtasymbol(ctx, "{daemon_name}")
|
||||
if mtaname == "ORIGINATING" then
|
||||
if odkim.internal_ip(ctx) == 1 then
|
||||
-- Outgoing message will be signed,
|
||||
-- no need to look for signatures.
|
||||
return nil
|
||||
@@ -11,7 +10,6 @@ if nsigs == nil then
|
||||
end
|
||||
|
||||
local valid = false
|
||||
local error_msg = "No valid DKIM signature found."
|
||||
for i = 1, nsigs do
|
||||
sig = odkim.get_sighandle(ctx, i - 1)
|
||||
sigres = odkim.sig_result(sig)
|
||||
@@ -23,8 +21,6 @@ for i = 1, nsigs do
|
||||
-- means the message is acceptable.
|
||||
if sigres == 0 then
|
||||
valid = true
|
||||
else
|
||||
error_msg = "DKIM signature is invalid, error code " .. tostring(sigres) .. ", search https://github.com/trusteddomainproject/OpenDKIM/blob/master/libopendkim/dkim.h#L108"
|
||||
end
|
||||
end
|
||||
|
||||
@@ -35,7 +31,7 @@ if valid then
|
||||
odkim.del_header(ctx, "DKIM-Signature", i)
|
||||
end
|
||||
else
|
||||
odkim.set_reply(ctx, "554", "5.7.1", error_msg)
|
||||
odkim.set_reply(ctx, "554", "5.7.1", "No valid DKIM signature found")
|
||||
odkim.set_result(ctx, SMFIS_REJECT)
|
||||
end
|
||||
|
||||
|
||||
@@ -65,9 +65,3 @@ PidFile /run/opendkim/opendkim.pid
|
||||
# The trust anchor enables DNSSEC. In Debian, the trust anchor file is provided
|
||||
# by the package dns-root-data.
|
||||
TrustAnchorFile /usr/share/dns/root.key
|
||||
|
||||
# Sign messages when `-o milter_macro_daemon_name=ORIGINATING` is set.
|
||||
MTA ORIGINATING
|
||||
|
||||
# No hosts are treated as internal, ORIGINATING daemon name should be set explicitly.
|
||||
InternalHosts -
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
from pyinfra.operations import apt, files, server, systemd
|
||||
from pyinfra.operations import apt, files, systemd
|
||||
|
||||
from cmdeploy.basedeploy import Deployer, get_resource
|
||||
|
||||
|
||||
class PostfixDeployer(Deployer):
|
||||
required_users = [("postfix", None, ["opendkim"])]
|
||||
daemon_reload = False
|
||||
|
||||
def __init__(self, config, disable_mail):
|
||||
self.config = config
|
||||
@@ -52,29 +51,6 @@ class PostfixDeployer(Deployer):
|
||||
)
|
||||
need_restart |= header_cleanup.changed
|
||||
|
||||
lmtp_header_cleanup = files.put(
|
||||
src=get_resource("postfix/lmtp_header_cleanup"),
|
||||
dest="/etc/postfix/lmtp_header_cleanup",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
need_restart |= lmtp_header_cleanup.changed
|
||||
|
||||
tls_policy_map = files.put(
|
||||
name="Upload SMTP TLS Policy that accepts self-signed certificates for IP-only hosts",
|
||||
src=get_resource("postfix/smtp_tls_policy_map"),
|
||||
dest="/etc/postfix/smtp_tls_policy_map",
|
||||
user="root",
|
||||
group="root",
|
||||
mode="644",
|
||||
)
|
||||
need_restart |= tls_policy_map.changed
|
||||
if tls_policy_map.changed:
|
||||
server.shell(
|
||||
commands=["postmap /etc/postfix/smtp_tls_policy_map"],
|
||||
)
|
||||
|
||||
# Login map that 1:1 maps email address to login.
|
||||
login_map = files.put(
|
||||
src=get_resource("postfix/login_map"),
|
||||
@@ -84,21 +60,6 @@ class PostfixDeployer(Deployer):
|
||||
mode="644",
|
||||
)
|
||||
need_restart |= login_map.changed
|
||||
|
||||
restart_conf = files.put(
|
||||
name="postfix: restart automatically on failure",
|
||||
src=get_resource("service/10_restart.conf"),
|
||||
dest="/etc/systemd/system/postfix@.service.d/10_restart.conf",
|
||||
)
|
||||
self.daemon_reload = restart_conf.changed
|
||||
|
||||
# Validate postfix configuration before restart
|
||||
if need_restart:
|
||||
server.shell(
|
||||
name="Validate postfix configuration",
|
||||
# Extract stderr and quit with error if non-zero
|
||||
commands=["""bash -c 'w=$(postconf 2>&1 >/dev/null); [[ -z "$w" ]] || { echo "$w"; false; }'"""],
|
||||
)
|
||||
self.need_restart = need_restart
|
||||
|
||||
def activate(self):
|
||||
@@ -112,6 +73,5 @@ class PostfixDeployer(Deployer):
|
||||
running=False if self.disable_mail else True,
|
||||
enabled=False if self.disable_mail else True,
|
||||
restarted=restart,
|
||||
daemon_reload=self.daemon_reload,
|
||||
)
|
||||
self.need_restart = False
|
||||
|
||||
@@ -1,2 +0,0 @@
|
||||
/^DKIM-Signature:/ IGNORE
|
||||
/^Authentication-Results:/ IGNORE
|
||||
@@ -25,7 +25,7 @@ smtp_tls_security_level=verify
|
||||
# <https://www.postfix.org/postconf.5.html#smtp_tls_servername>
|
||||
smtp_tls_servername = hostname
|
||||
smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache
|
||||
smtp_tls_policy_maps = regexp:/etc/postfix/smtp_tls_policy_map
|
||||
smtp_tls_policy_maps = inline:{nauta.cu=may}
|
||||
smtp_tls_protocols = >=TLSv1.2
|
||||
smtp_tls_mandatory_protocols = >=TLSv1.2
|
||||
|
||||
@@ -64,20 +64,7 @@ alias_database = hash:/etc/aliases
|
||||
mydestination =
|
||||
|
||||
relayhost =
|
||||
{% if disable_ipv6 %}
|
||||
mynetworks = 127.0.0.0/8
|
||||
{% else %}
|
||||
mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
|
||||
{% endif %}
|
||||
{% if config.addr_v4 %}
|
||||
smtp_bind_address = {{ config.addr_v4 }}
|
||||
{% endif %}
|
||||
{% if config.addr_v6 %}
|
||||
smtp_bind_address6 = {{ config.addr_v6 }}
|
||||
{% endif %}
|
||||
{% if config.addr_v4 or config.addr_v6 %}
|
||||
smtp_bind_address_enforce = yes
|
||||
{% endif %}
|
||||
mailbox_size_limit = 0
|
||||
message_size_limit = {{config.max_message_size}}
|
||||
recipient_delimiter = +
|
||||
@@ -90,7 +77,6 @@ inet_protocols = all
|
||||
|
||||
virtual_transport = lmtp:unix:private/dovecot-lmtp
|
||||
virtual_mailbox_domains = {{ config.mail_domain }}
|
||||
lmtp_header_checks = regexp:/etc/postfix/lmtp_header_cleanup
|
||||
|
||||
mua_client_restrictions = permit_sasl_authenticated, reject
|
||||
mua_sender_restrictions = reject_sender_login_mismatch, permit_sasl_authenticated, reject
|
||||
|
||||
@@ -31,6 +31,7 @@ submission inet n - y - 5000 smtpd
|
||||
-o smtpd_sender_restrictions=$mua_sender_restrictions
|
||||
-o smtpd_recipient_restrictions=
|
||||
-o smtpd_relay_restrictions=permit_sasl_authenticated,reject
|
||||
-o milter_macro_daemon_name=ORIGINATING
|
||||
-o smtpd_client_connection_count_limit=1000
|
||||
-o smtpd_proxy_filter=127.0.0.1:{{ config.filtermail_smtp_port }}
|
||||
smtps inet n - y - 5000 smtpd
|
||||
@@ -48,6 +49,7 @@ smtps inet n - y - 5000 smtpd
|
||||
-o smtpd_recipient_restrictions=
|
||||
-o smtpd_relay_restrictions=permit_sasl_authenticated,reject
|
||||
-o smtpd_client_connection_count_limit=1000
|
||||
-o milter_macro_daemon_name=ORIGINATING
|
||||
-o smtpd_proxy_filter=127.0.0.1:{{ config.filtermail_smtp_port }}
|
||||
#628 inet n - y - - qmqpd
|
||||
pickup unix n - y 60 1 pickup
|
||||
@@ -79,7 +81,6 @@ filter unix - n n - - lmtp
|
||||
# Local SMTP server for reinjecting outgoing filtered mail.
|
||||
127.0.0.1:{{ config.postfix_reinject_port }} inet n - n - 100 smtpd
|
||||
-o syslog_name=postfix/reinject
|
||||
-o milter_macro_daemon_name=ORIGINATING
|
||||
-o smtpd_milters=unix:opendkim/opendkim.sock
|
||||
-o cleanup_service_name=authclean
|
||||
|
||||
|
||||
@@ -1,2 +0,0 @@
|
||||
/^\[[^]]+\]$/ encrypt
|
||||
/^nauta\.cu$/ may
|
||||
@@ -37,10 +37,7 @@ def perform_initial_checks(mail_domain, pre_command=""):
|
||||
return res
|
||||
|
||||
# parse out sts-id if exists, example: "v=STSv1; id=2090123"
|
||||
mta_sts_txt = query_dns("TXT", f"_mta-sts.{mail_domain}")
|
||||
if not mta_sts_txt:
|
||||
return res
|
||||
parts = mta_sts_txt.split("id=")
|
||||
parts = query_dns("TXT", f"_mta-sts.{mail_domain}").split("id=")
|
||||
res["sts_id"] = parts[1].rstrip('"') if len(parts) == 2 else ""
|
||||
return res
|
||||
|
||||
|
||||
@@ -14,10 +14,8 @@ def main():
|
||||
importlib.resources.files("cmdeploy").joinpath("../../../chatmail.ini"),
|
||||
)
|
||||
disable_mail = bool(os.environ.get("CHATMAIL_DISABLE_MAIL"))
|
||||
website_only = bool(os.environ.get("CHATMAIL_WEBSITE_ONLY"))
|
||||
docker = bool(os.environ.get("CHATMAIL_DOCKER"))
|
||||
|
||||
deploy_chatmail(config_path, disable_mail, website_only, docker)
|
||||
deploy_chatmail(config_path, disable_mail)
|
||||
|
||||
|
||||
if pyinfra.is_cli:
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
[Service]
|
||||
Restart=always
|
||||
RestartSec=30
|
||||
@@ -2,10 +2,11 @@
|
||||
Description=Incoming Chatmail Postfix before queue filter
|
||||
|
||||
[Service]
|
||||
ExecStart={{ bin_path }} {{ config_path }} incoming
|
||||
ExecStart={execpath} {config_path} incoming
|
||||
Restart=always
|
||||
RestartSec=30
|
||||
User=vmail
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
Description=Outgoing Chatmail Postfix before queue filter
|
||||
|
||||
[Service]
|
||||
ExecStart={{ bin_path }} {{ config_path }} outgoing
|
||||
ExecStart={execpath} {config_path} outgoing
|
||||
Restart=always
|
||||
RestartSec=30
|
||||
User=vmail
|
||||
@@ -1,4 +1,5 @@
|
||||
import datetime
|
||||
import os
|
||||
import smtplib
|
||||
import socket
|
||||
import subprocess
|
||||
@@ -7,6 +8,7 @@ import time
|
||||
import pytest
|
||||
|
||||
from cmdeploy import remote
|
||||
from cmdeploy.cmdeploy import main
|
||||
from cmdeploy.sshexec import SSHExec
|
||||
|
||||
|
||||
@@ -68,6 +70,46 @@ class TestSSHExecutor:
|
||||
assert (now - since_date).total_seconds() < 60 * 60 * 51
|
||||
|
||||
|
||||
def test_status_cmd(chatmail_config, capsys, request):
|
||||
os.chdir(request.config.invocation_params.dir)
|
||||
assert main(["status"]) == 0
|
||||
status_out = capsys.readouterr()
|
||||
print(status_out.out)
|
||||
|
||||
services = [
|
||||
"acmetool-redirector",
|
||||
"chatmail-metadata",
|
||||
"doveauth",
|
||||
"dovecot",
|
||||
"fcgiwrap",
|
||||
"filtermail-incoming",
|
||||
"filtermail",
|
||||
"lastlogin",
|
||||
"nginx",
|
||||
"opendkim",
|
||||
"postfix@-",
|
||||
"systemd-journald",
|
||||
"turnserver",
|
||||
"unbound",
|
||||
]
|
||||
not_running = []
|
||||
for service in services:
|
||||
active = False
|
||||
for line in status_out:
|
||||
if service in line:
|
||||
active = True
|
||||
if not "loaded" in line:
|
||||
active = False
|
||||
if not "active" in line:
|
||||
active = False
|
||||
if not "running" in line:
|
||||
active = False
|
||||
break
|
||||
if not active:
|
||||
not_running.append(service)
|
||||
assert not_running == []
|
||||
|
||||
|
||||
def test_timezone_env(remote):
|
||||
for line in remote.iter_output("env"):
|
||||
print(line)
|
||||
@@ -189,14 +231,12 @@ def test_exceed_rate_limit(cmsetup, gencreds, maildata, chatmail_config):
|
||||
mail = maildata(
|
||||
"encrypted.eml", from_addr=user1.addr, to_addr=user2.addr
|
||||
).as_string()
|
||||
|
||||
start = time.time()
|
||||
for i in range(chatmail_config.max_user_send_per_minute * 3):
|
||||
print("Sending mail", str(i + 1), "at", time.time() - start, "s.")
|
||||
for i in range(chatmail_config.max_user_send_per_minute + 5):
|
||||
print("Sending mail", str(i))
|
||||
try:
|
||||
user1.smtp.sendmail(user1.addr, [user2.addr], mail)
|
||||
except smtplib.SMTPException as e:
|
||||
if i < chatmail_config.max_user_send_burst_size:
|
||||
if i < chatmail_config.max_user_send_per_minute:
|
||||
pytest.fail(f"rate limit was exceeded too early with msg {i}")
|
||||
outcome = e.recipients[user2.addr]
|
||||
assert outcome[0] == 450
|
||||
|
||||
@@ -17,7 +17,6 @@ def imap_mailbox(cmfactory):
|
||||
password = ac1.get_config("mail_pw")
|
||||
mailbox = imap_tools.MailBox(user.split("@")[1])
|
||||
mailbox.login(user, password)
|
||||
mailbox.dc_ac = ac1
|
||||
return mailbox
|
||||
|
||||
|
||||
@@ -122,28 +121,6 @@ class TestEndToEndDeltaChat:
|
||||
assert ch.id >= 10
|
||||
ac1._evtracker.wait_securejoin_inviter_progress(1000)
|
||||
|
||||
def test_dkim_header_stripped(self, cmfactory, maildomain2, lp, imap_mailbox):
|
||||
"""Test that if a DC address receives a message, it has no
|
||||
DKIM-Signature and Authentication-Results headers."""
|
||||
ac1 = cmfactory.new_online_configuring_account(cache=False)
|
||||
cmfactory.switch_maildomain(maildomain2)
|
||||
ac2 = cmfactory.new_online_configuring_account(cache=False)
|
||||
cmfactory.bring_accounts_online()
|
||||
chat = cmfactory.get_accepted_chat(ac1, imap_mailbox.dc_ac)
|
||||
chat.send_text("message0")
|
||||
chat2 = cmfactory.get_accepted_chat(ac2, imap_mailbox.dc_ac)
|
||||
chat2.send_text("message1")
|
||||
|
||||
lp.sec("receive message with ac1...")
|
||||
received = 0
|
||||
while received < 2:
|
||||
msgs = imap_mailbox.fetch()
|
||||
for msg in msgs:
|
||||
lp.sec(f"ac1 received msg from {msg.from_}")
|
||||
received += 1
|
||||
assert "authentication-results" not in msg.headers
|
||||
assert "dkim-signature" not in msg.headers
|
||||
|
||||
def test_read_receipts_between_instances(self, cmfactory, lp, maildomain2):
|
||||
ac1 = cmfactory.new_online_configuring_account(cache=False)
|
||||
cmfactory.switch_maildomain(maildomain2)
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
import os
|
||||
|
||||
from cmdeploy.cmdeploy import main
|
||||
|
||||
|
||||
def test_status_cmd(chatmail_config, capsys, request):
|
||||
os.chdir(request.config.invocation_params.dir)
|
||||
assert main(["status"]) == 0
|
||||
status_out = capsys.readouterr()
|
||||
print(status_out.out)
|
||||
|
||||
assert len(status_out.out.splitlines()) > 5
|
||||
|
||||
"""
|
||||
don't test actual server state:
|
||||
|
||||
services = [
|
||||
"acmetool-redirector",
|
||||
"chatmail-metadata",
|
||||
"doveauth",
|
||||
"dovecot",
|
||||
"fcgiwrap",
|
||||
"filtermail-incoming",
|
||||
"filtermail",
|
||||
"lastlogin",
|
||||
"nginx",
|
||||
"opendkim",
|
||||
"postfix@-",
|
||||
"systemd-journald",
|
||||
"turnserver",
|
||||
"unbound",
|
||||
]
|
||||
not_running = []
|
||||
for service in services:
|
||||
active = False
|
||||
for line in status_out:
|
||||
if service in line:
|
||||
active = True
|
||||
if not "loaded" in line:
|
||||
active = False
|
||||
if not "active" in line:
|
||||
active = False
|
||||
if not "running" in line:
|
||||
active = False
|
||||
break
|
||||
if not active:
|
||||
not_running.append(service)
|
||||
assert not_running == []
|
||||
"""
|
||||
@@ -1,4 +0,0 @@
|
||||
# Managed by cmdeploy: disable IPv6 in unbound.
|
||||
server:
|
||||
interface: 127.0.0.1
|
||||
do-ip6: no
|
||||
@@ -6,7 +6,7 @@ You can use the `make` command and `make html` to build web pages.
|
||||
|
||||
You need a Python environment where the following install was excuted:
|
||||
|
||||
pip install furo sphinx-autobuild
|
||||
pip install sphinx-build furo sphinx-autobuild
|
||||
|
||||
To develop/change documentation, you can then do:
|
||||
|
||||
|
||||
@@ -16,16 +16,15 @@ You will need the following:
|
||||
|
||||
- Control over a domain through a DNS provider of your choice.
|
||||
|
||||
- A Debian 12 **deployment server** with reachable SMTP/SUBMISSIONS/IMAPS/HTTPS ports.
|
||||
- A Debian 12 server with reachable SMTP/SUBMISSIONS/IMAPS/HTTPS ports.
|
||||
IPv6 is encouraged if available. Chatmail relay servers only require
|
||||
1GB RAM, one CPU, and perhaps 10GB storage for a few thousand active
|
||||
chatmail addresses.
|
||||
|
||||
- A Linux or Unix **build machine** with key-based SSH access to the root
|
||||
user of the deployment server.
|
||||
You must add a passphrase-protected private key to your local ssh-agent because you
|
||||
can’t type in your passphrase during deployment.
|
||||
(An ed25519 private key is required due to an `upstream bug in
|
||||
- Key-based SSH authentication to the root user. You must add a
|
||||
passphrase-protected private key to your local ssh-agent because you
|
||||
can’t type in your passphrase during deployment. (An ed25519 private
|
||||
key is required due to an `upstream bug in
|
||||
paramiko <https://github.com/paramiko/paramiko/issues/2191>`_)
|
||||
|
||||
|
||||
@@ -35,17 +34,16 @@ Setup with ``scripts/cmdeploy``
|
||||
We use ``chat.example.org`` as the chatmail domain in the following
|
||||
steps. Please substitute it with your own domain.
|
||||
|
||||
1. Setup the initial DNS records for your deployment server.
|
||||
The following is an example in the
|
||||
1. Setup the initial DNS records. The following is an example in the
|
||||
familiar BIND zone file format with a TTL of 1 hour (3600 seconds).
|
||||
Please substitute your domain and IP addresses.
|
||||
|
||||
::
|
||||
|
||||
chat.example.org. 3600 IN A 198.51.100.5
|
||||
chat.example.org. 3600 IN AAAA 2001:db8::5
|
||||
www.chat.example.org. 3600 IN CNAME chat.example.org.
|
||||
mta-sts.chat.example.org. 3600 IN CNAME chat.example.org.
|
||||
chat.example.com. 3600 IN A 198.51.100.5
|
||||
chat.example.com. 3600 IN AAAA 2001:db8::5
|
||||
www.chat.example.com. 3600 IN CNAME chat.example.com.
|
||||
mta-sts.chat.example.com. 3600 IN CNAME chat.example.com.
|
||||
|
||||
2. On your local PC, clone the repository and bootstrap the Python
|
||||
virtualenv.
|
||||
@@ -56,20 +54,20 @@ steps. Please substitute it with your own domain.
|
||||
cd relay
|
||||
scripts/initenv.sh
|
||||
|
||||
3. On your local build machine (PC), create a chatmail configuration file
|
||||
3. On your local PC, create chatmail configuration file
|
||||
``chatmail.ini``:
|
||||
|
||||
::
|
||||
|
||||
scripts/cmdeploy init chat.example.org # <-- use your domain
|
||||
|
||||
4. Verify that SSH root login to the deployment server server works:
|
||||
4. Verify that SSH root login to your remote server works:
|
||||
|
||||
::
|
||||
|
||||
ssh root@chat.example.org # <-- use your domain
|
||||
|
||||
5. From your local build machine, setup and configure the remote deployment server:
|
||||
5. From your local PC, deploy the remote chatmail relay server:
|
||||
|
||||
::
|
||||
|
||||
@@ -80,17 +78,10 @@ steps. Please substitute it with your own domain.
|
||||
configure at your DNS provider (it can take some time until they are
|
||||
public).
|
||||
|
||||
Docker installation
|
||||
-------------------
|
||||
|
||||
We have experimental support for `docker compose <https://github.com/chatmail/relay/blob/docker-rebase/docs/DOCKER_INSTALLATION_EN.md>`_,
|
||||
but it is not covered by automated tests yet,
|
||||
so don't expect everything to work.
|
||||
|
||||
Other helpful commands
|
||||
----------------------
|
||||
|
||||
To check the status of your deployment server running the chatmail service:
|
||||
To check the status of your remotely running chatmail service:
|
||||
|
||||
::
|
||||
|
||||
@@ -167,7 +158,7 @@ Disable automatic address creation
|
||||
--------------------------------------------------------
|
||||
|
||||
If you need to stop address creation, e.g. because some script is wildly
|
||||
creating addresses, login with ssh to the deployment machine and run:
|
||||
creating addresses, login with ssh and run:
|
||||
|
||||
::
|
||||
|
||||
@@ -176,23 +167,3 @@ creating addresses, login with ssh to the deployment machine and run:
|
||||
Chatmail address creation will be denied while this file is present.
|
||||
|
||||
|
||||
Migrating to a new build machine
|
||||
----------------------------------
|
||||
|
||||
To move or add a build machine,
|
||||
clone the relay repository on the new build machine, and copy the ``chatmail.ini`` file from the old build machine.
|
||||
Make sure ``rsync`` is installed, then initialize the environment:
|
||||
|
||||
::
|
||||
|
||||
./scripts/initenv.sh
|
||||
|
||||
Run safety checks before a new deployment:
|
||||
|
||||
::
|
||||
|
||||
./scripts/cmdeploy dns
|
||||
./scripts/cmdeploy status
|
||||
|
||||
If you keep multiple build machines (ie laptop and desktop), keep ``chatmail.ini`` in sync between
|
||||
them.
|
||||
|
||||
@@ -1,98 +1,72 @@
|
||||
|
||||
Migrating to a new machine
|
||||
===========================
|
||||
Migrating to a new host
|
||||
-----------------------
|
||||
|
||||
This migration tutorial provides a step-wise approach
|
||||
to safely migrate a chatmail relay from one remote machine to another.
|
||||
If you want to migrate chatmail relay from an old machine to a new
|
||||
machine, you can use these steps. They were tested with a Linux laptop;
|
||||
you might need to adjust some of the steps to your environment.
|
||||
|
||||
Preliminary notes and assumptions
|
||||
---------------------------------
|
||||
Let’s assume that your ``mail_domain`` is ``mail.example.org``, all
|
||||
involved machines run Debian 12, your old site’s IP address is
|
||||
``13.37.13.37``, and your new site’s IP address is ``13.12.23.42``.
|
||||
|
||||
- If the migration is a planned move,
|
||||
it's recommended to lower the Time To Live (TTL) of your DNS records to a value such as 300 (5 minutes),
|
||||
at best much earlier than the actual planned migration.
|
||||
This speeds up propagation of DNS changes in the Internet after the migration is complete.
|
||||
Note, you should lower the TTLs of your DNS records to a value such as
|
||||
300 (5 minutes) so the migration happens as smoothly as possible.
|
||||
|
||||
- The migration steps were tested with a Linux laptop; you might need to adjust some of the steps to your local environment.
|
||||
During the guide you might get a warning about changed SSH Host keys; in
|
||||
this case, just run ``ssh-keygen -R "mail.example.org"`` as recommended.
|
||||
|
||||
- Your ``mail_domain`` is ``mail.example.org``.
|
||||
|
||||
- All remote machines run Debian 12.
|
||||
|
||||
- The old site’s IP version 4 address is ``$OLD_IP4``.
|
||||
|
||||
- The new site’s IP addresses are ``$NEW_IP4`` and ``$NEW_IPV6``.
|
||||
|
||||
|
||||
The six steps to migrate
|
||||
------------------------
|
||||
|
||||
Note that during some of the following steps you might get a warning about changed SSH Host keys;
|
||||
in this case, just run ``ssh-keygen -R "mail.example.org"`` as recommended.
|
||||
|
||||
|
||||
1. **Initially transfer mailboxes from old to new site.**
|
||||
|
||||
Login to old site, forwarding your ssh-agent with ``ssh -A``
|
||||
to allow using ssh to directly copy files from old to new site.
|
||||
::
|
||||
|
||||
ssh -A root@$OLD_IP4
|
||||
tar c /home/vmail/mail | ssh root@$NEW_IP4 "tar x -C /"
|
||||
|
||||
|
||||
2. **Pre-configure the new site but keep it inactive until step 6**
|
||||
::
|
||||
|
||||
CMDEPLOY_STAGES=install,configure scripts/cmdeploy run --ssh-host $NEW_IP4
|
||||
|
||||
|
||||
3. **It's getting serious: disable mail services on the old site.**
|
||||
Users will not be able to send or receive messages until all steps are completed.
|
||||
Other relays and mail servers will retry delivering messages from time to time,
|
||||
so nothing is lost for users.
|
||||
1. First, disable mail services on the old site.
|
||||
|
||||
::
|
||||
|
||||
scripts/cmdeploy run --disable-mail --ssh-host $OLD_IP4
|
||||
cmdeploy run --disable-mail --ssh-host 13.37.13.37
|
||||
|
||||
Now your users will notice the migration and will not be able to send
|
||||
or receive messages until the migration is completed.
|
||||
|
||||
4. **Final synchronization of TLS/DKIM secrets, mail queues and mailboxes.**
|
||||
Again we use ssh-agent forwarding (``-A``) to allow transfering all important data directly
|
||||
from the old to the new site.
|
||||
::
|
||||
|
||||
ssh -A root@$OLD_IP4
|
||||
tar c /var/lib/acme /etc/dkimkeys /var/spool/postfix | ssh root@$NEW_IP4 "tar x -C /"
|
||||
rsync -azH /home/vmail/mail root@$NEW_IP4:/home/vmail/
|
||||
|
||||
Login to the new site and ensure file ownerships are correctly set:
|
||||
2. Now we want to copy ``/home/vmail``, ``/var/lib/acme``,
|
||||
``/etc/dkimkeys``, and ``/var/spool/postfix`` to
|
||||
the new site. Login to the old site while forwarding your SSH agent
|
||||
so you can copy directly from the old to the new site with your SSH
|
||||
key:
|
||||
|
||||
::
|
||||
|
||||
ssh -A root@13.37.13.37
|
||||
tar c - /home/vmail/mail /var/lib/acme /etc/dkimkeys /var/spool/postfix | ssh root@13.12.23.42 "tar x -C /"
|
||||
|
||||
This transfers all addresses, the TLS certificate,
|
||||
and DKIM keys (so DKIM DNS record remains valid).
|
||||
It also preserves the Postfix mail spool so any messages
|
||||
pending delivery will still be delivered.
|
||||
|
||||
3. Install chatmail on the new machine:
|
||||
|
||||
::
|
||||
|
||||
cmdeploy run --disable-mail --ssh-host 13.12.23.42
|
||||
|
||||
Postfix and Dovecot are disabled for now; we will enable them later.
|
||||
We first need to make the new site fully operational.
|
||||
|
||||
4. On the new site, run the following to ensure the ownership is correct
|
||||
in case UIDs/GIDs changed:
|
||||
|
||||
::
|
||||
|
||||
ssh root@$NEW_IP4
|
||||
chown root: -R /var/lib/acme
|
||||
chown opendkim: -R /etc/dkimkeys
|
||||
chown vmail: -R /home/vmail/mail
|
||||
|
||||
5. Now, update DNS entries.
|
||||
|
||||
5. **Update the DNS entries to point to the new site.**
|
||||
You only need to change the ``A`` and ``AAAA`` records, for example:
|
||||
If other MTAs try to deliver messages to your chatmail domain they
|
||||
may fail intermittently, as DNS catches up with the new site settings
|
||||
but normally will retry delivering messages for at least a week, so
|
||||
messages will not be lost.
|
||||
|
||||
::
|
||||
|
||||
mail.example.org. IN A $NEW_IP4
|
||||
mail.example.org. IN AAAA $NEW_IP6
|
||||
|
||||
|
||||
6. **Activate chatmail relay on new site.**
|
||||
|
||||
::
|
||||
|
||||
CMDEPLOY_STAGES=activate scripts/cmdeploy run --ssh-host $NEW_IP4
|
||||
|
||||
Voilà!
|
||||
Users will be able to use the relay as soon as the DNS changes have propagated.
|
||||
If you have lowered the Time-to-Live for DNS records in step 1,
|
||||
better use a higher value again (between 14400 and 86400 seconds) once you are sure everything works.
|
||||
6. Finally, you can execute ``cmdeploy run --ssh-host 13.12.23.42`` to
|
||||
turn on chatmail on the new relay. Your users will be able to use the
|
||||
chatmail relay as soon as the DNS changes have propagated. Voilà!
|
||||
|
||||
|
||||
@@ -42,11 +42,6 @@ The deployed system components of a chatmail relay are:
|
||||
- Dovecot_ is the Mail Delivery Agent (MDA) and
|
||||
stores messages for users until they download them
|
||||
|
||||
- `filtermail <https://github.com/chatmail/filtermail>`_
|
||||
prevents unencrypted email from leaving or entering the chatmail
|
||||
service and is integrated into Postfix’s outbound and inbound mail
|
||||
pipelines.
|
||||
|
||||
- Nginx_ shows the web page with privacy policy and additional information
|
||||
|
||||
- `acmetool <https://hlandau.github.io/acmetool/>`_ manages TLS
|
||||
@@ -90,6 +85,11 @@ short overview of ``chatmaild`` services:
|
||||
<https://doc.dovecot.org/2.3/configuration_manual/authentication/dict/#complete-example-for-authenticating-via-a-unix-socket>`_
|
||||
to authenticate logins.
|
||||
|
||||
- `filtermail <https://github.com/chatmail/relay/blob/main/chatmaild/src/chatmaild/filtermail.py>`_
|
||||
prevents unencrypted email from leaving or entering the chatmail
|
||||
service and is integrated into Postfix’s outbound and inbound mail
|
||||
pipelines.
|
||||
|
||||
- `chatmail-metadata <https://github.com/chatmail/relay/blob/main/chatmaild/src/chatmaild/metadata.py>`_
|
||||
is contacted by a `Dovecot lua
|
||||
script <https://github.com/chatmail/relay/blob/main/cmdeploy/src/cmdeploy/dovecot/push_notification.lua>`_
|
||||
|
||||
@@ -7,21 +7,14 @@ Active development takes place in the `chatmail/relay github repository <https:/
|
||||
You can check out the `'chatmail' tag in the support.delta.chat forum <https://support.delta.chat/tag/chatmail>`_
|
||||
and ask to get added to a non-public support chat for debugging issues.
|
||||
|
||||
We know of three work-in-progress alternative implementation efforts:
|
||||
We know of two work-in-progress alternative implementation efforts:
|
||||
|
||||
- `Mox <https://github.com/mjl-/mox>`_: A Golang email server. `Work
|
||||
is in progress <https://github.com/mjl-/mox/issues/251>`_ to modify
|
||||
it to support all of the features and configuration settings required
|
||||
to operate as a chatmail relay.
|
||||
|
||||
- `Madmail <https://github.com/themadorg/madmail>`_: an
|
||||
experimental fork of `Maddy Mail Server <https://maddy.email/>`_, modified
|
||||
for chatmail deployments. It provides a single binary solution
|
||||
for running a chatmail relay.
|
||||
|
||||
- `Chatmail Cookbook <https://github.com/feld/chatmail-cookbook>`_:
|
||||
A Chef Cookbook implementing a relay server. The project follows the
|
||||
official relay server software and configurations converted to a Chef
|
||||
Cookbook with only minor differences. The cookbook uses DNS-01 for
|
||||
certificate validation and additionally supports FreeBSD. It does not
|
||||
require a Chef server to use.
|
||||
- `Maddy-Chatmail <https://github.com/sadraiiali/maddy_chatmail>`_: a
|
||||
plugin for the `Maddy email server <https://maddy.email/>`_ which
|
||||
aims to implement the chatmail relay features and configuration
|
||||
options.
|
||||
|
||||
@@ -1,52 +0,0 @@
|
||||
services:
|
||||
chatmail:
|
||||
build:
|
||||
context: ./
|
||||
dockerfile: docker/chatmail_relay.dockerfile
|
||||
image: chatmail-relay:latest
|
||||
restart: unless-stopped
|
||||
container_name: chatmail
|
||||
# Required for systemd — use only one of the following:
|
||||
cgroup: host # compose v2 only
|
||||
# privileged: true # compose v1 (not tested)
|
||||
tty: true # required for logs
|
||||
tmpfs: # required for systemd
|
||||
- /tmp
|
||||
- /run
|
||||
- /run/lock
|
||||
logging:
|
||||
driver: json-file
|
||||
options:
|
||||
max-size: "10m"
|
||||
max-file: "3"
|
||||
environment:
|
||||
CHANGE_KERNEL_SETTINGS: "False"
|
||||
MAIL_DOMAIN: $MAIL_DOMAIN
|
||||
ACME_EMAIL: $ACME_EMAIL
|
||||
RECREATE_VENV: $RECREATE_VENV
|
||||
MAX_MESSAGE_SIZE: $MAX_MESSAGE_SIZE
|
||||
DEBUG_COMMANDS_ENABLED: $DEBUG_COMMANDS_ENABLED
|
||||
FORCE_REINIT_INI_FILE: $FORCE_REINIT_INI_FILE
|
||||
USE_FOREIGN_CERT_MANAGER: $USE_FOREIGN_CERT_MANAGER
|
||||
ENABLE_CERTS_MONITORING: $ENABLE_CERTS_MONITORING
|
||||
CERTS_MONITORING_TIMEOUT: $CERTS_MONITORING_TIMEOUT
|
||||
IS_DEVELOPMENT_INSTANCE: $IS_DEVELOPMENT_INSTANCE
|
||||
CMDEPLOY_STAGES: ${CMDEPLOY_STAGES:-}
|
||||
network_mode: "host"
|
||||
volumes:
|
||||
## system
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:rw # required for systemd
|
||||
- ./:/opt/chatmail
|
||||
|
||||
## data
|
||||
- ./data/chatmail:/home
|
||||
- ./data/chatmail-dkimkeys:/etc/dkimkeys
|
||||
- ./data/chatmail-acme:/var/lib/acme
|
||||
|
||||
## custom resources
|
||||
# - ./custom/www/src/index.md:/opt/chatmail/www/src/index.md
|
||||
|
||||
## debug
|
||||
# - ./docker/files/setup_chatmail_docker.sh:/setup_chatmail_docker.sh
|
||||
# - ./docker/files/entrypoint.sh:/entrypoint.sh
|
||||
# - ./docker/files/update_ini.sh:/update_ini.sh
|
||||
@@ -1,100 +0,0 @@
|
||||
FROM jrei/systemd-debian:12 AS base
|
||||
|
||||
ENV LANG=en_US.UTF-8
|
||||
|
||||
RUN echo 'APT::Install-Recommends "0";' > /etc/apt/apt.conf.d/01norecommend && \
|
||||
echo 'APT::Install-Suggests "0";' >> /etc/apt/apt.conf.d/01norecommend && \
|
||||
apt-get update && \
|
||||
apt-get install -y \
|
||||
ca-certificates && \
|
||||
DEBIAN_FRONTEND=noninteractive \
|
||||
TZ=Europe/London \
|
||||
apt-get install -y tzdata && \
|
||||
apt-get install -y locales && \
|
||||
sed -i -e "s/# $LANG.*/$LANG UTF-8/" /etc/locale.gen && \
|
||||
dpkg-reconfigure --frontend=noninteractive locales && \
|
||||
update-locale LANG=$LANG \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y \
|
||||
git \
|
||||
python3 \
|
||||
python3-venv \
|
||||
python3-virtualenv \
|
||||
gcc \
|
||||
python3-dev \
|
||||
opendkim \
|
||||
opendkim-tools \
|
||||
curl \
|
||||
rsync \
|
||||
unbound \
|
||||
unbound-anchor \
|
||||
dnsutils \
|
||||
postfix \
|
||||
acl \
|
||||
nginx \
|
||||
libnginx-mod-stream \
|
||||
fcgiwrap \
|
||||
cron \
|
||||
&& for pkg in core imapd lmtpd; do \
|
||||
case "$pkg" in \
|
||||
core) sha256="43f593332e22ac7701c62d58b575d2ca409e0f64857a2803be886c22860f5587" ;; \
|
||||
imapd) sha256="8d8dc6fc00bbb6cdb25d345844f41ce2f1c53f764b79a838eb2a03103eebfa86" ;; \
|
||||
lmtpd) sha256="2f69ba5e35363de50962d42cccbfe4ed8495265044e244007d7ccddad77513ab" ;; \
|
||||
esac; \
|
||||
url="https://download.delta.chat/dovecot/dovecot-${pkg}_2.3.21%2Bdfsg1-3_amd64.deb"; \
|
||||
file="/tmp/$(basename "$url")"; \
|
||||
curl -fsSL "$url" -o "$file"; \
|
||||
echo "$sha256 $file" | sha256sum -c -; \
|
||||
apt-get install -y "$file"; \
|
||||
rm -f "$file"; \
|
||||
done \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /opt/chatmail
|
||||
|
||||
# --- Build-time install stage ---
|
||||
# Bake the "install" deployer stage into the image; we can't use
|
||||
# scripts/initenv.sh because /opt/chatmail is empty at build time as
|
||||
# source arrives at runtime via volume mount., so we use a throwaway venv.
|
||||
# On container start only "configure,activate" stages run.
|
||||
COPY . /tmp/chatmail-src/
|
||||
WORKDIR /tmp/chatmail-src
|
||||
|
||||
# Dummy config — deploy_chatmail() needs a parseable ini to instantiate deployers
|
||||
RUN printf '[params]\nmail_domain = build.local\n' > /tmp/chatmail.ini
|
||||
|
||||
# Do what initenv.sh would do without the docs
|
||||
RUN python3 -m venv /tmp/build-venv && \
|
||||
/tmp/build-venv/bin/pip install --no-cache-dir \
|
||||
-e chatmaild -e cmdeploy
|
||||
|
||||
RUN CMDEPLOY_STAGES=install \
|
||||
CHATMAIL_INI=/tmp/chatmail.ini \
|
||||
CHATMAIL_DOCKER=True \
|
||||
/tmp/build-venv/bin/pyinfra @local \
|
||||
/tmp/chatmail-src/cmdeploy/src/cmdeploy/run.py -y
|
||||
|
||||
RUN rm -rf /tmp/chatmail-src /tmp/build-venv /tmp/chatmail.ini
|
||||
|
||||
WORKDIR /opt/chatmail
|
||||
# --- End build-time install stage ---
|
||||
|
||||
ARG SETUP_CHATMAIL_SERVICE_PATH=/lib/systemd/system/setup_chatmail.service
|
||||
COPY ./docker/files/setup_chatmail.service "$SETUP_CHATMAIL_SERVICE_PATH"
|
||||
RUN ln -sf "$SETUP_CHATMAIL_SERVICE_PATH" "/etc/systemd/system/multi-user.target.wants/setup_chatmail.service"
|
||||
|
||||
COPY --chmod=555 ./docker/files/setup_chatmail_docker.sh /setup_chatmail_docker.sh
|
||||
COPY --chmod=555 ./docker/files/update_ini.sh /update_ini.sh
|
||||
COPY --chmod=555 ./docker/files/entrypoint.sh /entrypoint.sh
|
||||
|
||||
VOLUME ["/sys/fs/cgroup", "/home"]
|
||||
|
||||
STOPSIGNAL SIGRTMIN+3
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
|
||||
CMD [ "--default-standard-output=journal+console", \
|
||||
"--default-standard-error=journal+console" ]
|
||||
|
||||
@@ -1,84 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Convert a chatmail.ini to a Docker .env file.
|
||||
|
||||
Usage: python docker/cm_ini_to_env.py [chatmail.ini] [.env]
|
||||
|
||||
Reads the ini file, extracts all non-default key=value pairs,
|
||||
and writes them as UPPER_CASE env vars suitable for docker-compose.
|
||||
"""
|
||||
|
||||
import configparser
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Keys that only make sense for bare-metal deploys or are handled
|
||||
# separately by the Docker setup and should not appear in .env.
|
||||
SKIP_KEYS = set()
|
||||
|
||||
# Keys that exist in .env but have a different name than the ini key.
|
||||
# ini_key -> env_key
|
||||
RENAMES = {}
|
||||
|
||||
|
||||
def read_ini(path):
|
||||
"""Return dict of key=value from [params] section."""
|
||||
cp = configparser.ConfigParser()
|
||||
cp.read(path)
|
||||
if not cp.has_section("params"):
|
||||
sys.exit(f"Error: {path} has no [params] section")
|
||||
return dict(cp.items("params"))
|
||||
|
||||
|
||||
def read_defaults():
|
||||
"""Return dict of default values from the ini template."""
|
||||
template = Path(__file__).resolve().parent.parent / "chatmaild/src/chatmaild/ini/chatmail.ini.f"
|
||||
if not template.exists():
|
||||
return {}
|
||||
cp = configparser.ConfigParser()
|
||||
cp.read(template)
|
||||
if not cp.has_section("params"):
|
||||
return {}
|
||||
defaults = {}
|
||||
for key, value in cp.items("params"):
|
||||
# Template placeholders like {mail_domain} aren't real defaults.
|
||||
if "{" not in value:
|
||||
defaults[key] = value
|
||||
return defaults
|
||||
|
||||
|
||||
def ini_to_env(ini_path, only_non_default=True):
|
||||
"""Yield (ENV_KEY, value) pairs from an ini file."""
|
||||
params = read_ini(ini_path)
|
||||
defaults = read_defaults() if only_non_default else {}
|
||||
|
||||
for key, value in sorted(params.items()):
|
||||
if key in SKIP_KEYS:
|
||||
continue
|
||||
if only_non_default and key in defaults and value.strip() == defaults[key].strip():
|
||||
continue
|
||||
env_key = RENAMES.get(key, key.upper())
|
||||
yield env_key, value.strip()
|
||||
|
||||
|
||||
def main():
|
||||
ini_path = sys.argv[1] if len(sys.argv) > 1 else "chatmail.ini"
|
||||
env_path = sys.argv[2] if len(sys.argv) > 2 else None
|
||||
|
||||
if not Path(ini_path).exists():
|
||||
sys.exit(f"Error: {ini_path} not found")
|
||||
|
||||
lines = []
|
||||
for env_key, value in ini_to_env(ini_path):
|
||||
lines.append(f'{env_key}="{value}"')
|
||||
|
||||
output = "\n".join(lines) + "\n"
|
||||
|
||||
if env_path:
|
||||
Path(env_path).write_text(output)
|
||||
print(f"Wrote {len(lines)} variables to {env_path}")
|
||||
else:
|
||||
print(output, end="")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,11 +0,0 @@
|
||||
MAIL_DOMAIN="chat.example.com"
|
||||
# ACME_EMAIL=""
|
||||
# RECREATE_VENV="false"
|
||||
# MAX_MESSAGE_SIZE="50M"
|
||||
# DEBUG_COMMANDS_ENABLED="true"
|
||||
# FORCE_REINIT_INI_FILE="true"
|
||||
# USE_FOREIGN_CERT_MANAGER="True"
|
||||
# ENABLE_CERTS_MONITORING="true"
|
||||
# CERTS_MONITORING_TIMEOUT=10
|
||||
# IS_DEVELOPMENT_INSTANCE="True"
|
||||
# CMDEPLOY_STAGES - default: "configure,activate". Set to "install,configure,activate" to force full reinstall.
|
||||
@@ -1,11 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -eo pipefail
|
||||
|
||||
unlink /etc/nginx/sites-enabled/default || true
|
||||
|
||||
SETUP_CHATMAIL_SERVICE_PATH="${SETUP_CHATMAIL_SERVICE_PATH:-/lib/systemd/system/setup_chatmail.service}"
|
||||
|
||||
env_vars=$(printenv | cut -d= -f1 | xargs)
|
||||
sed -i "s|<envs_list>|$env_vars|g" $SETUP_CHATMAIL_SERVICE_PATH
|
||||
|
||||
exec /lib/systemd/systemd $@
|
||||
@@ -1,14 +0,0 @@
|
||||
[Unit]
|
||||
Description=Run container setup commands
|
||||
After=multi-user.target
|
||||
ConditionPathExists=/setup_chatmail_docker.sh
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=/bin/bash /setup_chatmail_docker.sh
|
||||
RemainAfterExit=true
|
||||
WorkingDirectory=/opt/chatmail
|
||||
PassEnvironment=<envs_list>
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,84 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eo pipefail
|
||||
export INI_FILE="${INI_FILE:-chatmail.ini}"
|
||||
export ENABLE_CERTS_MONITORING="${ENABLE_CERTS_MONITORING:-true}"
|
||||
export CERTS_MONITORING_TIMEOUT="${CERTS_MONITORING_TIMEOUT:-60}"
|
||||
export PATH_TO_SSL="${PATH_TO_SSL:-/var/lib/acme/live/${MAIL_DOMAIN}}"
|
||||
export CHANGE_KERNEL_SETTINGS=${CHANGE_KERNEL_SETTINGS:-"False"}
|
||||
export RECREATE_VENV=${RECREATE_VENV:-"false"}
|
||||
|
||||
if [ -z "$MAIL_DOMAIN" ]; then
|
||||
echo "ERROR: Environment variable 'MAIL_DOMAIN' must be set!" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
debug_commands() {
|
||||
echo "Executing debug commands"
|
||||
# git config --global --add safe.directory /opt/chatmail
|
||||
# ./scripts/initenv.sh
|
||||
}
|
||||
|
||||
calculate_hash() {
|
||||
find "$PATH_TO_SSL" -type f -exec sha1sum {} \; | sort | sha1sum | awk '{print $1}'
|
||||
}
|
||||
|
||||
monitor_certificates() {
|
||||
if [ "$ENABLE_CERTS_MONITORING" != "true" ]; then
|
||||
echo "Certs monitoring disabled."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
current_hash=$(calculate_hash)
|
||||
previous_hash=$current_hash
|
||||
|
||||
while true; do
|
||||
current_hash=$(calculate_hash)
|
||||
if [[ "$current_hash" != "$previous_hash" ]]; then
|
||||
# TODO: add an option to restart at a specific time interval
|
||||
echo "[INFO] Certificate's folder hash was changed, reloading nginx, dovecot and postfix services."
|
||||
systemctl reload nginx.service
|
||||
systemctl reload dovecot.service
|
||||
systemctl reload postfix.service
|
||||
previous_hash=$current_hash
|
||||
fi
|
||||
sleep $CERTS_MONITORING_TIMEOUT
|
||||
done
|
||||
}
|
||||
|
||||
### MAIN
|
||||
|
||||
if [ "$DEBUG_COMMANDS_ENABLED" = true ]; then
|
||||
debug_commands
|
||||
fi
|
||||
|
||||
if [ "$FORCE_REINIT_INI_FILE" = true ]; then
|
||||
INI_CMD_ARGS=--force
|
||||
fi
|
||||
|
||||
if [ ! -f /etc/dkimkeys/opendkim.private ]; then
|
||||
/usr/sbin/opendkim-genkey -D /etc/dkimkeys -d $MAIL_DOMAIN -s opendkim
|
||||
fi
|
||||
chown opendkim:opendkim /etc/dkimkeys/opendkim.private
|
||||
chown opendkim:opendkim /etc/dkimkeys/opendkim.txt
|
||||
|
||||
# TODO: Move to debug_commands after git clone is moved to dockerfile.
|
||||
git config --global --add safe.directory /opt/chatmail
|
||||
if [ "$RECREATE_VENV" = true ]; then
|
||||
rm -rf venv
|
||||
fi
|
||||
# Skip venv creation if it already exists
|
||||
if [ ! -x venv/bin/python ] || [ ! -x venv/bin/cmdeploy ]; then
|
||||
./scripts/initenv.sh
|
||||
fi
|
||||
|
||||
./scripts/cmdeploy init --config "${INI_FILE}" $INI_CMD_ARGS $MAIL_DOMAIN || true
|
||||
bash /update_ini.sh
|
||||
|
||||
export CMDEPLOY_STAGES="${CMDEPLOY_STAGES:-configure,activate}"
|
||||
./scripts/cmdeploy run --ssh-host @docker
|
||||
|
||||
echo "ForwardToConsole=yes" >> /etc/systemd/journald.conf
|
||||
systemctl restart systemd-journald
|
||||
|
||||
monitor_certificates &
|
||||
@@ -1,79 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -eo pipefail
|
||||
|
||||
INI_FILE="${INI_FILE:-chatmail.ini}"
|
||||
|
||||
if [ ! -f "$INI_FILE" ]; then
|
||||
echo "Error: file $INI_FILE not found." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
TMP_FILE="$(mktemp)"
|
||||
|
||||
convert_to_bytes() {
|
||||
local value="$1"
|
||||
if [[ "$value" =~ ^([0-9]+)([KkMmGgTt])$ ]]; then
|
||||
local num="${BASH_REMATCH[1]}"
|
||||
local unit="${BASH_REMATCH[2]}"
|
||||
case "$unit" in
|
||||
[Kk]) echo $((num * 1024)) ;;
|
||||
[Mm]) echo $((num * 1024 * 1024)) ;;
|
||||
[Gg]) echo $((num * 1024 * 1024 * 1024)) ;;
|
||||
[Tt]) echo $((num * 1024 * 1024 * 1024 * 1024)) ;;
|
||||
esac
|
||||
elif [[ "$value" =~ ^[0-9]+$ ]]; then
|
||||
echo "$value"
|
||||
else
|
||||
echo "Error: incorrect size format: $value." >&2
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
process_specific_params() {
|
||||
local key=$1
|
||||
local value=$2
|
||||
local destination_file=$3
|
||||
|
||||
if [[ "$key" == "max_message_size" ]]; then
|
||||
converted=$(convert_to_bytes "$value") || exit 1
|
||||
if grep -q -e "## .* = .* bytes" "$destination_file"; then
|
||||
sed "s|## .* = .* bytes|## $value = $converted bytes|g" "$destination_file";
|
||||
else
|
||||
echo "## $value = $converted bytes" >> "$destination_file"
|
||||
fi
|
||||
echo "$key = $converted" >> "$destination_file"
|
||||
else
|
||||
echo "$key = $value" >> "$destination_file"
|
||||
fi
|
||||
}
|
||||
|
||||
while IFS= read -r line; do
|
||||
if [[ "$line" =~ ^[[:space:]]*#.* || "$line" =~ ^[[:space:]]*$ ]]; then
|
||||
echo "$line" >> "$TMP_FILE"
|
||||
continue
|
||||
fi
|
||||
|
||||
if [[ "$line" =~ ^([a-z0-9_]+)[[:space:]]*=[[:space:]]*(.*)$ ]]; then
|
||||
key="${BASH_REMATCH[1]}"
|
||||
current_value="${BASH_REMATCH[2]}"
|
||||
env_var_name=$(echo "$key" | tr 'a-z' 'A-Z')
|
||||
env_value="${!env_var_name}"
|
||||
|
||||
if [[ -n "$env_value" ]]; then
|
||||
process_specific_params "$key" "$env_value" "$TMP_FILE"
|
||||
else
|
||||
echo "$line" >> "$TMP_FILE"
|
||||
fi
|
||||
else
|
||||
echo "$line" >> "$TMP_FILE"
|
||||
fi
|
||||
done < "$INI_FILE"
|
||||
|
||||
PERMS=$(stat -c %a "$INI_FILE")
|
||||
OWNER=$(stat -c %u "$INI_FILE")
|
||||
GROUP=$(stat -c %g "$INI_FILE")
|
||||
|
||||
chmod "$PERMS" "$TMP_FILE"
|
||||
chown "$OWNER":"$GROUP" "$TMP_FILE"
|
||||
|
||||
mv "$TMP_FILE" "$INI_FILE"
|
||||
@@ -1,185 +0,0 @@
|
||||
# Known issues and limitations
|
||||
|
||||
- Requires cgroups v2 configured in the system. Operation with cgroups v1 has not been tested.
|
||||
- Yes, of course, using systemd inside a container is a hack, and it would be better to split it into several services, but since this is an MVP, it turned out to be easier to do it this way initially than to rewrite the entire deployment system.
|
||||
- The Docker image is only suitable for amd64. If you need to run it on a different architecture, try modifying the Dockerfile (specifically the part responsible for installing dovecot).
|
||||
|
||||
# Docker installation
|
||||
This section provides instructions for installing Chatmail using Docker Compose.
|
||||
|
||||
**Note:** Docker Compose v2 is required (`docker compose`, not `docker-compose`), because the `cgroup: host` option in `docker-compose.yaml` is only supported by Compose v2.
|
||||
[see documentation](https://docs.docker.com/engine/install/debian/#install-using-the-repository)
|
||||
```shell
|
||||
apt install docker-ce docker-compose-plugin docker.io- docker-compose-
|
||||
```
|
||||
|
||||
## Preliminary setup
|
||||
We use `chat.example.com` as the Chatmail domain in the following steps.
|
||||
Please substitute it with your own domain.
|
||||
|
||||
1. Setup the initial DNS records.
|
||||
The following is an example in the familiar BIND zone file format with
|
||||
a TTL of 1 hour (3600 seconds).
|
||||
Please substitute your domain and IP addresses.
|
||||
|
||||
```
|
||||
chat.example.com. 3600 IN A 198.51.100.5
|
||||
chat.example.com. 3600 IN AAAA 2001:db8::5
|
||||
www.chat.example.com. 3600 IN CNAME chat.example.com.
|
||||
mta-sts.chat.example.com. 3600 IN CNAME chat.example.com.
|
||||
```
|
||||
|
||||
2. clone the repository on your server.
|
||||
|
||||
```shell
|
||||
git clone https://github.com/chatmail/relay
|
||||
cd relay
|
||||
```
|
||||
|
||||
## Installation
|
||||
|
||||
1. Configure kernel parameters because they cannot be changed inside the container, specifically `fs.inotify.max_user_instances` and `fs.inotify.max_user_watches`. Run the following:
|
||||
|
||||
```shell
|
||||
echo "fs.inotify.max_user_instances=65536" | sudo tee -a /etc/sysctl.d/99-inotify.conf
|
||||
echo "fs.inotify.max_user_watches=65536" | sudo tee -a /etc/sysctl.d/99-inotify.conf
|
||||
sudo sysctl --system
|
||||
```
|
||||
|
||||
2. Copy `./docker/example.env` and rename it to `.env`. This file stores variables used in `docker-compose.yaml`.
|
||||
|
||||
```shell
|
||||
cp ./docker/example.env .env
|
||||
```
|
||||
|
||||
3. Configure environment variables in the `.env` file. These variables are used in the `docker-compose.yaml` file to pass repeated values.
|
||||
Below is the list of variables used during deployment:
|
||||
|
||||
- `MAIL_DOMAIN` – The domain name of the future server. (required)
|
||||
- `DEBUG_COMMANDS_ENABLED` – Run debug commands before installation. (default: `false`)
|
||||
- `FORCE_REINIT_INI_FILE` – Recreate the ini configuration file on startup. (default: `false`)
|
||||
- `USE_FOREIGN_CERT_MANAGER` – Use a third-party certificate manager. (default: `false`)
|
||||
- `RECREATE_VENV` - Recreate the virtual environment (venv). If set to `true`, the environment will be recreated when the container starts, which will increase the startup time of the service but can help avoid certain errors. (default: `false`)
|
||||
- `INI_FILE` – Path to the ini configuration file. (default: `./chatmail.ini`)
|
||||
- `PATH_TO_SSL` – Path to where the certificates are stored. (default: `/var/lib/acme/live/${MAIL_DOMAIN}`)
|
||||
- `ENABLE_CERTS_MONITORING` – Enable certificate monitoring if `USE_FOREIGN_CERT_MANAGER=true`. If certificates change, services will be automatically restarted. (default: `false`)
|
||||
- `CERTS_MONITORING_TIMEOUT` – Interval in seconds to check if certificates have changed. (default: `'60'`)
|
||||
- `CMDEPLOY_STAGES` – Deployment stages to run on container start. (default: `"configure,activate"`). Set to `"install,configure,activate"` to force a full reinstall.
|
||||
|
||||
You can also use any variables from the [ini configuration file](https://github.com/chatmail/relay/blob/main/chatmaild/src/chatmaild/ini/chatmail.ini.f); they must be in uppercase.
|
||||
|
||||
4. Build the Docker image:
|
||||
|
||||
```shell
|
||||
docker compose build chatmail
|
||||
```
|
||||
|
||||
5. Start docker compose and wait for the installation to finish:
|
||||
|
||||
```shell
|
||||
docker compose up -d # start service
|
||||
docker compose logs -f chatmail # view container logs, press CTRL+C to exit
|
||||
```
|
||||
|
||||
### venv creation
|
||||
The first container start takes longer because it creates the cmdeploy Python virtualenv at `/opt/chatmail/venv` (persisted on the host via volume mount). Subsequent starts reuse the existing venv. Set `RECREATE_VENV=true` in `.env` to force a rebuild if needed.
|
||||
|
||||
6. After installation is complete, you can open `https://<your_domain_name>` in your browser.
|
||||
|
||||
## Using custom files
|
||||
|
||||
When using Docker, you can apply modified configuration files to make the installation more personalized. This is usually needed for the `www/src` section so that the Chatmail landing page is customized to your taste, but it can be used for any other cases as well.
|
||||
|
||||
To replace files correctly:
|
||||
|
||||
1. Create the `./custom` directory. It is in `.gitignore`, so it won’t cause conflicts when updating.
|
||||
|
||||
```shell
|
||||
mkdir -p ./custom
|
||||
```
|
||||
|
||||
2. Modify the required file. For example, `index.md`:
|
||||
|
||||
```shell
|
||||
mkdir -p ./custom/www/src
|
||||
nano ./custom/www/src/index.md
|
||||
```
|
||||
|
||||
3. In `docker-compose.yaml`, add the file mount in the `volumes` section:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
chatmail:
|
||||
volumes:
|
||||
...
|
||||
## custom resources
|
||||
- ./custom/www/src/index.md:/opt/chatmail/www/src/index.md
|
||||
```
|
||||
|
||||
4. Restart the service:
|
||||
|
||||
```shell
|
||||
docker compose down
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
## Migrating from a bare-metal install
|
||||
|
||||
If you have an existing bare-metal Chatmail installation and want to switch to Docker:
|
||||
|
||||
1. Stop all existing services:
|
||||
|
||||
```shell
|
||||
systemctl stop postfix dovecot doveauth nginx opendkim unbound acmetool-redirector \
|
||||
filtermail filtermail-incoming chatmail-turn iroh-relay chatmail-metadata \
|
||||
lastlogin mtail
|
||||
systemctl disable postfix dovecot doveauth nginx opendkim unbound acmetool-redirector \
|
||||
filtermail filtermail-incoming chatmail-turn iroh-relay chatmail-metadata \
|
||||
lastlogin mtail
|
||||
```
|
||||
|
||||
2. Convert your existing `chatmail.ini` to the Docker `.env` format:
|
||||
|
||||
```shell
|
||||
python3 docker/cm_ini_to_env.py /usr/local/lib/chatmaild/chatmail.ini .env
|
||||
```
|
||||
|
||||
3. Copy persistent data into the `./data/` subdirectories:
|
||||
|
||||
```shell
|
||||
mkdir -p data/chatmail-dkimkeys data/chatmail-acme data/chatmail
|
||||
|
||||
# DKIM keys
|
||||
cp -a /etc/dkimkeys/* data/chatmail-dkimkeys/
|
||||
|
||||
# ACME certificates and account
|
||||
rsync -a /var/lib/acme/ data/chatmail-acme/
|
||||
|
||||
# Mail data
|
||||
rsync -a /home/ data/chatmail/
|
||||
```
|
||||
|
||||
Alternatively, you can mount `/home/vmail` directly by changing the volume in `docker-compose.yaml`:
|
||||
|
||||
```yaml
|
||||
- /home/vmail:/home/vmail
|
||||
```
|
||||
|
||||
The three `./data/` subdirectories cover all persistent state. Everything else is regenerated by the `configure` and `activate` stages on container start.
|
||||
|
||||
## Forcing a full reinstall
|
||||
|
||||
The Docker image bakes the install stage (binary downloads, package setup, chatmaild venv) into the image at build time. On container start, only the `configure` and `activate` stages run by default.
|
||||
|
||||
To force a full reinstall (e.g., after updating the source), either rebuild the image:
|
||||
|
||||
```shell
|
||||
docker compose build chatmail
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
Or override the stages at runtime without rebuilding:
|
||||
|
||||
```shell
|
||||
CMDEPLOY_STAGES="install,configure,activate" docker compose up -d
|
||||
```
|
||||
@@ -1,174 +0,0 @@
|
||||
# Известные проблемы и ограничения
|
||||
- Chatmail будет переустановлен при каждом запуске контейнера (при первом - долго, при последующих быстрее). Так устроен изначальный установщик, потому что он не был заточен под docker. В конце документации [представлено](#фиксирование-версии-chatmail) возможное решение
|
||||
- Требуется настроенный в системе cgroups v2. Работа с cgroups v1 не тестировалась.
|
||||
- Да, понятно дело что systemd использовать в контейнере костыль и надо это всё разнести на несколько сервисов, но это MVP и в первом приближении оказалось сделать проще так, чем переписывать всю систему развертывания.
|
||||
- docker образ подходит только для amd64, если нужно запустить на другой архитектуре, попробуйте изменить Dockerfile (конкретно ту часть, что ответственна за установку dovecot)
|
||||
|
||||
# Docker installation
|
||||
Здесь представлена инструкция по установке chatmail с помощью docker-compose.
|
||||
|
||||
## Предварительная настройка
|
||||
We use `chat.example.com` as the chatmail domain in the following steps.
|
||||
Please substitute it with your own domain.
|
||||
|
||||
1. Настройте начальные записи DNS. Ниже приведен пример в привычном формате файла зоны BIND с TTL 1 час (3600 секунд).
|
||||
Замените домен и IP-адреса на свои.
|
||||
|
||||
```
|
||||
chat.example.com. 3600 IN A 198.51.100.5
|
||||
chat.example.com. 3600 IN AAAA 2001:db8::5
|
||||
www.chat.example.com. 3600 IN CNAME chat.example.com.
|
||||
mta-sts.chat.example.com. 3600 IN CNAME chat.example.com.
|
||||
```
|
||||
|
||||
2. Склонируйте репозиторий на свой сервер.
|
||||
|
||||
```shell
|
||||
git clone https://github.com/chatmail/relay
|
||||
cd relay
|
||||
```
|
||||
|
||||
## Installation
|
||||
|
||||
1. Настроить параметры ядра, потому что внутри контейнера их нельзя изменить, а конкретно `fs.inotify.max_user_instances` и `fs.inotify.max_user_watches`. Для этого выполнить следующее:
|
||||
```shell
|
||||
echo "fs.inotify.max_user_instances=65536" | sudo tee -a /etc/sysctl.d/99-inotify.conf
|
||||
echo "fs.inotify.max_user_watches=65536" | sudo tee -a /etc/sysctl.d/99-inotify.conf
|
||||
sudo sysctl --system
|
||||
```
|
||||
|
||||
2. Скопировать `./docker/example.env` и переименовать в `.env`. Здесь хранятся переменные, которые используются в `docker-compose.yaml`.
|
||||
```shell
|
||||
cp ./docker/example.env .env
|
||||
```
|
||||
|
||||
3. Настроить переменные окружения в `.env` файле. Эти переменные используются в `docker-compose.yaml` файле, чтобы передавать повторяющиеся значения.
|
||||
Ниже перечислен список переменных, участвующих при развертывании:
|
||||
|
||||
- `MAIL_DOMAIN` - Доменное имя будущего сервера. (required)
|
||||
- `DEBUG_COMMANDS_ENABLED` - Выполнить debug команды перед установкой. (default: `false`)
|
||||
- `FORCE_REINIT_INI_FILE` - Пересоздавать ini файл конфигурации при запуске. (default: `false`)
|
||||
- `USE_FOREIGN_CERT_MANAGER` - Использовать сторонний менеджер сертификатов. (default: `false`)
|
||||
- `RECREATE_VENV` - Пересоздать виртуальное окружение (venv). Если выставлено `true`, то окружение будет пересоздано при запуске контейнера, из-за чего включение сервиса займет больше времени, но поможет избежать ряда ошибок. (default: `false`)
|
||||
- `INI_FILE` - путь к ini файлу конфигурации. (default: `./chatmail.ini`)
|
||||
- `PATH_TO_SSL` - Путь где располагаются сертификаты. (default: `/var/lib/acme/live/${MAIL_DOMAIN}`)
|
||||
- `ENABLE_CERTS_MONITORING` - Включить мониторинг сертификатов, если `USE_FOREIGN_CERT_MANAGER=true`. Если сертификаты изменятся, сервисы будут автоматически перезапущены. (default: `false`)
|
||||
- `CERTS_MONITORING_TIMEOUT` - Раз во сколько секунд проверять что изменились сертификаты. (default: `'60'`)
|
||||
|
||||
Также могут быть использованы все переменные из [ini файла конфигурации](https://github.com/chatmail/relay/blob/main/chatmaild/src/chatmaild/ini/chatmail.ini.f), они обязаны быть в uppercase формате.
|
||||
|
||||
4. Собрать docker образ
|
||||
```shell
|
||||
docker compose build chatmail
|
||||
```
|
||||
|
||||
5. Запустить docker compose и дождаться завершения установки
|
||||
```shell
|
||||
docker compose up -d # запуск сервиса
|
||||
docker compose logs -f chatmail # просмотр логов контейнера. Для выхода нажать CTRL+C
|
||||
```
|
||||
|
||||
6. По окончанию установки можно открыть в браузер `https://<your_domain_name>`
|
||||
|
||||
## Использование кастомных файлов
|
||||
При использовании docker есть возможность использовать измененые файлы конфигурации, чтобы сделать установку более персонализированной. Обычно это требуется для секции `www/src`, чтобы ознакомительная страница Chatmail была сделана на ваш вкус. Но также это можно использовать и для любых других случаев.
|
||||
|
||||
Для того чтобы корректно выполнить подмену файлов необходимо
|
||||
1. создать каталог `./custom`, он находится в `.gitignore`, поэтому при обновлении не вызовет конфликтов.
|
||||
```shell
|
||||
mkdir -p ./custom
|
||||
```
|
||||
|
||||
2. Изменить нужный файл. Для примера возьмем `index.md`
|
||||
```shell
|
||||
mkdir -p ./custom/www/src
|
||||
nano ./custom/www/src/index.md
|
||||
```
|
||||
|
||||
3. В `docker-compose.yaml` добавить монтирование файла с помощью секции `volumes`
|
||||
```yaml
|
||||
services:
|
||||
chatmail:
|
||||
volumes:
|
||||
...
|
||||
## custom resources
|
||||
- ./custom/www/src/index.md:/opt/chatmail/www/src/index.md
|
||||
```
|
||||
|
||||
4. Перезапустить сервис
|
||||
```shell
|
||||
docker compose down
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
## Фиксирование версии Chatmail
|
||||
> [!note]
|
||||
> Это опциональные шаги, их делать требуется только если вас не устраивает что сервис устанавливается каждый раз при запуске
|
||||
|
||||
Поскольку в текущей версии docker chatmail сервис устанавливается каждый раз при запуске контейнера, чтобы этого не происходило, можно зафиксировать версию контейнера после установки. Делается это следующим образом:
|
||||
|
||||
1. Зафиксировать текущее состояние сконфигурированного контейнера
|
||||
```shell
|
||||
docker container commit chatmail configured-chatmail:$(date +'%Y-%m-%d')
|
||||
docker image ls | grep configured-chatmail
|
||||
```
|
||||
|
||||
2. Изменить entrypoint для контейнера в `docker-compose.yaml` на
|
||||
```yaml
|
||||
services:
|
||||
chatmail:
|
||||
image: <image name from step 1>
|
||||
volumes:
|
||||
...
|
||||
## custom resources
|
||||
- ./custom/setup_chatmail_docker.sh:/setup_chatmail_docker.sh
|
||||
```
|
||||
|
||||
3. Создать файл `./custom/setup_chatmail_docker.sh` с новым файлом конфигурации
|
||||
```shell
|
||||
mkdir -p ./custom
|
||||
cat > ./custom/setup_chatmail_docker.sh << 'EOF'
|
||||
#!/bin/bash
|
||||
|
||||
set -eo pipefail
|
||||
|
||||
export ENABLE_CERTS_MONITORING="${ENABLE_CERTS_MONITORING:-true}"
|
||||
export CERTS_MONITORING_TIMEOUT="${CERTS_MONITORING_TIMEOUT:-60}"
|
||||
export PATH_TO_SSL="${PATH_TO_SSL:-/var/lib/acme/live/${MAIL_DOMAIN}}"
|
||||
|
||||
calculate_hash() {
|
||||
find "$PATH_TO_SSL" -type f -exec sha1sum {} \; | sort | sha1sum | awk '{print $1}'
|
||||
}
|
||||
|
||||
monitor_certificates() {
|
||||
if [ "$ENABLE_CERTS_MONITORING" != "true" ]; then
|
||||
echo "Certs monitoring disabled."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
current_hash=$(calculate_hash)
|
||||
previous_hash=$current_hash
|
||||
|
||||
while true; do
|
||||
current_hash=$(calculate_hash)
|
||||
if [[ "$current_hash" != "$previous_hash" ]]; then
|
||||
# TODO: add an option to restart at a specific time interval
|
||||
echo "[INFO] Certificate's folder hash was changed, reloading nginx, dovecot and postfix services."
|
||||
systemctl reload nginx.service
|
||||
systemctl reload dovecot.service
|
||||
systemctl reload postfix.service
|
||||
previous_hash=$current_hash
|
||||
fi
|
||||
sleep $CERTS_MONITORING_TIMEOUT
|
||||
done
|
||||
}
|
||||
|
||||
monitor_certificates &
|
||||
EOF
|
||||
```
|
||||
|
||||
4. Перезапустить сервис
|
||||
```shell
|
||||
docker compose down
|
||||
docker compose up -d
|
||||
```
|
||||
@@ -23,3 +23,7 @@ you can also **scan this QR code** with Delta Chat:
|
||||
🐣 **Choose** your Avatar and Name
|
||||
|
||||
💬 **Start** chatting with any Delta Chat contacts using [QR invite codes](https://delta.chat/en/help#howtoe2ee)
|
||||
|
||||
{% if config.mail_domain != "nine.testrun.org" %}
|
||||
<div class="experimental">Note: this is only a temporary development chatmail service</div>
|
||||
{% endif %}
|
||||
|
||||
Reference in New Issue
Block a user