Compare commits


23 Commits
2.3.0 ... 2.4.0

Author SHA1 Message Date
Anatoliy Sablin
9fba20475b fix #49. 2020-06-23 00:18:27 +03:00
ma1uta
9af5fce014 Merge pull request #50 from lub/patch-1
remove warning about matrix-synapse-ldap3
2020-06-22 19:54:51 +00:00
ma1uta
9843e14c1a Merge pull request #38 from NullIsNot0/NullIsNot0-make-emails-lowercase
Make all 3PID addresses lowercase to avoid duplicates
2020-06-22 19:51:42 +00:00
lub
60e6f1e23c fix typo
on -> one
2020-06-05 13:23:37 +02:00
lub
6cdbcc69c7 remove warning about matrix-synapse-ldap3
I don't think it's a fair assessment that the project is unmaintained. When you look at the issues and PRs, there is still active development happening.
2020-06-05 13:22:49 +02:00
Anatoliy Sablin
ed7c714738 Fix #41. 2020-05-31 22:56:01 +03:00
Anatoliy Sablin
a9d783192b Add multi-platform builds. Add an experimental arm64 build. 2020-05-31 22:10:32 +03:00
ma1uta
2bb5a734d1 Merge pull request #45 from teutat3s/fix_directory_lookups
Avoid including bridged user in directory lookups
2020-05-19 19:32:31 +00:00
teutat3s
9aa5c4cca9 Avoid including bridged user in directory lookups 2020-05-19 13:04:22 +02:00
Anatoliy Sablin
9c4faab5d8 Add option to log all requests and responses. 2020-05-06 23:46:34 +03:00
Anatoliy Sablin
53c4ffdc4e Add pooled database connections for postgresql. 2020-05-06 20:55:14 +03:00
Anatoliy Sablin
e4144e923a Add error logs. 2020-05-06 19:47:13 +03:00
Anatoliy Sablin
791361c10d Add a migration to fix column types in postgresql. 2020-05-06 19:39:33 +03:00
NullIsNot0
7c94bd4744 Make all 3PID addresses lowercase to avoid duplicates
These changes complement #11, where the locally saved e-mail address can be "name.surname@example.com" while the e-mail address in LDAP is "Name.Surname@example.com". They are treated as two different e-mail addresses, and the user gets two invitation notification e-mails. We change the ThreePid model's address property to convert everything to lowercase so that [be915ae](be915aed94) can do its job better.
The downside is that all medium addresses get converted to lowercase, not only e-mails. For now I can't think of any example where a medium value needs to stay case-sensitive.
2020-05-06 07:41:44 +03:00
Anatoliy Sablin
4b5eecd7e7 Enable v2 by default because Riot requires the v2 API. 2020-04-21 23:27:20 +03:00
Anatoliy Sablin
a6968fb7e9 Fix #27. 2020-04-07 22:46:14 +03:00
Anatoliy Sablin
d4853b1154 Add config for hostname. 2020-04-07 22:46:14 +03:00
ma1uta
89df4b2425 Merge pull request #33 from aaronraimist/patch-1
ma1sd implements r0.3.0 of the identity server API
2020-04-05 10:20:42 +00:00
Aaron Raimist
0f89121b98 ma1sd implements r0.3.0 of the identity server API 2020-04-04 17:16:25 -05:00
Anatoliy Sablin
8a40ca185b Fix #22. 2020-03-22 12:17:33 +03:00
Anatoliy Sablin
5baeb42623 Fix #29. 2020-03-22 12:12:47 +03:00
Anatoliy Sablin
072e5f66cb #26 Use empty pepper. 2020-02-19 23:35:59 +03:00
Anatoliy Sablin
b2f41d689b #26 fix. 2020-02-19 00:36:05 +03:00
42 changed files with 806 additions and 88 deletions

DockerfileX Normal file
View File

@@ -0,0 +1,16 @@
FROM --platform=$BUILDPLATFORM openjdk:11.0.7-jre-slim
VOLUME /etc/ma1sd
VOLUME /var/ma1sd
EXPOSE 8090
ENV JAVA_OPTS=""
ENV CONF_FILE_PATH="/etc/ma1sd/ma1sd.yaml"
ENV SIGN_KEY_PATH="/var/ma1sd/sign.key"
ENV SQLITE_DATABASE_PATH="/var/ma1sd/ma1sd.db"
CMD [ "/start.sh" ]
ADD src/docker/start.sh /start.sh
ADD src/script/ma1sd /app/ma1sd
ADD build/libs/ma1sd.jar /app/ma1sd.jar
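
As a usage sketch (not part of the commit): build the fat jar, then build and run this image. The `-f DockerfileX` flag and the `ma1sd/local` tag are assumptions for illustration; `DOCKER_BUILDKIT=1` is needed because of the `$BUILDPLATFORM` build argument.
```bash
./gradlew shadowJar                                    # produces build/libs/ma1sd.jar
DOCKER_BUILDKIT=1 docker build -f DockerfileX -t ma1sd/local .
docker run -d \
  -v /etc/ma1sd:/etc/ma1sd \
  -v /var/ma1sd:/var/ma1sd \
  -p 8090:8090 \
  ma1sd/local
```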

View File

@@ -274,6 +274,27 @@ task dockerBuild(type: Exec, dependsOn: shadowJar) {
}
}
task dockerBuildX(type: Exec, dependsOn: shadowJar) {
commandLine 'docker', 'buildx', 'build', '--load', '--platform', 'linux/arm64', '-t', dockerImageTag + '-arm64', project.rootDir
doLast {
exec {
commandLine 'docker', 'buildx', 'build', '--load', '--platform', 'linux/amd64', '-t', dockerImageTag + '-amd64', project.rootDir
}
exec {
commandLine 'docker', 'tag', dockerImageTag + '-arm64', "${dockerImageName}:latest-arm64-dev"
}
exec {
commandLine 'docker', 'tag', dockerImageTag + '-amd64', "${dockerImageName}:latest-amd64-dev"
}
exec {
commandLine 'docker', 'tag', dockerImageTag + '-amd64', "${dockerImageName}:latest-dev"
}
}
}
task dockerPush(type: Exec) {
commandLine 'docker', 'push', dockerImageTag
@@ -283,3 +304,15 @@ task dockerPush(type: Exec) {
}
}
}
task dockerPushX(type: Exec) {
commandLine 'docker', 'push', dockerImageTag
doLast {
exec {
commandLine 'docker', 'push', "${dockerImageName}:latest-dev"
}
exec {
commandLine 'docker', 'push', "${dockerImageName}:latest-amd64-dev"
}
exec {
commandLine 'docker', 'push', "${dockerImageName}:latest-arm64-dev"
}
}
}

View File

@@ -110,7 +110,7 @@ For sql provider (i.e. for the `synapseSql`):
```.yaml
synapseSql:
lookup:
query: 'select user_id as mxid, medium, address from user_threepids' # query to retrieve 3PIDs for hashes.
query: 'select user_id as mxid, medium, address from user_threepid_id_server' # query to retrieve 3PIDs for hashes.
```
For general sql provider:

View File

@@ -9,6 +9,8 @@
## Binaries
### Requirements
- JDK 1.8
- OpenJDK 11
- OpenJDK 14
### Build
```bash
@@ -70,5 +72,13 @@ Then follow the instruction in the [Debian package](install/debian.md) document.
```
Then follow the instructions in the [Docker install](install/docker.md#configure) document.
### Multi-platform builds
Provided via the experimental Docker feature [buildx](https://docs.docker.com/buildx/working-with-buildx/).
To build the arm64 and amd64 images, run:
```bash
./gradlew dockerBuildX
```
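
The matching `dockerPushX` task from the build.gradle change above can then publish the per-architecture tags (a sketch; assumes push access to the registry behind `dockerImageName`):
```bash
./gradlew dockerBuildX dockerPushX
```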
## Next steps
- [Integrate with your infrastructure](getting-started.md#integrate)

View File

@@ -82,8 +82,9 @@ See [the migration instruction](migration-to-postgresql.md) from sqlite to postg
## Logging
```yaml
logging:
root: error # default level for all loggers (apps and thirdparty libraries)
app: info # log level only for the ma1sd
root: error # default level for all loggers (apps and thirdparty libraries)
app: info # log level only for the ma1sd
requests: false # log request and response
```
Possible value: `trace`, `debug`, `info`, `warn`, `error`, `off`.
@@ -100,6 +101,11 @@ Default value for app level: `info`.
| -v | app: debug |
| -vv | app: trace |
#### WARNING
The setting `logging.requests` *MUST NOT* be used in production: it prints full, unmasked requests and responses into the log and can cause a data leak.
Use this setting only for testing and debugging.
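
For a short local debugging session the three knobs above might be combined like this (a sketch; never ship this configuration to production):
```yaml
logging:
  root: error    # keep third-party libraries quiet
  app: debug     # verbose ma1sd logs, equivalent to -v
  requests: true # dumps full unmasked requests/responses, see the WARNING above
```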
## Identity stores
See the [Identity stores](stores/README.md) for specific configuration

View File

@@ -56,8 +56,7 @@ Accounts cannot currently migrate/move from one server to another.
See a [brief explanation document](concepts.md) about Matrix and ma1sd concepts and vocabulary.
### I already use the synapse LDAP3 auth provider. Why should I care about ma1sd?
The [synapse LDAP3 auth provider](https://github.com/matrix-org/matrix-synapse-ldap3) is not longer maintained despite
saying so and only handles on specific flow: validate credentials at login.
The [synapse LDAP3 auth provider](https://github.com/matrix-org/matrix-synapse-ldap3) only handles one specific flow: validate credentials at login.
It does not:
- Auto-provision user profiles

View File

@@ -1,5 +1,5 @@
# Identity
Implementation of the [Identity Service API r0.2.0](https://matrix.org/docs/spec/identity_service/r0.2.0.html).
Implementation of the [Identity Service API r0.3.0](https://matrix.org/docs/spec/identity_service/r0.3.0.html).
- [Lookups](#lookups)
- [Invitations](#invitations)

View File

@@ -136,7 +136,7 @@ sql:
```
For the `role` query, `type` can be used to tell ma1sd how to inject the User ID in the query:
- `localpart` will extract and set only the localpart.
- `uid` will extract and set only the localpart.
- `mxid` will use the ID as-is.
On each query, the first parameter `?` is set as a string with the corresponding ID format.
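
As an illustration, here is a hypothetical role query using the `uid` type; the `user_roles` table, its column names, and the exact nesting are assumptions made for the example:
```yaml
sql:
  profile:
    role:
      type: 'uid'  # the first '?' receives only the localpart
      query: 'SELECT role FROM user_roles WHERE user_id = ?'
```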

View File

@@ -22,7 +22,7 @@
matrix:
domain: ''
v1: true # deprecated
v2: false # MSC2140 API v2. Disabled by default in order to preserve backward compatibility.
v2: true # MSC2140 API v2. Riot require enabled V2 API.
################
@@ -51,10 +51,39 @@ key:
# - /var/lib/ma1sd/store.db
#
storage:
# backend: sqlite # or postgresql
provider:
sqlite:
database: '/path/to/ma1sd.db'
# postgresql:
# # Wrap all string values with quotes to avoid yaml parsing mistakes
# database: '//localhost/ma1sd' # or full variant //192.168.1.100:5432/ma1sd_database
# username: 'ma1sd_user'
# password: 'ma1sd_password'
#
# # Pool configuration for postgresql backend.
# #######
# # Enable or disable pooling
# pool: false
#
# #######
# # Check the database connection before getting it from the pool
# testBeforeGetFromPool: false # or true
#
# #######
# # There is an internal thread which checks each of the database connections as a keep-alive mechanism. This sets the
# # number of milliseconds it sleeps between checks -- default is 30000. To disable the checking thread, set this to
# # 0 before you start using the connection source.
# checkConnectionsEveryMillis: 30000
#
# #######
# # Set the number of connections that can be unused in the available list.
# maxConnectionsFree: 5
#
# #######
# # Set the number of milliseconds that a connection can stay open before being closed. Set to 9223372036854775807 to have
# # the connections never expire.
# maxConnectionAgeMillis: 3600000
###################
# Identity Stores #
@@ -129,7 +158,7 @@ threepid:
### hash lookup for synapseSql provider.
# synapseSql:
# lookup:
# query: 'select user_id as mxid, medium, address from user_threepids' # query to retrieve 3PIDs for hashes.
# query: 'select user_id as mxid, medium, address from user_threepid_id_server' # query to retrieve 3PIDs for hashes.
# legacyRoomNames: false # use the old query to get room names.
### hash lookup for ldap provider (with example of the ldap configuration)
@@ -170,4 +199,6 @@ threepid:
#
# logging:
# root: trace # logging level
# root: error # default level for all loggers (apps and thirdparty libraries)
# app: info # log level only for the ma1sd
# requests: false # or true to dump full requests and responses
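
Pulling the commented storage sample above together, a minimal uncommented postgresql block might read as follows (a sketch; host and credentials are placeholders):
```yaml
storage:
  backend: postgresql
  provider:
    postgresql:
      database: '//localhost/ma1sd'
      username: 'ma1sd_user'
      password: 'ma1sd_password'
      pool: true
      testBeforeGetFromPool: true
```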

View File

@@ -27,7 +27,7 @@ public class ThreePid implements _ThreePid {
public ThreePid(String medium, String address) {
this.medium = medium;
this.address = address;
this.address = address.toLowerCase();
}
@Override
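
The effect of this change, as a sketch (assuming the accessor simply returns the field set above): mixed-case input collapses to a single address, so the duplicate invitations described in the commit message can no longer occur.

```java
ThreePid upper = new ThreePid("email", "Name.Surname@example.com");
ThreePid lower = new ThreePid("email", "name.surname@example.com");
// both are normalized to "name.surname@example.com"
assert upper.getAddress().equals(lower.getAddress());
```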

View File

@@ -23,11 +23,13 @@ package io.kamax.mxisd;
import io.kamax.mxisd.config.MatrixConfig;
import io.kamax.mxisd.config.MxisdConfig;
import io.kamax.mxisd.config.PolicyConfig;
import io.kamax.mxisd.config.ServerConfig;
import io.kamax.mxisd.http.undertow.handler.ApiHandler;
import io.kamax.mxisd.http.undertow.handler.AuthorizationHandler;
import io.kamax.mxisd.http.undertow.handler.CheckTermsHandler;
import io.kamax.mxisd.http.undertow.handler.InternalInfoHandler;
import io.kamax.mxisd.http.undertow.handler.OptionsHandler;
import io.kamax.mxisd.http.undertow.handler.RequestDumpingHandler;
import io.kamax.mxisd.http.undertow.handler.SaneHandler;
import io.kamax.mxisd.http.undertow.handler.as.v1.AsNotFoundHandler;
import io.kamax.mxisd.http.undertow.handler.as.v1.AsTransactionHandler;
@@ -99,9 +101,9 @@ public class HttpMxisd {
public void start() {
m.start();
HttpHandler asUserHandler = SaneHandler.around(new AsUserHandler(m.getAs()));
HttpHandler asTxnHandler = SaneHandler.around(new AsTransactionHandler(m.getAs()));
HttpHandler asNotFoundHandler = SaneHandler.around(new AsNotFoundHandler(m.getAs()));
HttpHandler asUserHandler = sane(new AsUserHandler(m.getAs()));
HttpHandler asTxnHandler = sane(new AsTransactionHandler(m.getAs()));
HttpHandler asNotFoundHandler = sane(new AsNotFoundHandler(m.getAs()));
final RoutingHandler handler = Handlers.routing()
.add("OPTIONS", "/**", sane(new OptionsHandler()))
@@ -145,7 +147,8 @@ public class HttpMxisd {
termsEndpoints(handler);
hashEndpoints(handler);
accountEndpoints(handler);
httpSrv = Undertow.builder().addHttpListener(m.getConfig().getServer().getPort(), "0.0.0.0").setHandler(handler).build();
ServerConfig serverConfig = m.getConfig().getServer();
httpSrv = Undertow.builder().addHttpListener(serverConfig.getPort(), serverConfig.getHostname()).setHandler(handler).build();
httpSrv.start();
}
@@ -265,6 +268,11 @@ public class HttpMxisd {
}
private HttpHandler sane(HttpHandler httpHandler) {
return SaneHandler.around(httpHandler);
SaneHandler handler = SaneHandler.around(httpHandler);
if (m.getConfig().getLogging().isRequests()) {
return new RequestDumpingHandler(handler);
} else {
return handler;
}
}
}

View File

@@ -27,7 +27,6 @@ import io.kamax.mxisd.auth.AuthProviders;
import io.kamax.mxisd.backend.IdentityStoreSupplier;
import io.kamax.mxisd.backend.sql.synapse.Synapse;
import io.kamax.mxisd.config.MxisdConfig;
import io.kamax.mxisd.config.PostgresqlStorageConfig;
import io.kamax.mxisd.config.StorageConfig;
import io.kamax.mxisd.crypto.CryptoFactory;
import io.kamax.mxisd.crypto.KeyManager;
@@ -68,7 +67,7 @@ public class Mxisd {
public static final String Version = StringUtils.defaultIfBlank(Mxisd.class.getPackage().getImplementationVersion(), "UNKNOWN");
public static final String Agent = Name + "/" + Version;
private MxisdConfig cfg;
private final MxisdConfig cfg;
private CloseableHttpClient httpClient;
private IRemoteIdentityServerFetcher srvFetcher;
@@ -113,17 +112,7 @@ public class Mxisd {
StorageConfig.BackendEnum storageBackend = cfg.getStorage().getBackend();
StorageConfig.Provider storageProvider = cfg.getStorage().getProvider();
switch (storageBackend) {
case sqlite:
store = new OrmLiteSqlStorage(storageBackend, storageProvider.getSqlite().getDatabase());
break;
case postgresql:
PostgresqlStorageConfig postgresql = storageProvider.getPostgresql();
store = new OrmLiteSqlStorage(storageBackend, postgresql.getDatabase(), postgresql.getUsername(), postgresql.getPassword());
break;
default:
throw new IllegalStateException("Storage provider hasn't been configured");
}
store = new OrmLiteSqlStorage(storageBackend, storageProvider);
keyMgr = CryptoFactory.getKeyManager(cfg.getKey());
signMgr = CryptoFactory.getSignatureManager(cfg, keyMgr);

View File

@@ -162,6 +162,7 @@ public class LdapAuthProvider extends LdapBackend implements AuthenticatorProvid
log.info("No match were found for {}", mxid);
return BackendAuthResult.failure();
} catch (LdapException | IOException | CursorException e) {
log.error("Unable to invoke query request: ", e);
throw new InternalServerError(e);
}
}

View File

@@ -51,7 +51,7 @@ public class SynapseQueries {
if (StringUtils.equals("sqlite", type)) {
return "select " + getUserId(type, domain) + ", displayname from profiles p where displayname like ?";
} else if (StringUtils.equals("postgresql", type)) {
return "select " + getUserId(type, domain) + ", displayname from profiles p where displayname ilike ?";
return "SELECT u.name,p.displayname FROM users u JOIN profiles p ON u.name LIKE concat('@',p.user_id,':%') WHERE u.is_guest = 0 AND u.appservice_id IS NULL AND p.displayname LIKE ?";
} else {
throw new ConfigurationException("Invalid Synapse SQL type: " + type);
}

View File

@@ -0,0 +1,5 @@
package io.kamax.mxisd.config;
public interface DatabaseStorageConfig {
String getDatabase();
}

View File

@@ -5,6 +5,7 @@ import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
public class HashingConfig {
@@ -13,18 +14,19 @@ public class HashingConfig {
private boolean enabled = false;
private int pepperLength = 20;
private RotationPolicyEnum rotationPolicy;
private HashStorageEnum hashStorageType;
private HashStorageEnum hashStorageType = HashStorageEnum.in_memory;
private String delay = "10s";
private transient long delayInSeconds = 10;
private int requests = 10;
private List<Algorithm> algorithms = new ArrayList<>();
public void build() {
public void build(MatrixConfig matrixConfig) {
if (isEnabled()) {
LOGGER.info("--- Hash configuration ---");
LOGGER.info(" Pepper length: {}", getPepperLength());
LOGGER.info(" Rotation policy: {}", getRotationPolicy());
LOGGER.info(" Hash storage type: {}", getHashStorageType());
Objects.requireNonNull(getHashStorageType(), "Storage type must be specified");
if (RotationPolicyEnum.per_seconds == getRotationPolicy()) {
setDelayInSeconds(new DurationDeserializer().deserialize(getDelay()));
LOGGER.info(" Rotation delay: {}", getDelay());
@@ -35,6 +37,9 @@ public class HashingConfig {
}
LOGGER.info(" Algorithms: {}", getAlgorithms());
} else {
if (matrixConfig.isV2()) {
LOGGER.warn("V2 enabled without the hash configuration.");
}
LOGGER.info("Hash configuration disabled, used only `none` pepper.");
}
}

View File

@@ -10,6 +10,7 @@ public class LoggingConfig {
private String root;
private String app;
private boolean requests = false;
public String getRoot() {
return root;
@@ -27,6 +28,14 @@ public class LoggingConfig {
this.app = app;
}
public boolean isRequests() {
return requests;
}
public void setRequests(boolean requests) {
this.requests = requests;
}
public void build() {
LOGGER.info("Logging config:");
if (StringUtils.isNotBlank(getRoot())) {
@@ -43,5 +52,9 @@ public class LoggingConfig {
} else {
LOGGER.info(" Logging level hasn't set, use default");
}
LOGGER.info(" Log requests: {}", isRequests());
if (isRequests()) {
LOGGER.warn(" Request dumping enabled, use this only for debug purposes, don't use it in production.");
}
}
}

View File

@@ -393,7 +393,7 @@ public class MxisdConfig {
getView().build();
getWordpress().build();
getPolicy().build();
getHashing().build();
getHashing().build(getMatrix());
return this;
}

View File

@@ -20,7 +20,7 @@
package io.kamax.mxisd.config;
public class PostgresqlStorageConfig {
public class PostgresqlStorageConfig implements DatabaseStorageConfig {
private String database;
@@ -28,6 +28,17 @@ public class PostgresqlStorageConfig {
private String password;
private boolean pool;
private int maxConnectionsFree = 1;
private long maxConnectionAgeMillis = 60 * 60 * 1000;
private long checkConnectionsEveryMillis = 30 * 1000;
private boolean testBeforeGetFromPool = false;
@Override
public String getDatabase() {
return database;
}
@@ -51,4 +62,44 @@ public class PostgresqlStorageConfig {
public void setPassword(String password) {
this.password = password;
}
public boolean isPool() {
return pool;
}
public void setPool(boolean pool) {
this.pool = pool;
}
public int getMaxConnectionsFree() {
return maxConnectionsFree;
}
public void setMaxConnectionsFree(int maxConnectionsFree) {
this.maxConnectionsFree = maxConnectionsFree;
}
public long getMaxConnectionAgeMillis() {
return maxConnectionAgeMillis;
}
public void setMaxConnectionAgeMillis(long maxConnectionAgeMillis) {
this.maxConnectionAgeMillis = maxConnectionAgeMillis;
}
public long getCheckConnectionsEveryMillis() {
return checkConnectionsEveryMillis;
}
public void setCheckConnectionsEveryMillis(long checkConnectionsEveryMillis) {
this.checkConnectionsEveryMillis = checkConnectionsEveryMillis;
}
public boolean isTestBeforeGetFromPool() {
return testBeforeGetFromPool;
}
public void setTestBeforeGetFromPool(boolean testBeforeGetFromPool) {
this.testBeforeGetFromPool = testBeforeGetFromPool;
}
}

View File

@@ -20,10 +20,11 @@
package io.kamax.mxisd.config;
public class SQLiteStorageConfig {
public class SQLiteStorageConfig implements DatabaseStorageConfig {
private String database;
@Override
public String getDatabase() {
return database;
}

View File

@@ -34,6 +34,7 @@ public class ServerConfig {
private String name;
private int port = 8090;
private String publicUrl;
private String hostname;
public String getName() {
return name;
@@ -59,6 +60,14 @@ public class ServerConfig {
this.publicUrl = publicUrl;
}
public String getHostname() {
return hostname;
}
public void setHostname(String hostname) {
this.hostname = hostname;
}
public void build() {
log.info("--- Server config ---");
@@ -75,8 +84,13 @@ public class ServerConfig {
log.warn("Public URL is not valid: {}", StringUtils.defaultIfBlank(e.getMessage(), "<no reason provided>"));
}
if (StringUtils.isBlank(getHostname())) {
setHostname("0.0.0.0");
}
log.info("Name: {}", getName());
log.info("Port: {}", getPort());
log.info("Public URL: {}", getPublicUrl());
log.info("Hostname: {}", getHostname());
}
}
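
In the YAML configuration the new field would sit under the server section (a sketch; `127.0.0.1` restricts the listener to loopback, while the default stays `0.0.0.0`):
```yaml
server:
  port: 8090
  hostname: '127.0.0.1'
```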

View File

@@ -125,7 +125,7 @@ public abstract class SqlConfig {
}
public static class Lookup {
private String query = "SELECT user_id AS mxid, medium, address from user_threepids";
private String query = "SELECT user_id AS mxid, medium, address from user_threepid_id_server";
public String getQuery() {
return query;
@@ -140,7 +140,7 @@ public abstract class SqlConfig {
private Boolean enabled;
private String type = "mxid";
private String query = "SELECT user_id AS uid FROM user_threepids WHERE medium = ? AND address = ?";
private String query = "SELECT user_id AS uid FROM user_threepid_id_server WHERE medium = ? AND address = ?";
private Map<String, String> medium = new HashMap<>();
public Boolean isEnabled() {

View File

@@ -56,7 +56,7 @@ public class SynapseSqlProviderConfig extends SqlConfig {
if (getIdentity().isEnabled() && StringUtils.isBlank(getIdentity().getType())) {
getIdentity().setType("mxid");
getIdentity().setQuery("SELECT user_id AS uid FROM user_threepids WHERE medium = ? AND address = ?");
getIdentity().setQuery("SELECT user_id AS uid FROM user_threepid_id_server WHERE medium = ? AND address = ?");
}
if (getProfile().isEnabled()) {

View File

@@ -1,6 +1,9 @@
package io.kamax.mxisd.hash;
import io.kamax.mxisd.config.HashingConfig;
import io.kamax.mxisd.hash.engine.Engine;
import io.kamax.mxisd.hash.engine.HashEngine;
import io.kamax.mxisd.hash.engine.NoneEngine;
import io.kamax.mxisd.hash.rotation.HashRotationStrategy;
import io.kamax.mxisd.hash.rotation.NoOpRotationStrategy;
import io.kamax.mxisd.hash.rotation.RotationPerRequests;
@@ -21,7 +24,7 @@ public class HashManager {
private static final Logger LOGGER = LoggerFactory.getLogger(HashManager.class);
private HashEngine hashEngine;
private Engine engine;
private HashRotationStrategy rotationStrategy;
private HashStorage hashStorage;
private HashingConfig config;
@@ -32,7 +35,7 @@ public class HashManager {
this.config = config;
this.storage = storage;
initStorage();
hashEngine = new HashEngine(providers, getHashStorage(), config);
engine = config.isEnabled() ? new HashEngine(providers, getHashStorage(), config) : new NoneEngine();
initRotationStrategy();
configured.set(true);
}
@@ -73,8 +76,8 @@ public class HashManager {
this.rotationStrategy.register(getHashEngine());
}
public HashEngine getHashEngine() {
return hashEngine;
public Engine getHashEngine() {
return engine;
}
public HashRotationStrategy getRotationStrategy() {

View File

@@ -0,0 +1,7 @@
package io.kamax.mxisd.hash.engine;
public interface Engine {
void updateHashes();
String getPepper();
}

View File

@@ -1,4 +1,4 @@
package io.kamax.mxisd.hash;
package io.kamax.mxisd.hash.engine;
import io.kamax.mxisd.config.HashingConfig;
import io.kamax.mxisd.hash.storage.HashStorage;
@@ -12,7 +12,7 @@ import org.slf4j.LoggerFactory;
import java.util.Base64;
import java.util.List;
public class HashEngine {
public class HashEngine implements Engine {
private static final Logger LOGGER = LoggerFactory.getLogger(HashEngine.class);
@@ -28,6 +28,7 @@ public class HashEngine {
this.config = config;
}
@Override
public void updateHashes() {
LOGGER.info("Start update hashes.");
synchronized (hashStorage) {
@@ -48,6 +49,7 @@ public class HashEngine {
LOGGER.info("Finish update hashes.");
}
@Override
public String getPepper() {
synchronized (hashStorage) {
return pepper;

View File

@@ -0,0 +1,19 @@
package io.kamax.mxisd.hash.engine;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class NoneEngine implements Engine {
private static final Logger LOGGER = LoggerFactory.getLogger(NoneEngine.class);
@Override
public void updateHashes() {
LOGGER.info("Nothing to update.");
}
@Override
public String getPepper() {
return "";
}
}

View File

@@ -1,12 +1,12 @@
package io.kamax.mxisd.hash.rotation;
import io.kamax.mxisd.hash.HashEngine;
import io.kamax.mxisd.hash.engine.Engine;
public interface HashRotationStrategy {
void register(HashEngine hashEngine);
void register(Engine engine);
HashEngine getHashEngine();
Engine getHashEngine();
void newRequest();

View File

@@ -1,19 +1,19 @@
package io.kamax.mxisd.hash.rotation;
import io.kamax.mxisd.hash.HashEngine;
import io.kamax.mxisd.hash.engine.Engine;
public class NoOpRotationStrategy implements HashRotationStrategy {
private HashEngine hashEngine;
private Engine engine;
@Override
public void register(HashEngine hashEngine) {
this.hashEngine = hashEngine;
public void register(Engine engine) {
this.engine = engine;
}
@Override
public HashEngine getHashEngine() {
return hashEngine;
public Engine getHashEngine() {
return engine;
}
@Override

View File

@@ -1,12 +1,12 @@
package io.kamax.mxisd.hash.rotation;
import io.kamax.mxisd.hash.HashEngine;
import io.kamax.mxisd.hash.engine.Engine;
import java.util.concurrent.atomic.AtomicInteger;
public class RotationPerRequests implements HashRotationStrategy {
private HashEngine hashEngine;
private Engine engine;
private final AtomicInteger counter = new AtomicInteger(0);
private final int barrier;
@@ -15,14 +15,14 @@ public class RotationPerRequests implements HashRotationStrategy {
}
@Override
public void register(HashEngine hashEngine) {
this.hashEngine = hashEngine;
public void register(Engine engine) {
this.engine = engine;
trigger();
}
@Override
public HashEngine getHashEngine() {
return hashEngine;
public Engine getHashEngine() {
return engine;
}
@Override

View File

@@ -1,6 +1,6 @@
package io.kamax.mxisd.hash.rotation;
import io.kamax.mxisd.hash.HashEngine;
import io.kamax.mxisd.hash.engine.Engine;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
@@ -9,7 +9,7 @@ import java.util.concurrent.TimeUnit;
public class TimeBasedRotation implements HashRotationStrategy {
private final long delay;
private HashEngine hashEngine;
private Engine engine;
private final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
public TimeBasedRotation(long delay) {
@@ -17,15 +17,15 @@ public class TimeBasedRotation implements HashRotationStrategy {
}
@Override
public void register(HashEngine hashEngine) {
this.hashEngine = hashEngine;
public void register(Engine engine) {
this.engine = engine;
Runtime.getRuntime().addShutdownHook(new Thread(executorService::shutdown));
executorService.scheduleWithFixedDelay(this::trigger, 0, delay, TimeUnit.SECONDS);
}
@Override
public HashEngine getHashEngine() {
return hashEngine;
public Engine getHashEngine() {
return engine;
}
@Override

View File

@@ -0,0 +1,5 @@
package io.kamax.mxisd.http.undertow.conduit;
public interface ConduitWithDump {
String dump();
}

View File

@@ -0,0 +1,107 @@
/*
* JBoss, Home of Professional Open Source.
* Copyright 2014 Red Hat, Inc., and individual contributors
* as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.kamax.mxisd.http.undertow.conduit;
import org.xnio.IoUtils;
import org.xnio.channels.StreamSourceChannel;
import org.xnio.conduits.AbstractStreamSinkConduit;
import org.xnio.conduits.ConduitWritableByteChannel;
import org.xnio.conduits.Conduits;
import org.xnio.conduits.StreamSinkConduit;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
/**
* Conduit that saves all the data that is written through it and can dump it to the console
* <p>
* Obviously this should not be used in production.
*
* @author Stuart Douglas
*/
public class DebuggingStreamSinkConduit extends AbstractStreamSinkConduit<StreamSinkConduit> implements ConduitWithDump {
private final List<byte[]> data = new CopyOnWriteArrayList<>();
/**
* Construct a new instance.
*
* @param next the delegate conduit to set
*/
public DebuggingStreamSinkConduit(StreamSinkConduit next) {
super(next);
}
@Override
public int write(ByteBuffer src) throws IOException {
int pos = src.position();
int res = super.write(src);
if (res > 0) {
byte[] d = new byte[res];
for (int i = 0; i < res; ++i) {
d[i] = src.get(i + pos);
}
data.add(d);
}
return res;
}
@Override
public long write(ByteBuffer[] dsts, int offs, int len) throws IOException {
for (int i = offs; i < len; ++i) {
if (dsts[i].hasRemaining()) {
return write(dsts[i]);
}
}
return 0;
}
@Override
public long transferFrom(final FileChannel src, final long position, final long count) throws IOException {
return src.transferTo(position, count, new ConduitWritableByteChannel(this));
}
@Override
public long transferFrom(final StreamSourceChannel source, final long count, final ByteBuffer throughBuffer) throws IOException {
return IoUtils.transfer(source, count, throughBuffer, new ConduitWritableByteChannel(this));
}
@Override
public int writeFinal(ByteBuffer src) throws IOException {
return Conduits.writeFinalBasic(this, src);
}
@Override
public long writeFinal(ByteBuffer[] srcs, int offset, int length) throws IOException {
return Conduits.writeFinalBasic(this, srcs, offset, length);
}
@Override
public String dump() {
StringBuilder sb = new StringBuilder();
for (byte[] datum : data) {
sb.append(new String(datum, StandardCharsets.UTF_8));
}
return sb.toString();
}
}

View File

@@ -0,0 +1,95 @@
/*
* JBoss, Home of Professional Open Source.
* Copyright 2014 Red Hat, Inc., and individual contributors
* as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.kamax.mxisd.http.undertow.conduit;
import org.xnio.IoUtils;
import org.xnio.channels.StreamSinkChannel;
import org.xnio.conduits.AbstractStreamSourceConduit;
import org.xnio.conduits.ConduitReadableByteChannel;
import org.xnio.conduits.StreamSourceConduit;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
/**
* Conduit that saves all the data that is read through it and can dump it to the console
* <p>
* Obviously this should not be used in production.
*
* @author Stuart Douglas
*/
public class DebuggingStreamSourceConduit extends AbstractStreamSourceConduit<StreamSourceConduit> implements ConduitWithDump {
private final List<byte[]> data = new CopyOnWriteArrayList<>();
/**
* Construct a new instance.
*
* @param next the delegate conduit to set
*/
public DebuggingStreamSourceConduit(StreamSourceConduit next) {
super(next);
}
public long transferTo(final long position, final long count, final FileChannel target) throws IOException {
return target.transferFrom(new ConduitReadableByteChannel(this), position, count);
}
public long transferTo(final long count, final ByteBuffer throughBuffer, final StreamSinkChannel target) throws IOException {
return IoUtils.transfer(new ConduitReadableByteChannel(this), count, throughBuffer, target);
}
@Override
public int read(ByteBuffer dst) throws IOException {
int pos = dst.position();
int res = super.read(dst);
if (res > 0) {
byte[] d = new byte[res];
for (int i = 0; i < res; ++i) {
d[i] = dst.get(i + pos);
}
data.add(d);
}
return res;
}
@Override
public long read(ByteBuffer[] dsts, int offs, int len) throws IOException {
for (int i = offs; i < len; ++i) {
if (dsts[i].hasRemaining()) {
return read(dsts[i]);
}
}
return 0;
}
@Override
public String dump() {
StringBuilder sb = new StringBuilder();
for (byte[] datum : data) {
sb.append(new String(datum, StandardCharsets.UTF_8));
}
return sb.toString();
}
}

View File

@@ -0,0 +1,23 @@
package io.kamax.mxisd.http.undertow.conduit;
import io.undertow.server.ConduitWrapper;
import io.undertow.server.HttpServerExchange;
import io.undertow.util.ConduitFactory;
import org.xnio.conduits.Conduit;
public abstract class LazyConduitWrapper<T extends Conduit> implements ConduitWrapper<T> {
private T conduit = null;
protected abstract T create(ConduitFactory<T> factory, HttpServerExchange exchange);
@Override
public T wrap(ConduitFactory<T> factory, HttpServerExchange exchange) {
conduit = create(factory, exchange);
return conduit;
}
public T get() {
return conduit;
}
}

View File

@@ -0,0 +1,186 @@
/*
* JBoss, Home of Professional Open Source.
* Copyright 2014 Red Hat, Inc., and individual contributors
* as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.kamax.mxisd.http.undertow.handler;
import io.kamax.mxisd.http.undertow.conduit.ConduitWithDump;
import io.kamax.mxisd.http.undertow.conduit.DebuggingStreamSinkConduit;
import io.kamax.mxisd.http.undertow.conduit.DebuggingStreamSourceConduit;
import io.kamax.mxisd.http.undertow.conduit.LazyConduitWrapper;
import io.undertow.security.api.SecurityContext;
import io.undertow.server.HttpHandler;
import io.undertow.server.HttpServerExchange;
import io.undertow.server.handlers.Cookie;
import io.undertow.util.ConduitFactory;
import io.undertow.util.HeaderValues;
import io.undertow.util.Headers;
import io.undertow.util.LocaleUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.xnio.conduits.StreamSinkConduit;
import org.xnio.conduits.StreamSourceConduit;
import java.util.Deque;
import java.util.Iterator;
import java.util.Map;
/**
* Handler that dumps an exchange to a log.
*
* @author Stuart Douglas
*/
public class RequestDumpingHandler implements HttpHandler {
private static final Logger LOGGER = LoggerFactory.getLogger(RequestDumpingHandler.class);
private final HttpHandler next;
public RequestDumpingHandler(HttpHandler next) {
this.next = next;
}
@Override
public void handleRequest(HttpServerExchange exchange) throws Exception {
LazyConduitWrapper<StreamSourceConduit> requestConduitWrapper = new LazyConduitWrapper<StreamSourceConduit>() {
@Override
protected StreamSourceConduit create(ConduitFactory<StreamSourceConduit> factory, HttpServerExchange exchange) {
return new DebuggingStreamSourceConduit(factory.create());
}
};
LazyConduitWrapper<StreamSinkConduit> responseConduitWrapper = new LazyConduitWrapper<StreamSinkConduit>() {
@Override
protected StreamSinkConduit create(ConduitFactory<StreamSinkConduit> factory, HttpServerExchange exchange) {
return new DebuggingStreamSinkConduit(factory.create());
}
};
exchange.addRequestWrapper(requestConduitWrapper);
exchange.addResponseWrapper(responseConduitWrapper);
final StringBuilder sb = new StringBuilder();
// Log pre-service information
final SecurityContext sc = exchange.getSecurityContext();
sb.append("\n----------------------------REQUEST---------------------------\n");
sb.append(" URI=").append(exchange.getRequestURI()).append("\n");
sb.append(" characterEncoding=").append(exchange.getRequestHeaders().get(Headers.CONTENT_ENCODING)).append("\n");
sb.append(" contentLength=").append(exchange.getRequestContentLength()).append("\n");
sb.append(" contentType=").append(exchange.getRequestHeaders().get(Headers.CONTENT_TYPE)).append("\n");
//sb.append(" contextPath=" + exchange.getContextPath());
if (sc != null) {
if (sc.isAuthenticated()) {
sb.append(" authType=").append(sc.getMechanismName()).append("\n");
sb.append(" principle=").append(sc.getAuthenticatedAccount().getPrincipal()).append("\n");
} else {
sb.append(" authType=none\n");
}
}
Map<String, Cookie> cookies = exchange.getRequestCookies();
if (cookies != null) {
for (Map.Entry<String, Cookie> entry : cookies.entrySet()) {
Cookie cookie = entry.getValue();
sb.append(" cookie=").append(cookie.getName()).append("=").append(cookie.getValue()).append("\n");
}
}
for (HeaderValues header : exchange.getRequestHeaders()) {
for (String value : header) {
sb.append(" header=").append(header.getHeaderName()).append("=").append(value).append("\n");
}
}
sb.append(" locale=").append(LocaleUtils.getLocalesFromHeader(exchange.getRequestHeaders().get(Headers.ACCEPT_LANGUAGE)))
.append("\n");
sb.append(" method=").append(exchange.getRequestMethod()).append("\n");
Map<String, Deque<String>> pnames = exchange.getQueryParameters();
for (Map.Entry<String, Deque<String>> entry : pnames.entrySet()) {
String pname = entry.getKey();
Iterator<String> pvalues = entry.getValue().iterator();
sb.append(" parameter=");
sb.append(pname);
sb.append('=');
while (pvalues.hasNext()) {
sb.append(pvalues.next());
if (pvalues.hasNext()) {
sb.append(", ");
}
}
sb.append("\n");
}
//sb.append(" pathInfo=" + exchange.getPathInfo());
sb.append(" protocol=").append(exchange.getProtocol()).append("\n");
sb.append(" queryString=").append(exchange.getQueryString()).append("\n");
sb.append(" remoteAddr=").append(exchange.getSourceAddress()).append("\n");
sb.append(" remoteHost=").append(exchange.getSourceAddress().getHostName()).append("\n");
//sb.append("requestedSessionId=" + exchange.getRequestedSessionId());
sb.append(" scheme=").append(exchange.getRequestScheme()).append("\n");
sb.append(" host=").append(exchange.getRequestHeaders().getFirst(Headers.HOST)).append("\n");
sb.append(" serverPort=").append(exchange.getDestinationAddress().getPort()).append("\n");
//sb.append(" servletPath=" + exchange.getServletPath());
sb.append(" isSecure=").append(exchange.isSecure()).append("\n");
exchange.addExchangeCompleteListener((exchange1, nextListener) -> {
StreamSourceConduit sourceConduit = requestConduitWrapper.get();
if (sourceConduit instanceof ConduitWithDump) {
ConduitWithDump conduitWithDump = (ConduitWithDump) sourceConduit;
sb.append("body=\n");
sb.append(conduitWithDump.dump()).append("\n");
}
// Log post-service information
sb.append("--------------------------RESPONSE--------------------------\n");
if (sc != null) {
if (sc.isAuthenticated()) {
sb.append(" authType=").append(sc.getMechanismName()).append("\n");
sb.append(" principle=").append(sc.getAuthenticatedAccount().getPrincipal()).append("\n");
} else {
sb.append(" authType=none\n");
}
}
sb.append(" contentLength=").append(exchange1.getResponseContentLength()).append("\n");
sb.append(" contentType=").append(exchange1.getResponseHeaders().getFirst(Headers.CONTENT_TYPE)).append("\n");
Map<String, Cookie> cookies1 = exchange1.getResponseCookies();
if (cookies1 != null) {
for (Cookie cookie : cookies1.values()) {
sb.append(" cookie=").append(cookie.getName()).append("=").append(cookie.getValue()).append("; domain=")
.append(cookie.getDomain()).append("; path=").append(cookie.getPath()).append("\n");
}
}
for (HeaderValues header : exchange1.getResponseHeaders()) {
for (String value : header) {
sb.append(" header=").append(header.getHeaderName()).append("=").append(value).append("\n");
}
}
sb.append(" status=").append(exchange1.getStatusCode()).append("\n");
StreamSinkConduit streamSinkConduit = responseConduitWrapper.get();
if (streamSinkConduit instanceof ConduitWithDump) {
ConduitWithDump conduitWithDump = (ConduitWithDump) streamSinkConduit;
sb.append("body=\n");
sb.append(conduitWithDump.dump());
}
sb.append("\n==============================================================");
nextListener.proceed();
LOGGER.info(sb.toString());
});
// Perform the exchange
next.handleRequest(exchange);
}
}

View File

@@ -31,6 +31,8 @@ public class HashDetailsHandler extends BasicHttpHandler {
for (HashingConfig.Algorithm algorithm : config.getAlgorithms()) {
algorithms.add(algorithm.name().toLowerCase());
}
} else {
algorithms.add(HashingConfig.Algorithm.none.name().toLowerCase());
}
response.add("algorithms", algorithms);
return response;
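
With hashing disabled, the v2 `/hash_details` response built by this handler would then advertise only the `none` algorithm, with the empty pepper supplied by `NoneEngine` (a sketch of the expected response shape):
```json
{
  "lookup_pepper": "",
  "algorithms": ["none"]
}
```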

View File

@@ -67,10 +67,6 @@ public class HashLookupHandler extends LookupHandler implements ApiHandler {
log.info("Got bulk lookup request from {} with client {} - Is recursive? {}",
lookupRequest.getRequester(), lookupRequest.getUserAgent(), lookupRequest.isRecursive());
if (!hashManager.getConfig().isEnabled()) {
throw new InvalidParamException();
}
if (!hashManager.getHashEngine().getPepper().equals(input.getPepper())) {
throw new InvalidPepperException();
}
@@ -89,7 +85,7 @@ public class HashLookupHandler extends LookupHandler implements ApiHandler {
}
private void noneAlgorithm(HttpServerExchange exchange, HashLookupRequest request, ClientHashLookupRequest input) throws Exception {
if (!hashManager.getConfig().getAlgorithms().contains(HashingConfig.Algorithm.none)) {
if (hashManager.getConfig().isEnabled() && !hashManager.getConfig().getAlgorithms().contains(HashingConfig.Algorithm.none)) {
throw new InvalidParamException();
}

View File

@@ -130,7 +130,9 @@ public class HomeserverFederationResolver {
return Optional.empty();
} catch (IOException e) {
throw new RuntimeException("Error while trying to lookup well-known for " + domain, e);
log.info("Error while trying to lookup well-known for " + domain);
log.trace("Error while trying to lookup well-known for " + domain, e);
return Optional.empty();
}
}

View File

@@ -23,12 +23,17 @@ package io.kamax.mxisd.storage.ormlite;
import com.j256.ormlite.dao.CloseableWrappedIterable;
import com.j256.ormlite.dao.Dao;
import com.j256.ormlite.dao.DaoManager;
import com.j256.ormlite.db.PostgresDatabaseType;
import com.j256.ormlite.db.SqliteDatabaseType;
import com.j256.ormlite.jdbc.JdbcConnectionSource;
import com.j256.ormlite.jdbc.JdbcPooledConnectionSource;
import com.j256.ormlite.stmt.QueryBuilder;
import com.j256.ormlite.support.ConnectionSource;
import com.j256.ormlite.table.TableUtils;
import io.kamax.matrix.ThreePid;
import io.kamax.mxisd.config.PolicyConfig;
import io.kamax.mxisd.config.PostgresqlStorageConfig;
import io.kamax.mxisd.config.SQLiteStorageConfig;
import io.kamax.mxisd.config.StorageConfig;
import io.kamax.mxisd.exception.ConfigurationException;
import io.kamax.mxisd.exception.InternalServerError;
@@ -81,6 +86,8 @@ public class OrmLiteSqlStorage implements IStorage {
public static class Migrations {
public static final String FIX_ACCEPTED_DAO = "2019_12_09__2254__fix_accepted_dao";
public static final String FIX_HASH_DAO_UNIQUE_INDEX = "2020_03_22__1153__fix_hash_dao_unique_index";
public static final String CHANGE_TYPE_TO_TEXT_INVITE = "2020_04_21__2338__change_type_table_invites";
}
private Dao<ThreePidInviteIO, String> invDao;
@@ -93,22 +100,25 @@ public class OrmLiteSqlStorage implements IStorage {
private Dao<ChangelogDao, String> changelogDao;
private StorageConfig.BackendEnum backend;
public OrmLiteSqlStorage(StorageConfig.BackendEnum backend, String path) {
this(backend, path, null, null);
}
public OrmLiteSqlStorage(StorageConfig.BackendEnum backend, String database, String username, String password) {
public OrmLiteSqlStorage(StorageConfig.BackendEnum backend, StorageConfig.Provider provider) {
if (backend == null) {
throw new ConfigurationException("storage.backend");
}
this.backend = backend;
if (StringUtils.isBlank(database)) {
throw new ConfigurationException("Storage destination cannot be empty");
}
withCatcher(() -> {
ConnectionSource connPool = new JdbcConnectionSource("jdbc:" + backend + ":" + database, username, password);
ConnectionSource connPool;
switch (backend) {
case postgresql:
connPool = createPostgresqlConnection(provider.getPostgresql());
break;
case sqlite:
connPool = createSqliteConnection(provider.getSqlite());
break;
default:
throw new ConfigurationException("storage.backend");
}
changelogDao = createDaoAndTable(connPool, ChangelogDao.class);
invDao = createDaoAndTable(connPool, ThreePidInviteIO.class);
expInvDao = createDaoAndTable(connPool, HistoricalThreePidInviteIO.class);
@@ -116,17 +126,57 @@ public class OrmLiteSqlStorage implements IStorage {
asTxnDao = createDaoAndTable(connPool, ASTransactionDao.class);
accountDao = createDaoAndTable(connPool, AccountDao.class);
acceptedDao = createDaoAndTable(connPool, AcceptedDao.class, true);
hashDao = createDaoAndTable(connPool, HashDao.class);
hashDao = createDaoAndTable(connPool, HashDao.class, true);
runMigration(connPool);
});
}
private ConnectionSource createSqliteConnection(SQLiteStorageConfig config) throws SQLException {
if (StringUtils.isBlank(config.getDatabase())) {
throw new ConfigurationException("Storage destination cannot be empty");
}
return new JdbcConnectionSource("jdbc:" + backend + ":" + config.getDatabase(), null, null, new SqliteDatabaseType());
}
private ConnectionSource createPostgresqlConnection(PostgresqlStorageConfig config) throws SQLException {
if (StringUtils.isBlank(config.getDatabase())) {
throw new ConfigurationException("Storage destination cannot be empty");
}
if (config.isPool()) {
LOGGER.info("Enable pooling");
JdbcPooledConnectionSource source = new JdbcPooledConnectionSource(
"jdbc:" + backend + ":" + config.getDatabase(), config.getUsername(), config.getPassword(),
new PostgresDatabaseType());
source.setMaxConnectionsFree(config.getMaxConnectionsFree());
source.setMaxConnectionAgeMillis(config.getMaxConnectionAgeMillis());
source.setCheckConnectionsEveryMillis(config.getCheckConnectionsEveryMillis());
source.setTestBeforeGet(config.isTestBeforeGetFromPool());
return source;
} else {
return new JdbcConnectionSource("jdbc:" + backend + ":" + config.getDatabase(), config.getUsername(), config.getPassword(),
new PostgresDatabaseType());
}
}
private void runMigration(ConnectionSource connPol) throws SQLException {
ChangelogDao fixAcceptedDao = changelogDao.queryForId(Migrations.FIX_ACCEPTED_DAO);
if (fixAcceptedDao == null) {
fixAcceptedDao(connPol);
changelogDao.create(new ChangelogDao(Migrations.FIX_ACCEPTED_DAO, new Date(), "Recreate the accepted table."));
}
ChangelogDao fixHashDaoUniqueIndex = changelogDao.queryForId(Migrations.FIX_HASH_DAO_UNIQUE_INDEX);
if (fixHashDaoUniqueIndex == null) {
fixHashDaoUniqueIndex(connPol);
changelogDao
.create(new ChangelogDao(Migrations.FIX_HASH_DAO_UNIQUE_INDEX, new Date(), "Add the id and migrate the unique index."));
}
ChangelogDao fixInviteTableColumnType = changelogDao.queryForId(Migrations.CHANGE_TYPE_TO_TEXT_INVITE);
if (fixInviteTableColumnType == null) {
fixInviteTableColumnType(connPol);
changelogDao.create(new ChangelogDao(Migrations.CHANGE_TYPE_TO_TEXT_INVITE, new Date(), "Modify column type to text."));
}
}
private void fixAcceptedDao(ConnectionSource connPool) throws SQLException {
@@ -135,6 +185,25 @@ public class OrmLiteSqlStorage implements IStorage {
TableUtils.createTableIfNotExists(connPool, AcceptedDao.class);
}
private void fixHashDaoUniqueIndex(ConnectionSource connPool) throws SQLException {
LOGGER.info("Migration: {}", Migrations.FIX_HASH_DAO_UNIQUE_INDEX);
TableUtils.dropTable(hashDao, true);
TableUtils.createTableIfNotExists(connPool, HashDao.class);
}
private void fixInviteTableColumnType(ConnectionSource connPool) throws SQLException {
LOGGER.info("Migration: {}", Migrations.CHANGE_TYPE_TO_TEXT_INVITE);
if (StorageConfig.BackendEnum.postgresql == backend) {
invDao.executeRawNoArgs("alter table invite_3pid alter column \"roomId\" type text");
invDao.executeRawNoArgs("alter table invite_3pid alter column id type text");
invDao.executeRawNoArgs("alter table invite_3pid alter column token type text");
invDao.executeRawNoArgs("alter table invite_3pid alter column sender type text");
invDao.executeRawNoArgs("alter table invite_3pid alter column medium type text");
invDao.executeRawNoArgs("alter table invite_3pid alter column address type text");
invDao.executeRawNoArgs("alter table invite_3pid alter column properties type text");
}
}
private <V, K> Dao<V, K> createDaoAndTable(ConnectionSource connPool, Class<V> c) throws SQLException {
return createDaoAndTable(connPool, c, false);
}

View File

@@ -6,13 +6,16 @@ import com.j256.ormlite.table.DatabaseTable;
@DatabaseTable(tableName = "hashes")
public class HashDao {
@DatabaseField(canBeNull = false, id = true)
@DatabaseField(generatedId = true)
private Long id;
@DatabaseField(canBeNull = false, uniqueCombo = true)
private String mxid;
@DatabaseField(canBeNull = false)
@DatabaseField(canBeNull = false, uniqueCombo = true)
private String medium;
@DatabaseField(canBeNull = false)
@DatabaseField(canBeNull = false, uniqueCombo = true)
private String address;
@DatabaseField(canBeNull = false)
@@ -28,6 +31,14 @@ public class HashDao {
this.hash = hash;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getMxid() {
return mxid;
}

View File

@@ -20,6 +20,7 @@
package io.kamax.mxisd.test.storage;
import io.kamax.mxisd.config.SQLiteStorageConfig;
import io.kamax.mxisd.config.StorageConfig;
import io.kamax.mxisd.storage.ormlite.OrmLiteSqlStorage;
import org.junit.Test;
@@ -30,14 +31,22 @@ public class OrmLiteSqlStorageTest {
@Test
public void insertAsTxnDuplicate() {
OrmLiteSqlStorage store = new OrmLiteSqlStorage(StorageConfig.BackendEnum.sqlite, ":memory:");
StorageConfig.Provider provider = new StorageConfig.Provider();
SQLiteStorageConfig config = new SQLiteStorageConfig();
config.setDatabase(":memory:");
provider.setSqlite(config);
OrmLiteSqlStorage store = new OrmLiteSqlStorage(StorageConfig.BackendEnum.sqlite, provider);
store.insertTransactionResult("mxisd", "1", Instant.now(), "{}");
store.insertTransactionResult("mxisd", "2", Instant.now(), "{}");
}
@Test(expected = RuntimeException.class)
public void insertAsTxnSame() {
OrmLiteSqlStorage store = new OrmLiteSqlStorage(StorageConfig.BackendEnum.sqlite, ":memory:");
StorageConfig.Provider provider = new StorageConfig.Provider();
SQLiteStorageConfig config = new SQLiteStorageConfig();
config.setDatabase(":memory:");
provider.setSqlite(config);
OrmLiteSqlStorage store = new OrmLiteSqlStorage(StorageConfig.BackendEnum.sqlite, provider);
store.insertTransactionResult("mxisd", "1", Instant.now(), "{}");
store.insertTransactionResult("mxisd", "1", Instant.now(), "{}");
}