Compare commits

13 Commits

Author | SHA1 | Date
---|---|---
 | 9c4faab5d8 |
 | 53c4ffdc4e |
 | e4144e923a |
 | 791361c10d |
 | 4b5eecd7e7 |
 | a6968fb7e9 |
 | d4853b1154 |
 | 89df4b2425 |
 | 0f89121b98 |
 | 8a40ca185b |
 | 5baeb42623 |
 | 072e5f66cb |
 | b2f41d689b |
@@ -110,7 +110,7 @@ For the sql provider (i.e. for `synapseSql`):

```yaml
synapseSql:
  lookup:
-    query: 'select user_id as mxid, medium, address from user_threepids' # query to retrieve 3PIDs for hashes
+    query: 'select user_id as mxid, medium, address from user_threepid_id_server' # query to retrieve 3PIDs for hashes
```

For the general sql provider:
@@ -82,8 +82,9 @@ See [the migration instruction](migration-to-postgresql.md) from sqlite to postgresql

## Logging

```yaml
logging:
  root: error # default level for all loggers (apps and third-party libraries)
  app: info # log level only for ma1sd
+  requests: false # log requests and responses
```

Possible values: `trace`, `debug`, `info`, `warn`, `error`, `off`.

@@ -100,6 +101,11 @@ Default value for app level: `info`.

| -v | app: debug |
| -vv | app: trace |

+#### WARNING
+
+The setting `logging.requests` *MUST NOT* be used in production: it prints the full, unmasked request and response into the log and can cause a data leak.
+This setting should be used only for testing and debugging.

## Identity stores

See the [Identity stores](stores/README.md) for specific configuration
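Note: the `requests` flag is consumed by the HTTP layer, not by the log framework. A condensed sketch of the gate, mirroring the `HttpMxisd.sane()` change later in this compare:

```java
// Every routed endpoint is wrapped; request/response dumping is added
// only when logging.requests is true (see HttpMxisd below).
private HttpHandler sane(HttpHandler httpHandler) {
    SaneHandler handler = SaneHandler.around(httpHandler);
    return m.getConfig().getLogging().isRequests()
            ? new RequestDumpingHandler(handler)
            : handler;
}
```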
@@ -1,5 +1,5 @@

# Identity

-Implementation of the [Identity Service API r0.2.0](https://matrix.org/docs/spec/identity_service/r0.2.0.html).
+Implementation of the [Identity Service API r0.3.0](https://matrix.org/docs/spec/identity_service/r0.3.0.html).

- [Lookups](#lookups)
- [Invitations](#invitations)
@@ -136,7 +136,7 @@ sql:

```
For the `role` query, `type` can be used to tell ma1sd how to inject the User ID in the query:
-- `localpart` will extract and set only the localpart.
+- `uid` will extract and set only the localpart.
- `mxid` will use the ID as-is.

On each query, the first parameter `?` is set as a string with the corresponding ID format.
@@ -22,7 +22,7 @@

matrix:
  domain: ''
  v1: true # deprecated
-  v2: false # MSC2140 API v2. Disabled by default in order to preserve backward compatibility.
+  v2: true # MSC2140 API v2. Riot requires the v2 API to be enabled.


################
@@ -51,10 +51,39 @@ key:

# - /var/lib/ma1sd/store.db
#
storage:
  # backend: sqlite # or postgresql
  provider:
    sqlite:
      database: '/path/to/ma1sd.db'

+    # postgresql:
+    #   # Wrap all string values in quotes to avoid yaml parsing mistakes
+    #   database: '//localhost/ma1sd' # or the full variant //192.168.1.100:5432/ma1sd_database
+    #   username: 'ma1sd_user'
+    #   password: 'ma1sd_password'
+    #
+    #   # Pool configuration for the postgresql backend.
+    #   #######
+    #   # Enable or disable pooling
+    #   pool: false
+    #
+    #   #######
+    #   # Check the database connection before taking it from the pool
+    #   testBeforeGetFromPool: false # or true
+    #
+    #   #######
+    #   # An internal thread checks each database connection as a keep-alive mechanism. This sets the
+    #   # number of milliseconds it sleeps between checks -- default is 30000. To disable the checking thread, set this to
+    #   # 0 before you start using the connection source.
+    #   checkConnectionsEveryMillis: 30000
+    #
+    #   #######
+    #   # Set the number of connections that can sit unused in the available list.
+    #   maxConnectionsFree: 5
+    #
+    #   #######
+    #   # Set the number of milliseconds a connection can stay open before being closed. Set to 9223372036854775807 to have
+    #   # connections never expire.
+    #   maxConnectionAgeMillis: 3600000

###################
# Identity Stores #
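These pool keys map one-to-one onto ORMLite's pooled connection source; a condensed sketch of the mapping, mirroring the `createPostgresqlConnection()` method added later in this compare (URL and credentials are placeholders):

```java
import com.j256.ormlite.db.PostgresDatabaseType;
import com.j256.ormlite.jdbc.JdbcPooledConnectionSource;
import com.j256.ormlite.support.ConnectionSource;

import java.sql.SQLException;

// Placeholder URL and credentials; real values come from storage.provider.postgresql.
static ConnectionSource pooledSource() throws SQLException {
    JdbcPooledConnectionSource source = new JdbcPooledConnectionSource(
            "jdbc:postgresql://localhost/ma1sd", "ma1sd_user", "ma1sd_password",
            new PostgresDatabaseType());
    source.setMaxConnectionsFree(5);                // maxConnectionsFree
    source.setMaxConnectionAgeMillis(3_600_000L);   // maxConnectionAgeMillis
    source.setCheckConnectionsEveryMillis(30_000L); // checkConnectionsEveryMillis
    source.setTestBeforeGet(false);                 // testBeforeGetFromPool
    return source;
}
```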
@@ -129,7 +158,7 @@ threepid:

### hash lookup for the synapseSql provider
# synapseSql:
#   lookup:
-#     query: 'select user_id as mxid, medium, address from user_threepids' # query to retrieve 3PIDs for hashes
+#     query: 'select user_id as mxid, medium, address from user_threepid_id_server' # query to retrieve 3PIDs for hashes
#   legacyRoomNames: false # use the old query to get room names

### hash lookup for the ldap provider (with an example of the ldap configuration)
@@ -170,4 +199,6 @@ threepid:

#

# logging:
-#   root: trace # logging level
+#   root: error # default level for all loggers (apps and third-party libraries)
+#   app: info # log level only for ma1sd
+#   requests: false # or true to dump full requests and responses
@@ -23,11 +23,13 @@ package io.kamax.mxisd;

import io.kamax.mxisd.config.MatrixConfig;
import io.kamax.mxisd.config.MxisdConfig;
import io.kamax.mxisd.config.PolicyConfig;
+import io.kamax.mxisd.config.ServerConfig;
import io.kamax.mxisd.http.undertow.handler.ApiHandler;
import io.kamax.mxisd.http.undertow.handler.AuthorizationHandler;
import io.kamax.mxisd.http.undertow.handler.CheckTermsHandler;
import io.kamax.mxisd.http.undertow.handler.InternalInfoHandler;
import io.kamax.mxisd.http.undertow.handler.OptionsHandler;
+import io.kamax.mxisd.http.undertow.handler.RequestDumpingHandler;
import io.kamax.mxisd.http.undertow.handler.SaneHandler;
import io.kamax.mxisd.http.undertow.handler.as.v1.AsNotFoundHandler;
import io.kamax.mxisd.http.undertow.handler.as.v1.AsTransactionHandler;
@@ -99,9 +101,9 @@ public class HttpMxisd {

    public void start() {
        m.start();

-        HttpHandler asUserHandler = SaneHandler.around(new AsUserHandler(m.getAs()));
-        HttpHandler asTxnHandler = SaneHandler.around(new AsTransactionHandler(m.getAs()));
-        HttpHandler asNotFoundHandler = SaneHandler.around(new AsNotFoundHandler(m.getAs()));
+        HttpHandler asUserHandler = sane(new AsUserHandler(m.getAs()));
+        HttpHandler asTxnHandler = sane(new AsTransactionHandler(m.getAs()));
+        HttpHandler asNotFoundHandler = sane(new AsNotFoundHandler(m.getAs()));

        final RoutingHandler handler = Handlers.routing()
                .add("OPTIONS", "/**", sane(new OptionsHandler()))
@@ -145,7 +147,8 @@ public class HttpMxisd {

        termsEndpoints(handler);
        hashEndpoints(handler);
        accountEndpoints(handler);
-        httpSrv = Undertow.builder().addHttpListener(m.getConfig().getServer().getPort(), "0.0.0.0").setHandler(handler).build();
+        ServerConfig serverConfig = m.getConfig().getServer();
+        httpSrv = Undertow.builder().addHttpListener(serverConfig.getPort(), serverConfig.getHostname()).setHandler(handler).build();

        httpSrv.start();
    }
@@ -265,6 +268,11 @@ public class HttpMxisd {
    }

    private HttpHandler sane(HttpHandler httpHandler) {
-        return SaneHandler.around(httpHandler);
+        SaneHandler handler = SaneHandler.around(httpHandler);
+        if (m.getConfig().getLogging().isRequests()) {
+            return new RequestDumpingHandler(handler);
+        } else {
+            return handler;
+        }
    }
}
@@ -27,7 +27,6 @@ import io.kamax.mxisd.auth.AuthProviders;

import io.kamax.mxisd.backend.IdentityStoreSupplier;
import io.kamax.mxisd.backend.sql.synapse.Synapse;
import io.kamax.mxisd.config.MxisdConfig;
-import io.kamax.mxisd.config.PostgresqlStorageConfig;
import io.kamax.mxisd.config.StorageConfig;
import io.kamax.mxisd.crypto.CryptoFactory;
import io.kamax.mxisd.crypto.KeyManager;
@@ -68,7 +67,7 @@ public class Mxisd {

    public static final String Version = StringUtils.defaultIfBlank(Mxisd.class.getPackage().getImplementationVersion(), "UNKNOWN");
    public static final String Agent = Name + "/" + Version;

-    private MxisdConfig cfg;
+    private final MxisdConfig cfg;

    private CloseableHttpClient httpClient;
    private IRemoteIdentityServerFetcher srvFetcher;
@@ -113,17 +112,7 @@ public class Mxisd {

        StorageConfig.BackendEnum storageBackend = cfg.getStorage().getBackend();
        StorageConfig.Provider storageProvider = cfg.getStorage().getProvider();
-        switch (storageBackend) {
-            case sqlite:
-                store = new OrmLiteSqlStorage(storageBackend, storageProvider.getSqlite().getDatabase());
-                break;
-            case postgresql:
-                PostgresqlStorageConfig postgresql = storageProvider.getPostgresql();
-                store = new OrmLiteSqlStorage(storageBackend, postgresql.getDatabase(), postgresql.getUsername(), postgresql.getPassword());
-                break;
-            default:
-                throw new IllegalStateException("Storage provider hasn't been configured");
-        }
+        store = new OrmLiteSqlStorage(storageBackend, storageProvider);

        keyMgr = CryptoFactory.getKeyManager(cfg.getKey());
        signMgr = CryptoFactory.getSignatureManager(cfg, keyMgr);
@@ -162,6 +162,7 @@ public class LdapAuthProvider extends LdapBackend implements AuthenticatorProvider {

            log.info("No match were found for {}", mxid);
            return BackendAuthResult.failure();
        } catch (LdapException | IOException | CursorException e) {
+            log.error("Unable to invoke query request: ", e);
            throw new InternalServerError(e);
        }
    }
@@ -0,0 +1,5 @@
package io.kamax.mxisd.config;

public interface DatabaseStorageConfig {
    String getDatabase();
}
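Both storage providers now implement this interface, so code that only needs the destination string can treat them uniformly. A minimal sketch of what that buys (the helper name is hypothetical; the URL concatenation matches what `OrmLiteSqlStorage` does later in this compare):

```java
// Hypothetical helper: builds the JDBC URL from either backend's config.
static String jdbcUrl(StorageConfig.BackendEnum backend, DatabaseStorageConfig config) {
    return "jdbc:" + backend + ":" + config.getDatabase();
}

// Works for either provider:
// jdbcUrl(StorageConfig.BackendEnum.sqlite, provider.getSqlite());
// jdbcUrl(StorageConfig.BackendEnum.postgresql, provider.getPostgresql());
```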
@@ -19,7 +19,7 @@ public class HashingConfig {

    private int requests = 10;
    private List<Algorithm> algorithms = new ArrayList<>();

-    public void build() {
+    public void build(MatrixConfig matrixConfig) {
        if (isEnabled()) {
            LOGGER.info("--- Hash configuration ---");
            LOGGER.info(" Pepper length: {}", getPepperLength());

@@ -35,6 +35,9 @@ public class HashingConfig {
            }
            LOGGER.info(" Algorithms: {}", getAlgorithms());
        } else {
+            if (matrixConfig.isV2()) {
+                LOGGER.warn("V2 enabled without the hash configuration.");
+            }
            LOGGER.info("Hash configuration disabled, used only `none` pepper.");
        }
    }
@@ -10,6 +10,7 @@ public class LoggingConfig {

    private String root;
    private String app;
+    private boolean requests = false;

    public String getRoot() {
        return root;

@@ -27,6 +28,14 @@ public class LoggingConfig {
        this.app = app;
    }

+    public boolean isRequests() {
+        return requests;
+    }
+
+    public void setRequests(boolean requests) {
+        this.requests = requests;
+    }
+
    public void build() {
        LOGGER.info("Logging config:");
        if (StringUtils.isNotBlank(getRoot())) {

@@ -43,5 +52,9 @@ public class LoggingConfig {
        } else {
            LOGGER.info(" Logging level hasn't set, use default");
        }
+        LOGGER.info(" Log requests: {}", isRequests());
+        if (isRequests()) {
+            LOGGER.warn(" Request dumping enabled, use this only to debug purposes, don't use it in the production.");
+        }
    }
}
@@ -393,7 +393,7 @@ public class MxisdConfig {

        getView().build();
        getWordpress().build();
        getPolicy().build();
-        getHashing().build();
+        getHashing().build(getMatrix());

        return this;
    }
@@ -20,7 +20,7 @@

package io.kamax.mxisd.config;

-public class PostgresqlStorageConfig {
+public class PostgresqlStorageConfig implements DatabaseStorageConfig {

    private String database;

@@ -28,6 +28,17 @@ public class PostgresqlStorageConfig {

    private String password;

+    private boolean pool;
+
+    private int maxConnectionsFree = 1;
+
+    private long maxConnectionAgeMillis = 60 * 60 * 1000;
+
+    private long checkConnectionsEveryMillis = 30 * 1000;
+
+    private boolean testBeforeGetFromPool = false;
+
+    @Override
    public String getDatabase() {
        return database;
    }

@@ -51,4 +62,44 @@ public class PostgresqlStorageConfig {

    public void setPassword(String password) {
        this.password = password;
    }
+
+    public boolean isPool() {
+        return pool;
+    }
+
+    public void setPool(boolean pool) {
+        this.pool = pool;
+    }
+
+    public int getMaxConnectionsFree() {
+        return maxConnectionsFree;
+    }
+
+    public void setMaxConnectionsFree(int maxConnectionsFree) {
+        this.maxConnectionsFree = maxConnectionsFree;
+    }
+
+    public long getMaxConnectionAgeMillis() {
+        return maxConnectionAgeMillis;
+    }
+
+    public void setMaxConnectionAgeMillis(long maxConnectionAgeMillis) {
+        this.maxConnectionAgeMillis = maxConnectionAgeMillis;
+    }
+
+    public long getCheckConnectionsEveryMillis() {
+        return checkConnectionsEveryMillis;
+    }
+
+    public void setCheckConnectionsEveryMillis(long checkConnectionsEveryMillis) {
+        this.checkConnectionsEveryMillis = checkConnectionsEveryMillis;
+    }
+
+    public boolean isTestBeforeGetFromPool() {
+        return testBeforeGetFromPool;
+    }
+
+    public void setTestBeforeGetFromPool(boolean testBeforeGetFromPool) {
+        this.testBeforeGetFromPool = testBeforeGetFromPool;
+    }
}
@@ -20,10 +20,11 @@

package io.kamax.mxisd.config;

-public class SQLiteStorageConfig {
+public class SQLiteStorageConfig implements DatabaseStorageConfig {

    private String database;

+    @Override
    public String getDatabase() {
        return database;
    }
@@ -34,6 +34,7 @@ public class ServerConfig {

    private String name;
    private int port = 8090;
    private String publicUrl;
+    private String hostname;

    public String getName() {
        return name;

@@ -59,6 +60,14 @@ public class ServerConfig {
        this.publicUrl = publicUrl;
    }

+    public String getHostname() {
+        return hostname;
+    }
+
+    public void setHostname(String hostname) {
+        this.hostname = hostname;
+    }
+
    public void build() {
        log.info("--- Server config ---");

@@ -75,8 +84,13 @@ public class ServerConfig {
            log.warn("Public URL is not valid: {}", StringUtils.defaultIfBlank(e.getMessage(), "<no reason provided>"));
        }

+        if (StringUtils.isBlank(getHostname())) {
+            setHostname("0.0.0.0");
+        }
+
        log.info("Name: {}", getName());
        log.info("Port: {}", getPort());
        log.info("Public URL: {}", getPublicUrl());
+        log.info("Hostname: {}", getHostname());
    }
}
@@ -125,7 +125,7 @@ public abstract class SqlConfig {
    }

    public static class Lookup {

-        private String query = "SELECT user_id AS mxid, medium, address from user_threepids";
+        private String query = "SELECT user_id AS mxid, medium, address from user_threepid_id_server";

        public String getQuery() {
            return query;

@@ -140,7 +140,7 @@ public abstract class SqlConfig {

        private Boolean enabled;
        private String type = "mxid";
-        private String query = "SELECT user_id AS uid FROM user_threepids WHERE medium = ? AND address = ?";
+        private String query = "SELECT user_id AS uid FROM user_threepid_id_server WHERE medium = ? AND address = ?";
        private Map<String, String> medium = new HashMap<>();

        public Boolean isEnabled() {
@@ -56,7 +56,7 @@ public class SynapseSqlProviderConfig extends SqlConfig {

        if (getIdentity().isEnabled() && StringUtils.isBlank(getIdentity().getType())) {
            getIdentity().setType("mxid");
-            getIdentity().setQuery("SELECT user_id AS uid FROM user_threepids WHERE medium = ? AND address = ?");
+            getIdentity().setQuery("SELECT user_id AS uid FROM user_threepid_id_server WHERE medium = ? AND address = ?");
        }

        if (getProfile().isEnabled()) {
@@ -1,6 +1,9 @@
package io.kamax.mxisd.hash;

import io.kamax.mxisd.config.HashingConfig;
+import io.kamax.mxisd.hash.engine.Engine;
+import io.kamax.mxisd.hash.engine.HashEngine;
+import io.kamax.mxisd.hash.engine.NoneEngine;
import io.kamax.mxisd.hash.rotation.HashRotationStrategy;
import io.kamax.mxisd.hash.rotation.NoOpRotationStrategy;
import io.kamax.mxisd.hash.rotation.RotationPerRequests;

@@ -21,7 +24,7 @@ public class HashManager {

    private static final Logger LOGGER = LoggerFactory.getLogger(HashManager.class);

-    private HashEngine hashEngine;
+    private Engine engine;
    private HashRotationStrategy rotationStrategy;
    private HashStorage hashStorage;
    private HashingConfig config;

@@ -32,7 +35,7 @@ public class HashManager {
        this.config = config;
        this.storage = storage;
        initStorage();
-        hashEngine = new HashEngine(providers, getHashStorage(), config);
+        engine = config.isEnabled() ? new HashEngine(providers, getHashStorage(), config) : new NoneEngine();
        initRotationStrategy();
        configured.set(true);
    }

@@ -73,8 +76,8 @@ public class HashManager {
        this.rotationStrategy.register(getHashEngine());
    }

-    public HashEngine getHashEngine() {
-        return hashEngine;
+    public Engine getHashEngine() {
+        return engine;
    }

    public HashRotationStrategy getRotationStrategy() {
src/main/java/io/kamax/mxisd/hash/engine/Engine.java (new file)
@@ -0,0 +1,7 @@
package io.kamax.mxisd.hash.engine;

public interface Engine {
    void updateHashes();

    String getPepper();
}
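The `Engine`/`NoneEngine` split is a null-object refactor: `HashManager` no longer needs to special-case a disabled hash configuration. A minimal sketch of the selection logic, mirroring the `HashManager` hunk above:

```java
// When hashing is disabled, the no-op engine stands in for the real one,
// so callers can program against the same interface unconditionally.
Engine engine = config.isEnabled()
        ? new HashEngine(providers, getHashStorage(), config)
        : new NoneEngine();

engine.updateHashes();              // logs "Nothing to update." for NoneEngine
String pepper = engine.getPepper(); // "" for NoneEngine
```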
@@ -1,4 +1,4 @@
-package io.kamax.mxisd.hash;
+package io.kamax.mxisd.hash.engine;

import io.kamax.mxisd.config.HashingConfig;
import io.kamax.mxisd.hash.storage.HashStorage;

@@ -12,7 +12,7 @@ import org.slf4j.LoggerFactory;

import java.util.Base64;
import java.util.List;

-public class HashEngine {
+public class HashEngine implements Engine {

    private static final Logger LOGGER = LoggerFactory.getLogger(HashEngine.class);

@@ -28,6 +28,7 @@ public class HashEngine {
        this.config = config;
    }

+    @Override
    public void updateHashes() {
        LOGGER.info("Start update hashes.");
        synchronized (hashStorage) {

@@ -48,6 +49,7 @@ public class HashEngine {
        LOGGER.info("Finish update hashes.");
    }

+    @Override
    public String getPepper() {
        synchronized (hashStorage) {
            return pepper;
src/main/java/io/kamax/mxisd/hash/engine/NoneEngine.java (new file)
@@ -0,0 +1,19 @@
package io.kamax.mxisd.hash.engine;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class NoneEngine implements Engine {

    private static final Logger LOGGER = LoggerFactory.getLogger(NoneEngine.class);

    @Override
    public void updateHashes() {
        LOGGER.info("Nothing to update.");
    }

    @Override
    public String getPepper() {
        return "";
    }
}
@@ -1,12 +1,12 @@
package io.kamax.mxisd.hash.rotation;

-import io.kamax.mxisd.hash.HashEngine;
+import io.kamax.mxisd.hash.engine.Engine;

public interface HashRotationStrategy {

-    void register(HashEngine hashEngine);
+    void register(Engine engine);

-    HashEngine getHashEngine();
+    Engine getHashEngine();

    void newRequest();
@@ -1,19 +1,19 @@
package io.kamax.mxisd.hash.rotation;

-import io.kamax.mxisd.hash.HashEngine;
+import io.kamax.mxisd.hash.engine.Engine;

public class NoOpRotationStrategy implements HashRotationStrategy {

-    private HashEngine hashEngine;
+    private Engine engine;

    @Override
-    public void register(HashEngine hashEngine) {
-        this.hashEngine = hashEngine;
+    public void register(Engine engine) {
+        this.engine = engine;
    }

    @Override
-    public HashEngine getHashEngine() {
-        return hashEngine;
+    public Engine getHashEngine() {
+        return engine;
    }

    @Override
@@ -1,12 +1,12 @@
package io.kamax.mxisd.hash.rotation;

-import io.kamax.mxisd.hash.HashEngine;
+import io.kamax.mxisd.hash.engine.Engine;

import java.util.concurrent.atomic.AtomicInteger;

public class RotationPerRequests implements HashRotationStrategy {

-    private HashEngine hashEngine;
+    private Engine engine;
    private final AtomicInteger counter = new AtomicInteger(0);
    private final int barrier;

@@ -15,14 +15,14 @@ public class RotationPerRequests implements HashRotationStrategy {
    }

    @Override
-    public void register(HashEngine hashEngine) {
-        this.hashEngine = hashEngine;
+    public void register(Engine engine) {
+        this.engine = engine;
        trigger();
    }

    @Override
-    public HashEngine getHashEngine() {
-        return hashEngine;
+    public Engine getHashEngine() {
+        return engine;
    }

    @Override
@@ -1,6 +1,6 @@
package io.kamax.mxisd.hash.rotation;

-import io.kamax.mxisd.hash.HashEngine;
+import io.kamax.mxisd.hash.engine.Engine;

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

@@ -9,7 +9,7 @@ import java.util.concurrent.TimeUnit;

public class TimeBasedRotation implements HashRotationStrategy {

    private final long delay;
-    private HashEngine hashEngine;
+    private Engine engine;
    private final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();

    public TimeBasedRotation(long delay) {

@@ -17,15 +17,15 @@ public class TimeBasedRotation implements HashRotationStrategy {
    }

    @Override
-    public void register(HashEngine hashEngine) {
-        this.hashEngine = hashEngine;
+    public void register(Engine engine) {
+        this.engine = engine;
        Runtime.getRuntime().addShutdownHook(new Thread(executorService::shutdown));
        executorService.scheduleWithFixedDelay(this::trigger, 0, delay, TimeUnit.SECONDS);
    }

    @Override
-    public HashEngine getHashEngine() {
-        return hashEngine;
+    public Engine getHashEngine() {
+        return engine;
    }

    @Override
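All rotation strategies now program against the `Engine` interface, so they work identically with the real and the no-op engine. Wiring one up, in sketch form (per the hunk above, `register()` schedules the strategy's `trigger()` at a fixed delay, which drives the registered engine):

```java
// Sketch: drive any Engine with a time-based rotation (delay in seconds).
Engine engine = config.isEnabled()
        ? new HashEngine(providers, hashStorage, config)
        : new NoneEngine();

HashRotationStrategy strategy = new TimeBasedRotation(3600L); // rotate hourly
strategy.register(engine); // schedules periodic updates via trigger()
```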
@@ -0,0 +1,5 @@
package io.kamax.mxisd.http.undertow.conduit;

public interface ConduitWithDump {
    String dump();
}
@@ -0,0 +1,107 @@
/*
 * JBoss, Home of Professional Open Source.
 * Copyright 2014 Red Hat, Inc., and individual contributors
 * as indicated by the @author tags.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.kamax.mxisd.http.undertow.conduit;

import org.xnio.IoUtils;
import org.xnio.channels.StreamSourceChannel;
import org.xnio.conduits.AbstractStreamSinkConduit;
import org.xnio.conduits.ConduitWritableByteChannel;
import org.xnio.conduits.Conduits;
import org.xnio.conduits.StreamSinkConduit;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

/**
 * Conduit that saves all the data that is written through it and can dump it to the console
 * <p>
 * Obviously this should not be used in production.
 *
 * @author Stuart Douglas
 */
public class DebuggingStreamSinkConduit extends AbstractStreamSinkConduit<StreamSinkConduit> implements ConduitWithDump {

    private final List<byte[]> data = new CopyOnWriteArrayList<>();

    /**
     * Construct a new instance.
     *
     * @param next the delegate conduit to set
     */
    public DebuggingStreamSinkConduit(StreamSinkConduit next) {
        super(next);
    }

    @Override
    public int write(ByteBuffer src) throws IOException {
        int pos = src.position();
        int res = super.write(src);
        if (res > 0) {
            byte[] d = new byte[res];
            for (int i = 0; i < res; ++i) {
                d[i] = src.get(i + pos);
            }
            data.add(d);
        }
        return res;
    }

    @Override
    public long write(ByteBuffer[] dsts, int offs, int len) throws IOException {
        for (int i = offs; i < len; ++i) {
            if (dsts[i].hasRemaining()) {
                return write(dsts[i]);
            }
        }
        return 0;
    }

    @Override
    public long transferFrom(final FileChannel src, final long position, final long count) throws IOException {
        return src.transferTo(position, count, new ConduitWritableByteChannel(this));
    }

    @Override
    public long transferFrom(final StreamSourceChannel source, final long count, final ByteBuffer throughBuffer) throws IOException {
        return IoUtils.transfer(source, count, throughBuffer, new ConduitWritableByteChannel(this));
    }

    @Override
    public int writeFinal(ByteBuffer src) throws IOException {
        return Conduits.writeFinalBasic(this, src);
    }

    @Override
    public long writeFinal(ByteBuffer[] srcs, int offset, int length) throws IOException {
        return Conduits.writeFinalBasic(this, srcs, offset, length);
    }

    @Override
    public String dump() {
        StringBuilder sb = new StringBuilder();
        for (byte[] datum : data) {
            sb.append(new String(datum, StandardCharsets.UTF_8));
        }
        return sb.toString();
    }
}
@@ -0,0 +1,95 @@
/*
 * JBoss, Home of Professional Open Source.
 * Copyright 2014 Red Hat, Inc., and individual contributors
 * as indicated by the @author tags.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.kamax.mxisd.http.undertow.conduit;

import org.xnio.IoUtils;
import org.xnio.channels.StreamSinkChannel;
import org.xnio.conduits.AbstractStreamSourceConduit;
import org.xnio.conduits.ConduitReadableByteChannel;
import org.xnio.conduits.StreamSourceConduit;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

/**
 * Conduit that saves all the data that is read through it and can dump it to the console
 * <p>
 * Obviously this should not be used in production.
 *
 * @author Stuart Douglas
 */
public class DebuggingStreamSourceConduit extends AbstractStreamSourceConduit<StreamSourceConduit> implements ConduitWithDump {

    private final List<byte[]> data = new CopyOnWriteArrayList<>();

    /**
     * Construct a new instance.
     *
     * @param next the delegate conduit to set
     */
    public DebuggingStreamSourceConduit(StreamSourceConduit next) {
        super(next);
    }

    public long transferTo(final long position, final long count, final FileChannel target) throws IOException {
        return target.transferFrom(new ConduitReadableByteChannel(this), position, count);
    }

    public long transferTo(final long count, final ByteBuffer throughBuffer, final StreamSinkChannel target) throws IOException {
        return IoUtils.transfer(new ConduitReadableByteChannel(this), count, throughBuffer, target);
    }

    @Override
    public int read(ByteBuffer dst) throws IOException {
        int pos = dst.position();
        int res = super.read(dst);
        if (res > 0) {
            byte[] d = new byte[res];
            for (int i = 0; i < res; ++i) {
                d[i] = dst.get(i + pos);
            }
            data.add(d);
        }
        return res;
    }

    @Override
    public long read(ByteBuffer[] dsts, int offs, int len) throws IOException {
        for (int i = offs; i < len; ++i) {
            if (dsts[i].hasRemaining()) {
                return read(dsts[i]);
            }
        }
        return 0;
    }

    @Override
    public String dump() {
        StringBuilder sb = new StringBuilder();
        for (byte[] datum : data) {
            sb.append(new String(datum, StandardCharsets.UTF_8));
        }
        return sb.toString();
    }
}
@@ -0,0 +1,23 @@
package io.kamax.mxisd.http.undertow.conduit;

import io.undertow.server.ConduitWrapper;
import io.undertow.server.HttpServerExchange;
import io.undertow.util.ConduitFactory;
import org.xnio.conduits.Conduit;

public abstract class LazyConduitWrapper<T extends Conduit> implements ConduitWrapper<T> {

    private T conduit = null;

    protected abstract T create(ConduitFactory<T> factory, HttpServerExchange exchange);

    @Override
    public T wrap(ConduitFactory<T> factory, HttpServerExchange exchange) {
        conduit = create(factory, exchange);
        return conduit;
    }

    public T get() {
        return conduit;
    }
}
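`LazyConduitWrapper` exists so the handler can keep a reference to the conduit it installs and read the captured bytes back once the exchange completes. In sketch form, mirroring how `RequestDumpingHandler` below uses it:

```java
// Wrap the request stream, attach it to the exchange, dump after completion.
LazyConduitWrapper<StreamSourceConduit> wrapper = new LazyConduitWrapper<StreamSourceConduit>() {
    @Override
    protected StreamSourceConduit create(ConduitFactory<StreamSourceConduit> factory, HttpServerExchange exchange) {
        return new DebuggingStreamSourceConduit(factory.create());
    }
};
exchange.addRequestWrapper(wrapper);

exchange.addExchangeCompleteListener((ex, nextListener) -> {
    StreamSourceConduit conduit = wrapper.get();
    if (conduit instanceof ConduitWithDump) {
        String body = ((ConduitWithDump) conduit).dump(); // captured request body
    }
    nextListener.proceed();
});
```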
@@ -0,0 +1,186 @@
/*
 * JBoss, Home of Professional Open Source.
 * Copyright 2014 Red Hat, Inc., and individual contributors
 * as indicated by the @author tags.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.kamax.mxisd.http.undertow.handler;

import io.kamax.mxisd.http.undertow.conduit.ConduitWithDump;
import io.kamax.mxisd.http.undertow.conduit.DebuggingStreamSinkConduit;
import io.kamax.mxisd.http.undertow.conduit.DebuggingStreamSourceConduit;
import io.kamax.mxisd.http.undertow.conduit.LazyConduitWrapper;
import io.undertow.security.api.SecurityContext;
import io.undertow.server.HttpHandler;
import io.undertow.server.HttpServerExchange;
import io.undertow.server.handlers.Cookie;
import io.undertow.util.ConduitFactory;
import io.undertow.util.HeaderValues;
import io.undertow.util.Headers;
import io.undertow.util.LocaleUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.xnio.conduits.StreamSinkConduit;
import org.xnio.conduits.StreamSourceConduit;

import java.util.Deque;
import java.util.Iterator;
import java.util.Map;

/**
 * Handler that dumps an exchange to a log.
 *
 * @author Stuart Douglas
 */
public class RequestDumpingHandler implements HttpHandler {

    private static final Logger LOGGER = LoggerFactory.getLogger(RequestDumpingHandler.class);

    private final HttpHandler next;

    public RequestDumpingHandler(HttpHandler next) {
        this.next = next;
    }

    @Override
    public void handleRequest(HttpServerExchange exchange) throws Exception {
        LazyConduitWrapper<StreamSourceConduit> requestConduitWrapper = new LazyConduitWrapper<StreamSourceConduit>() {
            @Override
            protected StreamSourceConduit create(ConduitFactory<StreamSourceConduit> factory, HttpServerExchange exchange) {
                return new DebuggingStreamSourceConduit(factory.create());
            }
        };
        LazyConduitWrapper<StreamSinkConduit> responseConduitWrapper = new LazyConduitWrapper<StreamSinkConduit>() {
            @Override
            protected StreamSinkConduit create(ConduitFactory<StreamSinkConduit> factory, HttpServerExchange exchange) {
                return new DebuggingStreamSinkConduit(factory.create());
            }
        };
        exchange.addRequestWrapper(requestConduitWrapper);
        exchange.addResponseWrapper(responseConduitWrapper);

        final StringBuilder sb = new StringBuilder();
        // Log pre-service information
        final SecurityContext sc = exchange.getSecurityContext();
        sb.append("\n----------------------------REQUEST---------------------------\n");
        sb.append(" URI=").append(exchange.getRequestURI()).append("\n");
        sb.append(" characterEncoding=").append(exchange.getRequestHeaders().get(Headers.CONTENT_ENCODING)).append("\n");
        sb.append(" contentLength=").append(exchange.getRequestContentLength()).append("\n");
        sb.append(" contentType=").append(exchange.getRequestHeaders().get(Headers.CONTENT_TYPE)).append("\n");
        //sb.append(" contextPath=" + exchange.getContextPath());
        if (sc != null) {
            if (sc.isAuthenticated()) {
                sb.append(" authType=").append(sc.getMechanismName()).append("\n");
                sb.append(" principle=").append(sc.getAuthenticatedAccount().getPrincipal()).append("\n");
            } else {
                sb.append(" authType=none\n");
            }
        }

        Map<String, Cookie> cookies = exchange.getRequestCookies();
        if (cookies != null) {
            for (Map.Entry<String, Cookie> entry : cookies.entrySet()) {
                Cookie cookie = entry.getValue();
                sb.append(" cookie=").append(cookie.getName()).append("=").append(cookie.getValue()).append("\n");
            }
        }
        for (HeaderValues header : exchange.getRequestHeaders()) {
            for (String value : header) {
                sb.append(" header=").append(header.getHeaderName()).append("=").append(value).append("\n");
            }
        }
        sb.append(" locale=").append(LocaleUtils.getLocalesFromHeader(exchange.getRequestHeaders().get(Headers.ACCEPT_LANGUAGE)))
                .append("\n");
        sb.append(" method=").append(exchange.getRequestMethod()).append("\n");
        Map<String, Deque<String>> pnames = exchange.getQueryParameters();
        for (Map.Entry<String, Deque<String>> entry : pnames.entrySet()) {
            String pname = entry.getKey();
            Iterator<String> pvalues = entry.getValue().iterator();
            sb.append(" parameter=");
            sb.append(pname);
            sb.append('=');
            while (pvalues.hasNext()) {
                sb.append(pvalues.next());
                if (pvalues.hasNext()) {
                    sb.append(", ");
                }
            }
            sb.append("\n");
        }
        //sb.append(" pathInfo=" + exchange.getPathInfo());
        sb.append(" protocol=").append(exchange.getProtocol()).append("\n");
        sb.append(" queryString=").append(exchange.getQueryString()).append("\n");
        sb.append(" remoteAddr=").append(exchange.getSourceAddress()).append("\n");
        sb.append(" remoteHost=").append(exchange.getSourceAddress().getHostName()).append("\n");
        //sb.append("requestedSessionId=" + exchange.getRequestedSessionId());
        sb.append(" scheme=").append(exchange.getRequestScheme()).append("\n");
        sb.append(" host=").append(exchange.getRequestHeaders().getFirst(Headers.HOST)).append("\n");
        sb.append(" serverPort=").append(exchange.getDestinationAddress().getPort()).append("\n");
        //sb.append(" servletPath=" + exchange.getServletPath());
        sb.append(" isSecure=").append(exchange.isSecure()).append("\n");

        exchange.addExchangeCompleteListener((exchange1, nextListener) -> {
            StreamSourceConduit sourceConduit = requestConduitWrapper.get();
            if (sourceConduit instanceof ConduitWithDump) {
                ConduitWithDump conduitWithDump = (ConduitWithDump) sourceConduit;
                sb.append("body=\n");
                sb.append(conduitWithDump.dump()).append("\n");
            }

            // Log post-service information
            sb.append("--------------------------RESPONSE--------------------------\n");
            if (sc != null) {
                if (sc.isAuthenticated()) {
                    sb.append(" authType=").append(sc.getMechanismName()).append("\n");
                    sb.append(" principle=").append(sc.getAuthenticatedAccount().getPrincipal()).append("\n");
                } else {
                    sb.append(" authType=none\n");
                }
            }
            sb.append(" contentLength=").append(exchange1.getResponseContentLength()).append("\n");
            sb.append(" contentType=").append(exchange1.getResponseHeaders().getFirst(Headers.CONTENT_TYPE)).append("\n");
            Map<String, Cookie> cookies1 = exchange1.getResponseCookies();
            if (cookies1 != null) {
                for (Cookie cookie : cookies1.values()) {
                    sb.append(" cookie=").append(cookie.getName()).append("=").append(cookie.getValue()).append("; domain=")
                            .append(cookie.getDomain()).append("; path=").append(cookie.getPath()).append("\n");
                }
            }
            for (HeaderValues header : exchange1.getResponseHeaders()) {
                for (String value : header) {
                    sb.append(" header=").append(header.getHeaderName()).append("=").append(value).append("\n");
                }
            }
            sb.append(" status=").append(exchange1.getStatusCode()).append("\n");
            StreamSinkConduit streamSinkConduit = responseConduitWrapper.get();
            if (streamSinkConduit instanceof ConduitWithDump) {
                ConduitWithDump conduitWithDump = (ConduitWithDump) streamSinkConduit;
                sb.append("body=\n");
                sb.append(conduitWithDump.dump());
            }

            sb.append("\n==============================================================");

            nextListener.proceed();
            LOGGER.info(sb.toString());
        });

        // Perform the exchange
        next.handleRequest(exchange);
    }
}
@@ -31,6 +31,8 @@ public class HashDetailsHandler extends BasicHttpHandler {

            for (HashingConfig.Algorithm algorithm : config.getAlgorithms()) {
                algorithms.add(algorithm.name().toLowerCase());
            }
+        } else {
+            algorithms.add(HashingConfig.Algorithm.none.name().toLowerCase());
        }
        response.add("algorithms", algorithms);
        return response;
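Net effect: with hashing disabled, `/hash_details` still answers instead of failing, advertising only the `none` algorithm (and, via `NoneEngine`, an empty pepper). A sketch of the disabled-hashing branch, assuming the gson types the handler appears to use:

```java
// Sketch: the disabled branch produces {"algorithms": ["none"]};
// NoneEngine.getPepper() supplies "" for the lookup pepper.
JsonArray algorithms = new JsonArray();
algorithms.add(HashingConfig.Algorithm.none.name().toLowerCase()); // "none"
response.add("algorithms", algorithms);
```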
@@ -67,10 +67,6 @@ public class HashLookupHandler extends LookupHandler implements ApiHandler {

        log.info("Got bulk lookup request from {} with client {} - Is recursive? {}",
                lookupRequest.getRequester(), lookupRequest.getUserAgent(), lookupRequest.isRecursive());

-        if (!hashManager.getConfig().isEnabled()) {
-            throw new InvalidParamException();
-        }
-
        if (!hashManager.getHashEngine().getPepper().equals(input.getPepper())) {
            throw new InvalidPepperException();
        }

@@ -89,7 +85,7 @@ public class HashLookupHandler extends LookupHandler implements ApiHandler {
    }

    private void noneAlgorithm(HttpServerExchange exchange, HashLookupRequest request, ClientHashLookupRequest input) throws Exception {
-        if (!hashManager.getConfig().getAlgorithms().contains(HashingConfig.Algorithm.none)) {
+        if (hashManager.getConfig().isEnabled() && !hashManager.getConfig().getAlgorithms().contains(HashingConfig.Algorithm.none)) {
            throw new InvalidParamException();
        }
@@ -23,12 +23,17 @@ package io.kamax.mxisd.storage.ormlite;

import com.j256.ormlite.dao.CloseableWrappedIterable;
import com.j256.ormlite.dao.Dao;
import com.j256.ormlite.dao.DaoManager;
+import com.j256.ormlite.db.PostgresDatabaseType;
+import com.j256.ormlite.db.SqliteDatabaseType;
import com.j256.ormlite.jdbc.JdbcConnectionSource;
+import com.j256.ormlite.jdbc.JdbcPooledConnectionSource;
import com.j256.ormlite.stmt.QueryBuilder;
import com.j256.ormlite.support.ConnectionSource;
import com.j256.ormlite.table.TableUtils;
import io.kamax.matrix.ThreePid;
import io.kamax.mxisd.config.PolicyConfig;
+import io.kamax.mxisd.config.PostgresqlStorageConfig;
+import io.kamax.mxisd.config.SQLiteStorageConfig;
import io.kamax.mxisd.config.StorageConfig;
import io.kamax.mxisd.exception.ConfigurationException;
import io.kamax.mxisd.exception.InternalServerError;

@@ -81,6 +86,8 @@ public class OrmLiteSqlStorage implements IStorage {

    public static class Migrations {
        public static final String FIX_ACCEPTED_DAO = "2019_12_09__2254__fix_accepted_dao";
+        public static final String FIX_HASH_DAO_UNIQUE_INDEX = "2020_03_22__1153__fix_hash_dao_unique_index";
+        public static final String CHANGE_TYPE_TO_TEXT_INVITE = "2020_04_21__2338__change_type_table_invites";
    }

    private Dao<ThreePidInviteIO, String> invDao;

@@ -93,22 +100,25 @@ public class OrmLiteSqlStorage implements IStorage {

    private Dao<ChangelogDao, String> changelogDao;
    private StorageConfig.BackendEnum backend;

-    public OrmLiteSqlStorage(StorageConfig.BackendEnum backend, String path) {
-        this(backend, path, null, null);
-    }
-
-    public OrmLiteSqlStorage(StorageConfig.BackendEnum backend, String database, String username, String password) {
+    public OrmLiteSqlStorage(StorageConfig.BackendEnum backend, StorageConfig.Provider provider) {
        if (backend == null) {
            throw new ConfigurationException("storage.backend");
        }
        this.backend = backend;

-        if (StringUtils.isBlank(database)) {
-            throw new ConfigurationException("Storage destination cannot be empty");
-        }
-
        withCatcher(() -> {
-            ConnectionSource connPool = new JdbcConnectionSource("jdbc:" + backend + ":" + database, username, password);
+            ConnectionSource connPool;
+            switch (backend) {
+                case postgresql:
+                    connPool = createPostgresqlConnection(provider.getPostgresql());
+                    break;
+                case sqlite:
+                    connPool = createSqliteConnection(provider.getSqlite());
+                    break;
+                default:
+                    throw new ConfigurationException("storage.backend");
+            }
+
            changelogDao = createDaoAndTable(connPool, ChangelogDao.class);
            invDao = createDaoAndTable(connPool, ThreePidInviteIO.class);
            expInvDao = createDaoAndTable(connPool, HistoricalThreePidInviteIO.class);

@@ -116,17 +126,57 @@ public class OrmLiteSqlStorage implements IStorage {
            asTxnDao = createDaoAndTable(connPool, ASTransactionDao.class);
            accountDao = createDaoAndTable(connPool, AccountDao.class);
            acceptedDao = createDaoAndTable(connPool, AcceptedDao.class, true);
-            hashDao = createDaoAndTable(connPool, HashDao.class);
+            hashDao = createDaoAndTable(connPool, HashDao.class, true);
+            runMigration(connPool);
        });
    }

+    private ConnectionSource createSqliteConnection(SQLiteStorageConfig config) throws SQLException {
+        if (StringUtils.isBlank(config.getDatabase())) {
+            throw new ConfigurationException("Storage destination cannot be empty");
+        }
+
+        return new JdbcConnectionSource("jdbc:" + backend + ":" + config.getDatabase(), null, null, new SqliteDatabaseType());
+    }
+
+    private ConnectionSource createPostgresqlConnection(PostgresqlStorageConfig config) throws SQLException {
+        if (StringUtils.isBlank(config.getDatabase())) {
+            throw new ConfigurationException("Storage destination cannot be empty");
+        }
+
+        if (config.isPool()) {
+            LOGGER.info("Enable pooling");
+            JdbcPooledConnectionSource source = new JdbcPooledConnectionSource(
+                    "jdbc:" + backend + ":" + config.getDatabase(), config.getUsername(), config.getPassword(),
+                    new PostgresDatabaseType());
+            source.setMaxConnectionsFree(config.getMaxConnectionsFree());
+            source.setMaxConnectionAgeMillis(config.getMaxConnectionAgeMillis());
+            source.setCheckConnectionsEveryMillis(config.getCheckConnectionsEveryMillis());
+            source.setTestBeforeGet(config.isTestBeforeGetFromPool());
+            return source;
+        } else {
+            return new JdbcConnectionSource("jdbc:" + backend + ":" + config.getDatabase(), config.getUsername(), config.getPassword(),
+                    new PostgresDatabaseType());
+        }
+    }
+
+    private void runMigration(ConnectionSource connPool) throws SQLException {
+        ChangelogDao fixAcceptedDao = changelogDao.queryForId(Migrations.FIX_ACCEPTED_DAO);
+        if (fixAcceptedDao == null) {
+            fixAcceptedDao(connPool);
+            changelogDao.create(new ChangelogDao(Migrations.FIX_ACCEPTED_DAO, new Date(), "Recreate the accepted table."));
+        }
+        ChangelogDao fixHashDaoUniqueIndex = changelogDao.queryForId(Migrations.FIX_HASH_DAO_UNIQUE_INDEX);
+        if (fixHashDaoUniqueIndex == null) {
+            fixHashDaoUniqueIndex(connPool);
+            changelogDao
+                    .create(new ChangelogDao(Migrations.FIX_HASH_DAO_UNIQUE_INDEX, new Date(), "Add the id and migrate the unique index."));
+        }
+        ChangelogDao fixInviteTableColumnType = changelogDao.queryForId(Migrations.CHANGE_TYPE_TO_TEXT_INVITE);
+        if (fixInviteTableColumnType == null) {
+            fixInviteTableColumnType(connPool);
+            changelogDao.create(new ChangelogDao(Migrations.CHANGE_TYPE_TO_TEXT_INVITE, new Date(), "Modify column type to text."));
+        }
+    }
+
    private void fixAcceptedDao(ConnectionSource connPool) throws SQLException {

@@ -135,6 +185,25 @@ public class OrmLiteSqlStorage implements IStorage {
        TableUtils.createTableIfNotExists(connPool, AcceptedDao.class);
    }

+    private void fixHashDaoUniqueIndex(ConnectionSource connPool) throws SQLException {
+        LOGGER.info("Migration: {}", Migrations.FIX_HASH_DAO_UNIQUE_INDEX);
+        TableUtils.dropTable(hashDao, true);
+        TableUtils.createTableIfNotExists(connPool, HashDao.class);
+    }
+
+    private void fixInviteTableColumnType(ConnectionSource connPool) throws SQLException {
+        LOGGER.info("Migration: {}", Migrations.CHANGE_TYPE_TO_TEXT_INVITE);
+        if (StorageConfig.BackendEnum.postgresql == backend) {
+            invDao.executeRawNoArgs("alter table invite_3pid alter column \"roomId\" type text");
+            invDao.executeRawNoArgs("alter table invite_3pid alter column id type text");
+            invDao.executeRawNoArgs("alter table invite_3pid alter column token type text");
+            invDao.executeRawNoArgs("alter table invite_3pid alter column sender type text");
+            invDao.executeRawNoArgs("alter table invite_3pid alter column medium type text");
+            invDao.executeRawNoArgs("alter table invite_3pid alter column address type text");
+            invDao.executeRawNoArgs("alter table invite_3pid alter column properties type text");
+        }
+    }
+
    private <V, K> Dao<V, K> createDaoAndTable(ConnectionSource connPool, Class<V> c) throws SQLException {
        return createDaoAndTable(connPool, c, false);
    }
@@ -6,13 +6,16 @@ import com.j256.ormlite.table.DatabaseTable;

@DatabaseTable(tableName = "hashes")
public class HashDao {

-    @DatabaseField(canBeNull = false, id = true)
+    @DatabaseField(generatedId = true)
+    private Long id;
+
+    @DatabaseField(canBeNull = false, uniqueCombo = true)
    private String mxid;

-    @DatabaseField(canBeNull = false)
+    @DatabaseField(canBeNull = false, uniqueCombo = true)
    private String medium;

-    @DatabaseField(canBeNull = false)
+    @DatabaseField(canBeNull = false, uniqueCombo = true)
    private String address;

    @DatabaseField(canBeNull = false)

@@ -28,6 +31,14 @@ public class HashDao {
        this.hash = hash;
    }

+    public Long getId() {
+        return id;
+    }
+
+    public void setId(Long id) {
+        this.id = id;
+    }
+
    public String getMxid() {
        return mxid;
    }
@@ -20,6 +20,7 @@

package io.kamax.mxisd.test.storage;

+import io.kamax.mxisd.config.SQLiteStorageConfig;
import io.kamax.mxisd.config.StorageConfig;
import io.kamax.mxisd.storage.ormlite.OrmLiteSqlStorage;
import org.junit.Test;

@@ -30,14 +31,22 @@ public class OrmLiteSqlStorageTest {

    @Test
    public void insertAsTxnDuplicate() {
-        OrmLiteSqlStorage store = new OrmLiteSqlStorage(StorageConfig.BackendEnum.sqlite, ":memory:");
+        StorageConfig.Provider provider = new StorageConfig.Provider();
+        SQLiteStorageConfig config = new SQLiteStorageConfig();
+        config.setDatabase(":memory:");
+        provider.setSqlite(config);
+        OrmLiteSqlStorage store = new OrmLiteSqlStorage(StorageConfig.BackendEnum.sqlite, provider);
        store.insertTransactionResult("mxisd", "1", Instant.now(), "{}");
        store.insertTransactionResult("mxisd", "2", Instant.now(), "{}");
    }

    @Test(expected = RuntimeException.class)
    public void insertAsTxnSame() {
-        OrmLiteSqlStorage store = new OrmLiteSqlStorage(StorageConfig.BackendEnum.sqlite, ":memory:");
+        StorageConfig.Provider provider = new StorageConfig.Provider();
+        SQLiteStorageConfig config = new SQLiteStorageConfig();
+        config.setDatabase(":memory:");
+        provider.setSqlite(config);
+        OrmLiteSqlStorage store = new OrmLiteSqlStorage(StorageConfig.BackendEnum.sqlite, provider);
        store.insertTransactionResult("mxisd", "1", Instant.now(), "{}");
        store.insertTransactionResult("mxisd", "1", Instant.now(), "{}");
    }