Use of herddb.model.TransactionContext in project herddb by diennea.
The class JoinOp, method execute.
@Override
public StatementExecutionResult execute(TableSpaceManager tableSpaceManager, TransactionContext transactionContext, StatementEvaluationContext context, boolean lockRequired, boolean forWrite) throws StatementExecutionException {
ScanResult resLeft = (ScanResult) left.execute(tableSpaceManager, transactionContext, context, lockRequired, forWrite);
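// the left branch may have opened a transaction implicitly: propagate its transaction id to the execution of the right branch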
transactionContext = new TransactionContext(resLeft.transactionId);
ScanResult resRight = (ScanResult) right.execute(tableSpaceManager, transactionContext, context, lockRequired, forWrite);
final long resTransactionId = resRight.transactionId;
DataScanner leftScanner = resLeft.dataScanner;
final String[] fieldNamesFromLeft = leftScanner.getFieldNames();
final String[] fieldNamesFromRight = resRight.dataScanner.getFieldNames();
Function2<DataAccessor, DataAccessor, DataAccessor> resultProjection = resultProjection(fieldNamesFromLeft, fieldNamesFromRight);
final Predicate2 predicate;
DataScanner rightScanner = resRight.dataScanner;
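// non-equi conditions cannot be handled by a merge join; for a hash join they become a residual predicate evaluated on the joined row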
if (nonEquiConditions != null && !nonEquiConditions.isEmpty()) {
if (mergeJoin) {
throw new IllegalStateException("Unexpected nonEquiConditions " + nonEquiConditions + " for merge join");
}
predicate = (Predicate2) (Object t0, Object t1) -> {
DataAccessor da0 = (DataAccessor) t0;
DataAccessor da1 = (DataAccessor) t1;
DataAccessor currentRow = resultProjection.apply(da0, da1);
for (CompiledSQLExpression exp : nonEquiConditions) {
Object result = exp.evaluate(currentRow, context);
boolean asBoolean = SQLRecordPredicateFunctions.toBoolean(result);
if (!asBoolean) {
return false;
}
}
return true;
};
} else {
predicate = null;
}
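// a hash join rewinds its inputs, so a scanner that does not support rewind is materialized into a record set first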
if (!mergeJoin && !leftScanner.isRewindSupported()) {
try {
MaterializedRecordSet recordSet = tableSpaceManager.getDbmanager().getRecordSetFactory().createRecordSet(leftScanner.getFieldNames(), leftScanner.getSchema());
leftScanner.forEach(d -> {
recordSet.add(d);
});
recordSet.writeFinished();
SimpleDataScanner materialized = new SimpleDataScanner(leftScanner.getTransaction(), recordSet);
leftScanner.close();
leftScanner = materialized;
} catch (DataScannerException err) {
throw new StatementExecutionException(err);
}
}
if (!mergeJoin && !rightScanner.isRewindSupported()) {
try {
MaterializedRecordSet recordSet = tableSpaceManager.getDbmanager().getRecordSetFactory().createRecordSet(rightScanner.getFieldNames(), rightScanner.getSchema());
rightScanner.forEach(d -> {
recordSet.add(d);
});
recordSet.writeFinished();
SimpleDataScanner materialized = new SimpleDataScanner(rightScanner.getTransaction(), recordSet);
rightScanner.close();
rightScanner = materialized;
} catch (DataScannerException err) {
throw new StatementExecutionException(err);
}
}
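// run the join: mergeJoin on the non-rewindable enumerables, or hashJoin (with the optional residual predicate) on rewind-on-close enumerables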
Enumerable<DataAccessor> result = mergeJoin
        ? EnumerableDefaults.mergeJoin(leftScanner.createNonRewindableEnumerable(), rightScanner.createNonRewindableEnumerable(), JoinKey.keyExtractor(leftKeys), JoinKey.keyExtractor(rightKeys), resultProjection, generateNullsOnLeft, generateNullsOnRight)
        : EnumerableDefaults.hashJoin(leftScanner.createRewindOnCloseEnumerable(), rightScanner.createRewindOnCloseEnumerable(), JoinKey.keyExtractor(leftKeys), JoinKey.keyExtractor(rightKeys), resultProjection, null, generateNullsOnLeft, generateNullsOnRight, predicate);
EnumerableDataScanner joinedScanner = new EnumerableDataScanner(rightScanner.getTransaction(), fieldNames, columns, result, leftScanner, rightScanner);
return new ScanResult(resTransactionId, joinedScanner);
}
Use of herddb.model.TransactionContext in project herddb by diennea.
The class ReplaceOp, method executeAsync.
@Override
public CompletableFuture<StatementExecutionResult> executeAsync(TableSpaceManager tableSpaceManager, TransactionContext transactionContext, StatementEvaluationContext context, boolean lockRequired, boolean forWrite) {
StatementExecutionResult input = this.input.execute(tableSpaceManager, transactionContext, context, true, true);
ScanResult downstreamScanResult = (ScanResult) input;
TableManager tableManager = (TableManager) tableSpaceManager.getTableManager(tableName);
final Table table = tableManager.getTable();
TableContext tableContext = tableManager.buildTableContext();
long transactionId = transactionContext.transactionId;
List<DMLStatement> statements = new ArrayList<>();
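// REPLACE is executed as a DELETE followed by an INSERT for every row produced by the input scanner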
try (DataScanner inputScanner = downstreamScanResult.dataScanner) {
while (inputScanner.hasNext()) {
DataAccessor row = inputScanner.next();
long transactionIdFromScanner = inputScanner.getTransactionId();
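// the scanner may be running inside an automatically opened transaction: adopt its transaction id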
if (transactionIdFromScanner > 0 && transactionIdFromScanner != transactionId) {
transactionId = transactionIdFromScanner;
transactionContext = new TransactionContext(transactionId);
}
Bytes oldKey = RecordSerializer.serializeIndexKey(row, table, table.getPrimaryKey());
byte[] oldValue = RecordSerializer.serializeValueRaw(row.toMap(), table, -1);
DMLStatement deleteStatement = new DeleteStatement(tableSpace, tableName, oldKey, null);
statements.add(deleteStatement);
Record oldRecord = new Record(oldKey, Bytes.from_array(oldValue));
byte[] newKey = this.keyFunction.computeNewValue(oldRecord, context, tableContext);
byte[] newValue = this.recordFunction.computeNewValue(oldRecord, context, tableContext);
DMLStatement insertStatement = new InsertStatement(tableSpace, tableName, new Record(Bytes.from_array(newKey), Bytes.from_array(newValue))).setReturnValues(returnValues);
statements.add(insertStatement);
}
if (statements.isEmpty()) {
return CompletableFuture.completedFuture(new DMLStatementExecutionResult(transactionId, 0, null, null));
}
if (statements.size() == 1) {
return tableSpaceManager.executeStatementAsync(statements.get(0), context, transactionContext);
}
CompletableFuture<StatementExecutionResult> finalResult = new CompletableFuture<>();
AtomicInteger updateCounts = new AtomicInteger();
AtomicReference<Bytes> lastKey = new AtomicReference<>();
AtomicReference<Bytes> lastNewValue = new AtomicReference<>();
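// execute the statements one at a time, carrying the transaction id of each result into the TransactionContext of the next statement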
class ComputeNext implements BiConsumer<StatementExecutionResult, Throwable> {
int current;
public ComputeNext(int current) {
this.current = current;
}
@Override
public void accept(StatementExecutionResult res, Throwable error) {
if (error != null) {
finalResult.completeExceptionally(error);
return;
}
DMLStatementExecutionResult dml = (DMLStatementExecutionResult) res;
updateCounts.addAndGet(dml.getUpdateCount());
if (returnValues) {
lastKey.set(dml.getKey());
lastNewValue.set(dml.getNewvalue());
}
long newTransactionId = res.transactionId;
if (current == statements.size()) {
DMLStatementExecutionResult finalDMLResult = new DMLStatementExecutionResult(newTransactionId, updateCounts.get(), lastKey.get(), lastNewValue.get());
finalResult.complete(finalDMLResult);
return;
}
DMLStatement nextStatement = statements.get(current);
TransactionContext transactionContext = new TransactionContext(newTransactionId);
CompletableFuture<StatementExecutionResult> nextPromise = tableSpaceManager.executeStatementAsync(nextStatement, context, transactionContext);
nextPromise.whenComplete(new ComputeNext(current + 1));
}
}
DMLStatement firstStatement = statements.get(0);
tableSpaceManager.executeStatementAsync(firstStatement, context, transactionContext).whenComplete(new ComputeNext(1));
return finalResult;
} catch (DataScannerException err) {
throw new StatementExecutionException(err);
}
}
Use of herddb.model.TransactionContext in project herddb by diennea.
The class UpdateOp, method executeAsync.
@Override
public CompletableFuture<StatementExecutionResult> executeAsync(TableSpaceManager tableSpaceManager, TransactionContext transactionContext, StatementEvaluationContext context, boolean lockRequired, boolean forWrite) {
StatementExecutionResult input = this.input.execute(tableSpaceManager, transactionContext, context, true, true);
ScanResult downstreamScanResult = (ScanResult) input;
final Table table = tableSpaceManager.getTableManager(tableName).getTable();
long transactionId = transactionContext.transactionId;
List<DMLStatement> statements = new ArrayList<>();
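// translate every row selected by the input scanner into an UpdateStatement keyed by its primary key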
try (DataScanner inputScanner = downstreamScanResult.dataScanner) {
while (inputScanner.hasNext()) {
DataAccessor row = inputScanner.next();
long transactionIdFromScanner = inputScanner.getTransactionId();
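// adopt the transaction id reported by the scanner if it differs from the current one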
if (transactionIdFromScanner > 0 && transactionIdFromScanner != transactionId) {
transactionId = transactionIdFromScanner;
transactionContext = new TransactionContext(transactionId);
}
Bytes key = RecordSerializer.serializeIndexKey(row, table, table.getPrimaryKey());
DMLStatement updateStatement = new UpdateStatement(tableSpace, tableName, new ConstValueRecordFunction(key), this.recordFunction, null).setReturnValues(returnValues);
statements.add(updateStatement);
}
if (statements.isEmpty()) {
return CompletableFuture.completedFuture(new DMLStatementExecutionResult(transactionId, 0, null, null));
}
if (statements.size() == 1) {
return tableSpaceManager.executeStatementAsync(statements.get(0), context, transactionContext);
}
CompletableFuture<StatementExecutionResult> finalResult = new CompletableFuture<>();
AtomicInteger updateCounts = new AtomicInteger();
AtomicReference<Bytes> lastKey = new AtomicReference<>();
AtomicReference<Bytes> lastNewValue = new AtomicReference<>();
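// same sequential chaining as in ReplaceOp: each completion schedules the next statement with an updated TransactionContext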
class ComputeNext implements BiConsumer<StatementExecutionResult, Throwable> {
int current;
public ComputeNext(int current) {
this.current = current;
}
@Override
public void accept(StatementExecutionResult res, Throwable error) {
if (error != null) {
finalResult.completeExceptionally(error);
return;
}
DMLStatementExecutionResult dml = (DMLStatementExecutionResult) res;
updateCounts.addAndGet(dml.getUpdateCount());
if (returnValues) {
lastKey.set(dml.getKey());
lastNewValue.set(dml.getNewvalue());
}
long newTransactionId = res.transactionId;
if (current == statements.size()) {
DMLStatementExecutionResult finalDMLResult = new DMLStatementExecutionResult(newTransactionId, updateCounts.get(), lastKey.get(), lastNewValue.get());
finalResult.complete(finalDMLResult);
return;
}
DMLStatement nextStatement = statements.get(current);
TransactionContext transactionContext = new TransactionContext(newTransactionId);
CompletableFuture<StatementExecutionResult> nextPromise = tableSpaceManager.executeStatementAsync(nextStatement, context, transactionContext);
nextPromise.whenComplete(new ComputeNext(current + 1));
}
}
DMLStatement firstStatement = statements.get(0);
tableSpaceManager.executeStatementAsync(firstStatement, context, transactionContext).whenComplete(new ComputeNext(1));
return finalResult;
} catch (DataScannerException err) {
throw new StatementExecutionException(err);
}
}
Use of herddb.model.TransactionContext in project herddb by diennea.
The class BackupRestoreTest, method test_backup_restore_recover_from_tx_log_with_transaction.
@Test
public void test_backup_restore_recover_from_tx_log_with_transaction() throws Exception {
ServerConfiguration serverconfig_1 = newServerConfigurationWithAutoPort(folder.newFolder().toPath());
serverconfig_1.set(ServerConfiguration.PROPERTY_NODEID, "server1");
serverconfig_1.set(ServerConfiguration.PROPERTY_MODE, ServerConfiguration.PROPERTY_MODE_CLUSTER);
serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_ADDRESS, testEnv.getAddress());
serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_PATH, testEnv.getPath());
serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_SESSIONTIMEOUT, testEnv.getTimeout());
serverconfig_1.set(ServerConfiguration.PROPERTY_ENFORCE_LEADERSHIP, false);
ServerConfiguration serverconfig_2 = serverconfig_1.copy().set(ServerConfiguration.PROPERTY_NODEID, "server2").set(ServerConfiguration.PROPERTY_BASEDIR, folder.newFolder().toPath().toAbsolutePath());
ClientConfiguration client_configuration = new ClientConfiguration(folder.newFolder().toPath());
client_configuration.set(ClientConfiguration.PROPERTY_MODE, ServerConfiguration.PROPERTY_MODE_CLUSTER);
client_configuration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_ADDRESS, testEnv.getAddress());
client_configuration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_PATH, testEnv.getPath());
client_configuration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_SESSIONTIMEOUT, testEnv.getTimeout());
try (Server server_1 = new Server(serverconfig_1)) {
server_1.start();
server_1.waitForStandaloneBoot();
Table table1 = Table.builder().name("t1").column("c", ColumnTypes.INTEGER).column("d", ColumnTypes.INTEGER).primaryKey("c").build();
Index index = Index.builder().onTable(table1).column("d", ColumnTypes.INTEGER).type(Index.TYPE_BRIN).build();
Table table2 = Table.builder().name("t2").column("c", ColumnTypes.INTEGER).column("d", ColumnTypes.INTEGER).primaryKey("c").build();
server_1.getManager().executeStatement(new CreateTableStatement(table1), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
server_1.getManager().executeStatement(new CreateTableStatement(table2), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
server_1.getManager().executeStatement(new CreateIndexStatement(index), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
server_1.getManager().executeUpdate(new InsertStatement(TableSpace.DEFAULT, "t1", RecordSerializer.makeRecord(table1, "c", 1, "d", 2)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
server_1.getManager().executeUpdate(new InsertStatement(TableSpace.DEFAULT, "t1", RecordSerializer.makeRecord(table1, "c", 2, "d", 2)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
server_1.getManager().executeUpdate(new InsertStatement(TableSpace.DEFAULT, "t1", RecordSerializer.makeRecord(table1, "c", 3, "d", 2)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
server_1.getManager().executeUpdate(new InsertStatement(TableSpace.DEFAULT, "t1", RecordSerializer.makeRecord(table1, "c", 4, "d", 2)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
long tx1 = TestUtils.beginTransaction(server_1.getManager(), TableSpace.DEFAULT);
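// open a transaction now: a row will be added and committed in it while the backup of t1 is running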
try (Server server_2 = new Server(serverconfig_2)) {
server_2.start();
try (HDBClient client = new HDBClient(client_configuration);
HDBConnection connection = client.openConnection()) {
assertEquals(4, connection.executeScan(TableSpace.DEFAULT, "SELECT * FROM t1", true, Collections.emptyList(), 0, 0, 10, true).consume().size());
ByteArrayOutputStream oo = new ByteArrayOutputStream();
BackupUtils.dumpTableSpace(TableSpace.DEFAULT, 64 * 1024, connection, oo, new ProgressListener() {
@Override
public void log(String type, String message, Map<String, Object> context) {
System.out.println("PROGRESS: " + type + " " + message + " context:" + context);
if (type.equals("beginTable") && "t1".equals(context.get("table"))) {
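// while t1 is being dumped, insert one more row in the pending transaction and commit it: the restore must recover it from the tx log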
try {
server_1.getManager().executeUpdate(new InsertStatement(TableSpace.DEFAULT, "t1", RecordSerializer.makeRecord(table1, "c", 5, "d", 2)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), new TransactionContext(tx1));
TestUtils.commitTransaction(server_1.getManager(), TableSpace.DEFAULT, tx1);
} catch (StatementExecutionException err) {
throw new RuntimeException(err);
}
}
}
});
byte[] backupData = oo.toByteArray();
connection.executeUpdate(TableSpace.DEFAULT, "DELETE FROM t1", 0, false, true, Collections.emptyList());
assertEquals(0, connection.executeScan(TableSpace.DEFAULT, "SELECT * FROM t1", true, Collections.emptyList(), 0, 0, 10, true).consume().size());
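// restore the dump into a new tablespace on server_2 and verify that the row committed during the dump is present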
BackupUtils.restoreTableSpace("newts", server_2.getNodeId(), connection, new ByteArrayInputStream(backupData), new ProgressListener() {
});
assertEquals(5, connection.executeScan("newts", "SELECT * FROM newts.t1", true, Collections.emptyList(), 0, 0, 10, true).consume().size());
assertEquals(1, server_2.getManager().getTableSpaceManager("newts").getIndexesOnTable("t1").size());
}
}
}
}
Use of herddb.model.TransactionContext in project herddb by diennea.
The class CheckpointTest, method manyUpdates.
@Test
public void manyUpdates() throws Exception {
Path dataPath = folder.newFolder("data").toPath();
Path logsPath = folder.newFolder("logs").toPath();
Path metadataPath = folder.newFolder("metadata").toPath();
Path tmpDir = folder.newFolder("tmpDir").toPath();
String nodeId = "localhost";
ServerConfiguration config1 = newServerConfigurationWithAutoPort();
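// small page and memory limits force pages to be unloaded and rewritten while the test runs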
config1.set(ServerConfiguration.PROPERTY_MAX_LOGICAL_PAGE_SIZE, 1024 * 1024L);
config1.set(ServerConfiguration.PROPERTY_MAX_PK_MEMORY, 2 * 1024 * 1024L);
config1.set(ServerConfiguration.PROPERTY_MAX_DATA_MEMORY, 2 * 1024 * 1024L);
config1.set(ServerConfiguration.PROPERTY_CHECKPOINT_PERIOD, 10 * 1000L);
int records = 100;
int iterations = 10_000;
int keylen = 25;
int strlen = 50;
Map<String, String> expectedValues = new HashMap<>();
try (DBManager manager = new DBManager("localhost", new FileMetadataStorageManager(metadataPath), new FileDataStorageManager(dataPath), new FileCommitLogManager(logsPath), tmpDir, null, config1, null)) {
// we want frequent checkpoints
manager.setCheckpointPeriod(10 * 1000L);
manager.start();
CreateTableSpaceStatement st1 = new CreateTableSpaceStatement("tblspace1", Collections.singleton(nodeId), nodeId, 1, 0, 0);
manager.executeStatement(st1, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), NO_TRANSACTION);
manager.waitForTablespace("tblspace1", 10000);
execute(manager, "CREATE TABLE tblspace1.tsql (k1 string, n1 string, primary key(k1))", Collections.emptyList());
Random random = new Random();
List<String> keys = new ArrayList<>();
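// load the initial data set inside a transaction, committing it periodically and starting a new one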
long tx = TestUtils.beginTransaction(manager, "tblspace1");
for (int i = 0; i < records; ++i) {
String key;
while (true) {
key = RandomString.getInstance().nextString(keylen);
if (!keys.contains(key)) {
keys.add(key);
break;
}
}
String value = RandomString.getInstance().nextString(strlen);
executeUpdate(manager, "INSERT INTO tblspace1.tsql(k1,n1) values(?,?)", Arrays.asList(key, value), new TransactionContext(tx));
expectedValues.put(key, value);
if (i % 1000 == 0) {
System.out.println("commit at " + i);
TestUtils.commitTransaction(manager, "tblspace1", tx);
tx = TestUtils.beginTransaction(manager, "tblspace1");
}
}
TestUtils.commitTransaction(manager, "tblspace1", tx);
System.out.println("database created");
manager.checkpoint();
TableManagerStats stats = manager.getTableSpaceManager("tblspace1").getTableManager("tsql").getStats();
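// run many random updates, triggering a full checkpoint at random points and committing every 1000 iterations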
tx = TestUtils.beginTransaction(manager, "tblspace1");
for (int i = 0; i < iterations; i++) {
String key = keys.get(random.nextInt(keys.size()));
String value = RandomString.getInstance().nextString(strlen);
assertEquals(1, executeUpdate(manager, "UPDATE tblspace1.tsql set n1=? where k1=?", Arrays.asList(value, key), new TransactionContext(tx)).getUpdateCount());
expectedValues.put(key, value);
if (random.nextInt(1000) <= 2) {
System.out.println("checkpoint after " + i + " iterations");
manager.triggerActivator(ActivatorRunRequest.FULL);
}
if (i % 1000 == 0) {
System.out.println("commit after " + i + " iterations");
System.out.println("stats: dirtypages:" + stats.getDirtypages() + " unloads:" + stats.getUnloadedPagesCount());
TestUtils.commitTransaction(manager, "tblspace1", tx);
tx = TestUtils.beginTransaction(manager, "tblspace1");
}
}
TestUtils.commitTransaction(manager, "tblspace1", tx);
for (Map.Entry<String, String> expected : expectedValues.entrySet()) {
try (DataScanner scan = TestUtils.scan(manager, "SELECT n1 FROM tblspace1.tsql where k1=?", Arrays.asList(expected.getKey()))) {
List<DataAccessor> all = scan.consume();
assertEquals(1, all.size());
assertEquals(RawString.of(expected.getValue()), all.get(0).get(0));
}
}
manager.checkpoint();
for (Map.Entry<String, String> expected : expectedValues.entrySet()) {
try (DataScanner scan = TestUtils.scan(manager, "SELECT n1 FROM tblspace1.tsql where k1=?", Arrays.asList(expected.getKey()))) {
List<DataAccessor> all = scan.consume();
assertEquals(1, all.size());
assertEquals(RawString.of(expected.getValue()), all.get(0).get(0));
}
}
}
// reboot
try (DBManager manager = new DBManager("localhost", new FileMetadataStorageManager(metadataPath), new FileDataStorageManager(dataPath), new FileCommitLogManager(logsPath), tmpDir, null, config1, null)) {
// we want frequent checkpoints
manager.setCheckpointPeriod(10 * 1000L);
manager.start();
assertTrue(manager.waitForBootOfLocalTablespaces(60000));
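// after the restart all values must still be readable: data is recovered from data pages and from the commit log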
for (Map.Entry<String, String> expected : expectedValues.entrySet()) {
try (DataScanner scan = TestUtils.scan(manager, "SELECT n1 FROM tblspace1.tsql where k1=?", Arrays.asList(expected.getKey()))) {
List<DataAccessor> all = scan.consume();
assertEquals(1, all.size());
assertEquals(RawString.of(expected.getValue()), all.get(0).get(0));
}
}
}
}
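The snippets above share one pattern: every execution returns the transaction id it actually ran under, and the caller wraps that id in a fresh TransactionContext before issuing the next statement. A minimal sketch of that chaining, assuming a tableSpaceManager, a list of DMLStatements and a StatementEvaluationContext are already in scope (the variable names are illustrative, not taken from the HerdDB sources):
TransactionContext txCtx = TransactionContext.NO_TRANSACTION;
StatementExecutionResult last = null;
for (DMLStatement statement : statements) {
    // executeStatementAsync returns a CompletableFuture<StatementExecutionResult>
    last = tableSpaceManager.executeStatementAsync(statement, context, txCtx).join();
    // adopt the (possibly auto-created) transaction for the following statements
    txCtx = new TransactionContext(last.transactionId);
}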