Use of herddb.model.Transaction in project herddb by diennea.
Example: class TableSpaceManager, method dumpTableSpace.
@SuppressFBWarnings("RCN_REDUNDANT_NULLCHECK_OF_NULL_VALUE")
/**
 * Streams a full dump of this tablespace (checkpoint data, optionally pending
 * transactions and the commit log captured during the dump) to a remote client.
 *
 * @param dumpId     client-assigned identifier used to correlate all dump messages
 * @param channel    network channel to the receiving client
 * @param fetchSize  number of records per table-data message
 * @param includeLog if true, also ship pending transactions and log entries written
 *                   while the dump is running, so the receiver can replay them
 * @throws DataStorageManagerException if the checkpoint cannot be created or the lock
 *                                     cannot be downgraded
 * @throws LogNotAvailableException    if the commit log cannot be read
 */
void dumpTableSpace(String dumpId, Channel channel, int fetchSize, boolean includeLog) throws DataStorageManagerException, LogNotAvailableException {
    LOGGER.log(Level.INFO, "dumpTableSpace dumpId:{0} channel {1} fetchSize:{2}, includeLog:{3}",
            new Object[]{dumpId, channel, fetchSize, includeLog});
    TableSpaceCheckpoint checkpoint;
    // Captures every change applied to the tablespace while the dump is in progress,
    // so the receiver can replay entries 'missed' during the data transfer.
    List<DumpedLogEntry> txlogentries = new CopyOnWriteArrayList<>();
    CommitLogListener logDumpReceiver = new CommitLogListener() {
        @Override
        public void logEntry(LogSequenceNumber logPos, LogEntry data) {
            txlogentries.add(new DumpedLogEntry(logPos, data.serialize()));
        }
    };
    long lockStamp = acquireWriteLock(null);
    if (includeLog) {
        log.attachCommitLogListener(logDumpReceiver);
    }
    checkpoint = checkpoint(true /* compact records */, true, true);
    // NOTE: java.util.logging uses MessageFormat-style placeholders ({0}), not SLF4J-style {}.
    LOGGER.log(Level.INFO, "Created checkpoint at {0}", checkpoint);
    if (checkpoint == null) {
        throw new DataStorageManagerException("failed to create a checkpoint, check logs for the reason");
    }
    // Downgrade the exclusive lock to a read lock: the checkpoint froze a consistent
    // snapshot, so concurrent readers (and, with includeLog, writers whose changes are
    // captured by the listener) may proceed while we stream it.
    lockStamp = generalLock.tryConvertToReadLock(lockStamp);
    if (lockStamp == 0) {
        throw new DataStorageManagerException("unable to downgrade lock");
    }
    try {
        final int timeout = 60000;
        LogSequenceNumber checkpointSequenceNumber = checkpoint.sequenceNumber;
        long id = channel.generateRequestId();
        LOGGER.log(Level.INFO, "start sending dump, dumpId: {0} to client {1}", new Object[]{dumpId, channel});
        try (Pdu response_to_start = channel.sendMessageWithPduReply(id,
                PduCodec.TablespaceDumpData.write(id, tableSpaceName, dumpId, "start", null,
                        stats.getTablesize(), checkpointSequenceNumber.ledgerId,
                        checkpointSequenceNumber.offset, null, null), timeout)) {
            if (response_to_start.type != Pdu.TYPE_ACK) {
                LOGGER.log(Level.SEVERE, "error response at start command");
                return;
            }
        }
        if (includeLog) {
            List<Transaction> transactionsSnapshot = new ArrayList<>();
            dataStorageManager.loadTransactions(checkpointSequenceNumber, tableSpaceUUID, transactionsSnapshot::add);
            List<Transaction> batch = new ArrayList<>();
            for (Transaction t : transactionsSnapshot) {
                batch.add(t);
                if (batch.size() == 10) {
                    sendTransactionsDump(batch, channel, dumpId, timeout);
                    // Reset the batch after each flush so already-sent transactions are
                    // not duplicated (harmless no-op if sendTransactionsDump clears it).
                    batch.clear();
                }
            }
            // Flush the final partial batch (fewer than 10 transactions).
            sendTransactionsDump(batch, channel, dumpId, timeout);
        }
        for (Entry<String, LogSequenceNumber> entry : checkpoint.tablesCheckpoints.entrySet()) {
            final AbstractTableManager tableManager = tables.get(entry.getKey());
            final LogSequenceNumber sequenceNumber = entry.getValue();
            if (tableManager.isSystemTable()) {
                continue;
            }
            try {
                LOGGER.log(Level.INFO, "Sending table checkpoint for {0} taken at sequence number {1}",
                        new Object[]{tableManager.getTable().name, sequenceNumber});
                FullTableScanConsumer sink = new SingleTableDumper(tableSpaceName, tableManager, channel, dumpId, timeout, fetchSize);
                tableManager.dump(sequenceNumber, sink);
            } catch (DataStorageManagerException err) {
                LOGGER.log(Level.SEVERE, "error sending dump id " + dumpId, err);
                long errorid = channel.generateRequestId();
                // Use the freshly generated errorid in the PDU as well: the original
                // reused the 'start' request id, breaking reply correlation.
                try (Pdu response = channel.sendMessageWithPduReply(errorid,
                        PduCodec.TablespaceDumpData.write(errorid, tableSpaceName, dumpId, "error", null, 0, 0, 0, null, null), timeout)) {
                    // The reply content is intentionally ignored: the dump is aborted regardless.
                }
                return;
            }
        }
        // Ship the log entries captured while the dump was running, in log order.
        if (!txlogentries.isEmpty()) {
            txlogentries.sort(Comparator.naturalOrder());
            sendDumpedCommitLog(txlogentries, channel, dumpId, timeout);
        }
        LogSequenceNumber finishLogSequenceNumber = log.getLastSequenceNumber();
        channel.sendOneWayMessage(PduCodec.TablespaceDumpData.write(id, tableSpaceName, dumpId, "finish", null, 0,
                finishLogSequenceNumber.ledgerId, finishLogSequenceNumber.offset, null, null), (Throwable error) -> {
            if (error != null) {
                LOGGER.log(Level.SEVERE, "Cannot send last dump msg for " + dumpId, error);
            } else {
                LOGGER.log(Level.INFO, "Sent last dump msg for " + dumpId);
            }
        });
    } catch (InterruptedException | TimeoutException error) {
        LOGGER.log(Level.SEVERE, "error sending dump id " + dumpId, error);
    } finally {
        releaseReadLock(lockStamp, "senddump");
        if (includeLog) {
            log.removeCommitLogListener(logDumpReceiver);
        }
        // Unpin the per-table checkpoints so the storage manager may reclaim them.
        for (Entry<String, LogSequenceNumber> entry : checkpoint.tablesCheckpoints.entrySet()) {
            String tableName = entry.getKey();
            AbstractTableManager tableManager = tables.get(tableName);
            String tableUUID = tableManager.getTable().uuid;
            LogSequenceNumber seqNumber = entry.getValue();
            LOGGER.log(Level.INFO, "unPinTableCheckpoint {0}.{1} ({2}) {3}", new Object[]{tableSpaceUUID, tableName, tableUUID, seqNumber});
            dataStorageManager.unPinTableCheckpoint(tableSpaceUUID, tableUUID, seqNumber);
        }
    }
}
Use of herddb.model.Transaction in project herddb by diennea.
Example: class TableManager, method executeStatementAsync.
@Override
/**
 * Dispatches a statement to the proper asynchronous executor.
 * The checkpoint read lock is held for the whole (possibly asynchronous)
 * execution and released only when the returned future completes.
 */
public CompletableFuture<StatementExecutionResult> executeStatementAsync(Statement statement, Transaction transaction, StatementEvaluationContext context) {
    final long stamp = checkpointLock.readLock();
    final boolean isTruncate = statement instanceof TruncateTableStatement;
    CompletableFuture<StatementExecutionResult> result;
    if (statement instanceof UpdateStatement) {
        result = executeUpdateAsync((UpdateStatement) statement, transaction, context);
    } else if (statement instanceof InsertStatement) {
        result = executeInsertAsync((InsertStatement) statement, transaction, context);
    } else if (statement instanceof GetStatement) {
        result = executeGetAsync((GetStatement) statement, transaction, context);
    } else if (statement instanceof DeleteStatement) {
        result = executeDeleteAsync((DeleteStatement) statement, transaction, context);
    } else if (isTruncate) {
        // TRUNCATE runs synchronously; wrap its outcome (value or error) in a future.
        try {
            result = CompletableFuture.completedFuture(
                    executeTruncate((TruncateTableStatement) statement, transaction, context));
        } catch (StatementExecutionException err) {
            LOGGER.log(Level.SEVERE, "Truncate table failed", err);
            result = Futures.exception(err);
        }
    } else if (statement instanceof TableConsistencyCheckStatement) {
        DBManager manager = this.tableSpaceManager.getDbmanager();
        result = CompletableFuture.completedFuture(
                manager.createTableCheckSum((TableConsistencyCheckStatement) statement, context));
    } else {
        result = Futures.exception(new StatementExecutionException("not implemented " + statement.getClass()));
    }
    // Release the checkpoint lock as soon as the execution settles, success or failure.
    result = result.whenComplete((r, error) -> checkpointLock.unlockRead(stamp));
    if (isTruncate) {
        // A successful TRUNCATE is flushed to disk after the lock has been released.
        result = result.whenComplete((r, error) -> {
            if (error == null) {
                try {
                    flush();
                } catch (DataStorageManagerException err) {
                    throw new HerdDBInternalException(new StatementExecutionException("internal data error: " + err, err));
                }
            }
        });
    }
    return result;
}
Use of herddb.model.Transaction in project herddb by diennea.
Example: class TableManager, method checkForeignKeyConstraintsAsChildTable.
/**
 * Verifies that a row about to be written into this (child) table has a matching
 * row in the parent table of the given foreign key.
 *
 * The lookup is expressed as a cached SQL query so that the SQL Planner can pick
 * the best index; the planner also caches the plan and invalidates it consistently
 * during DML operations.
 *
 * @throws ForeignKeyViolationException if no matching parent row exists
 */
private void checkForeignKeyConstraintsAsChildTable(ForeignKeyDef fk, DataAccessor values, StatementEvaluationContext context, Transaction transaction) throws StatementExecutionException {
    String parentLookupQuery = childForeignKeyQueries.computeIfAbsent(fk.name, (l -> {
        Table parentTable = tableSpaceManager.getTableManagerByUUID(fk.parentTableId).getTable();
        // SELECT * avoids projections and record copies
        StringBuilder sql = new StringBuilder("SELECT * FROM ")
                .append(delimit(parentTable.tablespace))
                .append(".")
                .append(delimit(parentTable.name))
                .append(" WHERE ");
        String and = "";
        for (int i = 0; i < fk.parentTableColumns.length; i++) {
            sql.append(and).append(delimit(fk.parentTableColumns[i])).append("=?");
            and = " AND ";
        }
        return sql.toString();
    }));
    // Collect the child-side FK values in column order; they become the query parameters.
    final List<Object> childValues = new ArrayList<>(fk.columns.length);
    boolean everyValueIsNull = true;
    for (int i = 0; i < fk.columns.length; i++) {
        Object value = values.get(fk.columns[i]);
        everyValueIsNull = everyValueIsNull && value == null;
        childValues.add(value);
    }
    if (everyValueIsNull) {
        // all of the FK values are null: nothing to check against the parent table
        return;
    }
    TransactionContext tx = transaction != null
            ? new TransactionContext(transaction.transactionId)
            : TransactionContext.NO_TRANSACTION;
    boolean parentRowExists;
    try (DataScanner scan = tableSpaceManager.getDbmanager().executeSimpleQuery(
            tableSpaceManager.getTableSpaceName(), parentLookupQuery, childValues,
            1, // only one record is needed to prove existence
            true, // keep read locks in TransactionContext
            tx, null)) {
        parentRowExists = !scan.consume().isEmpty();
    } catch (DataScannerException err) {
        throw new StatementExecutionException(err);
    }
    if (!parentRowExists) {
        throw new ForeignKeyViolationException(fk.name, "foreignKey " + table.name + "." + fk.name + " violated");
    }
}
Use of herddb.model.Transaction in project herddb by diennea.
Example: class SystransactionsTableManager, method buildVirtualRecordList.
@Override
/**
 * Builds one virtual record per transaction currently active in this tablespace,
 * exposing tablespace name, transaction id and creation timestamp.
 */
protected Iterable<Record> buildVirtualRecordList(Transaction transaction) {
    final List<Record> rows = new ArrayList<>();
    tableSpaceManager.getTransactions().forEach(tx ->
            rows.add(RecordSerializer.makeRecord(table,
                    "tablespace", tableSpaceManager.getTableSpaceName(),
                    "txid", tx.transactionId,
                    "creationtimestamp", new java.sql.Timestamp(tx.localCreationTimestamp))));
    return rows;
}
Use of herddb.model.Transaction in project herddb by diennea.
Example: class AbstractSystemTableManager, method scan.
@Override
/**
 * Scans a system table: materializes the virtual records, filters them with the
 * statement predicate, enforces PK order, then applies the statement's sort,
 * limits and projection before handing the result set back to the caller.
 */
public DataScanner scan(ScanStatement statement, StatementEvaluationContext context, Transaction transaction, boolean lockRequired, boolean forWrite) throws StatementExecutionException {
    Predicate filter = statement.getPredicate();
    MaterializedRecordSet rs = tableSpaceManager.getDbmanager().getRecordSetFactory()
            .createRecordSet(table.columnNames, table.columns);
    Iterable<Record> virtualRows = buildVirtualRecordList(transaction);
    StreamSupport.stream(virtualRows.spliterator(), false)
            .filter(r -> filter == null || filter.evaluate(r, context))
            .sorted(sortByPk) // system-table rows are always delivered in PK order
            .map(r -> r.getDataAccessor(table))
            .forEach(rs::add);
    rs.writeFinished();
    rs.sort(statement.getComparator());
    rs.applyLimits(statement.getLimits(), context);
    rs.applyProjection(statement.getProjection(), context);
    return new SimpleDataScanner(transaction, rs);
}
Aggregations