Use of herddb.model.Transaction in the project herddb by diennea.
The following snippet shows the method writeTransactionsAtCheckpoint of the class FileDataStorageManager.
/**
 * Persists the set of transactions still active at checkpoint time for the given tablespace.
 * <p>
 * The file is written to a temporary path first, flushed and fsynced, and only then moved
 * atomically over the final checkpoint file name, so readers never observe a partially
 * written file. After writing, older (or unparsable) transactions files in the tablespace
 * directory are scheduled for deletion via {@link PostCheckpointAction}s, to be executed
 * after the checkpoint completes.
 *
 * @param tableSpace     name of the tablespace being checkpointed
 * @param sequenceNumber log position of the checkpoint; encoded into the file name and header
 * @param transactions   transactions to serialize; must be empty when sequenceNumber is start-of-time
 * @return actions that delete superseded transactions files, to run after the checkpoint ends
 * @throws DataStorageManagerException on any I/O failure while writing or moving the file
 */
@Override
public Collection<PostCheckpointAction> writeTransactionsAtCheckpoint(String tableSpace, LogSequenceNumber sequenceNumber, Collection<Transaction> transactions) throws DataStorageManagerException {
// sanity check: at start-of-time no transaction can exist yet
if (sequenceNumber.isStartOfTime() && !transactions.isEmpty()) {
throw new DataStorageManagerException("impossible to write a non empty transactions list at start-of-time");
}
Path tableSpaceDirectory = getTablespaceDirectory(tableSpace);
try {
Files.createDirectories(tableSpaceDirectory);
Path checkPointFile = getTablespaceTransactionsFile(tableSpace, sequenceNumber);
Path parent = getParent(checkPointFile);
Files.createDirectories(parent);
// write to a ".tmp" sibling first; the final name only appears after the atomic move below
Path checkpointFileTemp = parent.resolve(checkPointFile.getFileName() + ".tmp");
LOGGER.log(Level.FINE, "writeTransactionsAtCheckpoint for tableSpace {0} sequenceNumber {1} to {2}, active transactions {3}", new Object[] { tableSpace, sequenceNumber, checkPointFile.toAbsolutePath().toString(), transactions.size() });
try (ManagedFile file = ManagedFile.open(checkpointFileTemp, requirefsync);
SimpleBufferedOutputStream buffer = new SimpleBufferedOutputStream(file.getOutputStream(), COPY_BUFFERS_SIZE);
ExtendedDataOutputStream dout = new ExtendedDataOutputStream(buffer)) {
// version
dout.writeVLong(1);
// flags for future implementations
dout.writeVLong(0);
// header: tablespace name and the checkpoint log position
dout.writeUTF(tableSpace);
dout.writeZLong(sequenceNumber.ledgerId);
dout.writeZLong(sequenceNumber.offset);
// transaction count followed by each serialized transaction
dout.writeInt(transactions.size());
for (Transaction t : transactions) {
t.serialize(dout);
}
// flush buffers and fsync before the rename, so the temp file is durable
dout.flush();
file.sync();
} catch (IOException err) {
throw new DataStorageManagerException(err);
}
try {
// atomic rename makes the new checkpoint file visible in one step
Files.move(checkpointFileTemp, checkPointFile, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
} catch (IOException err) {
throw new DataStorageManagerException(err);
}
} catch (IOException err) {
throw new DataStorageManagerException(err);
}
// scan the tablespace directory and schedule deletion of transactions files
// that are older than this checkpoint (or cannot be parsed at all)
Collection<PostCheckpointAction> result = new ArrayList<>();
try (DirectoryStream<Path> stream = Files.newDirectoryStream(tableSpaceDirectory)) {
for (Path p : stream) {
if (isTransactionsFile(p)) {
try {
LogSequenceNumber logPositionInFile = readLogSequenceNumberFromTransactionsFile(tableSpace, p);
if (sequenceNumber.after(logPositionInFile)) {
LOGGER.log(Level.FINEST, "transactions metadata file " + p.toAbsolutePath() + ". will be deleted after checkpoint end");
result.add(new DeleteFileAction(tableSpace, "transactions", "delete transactions file " + p.toAbsolutePath(), p));
}
} catch (DataStorageManagerException ignore) {
// an unparsable file is useless: log it and schedule it for deletion too
LOGGER.log(Level.SEVERE, "Unparsable transactions file " + p.toAbsolutePath(), ignore);
result.add(new DeleteFileAction(tableSpace, "transactions", "delete unparsable transactions file " + p.toAbsolutePath(), p));
}
}
}
} catch (IOException err) {
// listing failure is not fatal for the checkpoint itself: cleanup is simply skipped
LOGGER.log(Level.SEVERE, "Could not list dir " + tableSpaceDirectory, err);
}
return result;
}
Use of herddb.model.Transaction in the project herddb by diennea.
The following snippet shows the method requestReceived of the class RoutedClientSideConnection.
/**
 * Callback invoked for each PDU received on this client-side channel.
 * <p>
 * Only {@code Pdu.TYPE_TABLESPACE_DUMP_DATA} is handled here (hence the
 * SF_SWITCH_NO_DEFAULT suppression): the message carries one chunk of a
 * tablespace dump and is routed to the {@link TableSpaceDumpReceiver}
 * registered under the message's dump id. An ack is sent back for every
 * command except "finish"; errors are reported both to the receiver and
 * to the remote peer. The PDU is always released in the finally block.
 *
 * @param message the received PDU; closed before this method returns
 * @param channel the channel to reply on; may be null, in which case no reply is sent
 */
@Override
@SuppressFBWarnings(value = "SF_SWITCH_NO_DEFAULT")
@SuppressWarnings("empty-statement")
public void requestReceived(Pdu message, Channel channel) {
try {
switch(message.type) {
case Pdu.TYPE_TABLESPACE_DUMP_DATA:
{
// locate the receiver registered for this dump id
String dumpId = PduCodec.TablespaceDumpData.readDumpId(message);
TableSpaceDumpReceiver receiver = dumpReceivers.get(dumpId);
LOGGER.log(Level.FINE, "receiver for {0}: {1}", new Object[] { dumpId, receiver });
if (receiver == null) {
// no receiver: report the problem to the peer (if we can) and stop
if (channel != null) {
ByteBuf resp = PduCodec.ErrorResponse.write(message.messageId, "no such dump receiver " + dumpId);
channel.sendReplyMessage(message.messageId, resp);
}
return;
}
try {
// dispatch on the dump sub-command carried by the PDU
String command = PduCodec.TablespaceDumpData.readCommand(message);
boolean sendAck = true;
switch(command) {
case "start":
{
// dump begins at this log position
long ledgerId = PduCodec.TablespaceDumpData.readLedgerId(message);
long offset = PduCodec.TablespaceDumpData.readOffset(message);
receiver.start(new LogSequenceNumber(ledgerId, offset));
break;
}
case "beginTable":
{
// new table in the dump: deserialize its schema, indexes and stats
byte[] tableDefinition = PduCodec.TablespaceDumpData.readTableDefinition(message);
Table table = Table.deserialize(tableDefinition);
long estimatedSize = PduCodec.TablespaceDumpData.readEstimatedSize(message);
long dumpLedgerId = PduCodec.TablespaceDumpData.readLedgerId(message);
long dumpOffset = PduCodec.TablespaceDumpData.readOffset(message);
List<byte[]> indexesDef = PduCodec.TablespaceDumpData.readIndexesDefinition(message);
List<Index> indexes = indexesDef.stream().map(Index::deserialize).collect(Collectors.toList());
Map<String, Object> stats = new HashMap<>();
stats.put("estimatedSize", estimatedSize);
stats.put("dumpLedgerId", dumpLedgerId);
stats.put("dumpOffset", dumpOffset);
receiver.beginTable(new DumpedTableMetadata(table, new LogSequenceNumber(dumpLedgerId, dumpOffset), indexes), stats);
break;
}
case "endTable":
{
receiver.endTable();
break;
}
case "finish":
{
// dump completed: deliver the final log position; no ack is expected
long ledgerId = PduCodec.TablespaceDumpData.readLedgerId(message);
long offset = PduCodec.TablespaceDumpData.readOffset(message);
receiver.finish(new LogSequenceNumber(ledgerId, offset));
sendAck = false;
break;
}
case "data":
{
// a chunk of table records (key/value pairs)
List<Record> records = new ArrayList<>();
PduCodec.TablespaceDumpData.readRecords(message, (key, value) -> {
records.add(new Record(Bytes.from_array(key), Bytes.from_array(value)));
});
receiver.receiveTableDataChunk(records);
break;
}
case "txlog":
{
// a chunk of transaction-log entries; keys encode the log position
List<DumpedLogEntry> records = new ArrayList<>();
PduCodec.TablespaceDumpData.readRecords(message, (key, value) -> {
records.add(new DumpedLogEntry(LogSequenceNumber.deserialize(key), value));
});
receiver.receiveTransactionLogChunk(records);
break;
}
case "transactions":
{
// transactions active at dump time (tablespace name not needed here)
List<Transaction> transactions = new ArrayList<>();
PduCodec.TablespaceDumpData.readRecords(message, (key, value) -> {
transactions.add(Transaction.deserialize(null, value));
});
receiver.receiveTransactionsAtDump(transactions);
break;
}
default:
throw new DataStorageManagerException("invalid dump command:" + command);
}
// every command except "finish" is acknowledged to the peer
if (channel != null && sendAck) {
ByteBuf res = PduCodec.AckResponse.write(message.messageId);
channel.sendReplyMessage(message.messageId, res);
}
} catch (RuntimeException error) {
// notify both the local receiver and the remote peer of the failure
LOGGER.log(Level.SEVERE, "error while handling dump data", error);
receiver.onError(error);
if (channel != null) {
ByteBuf res = PduCodec.ErrorResponse.write(message.messageId, error);
channel.sendReplyMessage(message.messageId, res);
}
}
}
break;
}
} finally {
// always release the PDU buffer
message.close();
}
}
Use of herddb.model.Transaction in the project herddb by diennea.
The following snippet shows the method handlePushTransactionsBlock of the class ServerSideConnectionPeer.
/**
 * Handles a PUSH_TRANSACTIONS_BLOCK message received during a tablespace restore:
 * deserializes the transactions carried by the PDU, hands them to the target
 * tablespace manager, and acknowledges the peer. On a StatementExecutionException
 * an error response is sent back instead of the ack.
 *
 * @param message the PDU carrying the tablespace name and the serialized transactions
 * @param channel the channel to reply on
 */
private void handlePushTransactionsBlock(Pdu message, Channel channel) {
    try {
        final String tableSpaceName = PduCodec.PushTransactionsBlock.readTablespace(message);
        final List<Transaction> restored = new ArrayList<>();
        // each raw payload becomes a Transaction bound to the target tablespace
        PduCodec.PushTransactionsBlock.readTransactions(
                message,
                payload -> restored.add(Transaction.deserialize(tableSpaceName, payload)));
        LOGGER.log(Level.INFO, "Received " + restored.size() + " records for restore of transactions in tableSpace " + tableSpaceName);
        server.getManager()
                .getTableSpaceManager(tableSpaceName)
                .restoreRawDumpedTransactions(restored);
        ByteBuf ack = PduCodec.AckResponse.write(message.messageId);
        channel.sendReplyMessage(message.messageId, ack);
    } catch (StatementExecutionException err) {
        // restore failed: report the error to the peer instead of acking
        ByteBuf error = composeErrorResponse(message.messageId, err);
        channel.sendReplyMessage(message.messageId, error);
    }
}
Use of herddb.model.Transaction in the project herddb by diennea.
The following snippet shows the method execute of the class ValuesOp.
/**
 * Executes the VALUES operator: exposes the compiled constant tuples as a
 * {@link DataScanner}, evaluating each expression lazily when the row is pulled.
 *
 * @param tableSpaceManager  used only to resolve the current transaction
 * @param transactionContext carries the transaction id propagated to the result
 * @param context            evaluation context passed to each expression
 * @param lockRequired       unused by this operator
 * @param forWrite           unused by this operator
 * @return a ScanResult streaming one Tuple per row of the VALUES list
 */
@Override
public StatementExecutionResult execute(TableSpaceManager tableSpaceManager, TransactionContext transactionContext, StatementEvaluationContext context, boolean lockRequired, boolean forWrite) throws StatementExecutionException {
    final Iterator<List<CompiledSQLExpression>> tupleIterator = tuples.iterator();
    final Transaction transaction = tableSpaceManager.getTransaction(transactionContext.transactionId);
    DataScanner scanner = new DataScanner(transaction, fieldNames, columns) {

        @Override
        public boolean hasNext() throws DataScannerException {
            return tupleIterator.hasNext();
        }

        @Override
        public DataAccessor next() throws DataScannerException {
            List<CompiledSQLExpression> expressions = tupleIterator.next();
            // evaluate one expression per declared field; rows have fieldNames.length columns
            Object[] row = new Object[fieldNames.length];
            int column = 0;
            while (column < row.length) {
                row[column] = expressions.get(column).evaluate(DataAccessor.NULL, context);
                column++;
            }
            return new Tuple(fieldNames, row);
        }
    };
    return new ScanResult(transactionContext.transactionId, scanner);
}
Use of herddb.model.Transaction in the project herddb by diennea.
The following snippet shows the method test of the class TransactionIsolationTest.
/**
 * Verifies the lock-acquisition behavior of the two supported isolation levels.
 * <p>
 * In TRANSACTION_READ_COMMITTED a plain SELECT retains no locks; in
 * TRANSACTION_REPEATABLE_READ a SELECT holds a read lock and
 * SELECT ... FOR UPDATE upgrades it to a write lock. Lock state is inspected
 * directly on the server-side Transaction object, and rollback is expected
 * to discard the transaction entirely.
 */
@Test
public void test() throws Exception {
try (HerdDBEmbeddedDataSource dataSource = new HerdDBEmbeddedDataSource()) {
dataSource.getProperties().setProperty(ServerConfiguration.PROPERTY_BASEDIR, folder.newFolder().getAbsolutePath());
dataSource.getProperties().setProperty(ClientConfiguration.PROPERTY_BASEDIR, folder.newFolder().getAbsolutePath());
try (Connection con = dataSource.getConnection();
Statement statement = con.createStatement()) {
statement.execute("CREATE TABLE mytable (key string primary key, name string)");
statement.execute("CREATE TABLE mytable2 (key string primary key, name string)");
}
Server server = dataSource.getServer();
try (Connection con = dataSource.getConnection();
Statement statement = con.createStatement()) {
assertEquals(1, statement.executeUpdate("INSERT INTO mytable (key,name) values('k1','name1')"));
// defaults: READ_COMMITTED isolation, default schema, autocommit on
assertEquals(Connection.TRANSACTION_READ_COMMITTED, con.getTransactionIsolation());
assertEquals(TableSpace.DEFAULT, con.getSchema());
assertTrue(con.getAutoCommit());
con.setAutoCommit(false);
{
HerdDBConnection hCon = (HerdDBConnection) con;
// no server-side transaction exists until the first DML is issued
assertEquals(0, hCon.getTransactionId());
// force the creation of a transaction, by issuing a DML command
assertEquals(1, statement.executeUpdate("INSERT INTO mytable2 (key,name) values('c1','name1')"));
long tx = hCon.getTransactionId();
statement.executeQuery("SELECT * FROM mytable").close();
// inspect the server-side Transaction object directly
Transaction transaction = server.getManager().getTableSpaceManager(TableSpace.DEFAULT).getTransaction(tx);
// in TRANSACTION_READ_COMMITTED no lock is to be retained
assertTrue(transaction.locks.get("mytable").isEmpty());
// switch to REPEATABLE_READ: a plain SELECT now keeps a read lock
con.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ);
statement.executeQuery("SELECT * FROM mytable").close();
LockHandle lock = transaction.lookupLock("mytable", Bytes.from_string("k1"));
assertFalse(lock.write);
// SELECT ... FOR UPDATE upgrades the same lock to a write lock
statement.executeQuery("SELECT * FROM mytable FOR UPDATE").close();
lock = transaction.lookupLock("mytable", Bytes.from_string("k1"));
assertTrue(lock.write);
// rollback must discard the transaction on the server
con.rollback();
assertNull(server.getManager().getTableSpaceManager(TableSpace.DEFAULT).getTransaction(tx));
}
// test SELECT ... FOR UPDATE
{
HerdDBConnection hCon = (HerdDBConnection) con;
assertEquals(0, hCon.getTransactionId());
con.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ);
// force the creation of a transaction, by issuing a DML command
assertEquals(1, statement.executeUpdate("INSERT INTO mytable2 (key,name) values('c2','name1')"));
long tx = hCon.getTransactionId();
// under REPEATABLE_READ, FOR UPDATE takes a write lock straight away
statement.executeQuery("SELECT * FROM mytable FOR UPDATE").close();
Transaction transaction = server.getManager().getTableSpaceManager(TableSpace.DEFAULT).getTransaction(tx);
LockHandle lock = transaction.lookupLock("mytable", Bytes.from_string("k1"));
assertTrue(lock.write);
con.rollback();
assertNull(server.getManager().getTableSpaceManager(TableSpace.DEFAULT).getTransaction(tx));
}
}
}
}
Aggregations