Use of herddb.log.CommitLogResult in project herddb by diennea.
The class MemoryCommitLogManager, method createCommitLog.
@Override
public CommitLog createCommitLog(String tableSpace, String tablespaceName, String localNodeId) {
    return new CommitLog() {

        AtomicLong offset = new AtomicLong(-1);

        @Override
        public CommitLogResult log(LogEntry entry, boolean synch) throws LogNotAvailableException {
            if (isHasListeners()) {
                synch = true;
            }
            if (testSerialize) {
                // NOOP, but trigger serialization subsystem
                try {
                    entry.serialize(ExtendedDataOutputStream.NULL);
                } catch (IOException err) {
                    throw new LogNotAvailableException(err);
                }
            }
            LogSequenceNumber logPos = new LogSequenceNumber(1, offset.incrementAndGet());
            notifyListeners(logPos, entry);
            return new CommitLogResult(logPos, !synch, synch);
        }

        @Override
        public LogSequenceNumber getLastSequenceNumber() {
            return new LogSequenceNumber(1, offset.get());
        }

        private volatile boolean closed;

        @Override
        public void close() throws LogNotAvailableException {
            closed = true;
        }

        @Override
        public boolean isFailed() {
            return false;
        }

        @Override
        public boolean isClosed() {
            return closed;
        }

        @Override
        public void recovery(LogSequenceNumber snapshotSequenceNumber, BiConsumer<LogSequenceNumber, LogEntry> consumer, boolean fencing) throws LogNotAvailableException {
        }

        @Override
        public void dropOldLedgers(LogSequenceNumber lastCheckPointSequenceNumber) throws LogNotAvailableException {
        }

        @Override
        public void startWriting(int expectedReplicaCount) throws LogNotAvailableException {
        }

        @Override
        public void clear() throws LogNotAvailableException {
        }
    };
}
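Read together with the BookkeeperCommitLog test further down, this in-memory implementation shows the CommitLogResult contract: a synchronous write (synch == true) already carries its LogSequenceNumber, while a deferred write exposes it only through the logSequenceNumber future. A minimal caller sketch, assuming the future completes once the write is acknowledged (awaitLsn is a hypothetical helper, not part of HerdDB):

// Hypothetical helper: obtain the log position of a write, whether it
// was performed synchronously or deferred.
static LogSequenceNumber awaitLsn(CommitLog log, LogEntry entry, boolean synch) throws Exception {
    CommitLogResult res = log.log(entry, synch);
    if (res.sync) {
        // synchronous write: the position is already known
        return res.getLogSequenceNumber();
    }
    // deferred write: getLogSequenceNumber() would be null here,
    // so wait on the CompletableFuture instead
    return res.logSequenceNumber.get();
}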
Use of herddb.log.CommitLogResult in project herddb by diennea.
The class Transaction, method deserialize.
public static Transaction deserialize(String tableSpace, ExtendedDataInputStream in) throws IOException {
    // version
    long version = in.readVLong();
    // flags for future implementations
    long flags = in.readVLong();
    if (version != 1 || flags != 0) {
        throw new IOException("corrupted transaction file");
    }
    long id = in.readZLong();
    long ledgerId = in.readZLong();
    long offset = in.readZLong();
    LogSequenceNumber lastSequenceNumber = new LogSequenceNumber(ledgerId, offset);
    Transaction t = new Transaction(id, tableSpace, new CommitLogResult(lastSequenceNumber, false, true));
    int size = in.readVInt();
    for (int i = 0; i < size; i++) {
        String table = in.readUTF();
        int numRecords = in.readVInt();
        Map<Bytes, Record> records = new HashMap<>();
        for (int k = 0; k < numRecords; k++) {
            byte[] key = in.readArray();
            byte[] value = in.readArray();
            Bytes bKey = Bytes.from_array(key);
            Record record = new Record(bKey, Bytes.from_array(value));
            records.put(bKey, record);
        }
        t.changedRecords.put(table, records);
    }
    size = in.readVInt();
    for (int i = 0; i < size; i++) {
        String table = in.readUTF();
        int numRecords = in.readVInt();
        Map<Bytes, Record> records = new HashMap<>();
        for (int k = 0; k < numRecords; k++) {
            byte[] key = in.readArray();
            byte[] value = in.readArray();
            Bytes bKey = Bytes.from_array(key);
            Record record = new Record(bKey, Bytes.from_array(value));
            records.put(bKey, record);
        }
        t.newRecords.put(table, records);
    }
    size = in.readVInt();
    for (int i = 0; i < size; i++) {
        String table = in.readUTF();
        int numRecords = in.readVInt();
        Set<Bytes> records = new HashSet<>();
        for (int k = 0; k < numRecords; k++) {
            byte[] key = in.readArray();
            records.add(Bytes.from_array(key));
        }
        t.deletedRecords.put(table, records);
    }
    size = in.readVInt();
    if (size > 0) {
        t.newTables = new HashMap<>();
        for (int i = 0; i < size; i++) {
            byte[] data = in.readArray();
            Table table = Table.deserialize(data);
            t.newTables.put(table.name, table);
        }
    }
    size = in.readVInt();
    if (size > 0) {
        t.droppedTables = new HashSet<>();
        for (int i = 0; i < size; i++) {
            t.droppedTables.add(in.readUTF());
        }
    }
    size = in.readVInt();
    if (size > 0) {
        t.newIndexes = new HashMap<>();
        for (int i = 0; i < size; i++) {
            byte[] data = in.readArray();
            Index index = Index.deserialize(data);
            t.newIndexes.put(index.name, index);
        }
    }
    size = in.readVInt();
    if (size > 0) {
        t.droppedIndexes = new HashSet<>();
        for (int i = 0; i < size; i++) {
            t.droppedIndexes.add(in.readUTF());
        }
    }
    return t;
}
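The read path above fixes the wire format: a VLong version, a VLong flags word, ZLong-encoded transaction id and last log position, then the length-prefixed record sections followed by the optional DDL sections. A hedged usage sketch for restoring a transaction from a buffer (SimpleByteArrayInputStream is assumed here to be the herddb.utils stream wrapper, and readTransactionBytes is a hypothetical source of data):

byte[] data = readTransactionBytes(); // hypothetical: bytes written by the matching serialize()
try (ExtendedDataInputStream in = new ExtendedDataInputStream(new SimpleByteArrayInputStream(data))) {
    // rebuilds the transaction, including a CommitLogResult carrying
    // the last known LogSequenceNumber (deferred=false, sync=true)
    Transaction t = Transaction.deserialize("tblspace1", in);
}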
Use of herddb.log.CommitLogResult in project herddb by diennea.
The class BookKeeperCommitLogTest, method testWriteAsync.
@Test
public void testWriteAsync() throws Exception {
    final String tableSpaceUUID = UUID.randomUUID().toString();
    final String name = TableSpace.DEFAULT;
    final String nodeid = "nodeid";
    ServerConfiguration serverConfiguration = newServerConfigurationWithAutoPort();
    try (ZookeeperMetadataStorageManager man = new ZookeeperMetadataStorageManager(testEnv.getAddress(), testEnv.getTimeout(), testEnv.getPath());
            BookkeeperCommitLogManager logManager = new BookkeeperCommitLogManager(man, serverConfiguration, NullStatsLogger.INSTANCE)) {
        man.start();
        logManager.start();
        CommitLogResult res1;
        CommitLogResult res2;
        CommitLogResult res3;
        try (BookkeeperCommitLog writer = logManager.createCommitLog(tableSpaceUUID, name, nodeid)) {
            writer.startWriting(1);
            res1 = writer.log(LogEntryFactory.beginTransaction(1), false);
            res2 = writer.log(LogEntryFactory.beginTransaction(2), false);
            res3 = writer.log(LogEntryFactory.beginTransaction(3), true);
            assertTrue(res1.deferred);
            assertFalse(res1.sync);
            assertTrue(res2.deferred);
            assertFalse(res2.sync);
            assertFalse(res3.deferred);
            assertTrue(res3.sync);
            assertNull(res1.getLogSequenceNumber());
            assertNull(res2.getLogSequenceNumber());
            assertNotNull(res3.getLogSequenceNumber());
        }
        try (BookkeeperCommitLog reader = logManager.createCommitLog(tableSpaceUUID, name, nodeid)) {
            List<Map.Entry<LogSequenceNumber, LogEntry>> list = new ArrayList<>();
            reader.recovery(LogSequenceNumber.START_OF_TIME, (lsn, entry) -> {
                if (entry.type != LogEntryType.NOOP) {
                    list.add(new AbstractMap.SimpleImmutableEntry<>(lsn, entry));
                }
            }, false);
            assertEquals(3, list.size());
            assertTrue(list.get(0).getKey().after(LogSequenceNumber.START_OF_TIME));
            assertTrue(list.get(1).getKey().after(list.get(0).getKey()));
            assertTrue(list.get(2).getKey().after(list.get(1).getKey()));
        }
    }
}
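The test exercises both write modes: deferred writes return with deferred == true and a null LogSequenceNumber, while the final synchronous write carries its position immediately. A deferred position can still be awaited without forcing a synchronous write, as in this sketch (assuming the logSequenceNumber future completes once BookKeeper acknowledges the entry):

// Hedged sketch: await the position of a deferred write.
CommitLogResult res = writer.log(LogEntryFactory.beginTransaction(4), false);
// block (with a timeout) until the deferred write is acknowledged
LogSequenceNumber lsn = res.logSequenceNumber.get(30, TimeUnit.SECONDS);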
Use of herddb.log.CommitLogResult in project herddb by diennea.
The class TableManager, method executeDeleteAsync.
private CompletableFuture<StatementExecutionResult> executeDeleteAsync(DeleteStatement delete, Transaction transaction, StatementEvaluationContext context) {
    AtomicInteger updateCount = new AtomicInteger();
    Holder<Bytes> lastKey = new Holder<>();
    Holder<Bytes> lastValue = new Holder<>();
    long transactionId = transaction != null ? transaction.transactionId : 0;
    Predicate predicate = delete.getPredicate();
    List<CompletableFuture<PendingLogEntryWork>> writes = new ArrayList<>();
    Map<String, AbstractIndexManager> indexes = tableSpaceManager.getIndexesOnTable(table.name);
    ScanStatement scan = new ScanStatement(table.tablespace, table, predicate);
    try {
        accessTableData(scan, context, new ScanResultOperation() {
            @Override
            public void accept(Record current, LockHandle lockHandle) throws StatementExecutionException, LogNotAvailableException, DataStorageManagerException {
                // ensure we are holding the write locks on every unique index
                List<UniqueIndexLockReference> uniqueIndexes = null;
                try {
                    if (indexes != null || childrenTables != null) {
                        DataAccessor dataAccessor = current.getDataAccessor(table);
                        if (childrenTables != null) {
                            for (Table childTable : childrenTables) {
                                executeForeignKeyConstraintsAsParentTable(childTable, dataAccessor, context, transaction, true);
                            }
                        }
                        if (indexes != null) {
                            for (AbstractIndexManager index : indexes.values()) {
                                if (index.isUnique()) {
                                    Bytes indexKey = RecordSerializer.serializeIndexKey(dataAccessor, index.getIndex(), index.getColumnNames());
                                    if (uniqueIndexes == null) {
                                        uniqueIndexes = new ArrayList<>(1);
                                    }
                                    UniqueIndexLockReference uniqueIndexLock = new UniqueIndexLockReference(index, indexKey);
                                    uniqueIndexes.add(uniqueIndexLock);
                                    LockHandle lockForIndex = lockForWrite(uniqueIndexLock.key, transaction, index.getIndexName(), index.getLockManager());
                                    if (transaction == null) {
                                        uniqueIndexLock.lockHandle = lockForIndex;
                                    }
                                }
                            }
                        }
                    }
                } catch (IllegalArgumentException | herddb.utils.IllegalDataAccessException | StatementExecutionException err) {
                    locksManager.releaseLock(lockHandle);
                    StatementExecutionException finalError;
                    if (!(err instanceof StatementExecutionException)) {
                        finalError = new StatementExecutionException(err.getMessage(), err);
                    } else {
                        finalError = (StatementExecutionException) err;
                    }
                    CompletableFuture<PendingLogEntryWork> res = Futures.exception(finalError);
                    if (uniqueIndexes != null) {
                        for (UniqueIndexLockReference lock : uniqueIndexes) {
                            res = releaseWriteLock(res, lockHandle, lock.indexManager.getLockManager());
                        }
                    }
                    writes.add(res);
                    return;
                }
                LogEntry entry = LogEntryFactory.delete(table, current.key, transaction);
                CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
                final List<UniqueIndexLockReference> _uniqueIndexes = uniqueIndexes;
                writes.add(pos.logSequenceNumber.thenApply(lsn -> new PendingLogEntryWork(entry, pos, lockHandle, _uniqueIndexes)));
                lastKey.value = current.key;
                lastValue.value = current.value;
                updateCount.incrementAndGet();
            }
        }, transaction, true, true);
    } catch (HerdDBInternalException err) {
        LOGGER.log(Level.SEVERE, "bad error during a delete", err);
        return Futures.exception(err);
    }
    if (writes.isEmpty()) {
        return CompletableFuture.completedFuture(new DMLStatementExecutionResult(transactionId, 0, null, null));
    }
    if (writes.size() == 1) {
        return writes.get(0).whenCompleteAsync((pending, error) -> {
            try {
                // apply any of the DML operations
                if (error == null) {
                    apply(pending.pos, pending.entry, false);
                }
            } finally {
                releaseMultiplePendingLogEntryWorks(writes);
            }
        }, tableSpaceManager.getCallbacksExecutor()).thenApply((pending) -> {
            return new DMLStatementExecutionResult(transactionId, updateCount.get(), lastKey.value, delete.isReturnValues() ? lastValue.value : null);
        });
    } else {
        return Futures.collect(writes).whenCompleteAsync((pendings, error) -> {
            try {
                // apply any of the DML operations
                if (error == null) {
                    for (PendingLogEntryWork pending : pendings) {
                        apply(pending.pos, pending.entry, false);
                    }
                }
            } finally {
                releaseMultiplePendingLogEntryWorks(writes);
            }
        }, tableSpaceManager.getCallbacksExecutor()).thenApply((pendings) -> {
            return new DMLStatementExecutionResult(transactionId, updateCount.get(), lastKey.value, delete.isReturnValues() ? lastValue.value : null);
        });
    }
}
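The method follows HerdDB's log-then-apply pattern: each delete is first written to the commit log (synchronously only in auto-commit mode, when entry.transactionId <= 0), and the mutation is applied to table data once its LogSequenceNumber is known, with locks released afterwards. Stripped of locking and unique-index handling, the pipeline for a single record reduces to roughly this illustrative sketch (names follow the snippet; this is not the actual TableManager code):

LogEntry entry = LogEntryFactory.delete(table, current.key, transaction);
// synchronous write only outside of a transaction
CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
// apply the mutation to table data once the log position is known
CompletableFuture<Void> done = pos.logSequenceNumber
        .thenAccept(lsn -> apply(pos, entry, false));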
Use of herddb.log.CommitLogResult in project herddb by diennea.
The class TableSpaceManager, method createAndWriteTableCheksum.
// this method returns a TableChecksum object containing the scan results
// (record count, table digest, digest type, next autoincrement value,
// table name, tablespace name, and the query used for the table scan)
public TableChecksum createAndWriteTableCheksum(TableSpaceManager tableSpaceManager, String tableSpaceName, String tableName, StatementEvaluationContext context) throws IOException, DataScannerException {
    CommitLogResult pos;
    boolean lockAcquired = false;
    if (context == null) {
        context = StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT();
    }
    long lockStamp = context.getTableSpaceLock();
    LOGGER.log(Level.INFO, "Create and write table {0} checksum in tablespace {1}", new Object[]{tableName, tableSpaceName});
    if (lockStamp == 0) {
        lockStamp = acquireWriteLock("checkDataConsistency_" + tableName);
        context.setTableSpaceLock(lockStamp);
        lockAcquired = true;
    }
    try {
        // check the tablespace before dereferencing it
        if (tableSpaceManager == null) {
            throw new TableSpaceDoesNotExistException(String.format("Tablespace %s does not exist.", tableSpaceName));
        }
        AbstractTableManager tablemanager = tableSpaceManager.getTableManager(tableName);
        if (tablemanager == null || tablemanager.getCreatedInTransaction() > 0) {
            throw new TableDoesNotExistException(String.format("Table %s does not exist.", tableName));
        }
        TableChecksum scanResult = TableDataChecksum.createChecksum(tableSpaceManager.getDbmanager(), null, tableSpaceManager, tableSpaceName, tableName);
        byte[] serialize = MAPPER.writeValueAsBytes(scanResult);
        Bytes value = Bytes.from_array(serialize);
        LogEntry entry = LogEntryFactory.dataConsistency(tableName, value);
        pos = log.log(entry, false);
        apply(pos, entry, false);
        return scanResult;
    } finally {
        if (lockAcquired) {
            releaseWriteLock(context.getTableSpaceLock(), "checkDataConsistency");
            context.setTableSpaceLock(0);
        }
    }
}
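A hedged usage sketch of the method above; passing a null context makes it build a default StatementEvaluationContext and acquire the tablespace write lock itself (the tablespace and table names are placeholders):

// Compute a table checksum and record it in the commit log as a
// dataConsistency entry, which is then applied to the tablespace.
TableChecksum checksum = tableSpaceManager.createAndWriteTableCheksum(
        tableSpaceManager, "tblspace1", "mytable", null);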