Use of herddb.log.LogSequenceNumber in project herddb by diennea.
The class FileDataStorageManager, method indexCheckpoint:
@Override
public List<PostCheckpointAction> indexCheckpoint(String tableSpace, String indexName, IndexStatus indexStatus, boolean pin) throws DataStorageManagerException {
    Path dir = getIndexDirectory(tableSpace, indexName);
    LogSequenceNumber logPosition = indexStatus.sequenceNumber;
    Path checkpointFile = getTableCheckPointsFile(dir, logPosition);
    Path parent = getParent(checkpointFile);
    Path checkpointFileTemp = parent.resolve(checkpointFile.getFileName() + ".tmp");
    if (Files.isRegularFile(checkpointFile)) {
        IndexStatus actualStatus = readIndexStatusFromFile(checkpointFile);
        if (actualStatus != null && actualStatus.equals(indexStatus)) {
            LOGGER.log(Level.INFO, "indexCheckpoint " + tableSpace + ", " + indexName + ": " + indexStatus + " already saved on " + checkpointFile);
            return Collections.emptyList();
        }
    }
    LOGGER.log(Level.FINE, "indexCheckpoint " + tableSpace + ", " + indexName + ": " + indexStatus + " to file " + checkpointFile);
    try (ManagedFile file = ManagedFile.open(checkpointFileTemp, requirefsync);
            SimpleBufferedOutputStream buffer = new SimpleBufferedOutputStream(file.getOutputStream(), COPY_BUFFERS_SIZE);
            XXHash64Utils.HashingOutputStream oo = new XXHash64Utils.HashingOutputStream(buffer);
            ExtendedDataOutputStream dataOutputKeys = new ExtendedDataOutputStream(oo)) {
        // version
        dataOutputKeys.writeVLong(1);
        // flags for future implementations
        dataOutputKeys.writeVLong(0);
        indexStatus.serialize(dataOutputKeys);
        // trailing checksum over everything written so far
        dataOutputKeys.writeLong(oo.hash());
        dataOutputKeys.flush();
        file.sync();
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    try {
        // atomically publish the new checkpoint file over the final name
        Files.move(checkpointFileTemp, checkpointFile, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE);
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    /* Checkpoint pinning */
    final Map<Long, Integer> pins = pinIndexAndGetPages(tableSpace, indexName, indexStatus, pin);
    final Set<LogSequenceNumber> checkpoints = pinIndexAndGetCheckpoints(tableSpace, indexName, indexStatus, pin);
    long maxPageId = indexStatus.activePages.stream().max(Comparator.naturalOrder()).orElse(Long.MAX_VALUE);
    List<PostCheckpointAction> result = new ArrayList<>();
    // we can drop old page files now
    List<Path> pageFiles = getIndexPageFiles(tableSpace, indexName);
    for (Path p : pageFiles) {
        long pageId = getPageId(p);
        LOGGER.log(Level.FINEST, "checkpoint file {0} pageId {1}", new Object[]{p.toAbsolutePath(), pageId});
        if (pageId > 0 && !pins.containsKey(pageId) && !indexStatus.activePages.contains(pageId) && pageId < maxPageId) {
            LOGGER.log(Level.FINEST, "checkpoint file " + p.toAbsolutePath() + " pageId " + pageId + ". will be deleted after checkpoint end");
            result.add(new DeleteFileAction(tableSpace, indexName, "delete page " + pageId + " file " + p.toAbsolutePath(), p));
        }
    }
    try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
        for (Path p : stream) {
            if (isTableOrIndexCheckpointsFile(p) && !p.equals(checkpointFile)) {
                IndexStatus status = readIndexStatusFromFile(p);
                if (logPosition.after(status.sequenceNumber) && !checkpoints.contains(status.sequenceNumber)) {
                    LOGGER.log(Level.FINEST, "checkpoint metadata file " + p.toAbsolutePath() + ". will be deleted after checkpoint end");
                    result.add(new DeleteFileAction(tableSpace, indexName, "delete checkpoint metadata file " + p.toAbsolutePath(), p));
                }
            }
        }
    } catch (IOException err) {
        LOGGER.log(Level.SEVERE, "Could not list index directory " + dir, err);
    }
    return result;
}
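The crash-safety of this method comes from the write-to-temp-then-atomic-rename pattern: the status is fully written and fsynced to a ".tmp" sibling, then published with an ATOMIC_MOVE, so a reader sees either the old checkpoint file or the new one, never a partial write. Below is a minimal, JDK-only sketch of that pattern; the class and method names are illustrative, not HerdDB API.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;

public final class AtomicCheckpointWrite {

    // Write data to "<target>.tmp", force it to disk, then atomically
    // rename it over the target: readers observe either the old file or
    // the complete new one, never a truncated write.
    static void writeAtomically(Path target, byte[] data) throws IOException {
        Path tmp = target.resolveSibling(target.getFileName() + ".tmp");
        try (FileChannel ch = FileChannel.open(tmp, StandardOpenOption.CREATE,
                StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.WRITE)) {
            ch.write(ByteBuffer.wrap(data));
            ch.force(true); // plays the role of ManagedFile.sync() above
        }
        Files.move(tmp, target, StandardCopyOption.REPLACE_EXISTING,
                StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path target = Files.createTempDirectory("ckpt").resolve("1.0.checkpoint");
        writeAtomically(target, "demo".getBytes(StandardCharsets.UTF_8));
        System.out.println(Files.readString(target)); // prints "demo"
    }
}

The temp file is created next to its target because a rename within the same directory is atomic on POSIX filesystems; a rename across filesystems would silently degrade to copy-and-delete.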
Use of herddb.log.LogSequenceNumber in project herddb by diennea.
The class FileDataStorageManager, method readLogSequenceNumberFromTablesMetadataFile:
private static LogSequenceNumber readLogSequenceNumberFromTablesMetadataFile(String tableSpace, Path file) throws DataStorageManagerException {
    try (InputStream input = new BufferedInputStream(Files.newInputStream(file, StandardOpenOption.READ), 4 * 1024 * 1024);
            ExtendedDataInputStream din = new ExtendedDataInputStream(input)) {
        // version
        long version = din.readVLong();
        // flags for future implementations
        long flags = din.readVLong();
        if (version != 1 || flags != 0) {
            throw new DataStorageManagerException("corrupted table list file " + file.toAbsolutePath());
        }
        String readname = din.readUTF();
        if (!readname.equals(tableSpace)) {
            throw new DataStorageManagerException("file " + file.toAbsolutePath() + " is not for tablespace " + tableSpace);
        }
        long ledgerId = din.readZLong();
        long offset = din.readZLong();
        return new LogSequenceNumber(ledgerId, offset);
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
}
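The file ends with the two halves of a log sequence number, written as zig-zag varints. Both this reader and the checkpoint logic above rely on the natural (ledgerId, offset) ordering of these pairs. A simplified stand-in illustrating that ordering (the field and method names mirror the snippets, but this is not the actual herddb.log.LogSequenceNumber implementation):

public final class LsnDemo {

    // Simplified stand-in for herddb.log.LogSequenceNumber.
    static final class Lsn implements Comparable<Lsn> {
        final long ledgerId;
        final long offset;

        Lsn(long ledgerId, long offset) {
            this.ledgerId = ledgerId;
            this.offset = offset;
        }

        // "after" means strictly greater in (ledgerId, offset) order
        boolean after(Lsn other) {
            return compareTo(other) > 0;
        }

        @Override
        public int compareTo(Lsn o) {
            int byLedger = Long.compare(ledgerId, o.ledgerId);
            return byLedger != 0 ? byLedger : Long.compare(offset, o.offset);
        }
    }

    public static void main(String[] args) {
        System.out.println(new Lsn(2, 0).after(new Lsn(1, 999))); // true: newer ledger wins
        System.out.println(new Lsn(1, 5).after(new Lsn(1, 9)));   // false: same ledger, lower offset
    }
}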
Use of herddb.log.LogSequenceNumber in project herddb by diennea.
The class MemoryDataStorageManager, method indexCheckpoint:
@Override
public List<PostCheckpointAction> indexCheckpoint(String tableSpace, String indexName, IndexStatus indexStatus, boolean pin) throws DataStorageManagerException {
    /* Checkpoint pinning */
    final Map<Long, Integer> pins = pinIndexAndGetPages(tableSpace, indexName, indexStatus, pin);
    final Set<LogSequenceNumber> checkpoints = pinIndexAndGetCheckpoints(tableSpace, indexName, indexStatus, pin);
    List<Long> pagesForIndex = new ArrayList<>();
    String prefix = tableSpace + "." + indexName + "_";
    for (String key : indexpages.keySet()) {
        if (key.startsWith(prefix)) {
            long pageId = Long.parseLong(key.substring(prefix.length()));
            if (!pins.containsKey(pageId)) {
                pagesForIndex.add(pageId);
            }
        }
    }
    pagesForIndex.removeAll(indexStatus.activePages);
    List<PostCheckpointAction> result = new ArrayList<>();
    for (long pageId : pagesForIndex) {
        result.add(new PostCheckpointAction(tableSpace, indexName, "drop page " + pageId) {
            @Override
            public void run() {
                // remove only after the checkpoint has completed
                indexpages.remove(prefix + pageId);
            }
        });
    }
    for (String oldStatus : indexStatuses.keySet()) {
        if (oldStatus.startsWith(prefix)) {
            /* Check for checkpoint skip only if the key matches the expected structure */
            // the sequence number is parsed from the status key itself
            final LogSequenceNumber log = evaluateLogSequenceNumber(oldStatus);
            if (log != null) {
                /* If it is pinned, skip this status */
                if (checkpoints.contains(log)) {
                    continue;
                }
            }
            result.add(new PostCheckpointAction(tableSpace, indexName, "drop index checkpoint " + oldStatus) {
                @Override
                public void run() {
                    // remove only after the checkpoint has completed
                    indexStatuses.remove(oldStatus);
                }
            });
        }
    }
    VisibleByteArrayOutputStream oo = new VisibleByteArrayOutputStream(1024);
    try (ExtendedDataOutputStream dataOutputKeys = new ExtendedDataOutputStream(oo)) {
        indexStatus.serialize(dataOutputKeys);
        dataOutputKeys.flush();
        // append the checksum of the serialized payload
        oo.write(oo.xxhash64());
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    /* Use a copy so the stored byte[] is exactly the size needed */
    indexStatuses.put(checkpointName(tableSpace, indexName, indexStatus.sequenceNumber), oo.toByteArray());
    return result;
}
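Nothing destructive happens inside the method itself: page drops and stale-status removals are wrapped in PostCheckpointAction objects that the caller runs only after the checkpoint has fully succeeded. A minimal sketch of this deferred-cleanup pattern, with hypothetical names:

import java.util.ArrayList;
import java.util.List;

public final class DeferredCleanupDemo {

    interface Action {
        void run();
    }

    public static void main(String[] args) {
        List<Action> postCheckpoint = new ArrayList<>();
        // while preparing the checkpoint, only *record* the destructive work
        postCheckpoint.add(() -> System.out.println("drop page 42"));
        postCheckpoint.add(() -> System.out.println("drop old checkpoint metadata"));
        // ... write and publish the new checkpoint here; if that fails,
        // the actions are simply discarded and nothing has been deleted
        postCheckpoint.forEach(Action::run); // run only after success
    }
}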
Use of herddb.log.LogSequenceNumber in project herddb by diennea.
The class MemoryDataStorageManager, method tableCheckpoint:
@Override
public List<PostCheckpointAction> tableCheckpoint(String tableSpace, String tableName, TableStatus tableStatus, boolean pin) throws DataStorageManagerException {
    /* Checkpoint pinning */
    final Map<Long, Integer> pins = pinTableAndGetPages(tableSpace, tableName, tableStatus, pin);
    final Set<LogSequenceNumber> checkpoints = pinTableAndGetCheckpoints(tableSpace, tableName, tableStatus, pin);
    List<Long> pagesForTable = new ArrayList<>();
    String prefix = tableSpace + "." + tableName + "_";
    for (String key : pages.keySet()) {
        if (key.startsWith(prefix)) {
            long pageId = Long.parseLong(key.substring(prefix.length()));
            if (!pins.containsKey(pageId)) {
                pagesForTable.add(pageId);
            }
        }
    }
    pagesForTable.removeAll(tableStatus.activePages.keySet());
    List<PostCheckpointAction> result = new ArrayList<>();
    for (long pageId : pagesForTable) {
        result.add(new PostCheckpointAction(tableSpace, tableName, "drop page " + pageId) {
            @Override
            public void run() {
                // remove only after the checkpoint has completed
                pages.remove(prefix + pageId);
                LOGGER.log(Level.SEVERE, "removing " + (prefix + pageId));
            }
        });
    }
    for (String oldStatus : tableStatuses.keySet()) {
        if (oldStatus.startsWith(prefix)) {
            /* Check for checkpoint skip only if the key matches the expected structure */
            // the sequence number is parsed from the status key itself
            final LogSequenceNumber log = evaluateLogSequenceNumber(oldStatus);
            if (log != null) {
                /* If it is pinned, skip this status */
                if (checkpoints.contains(log)) {
                    continue;
                }
            }
            result.add(new PostCheckpointAction(tableSpace, tableName, "drop table checkpoint " + oldStatus) {
                @Override
                public void run() {
                    // remove only after the checkpoint has completed
                    tableStatuses.remove(oldStatus);
                }
            });
        }
    }
    VisibleByteArrayOutputStream oo = new VisibleByteArrayOutputStream(1024);
    try (ExtendedDataOutputStream dataOutputKeys = new ExtendedDataOutputStream(oo)) {
        tableStatus.serialize(dataOutputKeys);
        dataOutputKeys.flush();
        // append the checksum of the serialized payload
        oo.write(oo.xxhash64());
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    /* Use a copy so the stored byte[] is exactly the size needed */
    tableStatuses.put(checkpointName(tableSpace, tableName, tableStatus.sequenceNumber), oo.toByteArray());
    return result;
}
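Both in-memory checkpoint methods store the serialized status followed by its own XXHash64, so a reader can verify integrity before deserializing. A sketch of that framing using lz4-java's XXHash64 primitive (HerdDB's XXHash64Utils is internal; that it wraps this primitive, and the seed value 0, are assumptions here):

import java.nio.ByteBuffer;
import net.jpountz.xxhash.XXHash64;
import net.jpountz.xxhash.XXHashFactory;

public final class HashedPayloadDemo {

    private static final XXHash64 XXH = XXHashFactory.fastestInstance().hash64();
    private static final long SEED = 0L; // assumption: a fixed, well-known seed

    // payload || 8-byte hash, mirroring serialize() followed by write(xxhash64())
    static byte[] frame(byte[] payload) {
        long h = XXH.hash(payload, 0, payload.length, SEED);
        return ByteBuffer.allocate(payload.length + Long.BYTES)
                .put(payload).putLong(h).array();
    }

    static boolean verify(byte[] framed) {
        int payloadLen = framed.length - Long.BYTES;
        long stored = ByteBuffer.wrap(framed, payloadLen, Long.BYTES).getLong();
        return stored == XXH.hash(framed, 0, payloadLen, SEED);
    }

    public static void main(String[] args) {
        byte[] framed = frame("table-status".getBytes());
        System.out.println(verify(framed)); // true
        framed[3] ^= 1;                     // flip one payload bit
        System.out.println(verify(framed)); // false
    }
}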
Use of herddb.log.LogSequenceNumber in project herddb by diennea.
The class RoutedClientSideConnection, method requestReceived:
@Override
@SuppressFBWarnings(value = "SF_SWITCH_NO_DEFAULT")
@SuppressWarnings("empty-statement")
public void requestReceived(Pdu message, Channel channel) {
    try {
        switch (message.type) {
            case Pdu.TYPE_TABLESPACE_DUMP_DATA: {
                String dumpId = PduCodec.TablespaceDumpData.readDumpId(message);
                TableSpaceDumpReceiver receiver = dumpReceivers.get(dumpId);
                LOGGER.log(Level.FINE, "receiver for {0}: {1}", new Object[]{dumpId, receiver});
                if (receiver == null) {
                    if (channel != null) {
                        ByteBuf resp = PduCodec.ErrorResponse.write(message.messageId, "no such dump receiver " + dumpId);
                        channel.sendReplyMessage(message.messageId, resp);
                    }
                    return;
                }
                try {
                    String command = PduCodec.TablespaceDumpData.readCommand(message);
                    boolean sendAck = true;
                    switch (command) {
                        case "start": {
                            long ledgerId = PduCodec.TablespaceDumpData.readLedgerId(message);
                            long offset = PduCodec.TablespaceDumpData.readOffset(message);
                            receiver.start(new LogSequenceNumber(ledgerId, offset));
                            break;
                        }
                        case "beginTable": {
                            byte[] tableDefinition = PduCodec.TablespaceDumpData.readTableDefinition(message);
                            Table table = Table.deserialize(tableDefinition);
                            long estimatedSize = PduCodec.TablespaceDumpData.readEstimatedSize(message);
                            long dumpLedgerId = PduCodec.TablespaceDumpData.readLedgerId(message);
                            long dumpOffset = PduCodec.TablespaceDumpData.readOffset(message);
                            List<byte[]> indexesDef = PduCodec.TablespaceDumpData.readIndexesDefinition(message);
                            List<Index> indexes = indexesDef.stream().map(Index::deserialize).collect(Collectors.toList());
                            Map<String, Object> stats = new HashMap<>();
                            stats.put("estimatedSize", estimatedSize);
                            stats.put("dumpLedgerId", dumpLedgerId);
                            stats.put("dumpOffset", dumpOffset);
                            receiver.beginTable(new DumpedTableMetadata(table, new LogSequenceNumber(dumpLedgerId, dumpOffset), indexes), stats);
                            break;
                        }
                        case "endTable": {
                            receiver.endTable();
                            break;
                        }
                        case "finish": {
                            long ledgerId = PduCodec.TablespaceDumpData.readLedgerId(message);
                            long offset = PduCodec.TablespaceDumpData.readOffset(message);
                            receiver.finish(new LogSequenceNumber(ledgerId, offset));
                            sendAck = false;
                            break;
                        }
                        case "data": {
                            List<Record> records = new ArrayList<>();
                            PduCodec.TablespaceDumpData.readRecords(message, (key, value) -> {
                                records.add(new Record(Bytes.from_array(key), Bytes.from_array(value)));
                            });
                            receiver.receiveTableDataChunk(records);
                            break;
                        }
                        case "txlog": {
                            List<DumpedLogEntry> records = new ArrayList<>();
                            PduCodec.TablespaceDumpData.readRecords(message, (key, value) -> {
                                records.add(new DumpedLogEntry(LogSequenceNumber.deserialize(key), value));
                            });
                            receiver.receiveTransactionLogChunk(records);
                            break;
                        }
                        case "transactions": {
                            List<Transaction> transactions = new ArrayList<>();
                            PduCodec.TablespaceDumpData.readRecords(message, (key, value) -> {
                                transactions.add(Transaction.deserialize(null, value));
                            });
                            receiver.receiveTransactionsAtDump(transactions);
                            break;
                        }
                        default:
                            throw new DataStorageManagerException("invalid dump command: " + command);
                    }
                    if (channel != null && sendAck) {
                        ByteBuf res = PduCodec.AckResponse.write(message.messageId);
                        channel.sendReplyMessage(message.messageId, res);
                    }
                } catch (RuntimeException error) {
                    LOGGER.log(Level.SEVERE, "error while handling dump data", error);
                    receiver.onError(error);
                    if (channel != null) {
                        ByteBuf res = PduCodec.ErrorResponse.write(message.messageId, error);
                        channel.sendReplyMessage(message.messageId, res);
                    }
                }
            }
            break;
        }
    } finally {
        message.close();
    }
}
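Read together, the command cases define the lifecycle a dump receiver must implement. A hypothetical interface (not HerdDB's TableSpaceDumpReceiver, just its shape as driven by this dispatcher) that spells out the protocol:

// Callback order driven by requestReceived above:
//   start -> (beginTable -> data* -> endTable)* -> txlog*/transactions* -> finish
// Every command is acknowledged with an AckResponse except "finish",
// which ends the exchange (sendAck is set to false).
interface DumpLifecycle {
    void start(long ledgerId, long offset);                 // command "start"
    void beginTable(String tableName, long estimatedSize);  // command "beginTable"
    void dataChunk(byte[][] keys, byte[][] values);         // command "data"
    void endTable();                                        // command "endTable"
    void txLogChunk(byte[][] entries);                      // command "txlog"
    void finish(long ledgerId, long offset);                // command "finish" (no ack)
}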