Use of herddb.storage.IndexStatus in project herddb by diennea.
Class MemoryHashIndexManager, method start().
@Override
public void start(LogSequenceNumber sequenceNumber) throws DataStorageManagerException {
    LOGGER.log(Level.SEVERE, "loading in memory all the keys for mem index {0}", new Object[] { index.name });
    bootSequenceNumber = sequenceNumber;
    if (LogSequenceNumber.START_OF_TIME.equals(sequenceNumber)) {
        /* Empty index (booting from the start) */
        LOGGER.log(Level.SEVERE, "loaded empty index {0}", new Object[] { index.name });
    } else {
        IndexStatus status;
        try {
            status = dataStorageManager.getIndexStatus(tableSpaceUUID, index.uuid, sequenceNumber);
        } catch (DataStorageManagerException e) {
            LOGGER.log(Level.SEVERE, "cannot load index {0} due to {1}, it will be rebuilt", new Object[] { index.name, e });
            rebuild();
            return;
        }
        for (long pageId : status.activePages) {
            LOGGER.log(Level.SEVERE, "recovery index " + index.name + ", load " + pageId);
            Map<Bytes, List<Bytes>> read = dataStorageManager.readIndexPage(tableSpaceUUID, index.uuid, pageId, in -> {
                Map<Bytes, List<Bytes>> deserialized = new HashMap<>();
                // version
                long version = in.readVLong();
                // flags for future implementations
                long flags = in.readVLong();
                if (version != 1 || flags != 0) {
                    throw new DataStorageManagerException("corrupted index page");
                }
                int size = in.readVInt();
                for (int i = 0; i < size; i++) {
                    byte[] indexKey = in.readArray();
                    int entrySize = in.readVInt();
                    List<Bytes> value = new ArrayList<>(entrySize);
                    for (int kk = 0; kk < entrySize; kk++) {
                        byte[] tableKey = in.readArray();
                        value.add(Bytes.from_array(tableKey));
                    }
                    deserialized.put(Bytes.from_array(indexKey), value);
                }
                return deserialized;
            });
            data.putAll(read);
        }
        newPageId.set(status.newPageId);
        LOGGER.log(Level.SEVERE, "loaded {0} keys for index {1}", new Object[] { data.size(), index.name });
    }
}
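The deserializer above is the read-side counterpart of MemoryHashIndexManager.checkpoint (shown further down): a page carries a version vlong, a flags vlong, the number of index keys, and then each key followed by its list of table keys. The following is a minimal, self-contained sketch of that same layout; it uses plain java.io streams with fixed-width integers and raw byte[] instead of HerdDB's ExtendedDataOutputStream/ExtendedDataInputStream and Bytes, so it illustrates only the structure, not the actual vint/vlong encoding.

import java.io.*;
import java.util.*;

public class IndexPageLayoutSketch {

    // Write a page: version, flags, key count, then (key, value count, values).
    // Raw byte[] is used for brevity; HerdDB wraps keys in Bytes so that
    // map lookups compare by content rather than by array identity.
    static byte[] writePage(Map<byte[], List<byte[]>> page) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bos)) {
            out.writeLong(1);          // version
            out.writeLong(0);          // flags, reserved for future implementations
            out.writeInt(page.size()); // number of index keys
            for (Map.Entry<byte[], List<byte[]>> entry : page.entrySet()) {
                out.writeInt(entry.getKey().length);
                out.write(entry.getKey());
                out.writeInt(entry.getValue().size());
                for (byte[] tableKey : entry.getValue()) {
                    out.writeInt(tableKey.length);
                    out.write(tableKey);
                }
            }
        }
        return bos.toByteArray();
    }

    // Read a page back, mirroring the lambda passed to readIndexPage above.
    static Map<byte[], List<byte[]>> readPage(byte[] data) throws IOException {
        Map<byte[], List<byte[]>> result = new HashMap<>();
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(data))) {
            long version = in.readLong();
            long flags = in.readLong();
            if (version != 1 || flags != 0) {
                throw new IOException("corrupted index page");
            }
            int size = in.readInt();
            for (int i = 0; i < size; i++) {
                byte[] indexKey = new byte[in.readInt()];
                in.readFully(indexKey);
                int entrySize = in.readInt();
                List<byte[]> value = new ArrayList<>(entrySize);
                for (int k = 0; k < entrySize; k++) {
                    byte[] tableKey = new byte[in.readInt()];
                    in.readFully(tableKey);
                    value.add(tableKey);
                }
                result.put(indexKey, value);
            }
        }
        return result;
    }
}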
Use of herddb.storage.IndexStatus in project herddb by diennea.
Class FileDataStorageManager, method indexCheckpoint().
@Override
public List<PostCheckpointAction> indexCheckpoint(String tableSpace, String indexName, IndexStatus indexStatus, boolean pin) throws DataStorageManagerException {
    Path dir = getIndexDirectory(tableSpace, indexName);
    LogSequenceNumber logPosition = indexStatus.sequenceNumber;
    Path checkpointFile = getTableCheckPointsFile(dir, logPosition);
    Path parent = getParent(checkpointFile);
    Path checkpointFileTemp = parent.resolve(checkpointFile.getFileName() + ".tmp");
    try {
        Files.createDirectories(dir);
        if (Files.isRegularFile(checkpointFile)) {
            IndexStatus actualStatus = readIndexStatusFromFile(checkpointFile);
            if (actualStatus != null && actualStatus.equals(indexStatus)) {
                LOGGER.log(Level.SEVERE, "indexCheckpoint " + tableSpace + ", " + indexName + ": " + indexStatus + " already saved on " + checkpointFile);
                return Collections.emptyList();
            }
        }
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    LOGGER.log(Level.FINE, "indexCheckpoint " + tableSpace + ", " + indexName + ": " + indexStatus + " to file " + checkpointFile);
    try (ManagedFile file = ManagedFile.open(checkpointFileTemp);
            SimpleBufferedOutputStream buffer = new SimpleBufferedOutputStream(file.getOutputStream(), COPY_BUFFERS_SIZE);
            XXHash64Utils.HashingOutputStream oo = new XXHash64Utils.HashingOutputStream(buffer);
            ExtendedDataOutputStream dataOutputKeys = new ExtendedDataOutputStream(oo)) {
        // version
        dataOutputKeys.writeVLong(1);
        // flags for future implementations
        dataOutputKeys.writeVLong(0);
        indexStatus.serialize(dataOutputKeys);
        dataOutputKeys.writeLong(oo.hash());
        dataOutputKeys.flush();
        file.sync();
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    try {
        Files.move(checkpointFileTemp, checkpointFile, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE);
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    /* Checkpoint pinning */
    final Map<Long, Integer> pins = pinIndexAndGetPages(tableSpace, indexName, indexStatus, pin);
    final Set<LogSequenceNumber> checkpoints = pinIndexAndGetCheckpoints(tableSpace, indexName, indexStatus, pin);
    long maxPageId = indexStatus.activePages.stream().max(Comparator.naturalOrder()).orElse(Long.MAX_VALUE);
    List<PostCheckpointAction> result = new ArrayList<>();
    // we can drop old page files now
    List<Path> pageFiles = getIndexPageFiles(tableSpace, indexName);
    for (Path p : pageFiles) {
        long pageId = getPageId(p);
        LOGGER.log(Level.FINEST, "checkpoint file {0} pageId {1}", new Object[] { p.toAbsolutePath(), pageId });
        if (pageId > 0 && !pins.containsKey(pageId) && !indexStatus.activePages.contains(pageId) && pageId < maxPageId) {
            LOGGER.log(Level.FINEST, "checkpoint file " + p.toAbsolutePath() + " pageId " + pageId + ". will be deleted after checkpoint end");
            result.add(new DeleteFileAction(indexName, "delete page " + pageId + " file " + p.toAbsolutePath(), p));
        }
    }
    try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
        for (Path p : stream) {
            if (isTableOrIndexCheckpointsFile(p) && !p.equals(checkpointFile)) {
                IndexStatus status = readIndexStatusFromFile(p);
                if (logPosition.after(status.sequenceNumber) && !checkpoints.contains(status.sequenceNumber)) {
                    LOGGER.log(Level.FINEST, "checkpoint metadata file " + p.toAbsolutePath() + ". will be deleted after checkpoint end");
                    result.add(new DeleteFileAction(indexName, "delete checkpoint metadata file " + p.toAbsolutePath(), p));
                }
            }
        }
    } catch (IOException err) {
        LOGGER.log(Level.SEVERE, "Could not list indexName dir " + dir, err);
    }
    return result;
}
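Note the durability pattern in the write path above: the checkpoint is first written to a .tmp file, flushed and fsynced, and only then published with an atomic rename, so a crash can never leave a partially written checkpoint visible under its final name. A minimal sketch of the same pattern with plain java.nio (the class and method names below are illustrative, not part of HerdDB):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;

public class AtomicCheckpointWriteSketch {

    // Write content to target so that readers only ever see either the previous
    // complete file or the new complete file, never a torn one.
    static void atomicWrite(Path target, byte[] content) throws IOException {
        Path temp = target.resolveSibling(target.getFileName() + ".tmp");
        try (FileChannel channel = FileChannel.open(temp,
                StandardOpenOption.CREATE, StandardOpenOption.WRITE,
                StandardOpenOption.TRUNCATE_EXISTING)) {
            ByteBuffer buffer = ByteBuffer.wrap(content);
            while (buffer.hasRemaining()) {
                channel.write(buffer);
            }
            channel.force(true); // fsync, like ManagedFile.sync() above
        }
        // Publish the new file in a single step, replacing any older checkpoint
        Files.move(temp, target,
                StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE);
    }
}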
Use of herddb.storage.IndexStatus in project herddb by diennea.
Class MemoryHashIndexManager, method checkpoint().
@Override
public List<PostCheckpointAction> checkpoint(LogSequenceNumber sequenceNumber, boolean pin) throws DataStorageManagerException {
    if (createdInTransaction > 0) {
        LOGGER.log(Level.SEVERE, "checkpoint for index " + index.name + " skipped, this index is created on transaction " + createdInTransaction + " which is not committed");
        return Collections.emptyList();
    }
    List<PostCheckpointAction> result = new ArrayList<>();
    LOGGER.log(Level.SEVERE, "flush index {0}", new Object[] { index.name });
    long pageId = newPageId.getAndIncrement();
    Holder<Long> count = new Holder<>();
    dataStorageManager.writeIndexPage(tableSpaceUUID, index.uuid, pageId, (out) -> {
        long entries = 0;
        // version
        out.writeVLong(1);
        // flags for future implementations
        out.writeVLong(0);
        out.writeVInt(data.size());
        for (Map.Entry<Bytes, List<Bytes>> entry : data.entrySet()) {
            out.writeArray(entry.getKey().data);
            List<Bytes> entrydata = entry.getValue();
            out.writeVInt(entrydata.size());
            for (Bytes v : entrydata) {
                out.writeArray(v.data);
                ++entries;
            }
        }
        count.value = entries;
    });
    IndexStatus indexStatus = new IndexStatus(index.name, sequenceNumber, newPageId.get(), Collections.singleton(pageId), null);
    result.addAll(dataStorageManager.indexCheckpoint(tableSpaceUUID, index.uuid, indexStatus, pin));
    LOGGER.log(Level.INFO, "checkpoint index {0} finished: logpos {1}, {2} entries, page {3}", new Object[] { index.name, sequenceNumber, Long.toString(count.value), Long.toString(pageId) });
    return result;
}
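checkpoint() and start() mirror each other: checkpoint() flushes the whole in-memory map into one freshly allocated page and records that page id, together with the next free page id, in an IndexStatus; start() later reloads every page listed in activePages and restores newPageId. The following is a simplified model of that handshake; the Status class and the in-memory 'disk' map are placeholders, not HerdDB's IndexStatus or DataStorageManager.

import java.util.*;
import java.util.concurrent.atomic.AtomicLong;

public class CheckpointHandshakeSketch {

    // Stand-in for the fields of IndexStatus that these two methods actually use.
    static final class Status {
        final Set<Long> activePages;
        final long newPageId;
        Status(Set<Long> activePages, long newPageId) {
            this.activePages = activePages;
            this.newPageId = newPageId;
        }
    }

    final Map<Long, Map<String, List<String>>> pages = new HashMap<>(); // fake page store
    final AtomicLong newPageId = new AtomicLong(1);
    Map<String, List<String>> data = new HashMap<>();

    // checkpoint(): dump the whole in-memory map into a single page and record it
    Status checkpoint() {
        long pageId = newPageId.getAndIncrement();
        pages.put(pageId, new HashMap<>(data));
        return new Status(Collections.singleton(pageId), newPageId.get());
    }

    // start(): reload every active page and restore the page id counter
    void start(Status status) {
        data = new HashMap<>();
        for (long pageId : status.activePages) {
            data.putAll(pages.get(pageId));
        }
        newPageId.set(status.newPageId);
    }
}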
Use of herddb.storage.IndexStatus in project herddb by diennea.
Class BLinkKeyToPageIndex, method checkpoint().
@Override
public List<PostCheckpointAction> checkpoint(LogSequenceNumber sequenceNumber, boolean pin) throws DataStorageManagerException {
    try {
        /* Tree can be null if no data was inserted (tree creation is deferred until the key size can be evaluated) */
        final BLink<Bytes, Long> tree = this.tree;
        if (tree == null) {
            return Collections.emptyList();
        }
        BLinkMetadata<Bytes> metadata = getTree().checkpoint();
        byte[] metaPage = MetadataSerializer.INSTANCE.write(metadata);
        Set<Long> activePages = new HashSet<>();
        metadata.nodes.forEach(node -> activePages.add(node.storeId));
        IndexStatus indexStatus = new IndexStatus(indexName, sequenceNumber, newPageId.get(), activePages, metaPage);
        List<PostCheckpointAction> result = new ArrayList<>();
        result.addAll(dataStorageManager.indexCheckpoint(tableSpace, indexName, indexStatus, pin));
        LOGGER.log(Level.INFO, "checkpoint index {0} finished: logpos {1}, {2} pages", new Object[] { indexName, sequenceNumber, Integer.toString(metadata.nodes.size()) });
        LOGGER.log(Level.FINE, "checkpoint index {0} finished: logpos {1}, pages {2}", new Object[] { indexName, sequenceNumber, activePages.toString() });
        return result;
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
}
Use of herddb.storage.IndexStatus in project herddb by diennea.
Class BLinkKeyToPageIndex, method start().
@Override
public void start(LogSequenceNumber sequenceNumber) throws DataStorageManagerException {
    LOGGER.log(Level.SEVERE, " start index {0}", new Object[] { indexName });
    /* Actually the same size */
    final long pageSize = memoryManager.getMaxLogicalPageSize();
    if (LogSequenceNumber.START_OF_TIME.equals(sequenceNumber)) {
        /* Empty index (booting from the start) */
        tree = new BLink<>(pageSize, SizeEvaluatorImpl.INSTANCE, memoryManager.getPKPageReplacementPolicy(), indexDataStorage);
        LOGGER.log(Level.SEVERE, "loaded empty index {0}", new Object[] { indexName });
    } else {
        IndexStatus status = dataStorageManager.getIndexStatus(tableSpace, indexName, sequenceNumber);
        try {
            BLinkMetadata<Bytes> metadata = MetadataSerializer.INSTANCE.read(status.indexData);
            tree = new BLink<>(pageSize, SizeEvaluatorImpl.INSTANCE, memoryManager.getPKPageReplacementPolicy(), indexDataStorage, metadata);
        } catch (IOException e) {
            throw new DataStorageManagerException(e);
        }
        newPageId.set(status.newPageId);
        LOGGER.log(Level.SEVERE, "loaded index {0}: {1} keys", new Object[] { indexName, tree.size() });
    }
}