Use of herddb.utils.ExtendedDataOutputStream in project herddb by diennea.
The class BookKeeperDataStorageManager, method indexCheckpoint.
@Override
public List<PostCheckpointAction> indexCheckpoint(String tableSpace, String indexName, IndexStatus indexStatus, boolean pin) throws DataStorageManagerException {
    String dir = getIndexDirectory(tableSpace, indexName);
    LogSequenceNumber logPosition = indexStatus.sequenceNumber;
    String checkpointFile = getCheckPointsFile(dir, logPosition);
    Stat stat = new Stat();
    byte[] exists = readZNode(checkpointFile, stat);
    if (exists != null) {
        IndexStatus actualStatus = readIndexStatusFromFile(exists, checkpointFile);
        if (actualStatus != null && actualStatus.equals(indexStatus)) {
            LOGGER.log(Level.INFO, "indexCheckpoint " + tableSpace + ", " + indexName + ": " + indexStatus + " already saved on " + checkpointFile);
            return Collections.emptyList();
        }
    }
    LOGGER.log(Level.FINE, "indexCheckpoint " + tableSpace + ", " + indexName + ": " + indexStatus + " to file " + checkpointFile);
    byte[] content;
    try (ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            XXHash64Utils.HashingOutputStream oo = new XXHash64Utils.HashingOutputStream(buffer);
            ExtendedDataOutputStream dataOutputKeys = new ExtendedDataOutputStream(oo)) {
        // version
        dataOutputKeys.writeVLong(1);
        // flags for future implementations
        dataOutputKeys.writeVLong(0);
        indexStatus.serialize(dataOutputKeys);
        dataOutputKeys.writeLong(oo.hash());
        dataOutputKeys.flush();
        content = buffer.toByteArray();
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    writeZNodeEnforceOwnership(tableSpace, checkpointFile, content, stat);
    /* Checkpoint pinning */
    final Map<Long, Integer> pins = pinIndexAndGetPages(tableSpace, indexName, indexStatus, pin);
    final Set<LogSequenceNumber> checkpoints = pinIndexAndGetCheckpoints(tableSpace, indexName, indexStatus, pin);
    long maxPageId = indexStatus.activePages.stream().max(Comparator.naturalOrder()).orElse(Long.MAX_VALUE);
    List<PostCheckpointAction> result = new ArrayList<>();
    PagesMapping tableSpacePagesMapping = getTableSpacePagesMapping(tableSpace).getIndexPagesMapping(indexName);
    // we can drop old page files now
    for (Map.Entry<Long, Long> pages : tableSpacePagesMapping.pages.entrySet()) {
        long pageId = pages.getKey();
        long ledgerId = pages.getValue();
        LOGGER.log(Level.FINEST, "checkpoint pageId {0} ledgerId {1}", new Object[]{pageId, ledgerId});
        if (pageId > 0 && !pins.containsKey(pageId) && !indexStatus.activePages.contains(pageId) && pageId < maxPageId) {
            LOGGER.log(Level.FINEST, "checkpoint ledger " + ledgerId + " pageId " + pageId + ". will be deleted after checkpoint end");
            result.add(new DropLedgerForIndexAction(tableSpace, indexName, "delete index page " + pageId + " ledgerId " + ledgerId, pageId, ledgerId));
        }
    }
    // we can drop orphan ledgers
    for (Long ledgerId : tableSpacePagesMapping.oldLedgers) {
        LOGGER.log(Level.FINEST, "checkpoint ledger " + ledgerId + " without page. will be deleted after checkpoint end");
        result.add(new DropLedgerForIndexAction(tableSpace, indexName, "delete unused ledgerId " + ledgerId, Long.MAX_VALUE, ledgerId));
    }
    List<String> children = zkGetChildren(dir);
    for (String p : children) {
        if (isTableOrIndexCheckpointsFile(p) && !p.equals(checkpointFile)) {
            IndexStatus status = readIndexStatusFromFile(p);
            if (logPosition.after(status.sequenceNumber) && !checkpoints.contains(status.sequenceNumber)) {
                LOGGER.log(Level.FINEST, "checkpoint metadata file " + p + ". will be deleted after checkpoint end");
                result.add(new DeleteZNodeAction(tableSpace, indexName, "delete checkpoint metadata file " + p, p));
            }
        }
    }
    return result;
}
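The blob written to the checkpoint znode above follows a small framed layout: a varint version, a varint flags word, the serialized IndexStatus, and a trailing XXHash64 of everything written before it. The sketch below restates that framing as a reusable helper; it is not part of HerdDB, the name frameCheckpointPayload is hypothetical, and it assumes ExtendedDataOutputStream exposes the usual OutputStream write(byte[]).
// Hypothetical helper, not HerdDB code: builds a blob with the same layout as indexCheckpoint.
static byte[] frameCheckpointPayload(byte[] serializedStatus) throws IOException {
    try (ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            XXHash64Utils.HashingOutputStream oo = new XXHash64Utils.HashingOutputStream(buffer);
            ExtendedDataOutputStream out = new ExtendedDataOutputStream(oo)) {
        out.writeVLong(1);           // format version, as in indexCheckpoint above
        out.writeVLong(0);           // flags, reserved for future implementations
        out.write(serializedStatus); // payload, e.g. what IndexStatus.serialize would emit (assumed OutputStream-style write)
        out.writeLong(oo.hash());    // trailing XXHash64 of everything written so far, lets readers detect corruption
        out.flush();
        return buffer.toByteArray();
    }
}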
Use of herddb.utils.ExtendedDataOutputStream in project herddb by diennea.
The class RecordSerializerTest, method varInt.
private static Bytes varInt(int i) throws Exception {
    VisibleByteArrayOutputStream res = new VisibleByteArrayOutputStream(1);
    ExtendedDataOutputStream oo = new ExtendedDataOutputStream(res);
    oo.writeVInt(i);
    return Bytes.from_array(res.toByteArrayNoCopy());
}
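A short demo of why such a helper is useful in tests: writeVInt emits fewer bytes for small values. This is a hypothetical snippet, not part of the HerdDB test suite, and it assumes the usual 7-bits-per-byte varint encoding with a continuation bit; it only prints sizes rather than asserting exact values.
// Hypothetical demo, not HerdDB test code: prints how many bytes writeVInt uses per value.
public static void main(String[] args) throws Exception {
    for (int value : new int[]{1, 127, 128, 16_384, Integer.MAX_VALUE}) {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        try (ExtendedDataOutputStream out = new ExtendedDataOutputStream(buffer)) {
            out.writeVInt(value);
        }
        System.out.println(value + " -> " + buffer.size() + " byte(s)");
    }
}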
Use of herddb.utils.ExtendedDataOutputStream in project herddb by diennea.
The class FileDataStorageManager, method writeTables.
@Override
public Collection<PostCheckpointAction> writeTables(String tableSpace, LogSequenceNumber sequenceNumber, List<Table> tables, List<Index> indexlist) throws DataStorageManagerException {
    if (sequenceNumber.isStartOfTime() && !tables.isEmpty()) {
        throw new DataStorageManagerException("impossible to write a non empty table list at start-of-time");
    }
    Path tableSpaceDirectory = getTablespaceDirectory(tableSpace);
    try {
        Files.createDirectories(tableSpaceDirectory);
        Path fileTables = getTablespaceTablesMetadataFile(tableSpace, sequenceNumber);
        Path fileIndexes = getTablespaceIndexesMetadataFile(tableSpace, sequenceNumber);
        Path parent = getParent(fileTables);
        Files.createDirectories(parent);
        LOGGER.log(Level.FINE, "writeTables for tableSpace " + tableSpace + " sequenceNumber " + sequenceNumber + " to " + fileTables.toAbsolutePath().toString());
        try (ManagedFile file = ManagedFile.open(fileTables);
                SimpleBufferedOutputStream buffer = new SimpleBufferedOutputStream(file.getOutputStream(), COPY_BUFFERS_SIZE);
                ExtendedDataOutputStream dout = new ExtendedDataOutputStream(buffer)) {
            // version
            dout.writeVLong(1);
            // flags for future implementations
            dout.writeVLong(0);
            dout.writeUTF(tableSpace);
            dout.writeZLong(sequenceNumber.ledgerId);
            dout.writeZLong(sequenceNumber.offset);
            dout.writeInt(tables.size());
            for (Table t : tables) {
                byte[] tableSerialized = t.serialize();
                dout.writeArray(tableSerialized);
            }
            dout.flush();
            file.sync();
        } catch (IOException err) {
            throw new DataStorageManagerException(err);
        }
        try (ManagedFile file = ManagedFile.open(fileIndexes);
                SimpleBufferedOutputStream buffer = new SimpleBufferedOutputStream(file.getOutputStream(), COPY_BUFFERS_SIZE);
                ExtendedDataOutputStream dout = new ExtendedDataOutputStream(buffer)) {
            // version
            dout.writeVLong(1);
            // flags for future implementations
            dout.writeVLong(0);
            dout.writeUTF(tableSpace);
            dout.writeZLong(sequenceNumber.ledgerId);
            dout.writeZLong(sequenceNumber.offset);
            if (indexlist != null) {
                dout.writeInt(indexlist.size());
                for (Index t : indexlist) {
                    byte[] indexSerialized = t.serialize();
                    dout.writeArray(indexSerialized);
                }
            } else {
                dout.writeInt(0);
            }
            dout.flush();
            file.sync();
        } catch (IOException err) {
            throw new DataStorageManagerException(err);
        }
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    Collection<PostCheckpointAction> result = new ArrayList<>();
    try (DirectoryStream<Path> stream = Files.newDirectoryStream(tableSpaceDirectory)) {
        for (Path p : stream) {
            if (isTablespaceIndexesMetadataFile(p)) {
                try {
                    LogSequenceNumber logPositionInFile = readLogSequenceNumberFromIndexMetadataFile(tableSpace, p);
                    if (sequenceNumber.after(logPositionInFile)) {
                        LOGGER.log(Level.FINEST, "indexes metadata file " + p.toAbsolutePath() + ". will be deleted after checkpoint end");
                        result.add(new DeleteFileAction("indexes", "delete indexesmetadata file " + p.toAbsolutePath(), p));
                    }
                } catch (DataStorageManagerException ignore) {
                    LOGGER.log(Level.SEVERE, "Unparsable indexesmetadata file " + p.toAbsolutePath(), ignore);
                    result.add(new DeleteFileAction("indexes", "delete unparsable indexesmetadata file " + p.toAbsolutePath(), p));
                }
            } else if (isTablespaceTablesMetadataFile(p)) {
                try {
                    LogSequenceNumber logPositionInFile = readLogSequenceNumberFromTablesMetadataFile(tableSpace, p);
                    if (sequenceNumber.after(logPositionInFile)) {
                        LOGGER.log(Level.FINEST, "tables metadata file " + p.toAbsolutePath() + ". will be deleted after checkpoint end");
                        result.add(new DeleteFileAction("tables", "delete tablesmetadata file " + p.toAbsolutePath(), p));
                    }
                } catch (DataStorageManagerException ignore) {
                    LOGGER.log(Level.SEVERE, "Unparsable tablesmetadata file " + p.toAbsolutePath(), ignore);
                    result.add(new DeleteFileAction("transactions", "delete unparsable tablesmetadata file " + p.toAbsolutePath(), p));
                }
            }
        }
    } catch (IOException err) {
        LOGGER.log(Level.SEVERE, "Could not list dir " + tableSpaceDirectory, err);
    }
    return result;
}
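The tables metadata file written above has a fixed header (varint version, varint flags, UTF tablespace name, zig-zag ledgerId and offset) followed by a count and one length-prefixed array per serialized Table. The reader sketch below is not HerdDB's actual loading code; it assumes the companion class herddb.utils.ExtendedDataInputStream mirrors the writer with readVLong/readUTF/readZLong/readInt/readArray, and it returns the raw serialized entries without decoding them.
// Hypothetical reader, not HerdDB code: walks the header and entries laid out by writeTables.
static List<byte[]> readSerializedTables(Path fileTables, String expectedTableSpace) throws IOException {
    try (InputStream in = Files.newInputStream(fileTables);
            ExtendedDataInputStream din = new ExtendedDataInputStream(new BufferedInputStream(in))) { // assumed mirror of the writer
        long version = din.readVLong(); // 1 in the writer above
        long flags = din.readVLong();   // reserved, 0
        String tableSpace = din.readUTF();
        long ledgerId = din.readZLong();
        long offset = din.readZLong();
        if (version != 1 || flags != 0 || !expectedTableSpace.equals(tableSpace)) {
            throw new IOException("unexpected metadata header in " + fileTables);
        }
        int numTables = din.readInt();
        List<byte[]> tables = new ArrayList<>(numTables);
        for (int i = 0; i < numTables; i++) {
            tables.add(din.readArray()); // each entry is a Table.serialize() payload
        }
        return tables;
    }
}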
Use of herddb.utils.ExtendedDataOutputStream in project herddb by diennea.
The class RecordSerializer, method serializePrimaryKey.
public static Bytes serializePrimaryKey(DataAccessor record, ColumnsList table, String[] columns) {
    ByteArrayOutputStream key = new ByteArrayOutputStream();
    String[] primaryKey = table.getPrimaryKey();
    if (primaryKey.length == 1) {
        String pkColumn = primaryKey[0];
        if (columns.length != 1 && !columns[0].equals(pkColumn)) {
            throw new IllegalArgumentException("SQLTranslator error, " + Arrays.toString(columns) + " != " + Arrays.asList(pkColumn));
        }
        Column c = table.getColumn(pkColumn);
        Object v = record.get(c.name);
        if (v == null) {
            throw new IllegalArgumentException("key field " + pkColumn + " cannot be null. Record data: " + record);
        }
        byte[] fieldValue = serialize(v, c.type);
        return new Bytes(fieldValue);
    } else {
        // beware that we can serialize even only a part of the PK, for instance for a prefix index scan
        try (ExtendedDataOutputStream doo_key = new ExtendedDataOutputStream(key)) {
            int i = 0;
            for (String pkColumn : columns) {
                if (!pkColumn.equals(primaryKey[i])) {
                    throw new IllegalArgumentException("SQLTranslator error, " + Arrays.toString(columns) + " != " + Arrays.asList(primaryKey));
                }
                Column c = table.getColumn(pkColumn);
                Object v = record.get(c.name);
                if (v == null) {
                    throw new IllegalArgumentException("key field " + pkColumn + " cannot be null. Record data: " + record);
                }
                byte[] fieldValue = serialize(v, c.type);
                doo_key.writeArray(fieldValue);
                i++;
            }
        } catch (IOException err) {
            throw new RuntimeException(err);
        }
        return new Bytes(key.toByteArray());
    }
}
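Note the asymmetry: a single-column key is stored as the raw encoded field, while a composite key writes each column with writeArray, so every field carries a length prefix and the boundaries stay recoverable. The demo below is hypothetical and self-contained; the two field encodings are illustrative placeholders, not what serialize(v, c.type) would actually produce.
// Hypothetical demo, not HerdDB code: shows the composite-key layout built above,
// i.e. one length-prefixed array per PK column, written with writeArray.
public static void main(String[] args) throws Exception {
    ByteArrayOutputStream key = new ByteArrayOutputStream();
    try (ExtendedDataOutputStream out = new ExtendedDataOutputStream(key)) {
        out.writeArray(Long.toString(1234L).getBytes(StandardCharsets.UTF_8)); // first PK column (illustrative encoding)
        out.writeArray("alice".getBytes(StandardCharsets.UTF_8));              // second PK column (illustrative encoding)
    }
    System.out.println("composite key is " + key.size() + " bytes");
}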
Use of herddb.utils.ExtendedDataOutputStream in project herddb by diennea.
The class FileDataStorageManager, method tableCheckpoint.
@Override
public List<PostCheckpointAction> tableCheckpoint(String tableSpace, String tableName, TableStatus tableStatus, boolean pin) throws DataStorageManagerException {
    LogSequenceNumber logPosition = tableStatus.sequenceNumber;
    Path dir = getTableDirectory(tableSpace, tableName);
    Path checkpointFile = getTableCheckPointsFile(dir, logPosition);
    try {
        if (Files.isRegularFile(checkpointFile)) {
            TableStatus actualStatus = readTableStatusFromFile(checkpointFile);
            if (actualStatus != null && actualStatus.equals(tableStatus)) {
                LOGGER.log(Level.FINE, "tableCheckpoint " + tableSpace + ", " + tableName + ": " + tableStatus + " (pin:" + pin + ") already saved on file " + checkpointFile);
                return Collections.emptyList();
            }
        }
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    Path parent = getParent(checkpointFile);
    Path checkpointFileTemp = parent.resolve(checkpointFile.getFileName() + ".tmp");
    LOGGER.log(Level.FINE, "tableCheckpoint " + tableSpace + ", " + tableName + ": " + tableStatus + " (pin:" + pin + ") to file " + checkpointFile);
    try (ManagedFile file = ManagedFile.open(checkpointFileTemp, requirefsync);
            SimpleBufferedOutputStream buffer = new SimpleBufferedOutputStream(file.getOutputStream(), COPY_BUFFERS_SIZE);
            XXHash64Utils.HashingOutputStream oo = new XXHash64Utils.HashingOutputStream(buffer);
            ExtendedDataOutputStream dataOutputKeys = new ExtendedDataOutputStream(oo)) {
        // version
        dataOutputKeys.writeVLong(1);
        // flags for future implementations
        dataOutputKeys.writeVLong(0);
        tableStatus.serialize(dataOutputKeys);
        dataOutputKeys.writeLong(oo.hash());
        dataOutputKeys.flush();
        file.sync();
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    try {
        Files.move(checkpointFileTemp, checkpointFile, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE);
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    /* Checkpoint pinning */
    final Map<Long, Integer> pins = pinTableAndGetPages(tableSpace, tableName, tableStatus, pin);
    final Set<LogSequenceNumber> checkpoints = pinTableAndGetCheckpoints(tableSpace, tableName, tableStatus, pin);
    long maxPageId = tableStatus.activePages.keySet().stream().max(Comparator.naturalOrder()).orElse(Long.MAX_VALUE);
    List<PostCheckpointAction> result = new ArrayList<>();
    // we can drop old page files now
    List<Path> pageFiles = getTablePageFiles(tableSpace, tableName);
    for (Path p : pageFiles) {
        long pageId = getPageId(p);
        LOGGER.log(Level.FINEST, "checkpoint file {0} pageId {1}", new Object[]{p.toAbsolutePath(), pageId});
        if (pageId > 0 && !pins.containsKey(pageId) && !tableStatus.activePages.containsKey(pageId) && pageId < maxPageId) {
            LOGGER.log(Level.FINEST, "checkpoint file " + p.toAbsolutePath() + " pageId " + pageId + ". will be deleted after checkpoint end");
            result.add(new DeleteFileAction(tableSpace, tableName, "delete page " + pageId + " file " + p.toAbsolutePath(), p));
        }
    }
    try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
        for (Path p : stream) {
            if (isTableOrIndexCheckpointsFile(p) && !p.equals(checkpointFile)) {
                TableStatus status = readTableStatusFromFile(p);
                if (logPosition.after(status.sequenceNumber) && !checkpoints.contains(status.sequenceNumber)) {
                    LOGGER.log(Level.FINEST, "checkpoint metadata file " + p.toAbsolutePath() + ". will be deleted after checkpoint end");
                    result.add(new DeleteFileAction(tableSpace, tableName, "delete checkpoint metadata file " + p.toAbsolutePath(), p));
                }
            }
        }
    } catch (IOException err) {
        LOGGER.log(Level.SEVERE, "Could not list table dir " + dir, err);
    }
    return result;
}
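The page-pruning loop above drops a page file only when it has a valid id, is not pinned by an older checkpoint, is no longer listed among the active pages of the new TableStatus, and is older than the newest known page. A minimal sketch of that eligibility rule using plain collections; isDroppablePage is a hypothetical name, restated here only for illustration.
// Hypothetical helper, not HerdDB code: the eligibility test applied by the pruning loop above.
static boolean isDroppablePage(long pageId, Map<Long, Integer> pins, Set<Long> activePages, long maxPageId) {
    return pageId > 0
            && !pins.containsKey(pageId)
            && !activePages.contains(pageId)
            && pageId < maxPageId;
}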