Use of herddb.storage.DataStorageManagerException in project herddb by diennea.
Class FileDataStorageManager, method tableCheckpoint.
@Override
public List<PostCheckpointAction> tableCheckpoint(String tableSpace, String tableName, TableStatus tableStatus, boolean pin) throws DataStorageManagerException {
    LogSequenceNumber logPosition = tableStatus.sequenceNumber;
    Path dir = getTableDirectory(tableSpace, tableName);
    Path checkpointFile = getTableCheckPointsFile(dir, logPosition);
    try {
        if (Files.isRegularFile(checkpointFile)) {
            TableStatus actualStatus = readTableStatusFromFile(checkpointFile);
            if (actualStatus != null && actualStatus.equals(tableStatus)) {
                LOGGER.log(Level.FINE, "tableCheckpoint " + tableSpace + ", " + tableName + ": " + tableStatus + " (pin:" + pin + ") already saved on file " + checkpointFile);
                return Collections.emptyList();
            }
        }
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    Path parent = getParent(checkpointFile);
    Path checkpointFileTemp = parent.resolve(checkpointFile.getFileName() + ".tmp");
    LOGGER.log(Level.FINE, "tableCheckpoint " + tableSpace + ", " + tableName + ": " + tableStatus + " (pin:" + pin + ") to file " + checkpointFile);
    try (ManagedFile file = ManagedFile.open(checkpointFileTemp, requirefsync);
            SimpleBufferedOutputStream buffer = new SimpleBufferedOutputStream(file.getOutputStream(), COPY_BUFFERS_SIZE);
            XXHash64Utils.HashingOutputStream oo = new XXHash64Utils.HashingOutputStream(buffer);
            ExtendedDataOutputStream dataOutputKeys = new ExtendedDataOutputStream(oo)) {
        // version
        dataOutputKeys.writeVLong(1);
        // flags for future implementations
        dataOutputKeys.writeVLong(0);
        tableStatus.serialize(dataOutputKeys);
        dataOutputKeys.writeLong(oo.hash());
        dataOutputKeys.flush();
        file.sync();
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    try {
        Files.move(checkpointFileTemp, checkpointFile, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE);
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    /* Checkpoint pinning */
    final Map<Long, Integer> pins = pinTableAndGetPages(tableSpace, tableName, tableStatus, pin);
    final Set<LogSequenceNumber> checkpoints = pinTableAndGetCheckpoints(tableSpace, tableName, tableStatus, pin);
    long maxPageId = tableStatus.activePages.keySet().stream().max(Comparator.naturalOrder()).orElse(Long.MAX_VALUE);
    List<PostCheckpointAction> result = new ArrayList<>();
    // we can drop old page files now
    List<Path> pageFiles = getTablePageFiles(tableSpace, tableName);
    for (Path p : pageFiles) {
        long pageId = getPageId(p);
        LOGGER.log(Level.FINEST, "checkpoint file {0} pageId {1}", new Object[] { p.toAbsolutePath(), pageId });
        if (pageId > 0 && !pins.containsKey(pageId) && !tableStatus.activePages.containsKey(pageId) && pageId < maxPageId) {
            LOGGER.log(Level.FINEST, "checkpoint file " + p.toAbsolutePath() + " pageId " + pageId + ". will be deleted after checkpoint end");
            result.add(new DeleteFileAction(tableSpace, tableName, "delete page " + pageId + " file " + p.toAbsolutePath(), p));
        }
    }
    try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
        for (Path p : stream) {
            if (isTableOrIndexCheckpointsFile(p) && !p.equals(checkpointFile)) {
                TableStatus status = readTableStatusFromFile(p);
                if (logPosition.after(status.sequenceNumber) && !checkpoints.contains(status.sequenceNumber)) {
                    LOGGER.log(Level.FINEST, "checkpoint metadata file " + p.toAbsolutePath() + ". will be deleted after checkpoint end");
                    result.add(new DeleteFileAction(tableSpace, tableName, "delete checkpoint metadata file " + p.toAbsolutePath(), p));
                }
            }
        }
    } catch (IOException err) {
        LOGGER.log(Level.SEVERE, "Could not list table dir " + dir, err);
    }
    return result;
}
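The method writes the checkpoint metadata into a ".tmp" sibling file, fsyncs it, and only then publishes it with an atomic rename, so a crash can never leave a half-written checkpoint visible under the final name. A minimal sketch of that write-then-atomic-move pattern using only the JDK (the class, helper method, and payload here are illustrative, not HerdDB API):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public final class AtomicCheckpointWrite {

    /**
     * Write the payload to a sibling ".tmp" file first, then atomically move it
     * over the target: readers see either the old complete file or the new one.
     */
    static void writeAtomically(Path targetFile, byte[] payload) throws IOException {
        Path temp = targetFile.resolveSibling(targetFile.getFileName() + ".tmp");
        Files.write(temp, payload);
        Files.move(temp, targetFile, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path dir = Files.createTempDirectory("checkpoint-demo");
        Path checkpointFile = dir.resolve("demo.checkpoint");
        writeAtomically(checkpointFile, "status v1".getBytes(StandardCharsets.UTF_8));
        writeAtomically(checkpointFile, "status v2".getBytes(StandardCharsets.UTF_8));
        // prints "status v2": the second write replaced the file in one step
        System.out.println(new String(Files.readAllBytes(checkpointFile), StandardCharsets.UTF_8));
    }
}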
Use of herddb.storage.DataStorageManagerException in project herddb by diennea.
Class FileDataStorageManager, method initIndex.
@Override
public void initIndex(String tableSpace, String uuid) throws DataStorageManagerException {
    Path indexDir = getIndexDirectory(tableSpace, uuid);
    LOGGER.log(Level.FINE, "initIndex {0} {1} at {2}", new Object[] { tableSpace, uuid, indexDir });
    try {
        Files.createDirectories(indexDir);
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
}
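initIndex shows the pattern that recurs on this page: filesystem calls throw checked IOException, and the storage layer rewraps it as DataStorageManagerException so callers handle a single storage-specific type with the original failure kept as the cause. A minimal sketch of that wrapping, relying only on the DataStorageManagerException(Throwable) constructor used above (the helper class and directory layout are illustrative):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

import herddb.storage.DataStorageManagerException;

public final class WrapIoErrors {

    // Illustrative helper, not HerdDB API: create a directory tree and
    // translate any IOException into the storage layer's exception type.
    static void ensureDirectory(Path dir) throws DataStorageManagerException {
        try {
            Files.createDirectories(dir);
        } catch (IOException err) {
            // keep the original failure as the cause, exactly as initIndex does
            throw new DataStorageManagerException(err);
        }
    }

    public static void main(String[] args) throws DataStorageManagerException {
        Path demoDir = Paths.get(System.getProperty("java.io.tmpdir"), "herddb-demo", "indexes", "idx-0001");
        ensureDirectory(demoDir);
        System.out.println("created " + demoDir);
    }
}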
Use of herddb.storage.DataStorageManagerException in project herddb by diennea.
Class FileDataStorageManager, method rawReadDataPage.
public static List<Record> rawReadDataPage(Path pageFile) throws DataStorageManagerException, IOException {
    List<Record> result;
    long hashFromFile;
    long hashFromDigest;
    try (ODirectFileInputStream odirect = new ODirectFileInputStream(pageFile, O_DIRECT_BLOCK_BATCH);
            XXHash64Utils.HashingStream hash = new XXHash64Utils.HashingStream(odirect);
            ExtendedDataInputStream dataIn = new ExtendedDataInputStream(hash)) {
        // version
        long version = dataIn.readVLong();
        // flags for future implementations
        long flags = dataIn.readVLong();
        if (version != 1 || flags != 0) {
            throw new DataStorageManagerException("corrupted data file " + pageFile.toAbsolutePath());
        }
        int numRecords = dataIn.readInt();
        result = new ArrayList<>(numRecords);
        for (int i = 0; i < numRecords; i++) {
            Bytes key = dataIn.readBytes();
            Bytes value = dataIn.readBytes();
            result.add(new Record(key, value));
        }
        hashFromDigest = hash.hash();
        hashFromFile = dataIn.readLong();
    }
    if (hashFromFile != NO_HASH_PRESENT && hashFromDigest != hashFromFile) {
        throw new DataStorageManagerException("Corrupted datafile " + pageFile + ". Bad hash " + hashFromFile + " <> " + hashFromDigest);
    }
    return result;
}
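Because rawReadDataPage(Path) is public and static, it can be handy for inspecting a page file offline, for example from a small diagnostic tool. A hypothetical caller; the import location of FileDataStorageManager is assumed and the page path is a placeholder:

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;

import herddb.file.FileDataStorageManager;
import herddb.storage.DataStorageManagerException;

public final class InspectPage {

    public static void main(String[] args) throws IOException {
        // placeholder: point this at a page file produced by FileDataStorageManager
        Path pageFile = Paths.get(args.length > 0 ? args[0] : "some-page-file");
        try {
            List<?> records = FileDataStorageManager.rawReadDataPage(pageFile);
            System.out.println(pageFile + " contains " + records.size() + " records");
        } catch (DataStorageManagerException corrupted) {
            // raised for an unexpected version/flags combination or a hash mismatch
            System.err.println("page is unreadable or corrupted: " + corrupted.getMessage());
        }
    }
}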
Use of herddb.storage.DataStorageManagerException in project herddb by diennea.
Class FileDataStorageManager, method writeIndexPage.
@Override
public void writeIndexPage(String tableSpace, String indexName, long pageId, DataWriter writer) throws DataStorageManagerException {
    long _start = System.currentTimeMillis();
    Path tableDir = getIndexDirectory(tableSpace, indexName);
    Path pageFile = getPageFile(tableDir, pageId);
    long size;
    try {
        if (indexodirect) {
            try (ODirectFileOutputStream odirect = new ODirectFileOutputStream(pageFile, O_DIRECT_BLOCK_BATCH, StandardOpenOption.WRITE, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)) {
                size = writeIndexPage(writer, null, odirect);
            }
        } else {
            try (ManagedFile file = ManagedFile.open(pageFile, requirefsync, StandardOpenOption.WRITE, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
                    SimpleBufferedOutputStream buffer = new SimpleBufferedOutputStream(file.getOutputStream(), COPY_BUFFERS_SIZE)) {
                size = writeIndexPage(writer, file, buffer);
            }
        }
    } catch (IOException err) {
        LOGGER.log(Level.SEVERE, "Failed to write on path: {0}", pageFile);
        Path path = pageFile;
        boolean exists;
        do {
            exists = Files.exists(path);
            if (exists) {
                LOGGER.log(Level.INFO, "Path {0}: directory {1}, file {2}, link {3}, writable {4}, readable {5}, executable {6}", new Object[] { path, Files.isDirectory(path), Files.isRegularFile(path), Files.isSymbolicLink(path), Files.isWritable(path), Files.isReadable(path), Files.isExecutable(path) });
            } else {
                LOGGER.log(Level.INFO, "Path {0} doesn't exist", path);
            }
            path = path.getParent();
        } while (path != null && !exists);
        throw new DataStorageManagerException(err);
    }
    long now = System.currentTimeMillis();
    long delta = (now - _start);
    if (LOGGER.isLoggable(Level.FINER)) {
        LOGGER.log(Level.FINER, "writePage {0} KBytes, time {1} ms", new Object[] { (size / 1024) + "", delta + "" });
    }
    indexPageWrites.registerSuccessfulEvent(delta, TimeUnit.MILLISECONDS);
}
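When the write fails, the catch block walks up from the page file towards the filesystem root, logging each missing component and the attributes of the first ancestor that does exist, which usually pinpoints a missing directory or a permissions problem. The same diagnostic can be lifted into a standalone helper, sketched here with plain java.nio (the class name and sample path are illustrative):

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.logging.Level;
import java.util.logging.Logger;

public final class PathDiagnostics {

    private static final Logger LOGGER = Logger.getLogger(PathDiagnostics.class.getName());

    /**
     * Walk from the given path towards the root, logging every missing
     * component and the attributes of the first component that exists.
     */
    static void explainWriteFailure(Path start) {
        Path path = start;
        boolean exists;
        do {
            exists = Files.exists(path);
            if (exists) {
                LOGGER.log(Level.INFO, "Path {0}: directory {1}, file {2}, link {3}, writable {4}, readable {5}, executable {6}",
                        new Object[] { path, Files.isDirectory(path), Files.isRegularFile(path), Files.isSymbolicLink(path),
                                Files.isWritable(path), Files.isReadable(path), Files.isExecutable(path) });
            } else {
                LOGGER.log(Level.INFO, "Path {0} doesn't exist", path);
            }
            path = path.getParent();
        } while (path != null && !exists);
    }

    public static void main(String[] args) {
        // sample path: in practice, pass the file a failed write was targeting
        explainWriteFailure(Paths.get("/data/tablespaces/default/indexes/12.page"));
    }
}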
Use of herddb.storage.DataStorageManagerException in project herddb by diennea.
Class FileDataStorageManager, method readPage.
@Override
public List<Record> readPage(String tableSpace, String tableName, Long pageId) throws DataStorageManagerException {
    long _start = System.currentTimeMillis();
    Path tableDir = getTableDirectory(tableSpace, tableName);
    Path pageFile = getPageFile(tableDir, pageId);
    List<Record> result;
    try {
        if (pageodirect) {
            try (ODirectFileInputStream odirect = new ODirectFileInputStream(pageFile, O_DIRECT_BLOCK_BATCH)) {
                result = rawReadDataPage(pageFile, odirect);
            }
        } else {
            try (InputStream input = Files.newInputStream(pageFile);
                    BufferedInputStream buffer = new BufferedInputStream(input, COPY_BUFFERS_SIZE)) {
                result = rawReadDataPage(pageFile, buffer);
            }
        }
    } catch (NoSuchFileException nsfe) {
        throw new DataPageDoesNotExistException("No such page: " + tableSpace + "_" + tableName + "." + pageId, nsfe);
    } catch (IOException err) {
        throw new DataStorageManagerException("error reading data page: " + tableSpace + "_" + tableName + "." + pageId, err);
    }
    long _stop = System.currentTimeMillis();
    long delta = _stop - _start;
    LOGGER.log(Level.FINE, "readPage {0}.{1} {2} ms", new Object[] { tableSpace, tableName, delta + "" });
    dataPageReads.registerSuccessfulEvent(delta, TimeUnit.MILLISECONDS);
    return result;
}
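readPage distinguishes a missing page file (DataPageDoesNotExistException, raised from NoSuchFileException) from any other I/O failure (a plain DataStorageManagerException); since the method only declares DataStorageManagerException, the more specific type is presumably a subtype of it. A hypothetical caller handling the two cases separately; import locations other than herddb.storage.DataStorageManagerException are assumed:

import java.util.List;

import herddb.file.FileDataStorageManager;
import herddb.model.Record;
import herddb.storage.DataPageDoesNotExistException;
import herddb.storage.DataStorageManagerException;

public final class ReadPageCaller {

    /**
     * Illustrative helper: return null for a page that is no longer on disk
     * (for example already dropped by a post-checkpoint action) and let any
     * other storage failure propagate to the caller.
     */
    static List<Record> readOrNull(FileDataStorageManager manager, String tableSpace, String tableName, Long pageId)
            throws DataStorageManagerException {
        try {
            return manager.readPage(tableSpace, tableName, pageId);
        } catch (DataPageDoesNotExistException missing) {
            // the page file was not found on disk
            return null;
        }
    }
}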