Use of herddb.storage.DataStorageManagerException in project herddb by diennea.
Class FileDataStorageManager, method truncateIndex.
@Override
public void truncateIndex(String tablespace, String name) throws DataStorageManagerException {
    Path tableDir = getIndexDirectory(tablespace, name);
    LOGGER.log(Level.INFO, "truncateIndex {0}.{1} in {2}", new Object[]{tablespace, name, tableDir});
    try {
        cleanDirectory(tableDir);
    } catch (IOException ex) {
        throw new DataStorageManagerException(ex);
    }
}
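The pattern above, wrapping the checked IOException into the storage-layer DataStorageManagerException, recurs in every method on this page. cleanDirectory itself is not shown; a minimal sketch of what such a helper could look like follows (the name cleanDirectoryContents and the keep-the-directory-itself behavior are assumptions for illustration, not herddb's actual implementation):

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;

// Hypothetical helper: remove every entry inside 'dir' but keep the
// directory itself, which is what truncating an index directory needs.
static void cleanDirectoryContents(Path dir) throws IOException {
    if (!Files.isDirectory(dir)) {
        return; // nothing to clean
    }
    List<Path> children = new ArrayList<>();
    try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
        stream.forEach(children::add);
    }
    for (Path child : children) {
        if (Files.isDirectory(child)) {
            cleanDirectoryContents(child); // empty the subdirectory first
        }
        Files.delete(child); // then delete the entry itself
    }
}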
Use of herddb.storage.DataStorageManagerException in project herddb by diennea.
Class FileDataStorageManager, method getLatestTableStatus.
@Override
public TableStatus getLatestTableStatus(String tableSpace, String tableName) throws DataStorageManagerException {
    try {
        Path lastFile = getLastTableCheckpointFile(tableSpace, tableName);
        TableStatus latestStatus;
        if (lastFile == null) {
            latestStatus = TableStatus.buildTableStatusForNewCreatedTable(tableName);
        } else {
            latestStatus = readTableStatusFromFile(lastFile);
        }
        return latestStatus;
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
}
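getLastTableCheckpointFile is not shown here; a plausible implementation scans the table directory and picks the checkpoint file with the highest sequence number, returning null for a freshly created table (which is why the method above falls back to buildTableStatusForNewCreatedTable). The sketch below assumes a hypothetical .checkpoint file suffix and names that sort in checkpoint order; herddb's real selection logic may differ:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;
import java.util.stream.Stream;

// Hypothetical sketch: pick the most recent checkpoint file in a table
// directory, assuming file names sort in checkpoint order (e.g. zero-padded
// sequence numbers). Returns null when no checkpoint exists yet.
static Path findLastCheckpointFile(Path tableDir) throws IOException {
    if (!Files.isDirectory(tableDir)) {
        return null;
    }
    try (Stream<Path> files = Files.list(tableDir)) {
        return files
                .filter(p -> p.getFileName().toString().endsWith(".checkpoint"))
                .max(Comparator.comparing(p -> p.getFileName().toString()))
                .orElse(null);
    }
}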
Use of herddb.storage.DataStorageManagerException in project herddb by diennea.
Class FileDataStorageManager, method readIndexPage.
private <X> X readIndexPage(DataReader<X> reader, Path pageFile, InputStream stream) throws IOException, DataStorageManagerException {
    int size = (int) Files.size(pageFile);
    byte[] dataPage = new byte[size];
    int read = stream.read(dataPage);
    if (read != size) {
        throw new IOException("short read, read " + read + " instead of " + size + " bytes from " + pageFile);
    }
    try (ByteArrayCursor dataIn = ByteArrayCursor.wrap(dataPage)) {
        /*
         * When writing with O_DIRECT this stream will be zero-padded at the end. That is not a
         * problem: the reader must handle its own stop condition without reading the file to the
         * end, because the file contains a hash after the data that must not be consumed by the
         * reader.
         */
        // version
        long version = dataIn.readVLong();
        // flags for future implementations
        long flags = dataIn.readVLong();
        if (version != 1 || flags != 0) {
            throw new DataStorageManagerException("corrupted data file " + pageFile.toAbsolutePath());
        }
        X result = reader.read(dataIn);
        int pos = dataIn.getPosition();
        long hashFromFile = dataIn.readLong();
        if (hashChecksEnabled && hashFromFile != NO_HASH_PRESENT) {
            // the hash is not at the end of the file but right after the data;
            // after the hash there may be zeroes or garbage (O_DIRECT padding)
            long hashFromDigest = XXHash64Utils.hash(dataPage, 0, pos);
            if (hashFromDigest != hashFromFile) {
                throw new DataStorageManagerException("Corrupted datafile " + pageFile + ". Bad hash " + hashFromFile + " <> " + hashFromDigest);
            }
        }
        return result;
    }
}
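Reading backwards from this method, the on-disk page layout is: a vlong version, a vlong of flags, the payload, then an 8-byte XXHash64 of every byte before it, and possibly zero padding left by O_DIRECT writes. The integrity check can be stated in isolation; this tiny verifier assumes the herddb.utils package for XXHash64Utils (its hash(byte[], int, int) signature appears in the snippet above) and that the stored hash was decoded as a plain long:

import herddb.utils.XXHash64Utils; // package assumed from the snippet's usage

// Layout implied by readIndexPage:
//   [vlong version][vlong flags][payload ...][long xxhash64][zero padding]
// The hash covers bytes [0, pos), where pos is the payload end offset.
static boolean verifyPageHash(byte[] page, int pos, long hashFromFile) {
    long hashFromDigest = XXHash64Utils.hash(page, 0, pos);
    return hashFromDigest == hashFromFile;
}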
Use of herddb.storage.DataStorageManagerException in project herddb by diennea.
Class FileDataStorageManager, method writeTransactionsAtCheckpoint.
@Override
public Collection<PostCheckpointAction> writeTransactionsAtCheckpoint(String tableSpace, LogSequenceNumber sequenceNumber, Collection<Transaction> transactions) throws DataStorageManagerException {
    if (sequenceNumber.isStartOfTime() && !transactions.isEmpty()) {
        throw new DataStorageManagerException("impossible to write a non empty transactions list at start-of-time");
    }
    Path tableSpaceDirectory = getTablespaceDirectory(tableSpace);
    try {
        Files.createDirectories(tableSpaceDirectory);
        Path checkPointFile = getTablespaceTransactionsFile(tableSpace, sequenceNumber);
        Path parent = getParent(checkPointFile);
        Files.createDirectories(parent);
        // write to a temporary file first, then atomically move it into place
        Path checkpointFileTemp = parent.resolve(checkPointFile.getFileName() + ".tmp");
        LOGGER.log(Level.FINE, "writeTransactionsAtCheckpoint for tableSpace {0} sequenceNumber {1} to {2}, active transactions {3}", new Object[]{tableSpace, sequenceNumber, checkPointFile.toAbsolutePath().toString(), transactions.size()});
        try (ManagedFile file = ManagedFile.open(checkpointFileTemp, requirefsync);
                SimpleBufferedOutputStream buffer = new SimpleBufferedOutputStream(file.getOutputStream(), COPY_BUFFERS_SIZE);
                ExtendedDataOutputStream dout = new ExtendedDataOutputStream(buffer)) {
            // version
            dout.writeVLong(1);
            // flags for future implementations
            dout.writeVLong(0);
            dout.writeUTF(tableSpace);
            dout.writeZLong(sequenceNumber.ledgerId);
            dout.writeZLong(sequenceNumber.offset);
            dout.writeInt(transactions.size());
            for (Transaction t : transactions) {
                t.serialize(dout);
            }
            dout.flush();
            file.sync();
        } catch (IOException err) {
            throw new DataStorageManagerException(err);
        }
        try {
            Files.move(checkpointFileTemp, checkPointFile, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
        } catch (IOException err) {
            throw new DataStorageManagerException(err);
        }
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    // schedule deletion of transactions files left over from older checkpoints
    Collection<PostCheckpointAction> result = new ArrayList<>();
    try (DirectoryStream<Path> stream = Files.newDirectoryStream(tableSpaceDirectory)) {
        for (Path p : stream) {
            if (isTransactionsFile(p)) {
                try {
                    LogSequenceNumber logPositionInFile = readLogSequenceNumberFromTransactionsFile(tableSpace, p);
                    if (sequenceNumber.after(logPositionInFile)) {
                        LOGGER.log(Level.FINEST, "transactions metadata file " + p.toAbsolutePath() + " will be deleted after checkpoint end");
                        result.add(new DeleteFileAction(tableSpace, "transactions", "delete transactions file " + p.toAbsolutePath(), p));
                    }
                } catch (DataStorageManagerException ignore) {
                    LOGGER.log(Level.SEVERE, "Unparsable transactions file " + p.toAbsolutePath(), ignore);
                    result.add(new DeleteFileAction(tableSpace, "transactions", "delete unparsable transactions file " + p.toAbsolutePath(), p));
                }
            }
        }
    } catch (IOException err) {
        LOGGER.log(Level.SEVERE, "Could not list dir " + tableSpaceDirectory, err);
    }
    return result;
}
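The durability recipe here is the classic one: stream the data into a .tmp sibling, flush and fsync, then atomically rename over the final name so that a crash can never expose a half-written checkpoint. Stripped of herddb's ManagedFile wrapper, the same recipe in plain java.nio looks like this (a generic sketch, not herddb code):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;

// Generic durable-write pattern: temp file + fsync + atomic rename.
static void writeAtomically(Path target, byte[] payload) throws IOException {
    Path tmp = target.resolveSibling(target.getFileName() + ".tmp");
    try (FileChannel ch = FileChannel.open(tmp,
            StandardOpenOption.CREATE, StandardOpenOption.WRITE,
            StandardOpenOption.TRUNCATE_EXISTING)) {
        ByteBuffer buf = ByteBuffer.wrap(payload);
        while (buf.hasRemaining()) {
            ch.write(buf);
        }
        ch.force(true); // fsync before the rename, as file.sync() does above
    }
    Files.move(tmp, target,
            StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
}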
Use of herddb.storage.DataStorageManagerException in project herddb by diennea.
Class ConcurrentMapKeyToPageIndex, method scanner.
@Override
public Stream<Map.Entry<Bytes, Long>> scanner(IndexOperation operation, StatementEvaluationContext context, TableContext tableContext, herddb.core.AbstractIndexManager index) throws DataStorageManagerException {
    if (operation instanceof PrimaryIndexSeek) {
        PrimaryIndexSeek seek = (PrimaryIndexSeek) operation;
        byte[] seekValue;
        try {
            seekValue = seek.value.computeNewValue(null, context, tableContext);
        } catch (InvalidNullValueForKeyException nullKey) {
            seekValue = null;
        }
        if (seekValue == null) {
            return Stream.empty();
        }
        Bytes key = Bytes.from_array(seekValue);
        Long pageId = map.get(key);
        if (pageId == null) {
            return Stream.empty();
        }
        return Stream.of(new AbstractMap.SimpleImmutableEntry<>(key, pageId));
    }
    // every predicate (WHEREs...) will always be evaluated anyway on every record, in order to guarantee correctness
    if (index != null) {
        return index.recordSetScanner(operation, context, tableContext, this);
    }
    if (operation == null) {
        // no operation: full scan over the whole key -> pageId map
        Stream<Map.Entry<Bytes, Long>> baseStream = map.entrySet().stream();
        return baseStream;
    } else if (operation instanceof PrimaryIndexPrefixScan) {
        PrimaryIndexPrefixScan scan = (PrimaryIndexPrefixScan) operation;
        byte[] prefix;
        try {
            prefix = scan.value.computeNewValue(null, context, tableContext);
        } catch (InvalidNullValueForKeyException err) {
            return Stream.empty();
        } catch (StatementExecutionException err) {
            throw new RuntimeException(err);
        }
        Predicate<Map.Entry<Bytes, Long>> predicate = (Map.Entry<Bytes, Long> t) -> {
            Bytes fullrecordKey = t.getKey();
            return fullrecordKey.startsWith(prefix.length, prefix);
        };
        Stream<Map.Entry<Bytes, Long>> baseStream = map.entrySet().stream();
        return baseStream.filter(predicate);
    } else if (operation instanceof PrimaryIndexRangeScan) {
        Bytes refminvalue;
        PrimaryIndexRangeScan sis = (PrimaryIndexRangeScan) operation;
        SQLRecordKeyFunction minKey = sis.minValue;
        if (minKey != null) {
            refminvalue = Bytes.from_nullable_array(minKey.computeNewValue(null, context, tableContext));
        } else {
            refminvalue = null;
        }
        Bytes refmaxvalue;
        SQLRecordKeyFunction maxKey = sis.maxValue;
        if (maxKey != null) {
            refmaxvalue = Bytes.from_nullable_array(maxKey.computeNewValue(null, context, tableContext));
        } else {
            refmaxvalue = null;
        }
        // build the range filter from whichever bounds are present
        Predicate<Map.Entry<Bytes, Long>> predicate;
        if (refminvalue != null && refmaxvalue == null) {
            predicate = (Map.Entry<Bytes, Long> entry) -> {
                Bytes datum = entry.getKey();
                return datum.compareTo(refminvalue) >= 0;
            };
        } else if (refminvalue == null && refmaxvalue != null) {
            predicate = (Map.Entry<Bytes, Long> entry) -> {
                Bytes datum = entry.getKey();
                return datum.compareTo(refmaxvalue) <= 0;
            };
        } else if (refminvalue != null && refmaxvalue != null) {
            predicate = (Map.Entry<Bytes, Long> entry) -> {
                Bytes datum = entry.getKey();
                return datum.compareTo(refmaxvalue) <= 0 && datum.compareTo(refminvalue) >= 0;
            };
        } else {
            predicate = (Map.Entry<Bytes, Long> entry) -> {
                return true;
            };
        }
        Stream<Map.Entry<Bytes, Long>> baseStream = map.entrySet().stream();
        return baseStream.filter(predicate);
    } else {
        throw new DataStorageManagerException("operation " + operation + " not implemented on " + this.getClass());
    }
}
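The cascade of null checks on refminvalue/refmaxvalue selects one of four range predicates. Functionally, the same filter can be composed incrementally with Predicate.and; a compact alternative sketch, assuming herddb.utils.Bytes as used above (a rewrite for illustration, not the project's code):

import herddb.utils.Bytes; // package assumed
import java.util.Map;
import java.util.function.Predicate;

// Compact equivalent of the four range branches above: each bound
// contributes a filter clause only when it is present.
static Predicate<Map.Entry<Bytes, Long>> rangePredicate(Bytes min, Bytes max) {
    Predicate<Map.Entry<Bytes, Long>> p = entry -> true;
    if (min != null) {
        p = p.and(entry -> entry.getKey().compareTo(min) >= 0);
    }
    if (max != null) {
        p = p.and(entry -> entry.getKey().compareTo(max) <= 0);
    }
    return p;
}

The explicit branching in the original trades this compactness for one predicate object and no chained calls per entry, a reasonable choice on a scan-heavy hot path.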