Usage of herddb.model.Record in the project herddb by diennea:
class TableManager, method accessRecord.
/**
 * Resolves a primary-key index entry to its {@link Record}, applying the given predicate
 * and honoring transaction-local changes (uncommitted updates/deletes) and row locking.
 *
 * @param entry            PK index entry: key -> id of the data page currently holding the record
 * @param predicate        optional filter; {@code null} means "match everything"
 * @param context          evaluation context for the predicate
 * @param transaction      current transaction, or {@code null} for auto-commit access
 * @param lastPageRead     per-scan single-page cache to avoid re-fetching the same page
 * @param primaryIndexSeek {@code true} when the key was located by a direct PK seek,
 *                         so the raw-PK pre-filter below can be skipped
 * @param forWrite         whether a write lock (vs read lock) is needed
 * @param acquireLock      whether to lock at all
 * @return the matching record, or {@code null} if deleted, filtered out, or vanished
 */
public Record accessRecord(Map.Entry<Bytes, Long> entry, Predicate predicate, StatementEvaluationContext context, Transaction transaction, LocalScanPageCache lastPageRead, boolean primaryIndexSeek, boolean forWrite, boolean acquireLock) {
Bytes key = entry.getKey();
// set to true only when the record matches: a matching record keeps its lock
// (inside a transaction) until commit/rollback; a non-match releases it in the finally
boolean keep_lock = false;
// if the transaction already held a lock on this key before this call, we must not
// release it in the finally block even on a non-match — it belongs to earlier work
boolean already_locked = transaction != null && transaction.lookupLock(table.name, key) != null;
LockHandle lock = acquireLock ? (forWrite ? lockForWrite(key, transaction) : lockForRead(key, transaction)) : null;
try {
if (transaction != null) {
if (transaction.recordDeleted(table.name, key)) {
// skip this record. inside current transaction it has been deleted
return null;
}
Record record = transaction.recordUpdated(table.name, key);
if (record != null) {
// use current transaction version of the record
if (predicate == null || predicate.evaluate(record, context)) {
keep_lock = true;
return record;
}
return null;
}
}
Long pageId = entry.getValue();
// pageId may be null if the record was deleted concurrently after the index was read
if (pageId != null) {
boolean pkFilterCompleteMatch = false;
if (!primaryIndexSeek && predicate != null) {
// cheap pre-filter on the raw PK bytes: may reject the record (FAILED) or even
// fully satisfy the predicate (FULL_CONDITION_VERIFIED) without loading the page
Predicate.PrimaryKeyMatchOutcome outcome = predicate.matchesRawPrimaryKey(key, context);
if (outcome == Predicate.PrimaryKeyMatchOutcome.FAILED) {
return null;
} else if (outcome == Predicate.PrimaryKeyMatchOutcome.FULL_CONDITION_VERIFIED) {
pkFilterCompleteMatch = true;
}
}
Record record = fetchRecord(key, pageId, lastPageRead);
// skip the full predicate evaluation if the PK pre-filter already verified it
if (record != null && (pkFilterCompleteMatch || predicate == null || predicate.evaluate(record, context))) {
keep_lock = true;
return record;
}
}
return null;
} finally {
// release the lock on the key if it did not match scan criteria
if (transaction == null) {
// auto-commit: locks never outlive this call
if (lock != null) {
if (forWrite) {
locksManager.releaseWriteLockForKey(key, lock);
} else {
locksManager.releaseReadLockForKey(key, lock);
}
}
} else if (!keep_lock && !already_locked) {
// transactional non-match on a lock we acquired here: release it early
transaction.releaseLockOnKey(table.name, key, locksManager);
}
}
}
Usage of herddb.model.Record in the project herddb by diennea:
class TableManager, method fetchRecord.
/**
 * Loads the record for {@code key} from the data page it is expected to live on,
 * retrying with the relocated page id if a checkpoint moved the record meanwhile.
 *
 * @param key                primary key of the record
 * @param pageId             page where the PK index last saw the record
 * @param localScanPageCache per-scan single-page cache
 * @return the record, or {@code null} if it was deleted concurrently
 * @throws DataStorageManagerException if the record keeps vanishing after several relocations
 */
private Record fetchRecord(Bytes key, Long pageId, LocalScanPageCache localScanPageCache) throws StatementExecutionException, DataStorageManagerException {
    int remainingRetries = 2;
    Long currentPageId = pageId;
    for (;;) {
        final DataPage page = fetchDataPage(currentPageId, localScanPageCache);
        if (page != null) {
            final Record found = page.get(key);
            if (found != null) {
                return found;
            }
        }
        // the record was not on the expected page: ask the PK index where it went
        final Long relocatedPageId = keyToPage.get(key);
        LOGGER.log(Level.SEVERE, table.name + " fetchRecord " + key + " failed," + "checkPointRunning:" + checkPointRunning + " pageId:" + currentPageId + " relocatedPageId:" + relocatedPageId);
        if (relocatedPageId == null) {
            // no mapping anymore: the record was deleted while we were accessing it
            LOGGER.log(Level.SEVERE, "table " + table.name + ", activePages " + pageSet.getActivePages() + ", record " + key + " deleted during data access");
            return null;
        }
        currentPageId = relocatedPageId;
        // give up after a few relocation chases: the store is inconsistent
        if (remainingRetries-- == 0) {
            throw new DataStorageManagerException("inconsistency! table " + table.name + " no record in memory for " + key + " page " + currentPageId + ", activePages " + pageSet.getActivePages() + " after many trials");
        }
    }
}
Usage of herddb.model.Record in the project herddb by diennea:
class FileDataStorageManager, method rawReadDataPage.
/**
 * Reads all records from a data-page file and verifies its trailing XXHash64 checksum.
 * File layout: vlong version (must be 1), vlong flags (must be 0), int record count,
 * then (key,value) byte-array pairs, then a long footer with the hash of everything before it.
 *
 * @param pageFile path of the page file on disk
 * @return the records stored in the page, in file order
 * @throws NoSuchFileException         if the page file does not exist
 * @throws DataStorageManagerException if the file is corrupted (bad header or bad hash)
 * @throws IOException                 on any other I/O failure
 */
public static List<Record> rawReadDataPage(Path pageFile) throws DataStorageManagerException, NoSuchFileException, IOException {
    final List<Record> records;
    final long storedHash;
    final long computedHash;
    try (InputStream input = Files.newInputStream(pageFile);
            BufferedInputStream buffer = new BufferedInputStream(input, 1024);
            XXHash64Utils.HashingStream hash = new XXHash64Utils.HashingStream(buffer);
            ExtendedDataInputStream dataIn = new ExtendedDataInputStream(hash)) {
        final long version = dataIn.readVLong(); // file format version
        final long flags = dataIn.readVLong();   // reserved for future implementations
        if (version != 1 || flags != 0) {
            throw new DataStorageManagerException("corrupted data file " + pageFile.toAbsolutePath());
        }
        final int numRecords = dataIn.readInt();
        records = new ArrayList<>(numRecords);
        for (int i = 0; i < numRecords; i++) {
            final byte[] key = dataIn.readArray();
            final byte[] value = dataIn.readArray();
            records.add(new Record(new Bytes(key), new Bytes(value)));
        }
        // snapshot the digest BEFORE reading the footer, so the footer itself is excluded
        computedHash = hash.hash();
        storedHash = dataIn.readLong();
    }
    if (computedHash != storedHash) {
        throw new DataStorageManagerException("Corrupted datafile " + pageFile + ". Bad hash " + storedHash + " <> " + computedHash);
    }
    return records;
}
Usage of herddb.model.Record in the project herddb by diennea:
class FileDataStorageManager, method readPage.
/**
 * Loads a data page for the given table, mapping low-level I/O failures to
 * storage-layer exceptions.
 *
 * @param tableSpace tablespace name
 * @param tableName  table name
 * @param pageId     id of the page to read
 * @return the records contained in the page
 * @throws DataPageDoesNotExistException if no file exists for the page
 * @throws DataStorageManagerException   on any other read error
 */
@Override
public List<Record> readPage(String tableSpace, String tableName, Long pageId) throws DataStorageManagerException, DataPageDoesNotExistException {
    final long startTs = System.currentTimeMillis();
    final Path pageFile = getPageFile(getTableDirectory(tableSpace, tableName), pageId);
    final List<Record> records;
    try {
        records = rawReadDataPage(pageFile);
    } catch (NoSuchFileException nsfe) {
        // missing file means the page was never written or already dropped
        throw new DataPageDoesNotExistException("No such page: " + tableSpace + "_" + tableName + "." + pageId, nsfe);
    } catch (IOException err) {
        throw new DataStorageManagerException("error reading data page: " + tableSpace + "_" + tableName + "." + pageId, err);
    }
    final long delta = System.currentTimeMillis() - startTs;
    LOGGER.log(Level.FINE, "readPage {0}.{1} {2} ms", new Object[] { tableSpace, tableName, delta + "" });
    return records;
}
Usage of herddb.model.Record in the project herddb by diennea:
class FileDataStorageManager, method writePage.
/**
 * Persists a data page to disk in the format understood by {@code rawReadDataPage}:
 * vlong version 1, vlong flags 0, int record count, (key,value) pairs, then an
 * XXHash64 footer covering everything before it. The file is fsynced before returning.
 *
 * @param tableSpace tablespace name
 * @param tableName  table name
 * @param pageId     id of the page being written
 * @param newPage    records to store in the page
 * @throws DataStorageManagerException on any I/O failure
 */
@Override
public void writePage(String tableSpace, String tableName, long pageId, Collection<Record> newPage) throws DataStorageManagerException {
    // synch on table is done by the TableManager
    final long startTs = System.currentTimeMillis();
    final Path tableDir = getTableDirectory(tableSpace, tableName);
    try {
        Files.createDirectories(tableDir);
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    final Path pageFile = getPageFile(tableDir, pageId);
    final long payloadSize;
    try (ManagedFile file = ManagedFile.open(pageFile, StandardOpenOption.WRITE, StandardOpenOption.CREATE);
            SimpleBufferedOutputStream buffer = new SimpleBufferedOutputStream(file.getOutputStream(), COPY_BUFFERS_SIZE);
            XXHash64Utils.HashingOutputStream oo = new XXHash64Utils.HashingOutputStream(buffer);
            ExtendedDataOutputStream dataOutput = new ExtendedDataOutputStream(oo)) {
        dataOutput.writeVLong(1); // file format version
        dataOutput.writeVLong(0); // flags, reserved for future implementations
        dataOutput.writeInt(newPage.size());
        for (Record record : newPage) {
            dataOutput.writeArray(record.key.data);
            dataOutput.writeArray(record.value.data);
        }
        // size BEFORE the footer, so the footer hash covers exactly payloadSize bytes
        payloadSize = oo.size();
        // footer
        dataOutput.writeLong(oo.hash());
        dataOutput.flush();
        // force data to durable storage before declaring the page written
        file.sync();
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    if (LOGGER.isLoggable(Level.FINER)) {
        final long now = System.currentTimeMillis();
        LOGGER.log(Level.FINER, "writePage {0} KBytes,{1} records, time {2} ms", new Object[] { (payloadSize / 1024) + "", newPage.size(), (now - startTs) + "" });
    }
}
Aggregations