Example usage of herddb.model.Record from the herddb project (by diennea): the DataPage class, method put.
/**
 * Inserts (or replaces) a record in this mutable page, accounting for the page memory budget.
 *
 * @param record the record to store; replaces any previous record with the same key
 * @return {@code true} if the record fits in the page memory budget, {@code false} if the
 *         insertion was rolled back because it would exceed {@code maxSize}
 * @throws IllegalStateException if the page is immutable, or if the single record alone is
 *         bigger than any page could hold
 */
boolean put(Record record) {
    if (immutable) {
        throw new IllegalStateException("page " + pageId + " is immutable!");
    }
    final long newSize = estimateEntrySize(record);
    /*
     * Validate the record size BEFORE touching the map: the previous version inserted the
     * record first and then threw, leaving the oversized record stored in the page.
     */
    if (newSize > maxSize) {
        throw new IllegalStateException("record too big to fit in any page " + newSize + " / " + maxSize + " bytes");
    }
    final Record prev = data.put(record.key, record);
    /* When replacing an existing record only the size delta is charged to the page */
    final long diff = prev == null ? newSize : newSize - estimateEntrySize(prev);
    final long target = maxSize - diff;
    /*
     * Atomically add "diff" only if it doesn't push usage past maxSize. The accumulator must
     * be a pure function of its arguments, so use the lambda's own "change" parameter rather
     * than capturing diff.
     */
    final long old = usedMemory.getAndAccumulate(diff, (curr, change) -> curr > target ? curr : curr + change);
    if (old > target) {
        /* Memory budget exceeded: roll back the insertion.
         * NOTE(review): if prev != null the replaced record is lost too (original behavior
         * preserved) — confirm callers never rely on the old value surviving a failed put. */
        data.remove(record.key);
        return false;
    }
    return true;
}
Example usage of herddb.model.Record from the herddb project (by diennea): the TableManager class, method writeFromDump.
/**
 * Bulk-loads a batch of records received from a table dump into this table.
 * <p>
 * The checkpoint read lock is held for the whole batch so that no checkpoint can run while
 * the records are being applied.
 *
 * @param record the batch of records to insert
 * @throws DataStorageManagerException if applying any record fails at the storage layer
 */
public void writeFromDump(List<Record> record) throws DataStorageManagerException {
    LOGGER.log(Level.INFO, "{0} received {1} records", new Object[] { table.name, record.size() });
    checkpointLock.asReadLock().lock();
    try {
        final Iterator<Record> batch = record.iterator();
        while (batch.hasNext()) {
            final Record entry = batch.next();
            applyInsert(entry.key, entry.value, false);
        }
    } finally {
        checkpointLock.asReadLock().unlock();
    }
}
Example usage of herddb.model.Record from the herddb project (by diennea): the TableManager class, method checkForeignKeyConstraintsAsChildTable.
/**
 * Verifies a foreign key constraint from the child side: checks that the FK column values of
 * the row being written have a matching row in the parent table.
 *
 * @param fk          the foreign key definition to enforce
 * @param values      accessor over the child row's column values
 * @param context     statement evaluation context (currently unused here)
 * @param transaction current transaction, or null when running outside a transaction
 * @throws StatementExecutionException wrapping scan errors, or a
 *         {@link ForeignKeyViolationException} when no matching parent row exists
 */
private void checkForeignKeyConstraintsAsChildTable(ForeignKeyDef fk, DataAccessor values, StatementEvaluationContext context, Transaction transaction) throws StatementExecutionException {
// We are creating a SQL query and then using DBManager
// using an SQL query will let us leverage the SQL Planner
// and use the best index to perform the execution
// the SQL Planner will cache the plan, and the plan will also be
// invalidated consistently during DML operations.
// The query text itself is cached per-FK-name in childForeignKeyQueries.
String query = childForeignKeyQueries.computeIfAbsent(fk.name, (l -> {
Table parentTable = tableSpaceManager.getTableManagerByUUID(fk.parentTableId).getTable();
// with '*' we are not going to perform projections or copies
StringBuilder q = new StringBuilder("SELECT * FROM ");
q.append(delimit(parentTable.tablespace));
q.append(".");
q.append(delimit(parentTable.name));
q.append(" WHERE ");
// One "col=?" predicate per parent-side FK column, positional params match fk.columns order
for (int i = 0; i < fk.parentTableColumns.length; i++) {
if (i > 0) {
q.append(" AND ");
}
q.append(delimit(fk.parentTableColumns[i]));
q.append("=?");
}
return q.toString();
}));
// Collect the child-side values that will be bound to the query parameters
final List<Object> valuesToMatch = new ArrayList<>(fk.columns.length);
boolean allNulls = true;
for (int i = 0; i < fk.columns.length; i++) {
Object value = values.get(fk.columns[i]);
allNulls = allNulls && value == null;
valuesToMatch.add(value);
}
if (allNulls) {
// all of the values are null, so no check on the parent table
// NOTE(review): partially-null keys still run the query; a null bound to "=?" never
// matches, so such rows will be reported as violations — confirm this matches the
// intended SQL MATCH semantics for composite foreign keys.
return;
}
// Run the check inside the caller's transaction when there is one
TransactionContext tx = transaction != null ? new TransactionContext(transaction.transactionId) : TransactionContext.NO_TRANSACTION;
boolean fkOk;
try (DataScanner scan = tableSpaceManager.getDbmanager().executeSimpleQuery(tableSpaceManager.getTableSpaceName(), query, valuesToMatch, // only one record
1, // keep read locks in TransactionContext
true, tx, null)) {
// A single matching parent row is enough to satisfy the constraint
List<DataAccessor> resultSet = scan.consume();
fkOk = !resultSet.isEmpty();
} catch (DataScannerException err) {
throw new StatementExecutionException(err);
}
if (!fkOk) {
throw new ForeignKeyViolationException(fk.name, "foreignKey " + table.name + "." + fk.name + " violated");
}
}
Example usage of herddb.model.Record from the herddb project (by diennea): the TableManager class, method loadPageToMemory.
/**
 * Loads a data page into the in-memory page cache, reading it from storage if needed.
 * <p>
 * Uses {@code ConcurrentHashMap.computeIfAbsent} so that at most one thread performs the
 * actual disk read for a given page; other threads block and then reuse the loaded page.
 *
 * @param pageId   id of the page to load
 * @param recovery whether the load happens during recovery (currently only affects callers)
 * @return the loaded (or already cached) page, or {@code null} if the page no longer exists
 *         on disk
 * @throws DataStorageManagerException on any storage-layer failure
 */
private DataPage loadPageToMemory(Long pageId, boolean recovery) throws DataStorageManagerException {
DataPage result = pages.get(pageId);
if (result != null) {
// Fast path: page already cached, just notify the replacement policy
pageReplacementPolicy.pageHit(result);
return result;
}
long _start = System.currentTimeMillis();
long _ioAndLock = 0;
// Tracks whether THIS thread performed the load (vs. another concurrent loader)
BooleanHolder computed = new BooleanHolder(false);
try {
result = pages.computeIfAbsent(pageId, (id) -> {
try {
computed.value = true;
List<Record> page;
// Semaphore bounds the number of concurrent page reads from storage
maxCurrentPagesLoads.acquireUninterruptibly();
try {
page = dataStorageManager.readPage(tableSpaceUUID, table.uuid, pageId);
} finally {
maxCurrentPagesLoads.release();
}
loadedPagesCount.increment();
return buildImmutableDataPage(pageId, page);
} catch (DataStorageManagerException err) {
// computeIfAbsent cannot throw checked exceptions: wrap and unwrap below
throw new RuntimeException(err);
}
});
if (computed.value) {
_ioAndLock = System.currentTimeMillis();
// Register the freshly loaded page; the policy may ask us to evict another page
final Page.Metadata unload = pageReplacementPolicy.add(result);
if (unload != null) {
unload.owner.unload(unload.pageId);
}
}
} catch (RuntimeException error) {
// Unwrap the checked exception smuggled out of the computeIfAbsent lambda
if (error.getCause() != null) {
Throwable cause = error.getCause();
if (cause instanceof DataStorageManagerException) {
if (cause instanceof DataPageDoesNotExistException) {
// Page vanished (e.g. dropped concurrently): signal with null, not an error
return null;
}
throw (DataStorageManagerException) cause;
}
}
throw new DataStorageManagerException(error);
}
if (computed.value && LOGGER.isLoggable(Level.FINE)) {
long _stop = System.currentTimeMillis();
LOGGER.log(Level.FINE, "table {0}.{1}, loaded {2} records from page {3} in {4} ms, ({5} ms read + plock, {6} ms unlock)", new Object[] { table.tablespace, table.name, result.size(), pageId, (_stop - _start), (_ioAndLock - _start), (_stop - _ioAndLock) });
}
return result;
}
Example usage of herddb.model.Record from the herddb project (by diennea): the TableManager class, method flushNewPage.
/**
 * Removes the page from {@link #newPages}, marks it as no longer writable and writes it to
 * disk.
 * <p>
 * Before writing, steals as much data as possible from {@code spareDataPage} to fill up the
 * page; every stolen record's key-to-page pointer is updated accordingly.
 * </p>
 *
 * @param page          new (mutable) page to flush
 * @param spareDataPage old spare data to fit into the new page if possible, may be null
 * @return {@code ALREADY_FLUSHED} if a concurrent thread already flushed the page,
 *         {@code EMPTY_FLUSH} if the page was empty and simply dropped,
 *         {@code FLUSHED} otherwise
 */
private FlushNewPageResult flushNewPage(DataPage page, DataPage spareDataPage) {
if (page.immutable) {
LOGGER.log(Level.SEVERE, "Attempt to flush an immutable page {0} as it was mutable", page.pageId);
throw new IllegalStateException("page " + page.pageId + " is not a new page!");
}
/* Cheap check under the read lock first: bail out early if already flushed */
page.pageLock.readLock().lock();
try {
if (!page.writable) {
LOGGER.log(Level.INFO, "Mutable page {0} already flushed in a concurrent thread", page.pageId);
return FlushNewPageResult.ALREADY_FLUSHED;
}
} finally {
page.pageLock.readLock().unlock();
}
/*
 * We need to keep the page lock just to write the unloaded flag... after that write any other
 * thread that check the page will avoid writes (thus using page data is safe).
 */
final Lock lock = page.pageLock.writeLock();
lock.lock();
try {
/* Re-check under the write lock: another thread may have flushed between the locks */
if (!page.writable) {
LOGGER.log(Level.INFO, "Mutable page {0} already flushed in a concurrent thread", page.pageId);
return FlushNewPageResult.ALREADY_FLUSHED;
}
/*
 * If there isn't any data to flush we'll avoid flush at all and just remove the page. We avoid to
 * copy data from spareDataPage to an empty page because it would just be an additional roundtrip
 * for spare data with an additional PK write too (consider that you have just one empty page and
 * an half full spareDataPage: why copy all data to the first and update PK records when you can
 * just save spareDataPage directly?)
 */
boolean drop = page.isEmpty();
/* Set the new page as a fully active page */
if (!drop) {
pageSet.pageCreated(page.pageId, page);
}
/* Remove it from "new" pages */
DataPage remove = newPages.remove(page.pageId);
if (remove == null) {
/* Should be impossible: writable was true but the page is gone from newPages */
LOGGER.log(Level.SEVERE, "Detected concurrent flush of page {0}, writable: {1}", new Object[] { page.pageId, page.writable });
throw new IllegalStateException("page " + page.pageId + " is not a new page!");
}
/* From now on no other thread will write into this page */
page.writable = false;
if (drop) {
LOGGER.log(Level.INFO, "Deleted empty mutable page {0} instead of flushing it", page.pageId);
return FlushNewPageResult.EMPTY_FLUSH;
}
} finally {
lock.unlock();
}
// Try to steal records from another temporary page
if (spareDataPage != null) {
/* Save current memory size */
final long usedMemory = page.getUsedMemory();
final long buildingPageMemory = spareDataPage.getUsedMemory();
/* Flag to enable spare data addition to currently flushed page */
boolean add = true;
final Iterator<Record> records = spareDataPage.getRecordsForFlush().iterator();
/* Keep stealing until the page refuses a record (memory budget reached) */
while (add && records.hasNext()) {
Record record = records.next().nonShared();
add = page.put(record);
if (add) {
/* Repoint the key from the spare page to the page being flushed */
boolean moved = keyToPage.put(record.key, page.pageId, spareDataPage.pageId);
if (!moved) {
LOGGER.log(Level.SEVERE, "Detected a dirty page as spare data page while flushing new page. Flushing new page {0}. Spare data page {1}", new Object[] { page, spareDataPage });
throw new IllegalStateException("Expected a clean page for stealing records, got a dirty record " + record.key + ". Flushing new page " + page.pageId + ". Spare data page " + spareDataPage.pageId);
}
/* We remove "cleaned" record from the stealingDataPage */
records.remove();
}
}
long spareUsedMemory = page.getUsedMemory() - usedMemory;
/* Fix spare page data removing used memory */
spareDataPage.setUsedMemory(buildingPageMemory - spareUsedMemory);
}
LOGGER.log(Level.FINER, "flushNewPage table {0}, pageId={1} with {2} records, {3} logical page size", new Object[] { table.name, page.pageId, page.size(), page.getUsedMemory() });
dataStorageManager.writePage(tableSpaceUUID, table.uuid, page.pageId, page.getRecordsForFlush());
return FlushNewPageResult.FLUSHED;
}
Aggregations