Use of herddb.model.StatementEvaluationContext in project herddb by diennea.
The class RecordSetSuite, method testApplyProjectionNoSwap.
@Test
public void testApplyProjectionNoSwap() throws Exception {
    RecordSetFactory factory = buildRecordSetFactory(Integer.MAX_VALUE);
    Column[] columns = new Column[2];
    columns[0] = Column.column("s1", ColumnTypes.STRING);
    columns[1] = Column.column("n1", ColumnTypes.LONG);
    Set<String> expected_s2 = new HashSet<>();
    Set<Integer> expected_n2 = new HashSet<>();
    String[] fieldNames = Column.buildFieldNamesList(columns);
    try (MaterializedRecordSet rs = factory.createRecordSet(fieldNames, columns)) {
        for (int i = 0; i < 100; i++) {
            Map<String, Object> record = new HashMap<>();
            String s1 = "test_" + i;
            record.put("s1", s1);
            record.put("n1", i);
            expected_s2.add(s1);
            expected_n2.add(i);
            rs.add(new Tuple(record, fieldNames));
        }
        rs.writeFinished();
        Column[] columns_projected = new Column[2];
        columns_projected[0] = Column.column("n2", ColumnTypes.LONG);
        columns_projected[1] = Column.column("s2", ColumnTypes.STRING);
        String[] fieldNames_projected = new String[]{"n2", "s2"};
        rs.applyProjection(new Projection() {

            @Override
            public Column[] getColumns() {
                return columns_projected;
            }

            @Override
            public String[] getFieldNames() {
                return fieldNames_projected;
            }

            @Override
            public Tuple map(DataAccessor tuple, StatementEvaluationContext context) throws StatementExecutionException {
                Object[] projected_values = new Object[2];
                projected_values[0] = tuple.get("n1");
                projected_values[1] = tuple.get("s1");
                return new Tuple(fieldNames_projected, projected_values);
            }
        }, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT());
        for (DataAccessor t : rs) {
            expected_s2.remove((String) t.get("s2"));
            expected_n2.remove((Integer) t.get("n2"));
        }
        assertTrue(expected_s2.isEmpty());
        assertTrue(expected_n2.isEmpty());
        assertEquals("n2", rs.getColumns()[0].name);
        assertEquals("s2", rs.getColumns()[1].name);
    }
}
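The test exercises applyProjection by renaming and reordering two columns. The sketch below distills the same Projection contract into a hypothetical helper (renameColumn is not part of the herddb API, only an illustration of the three methods the interface requires, under the same imports as the test above):

// Hypothetical helper, not part of the herddb API: builds a Projection that
// exposes a single source column under a new name, using only the methods
// exercised by the test above.
static Projection renameColumn(Column source, String newName, int type) {
    Column[] columns = new Column[]{Column.column(newName, type)};
    String[] fieldNames = new String[]{newName};
    return new Projection() {

        @Override
        public Column[] getColumns() {
            return columns;
        }

        @Override
        public String[] getFieldNames() {
            return fieldNames;
        }

        @Override
        public Tuple map(DataAccessor tuple, StatementEvaluationContext context) throws StatementExecutionException {
            // read the value under the old name, emit it under the new one
            return new Tuple(fieldNames, new Object[]{tuple.get(source.name)});
        }
    };
}

For instance, rs.applyProjection(renameColumn(columns[0], "s2", ColumnTypes.STRING), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT()) would keep only s1, exposed as s2.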
Use of herddb.model.StatementEvaluationContext in project herddb by diennea.
The class ScanDuringCheckPointTest, method bigTableScan.
@Test
// @Ignore
public void bigTableScan() throws Exception {
    int testSize = 10000;
    String nodeId = "localhost";
    try (DBManager manager = new DBManager("localhost", new MemoryMetadataStorageManager(), new MemoryDataStorageManager(), new MemoryCommitLogManager(), null, null)) {
        // manager.setMaxLogicalPageSize(10);
        // manager.setMaxPagesUsedMemory(Long.MAX_VALUE);
        manager.start();
        CreateTableSpaceStatement st1 = new CreateTableSpaceStatement("tblspace1", Collections.singleton(nodeId), nodeId, 1, 0, 0);
        manager.executeStatement(st1, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        manager.waitForTablespace("tblspace1", 10000);
        String tableSpaceUUID = manager.getTableSpaceManager("tblspace1").getTableSpaceUUID();
        Table table = Table.builder().tablespace("tblspace1").name("t1").column("id", ColumnTypes.STRING).column("name", ColumnTypes.STRING).primaryKey("id").build();
        CreateTableStatement st2 = new CreateTableStatement(table);
        manager.executeStatement(st2, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        for (int i = 0; i < testSize / 2; i++) {
            InsertStatement insert = new InsertStatement(table.tablespace, table.name, RecordSerializer.makeRecord(table, "id", "k" + i, "name", RandomString.getInstance().nextString(50, new StringBuilder().append("testname").append(i).append("_")).toString()));
            assertEquals(1, manager.executeUpdate(insert, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION).getUpdateCount());
        }
        manager.checkpoint();
        for (int i = testSize / 2; i < testSize; i++) {
            InsertStatement insert = new InsertStatement(table.tablespace, table.name, RecordSerializer.makeRecord(table, "id", "k" + i, "name", RandomString.getInstance().nextString(50, new StringBuilder().append("testname").append(i).append("_")).toString()));
            assertEquals(1, manager.executeUpdate(insert, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION).getUpdateCount());
        }
        manager.checkpoint();
        assertTrue(manager.getDataStorageManager().getActualNumberOfPages(tableSpaceUUID, table.uuid) > 1);
        TableManagerStats stats = manager.getTableSpaceManager(table.tablespace).getTableManager(table.name).getStats();
        assertEquals(testSize, stats.getTablesize());
        assertTrue(testSize > 100);
        ExecutorService service = Executors.newFixedThreadPool(1);
        Runnable checkPointPerformer = new Runnable() {

            @Override
            public void run() {
                CountDownLatch checkpointDone = new CountDownLatch(1);
                Thread fakeCheckpointThread = new Thread(new Runnable() {

                    @Override
                    public void run() {
                        try {
                            manager.checkpoint();
                        } catch (Throwable t) {
                            t.printStackTrace();
                            fail();
                        }
                        checkpointDone.countDown();
                    }
                });
                fakeCheckpointThread.setDaemon(true);
                try {
                    fakeCheckpointThread.start();
                    checkpointDone.await();
                    fakeCheckpointThread.join();
                } catch (InterruptedException err) {
                    throw new RuntimeException(err);
                }
            }
        };
        AtomicInteger done = new AtomicInteger();
        Predicate slowPredicate = new Predicate() {

            int count = 0;

            @Override
            public boolean evaluate(Record record, StatementEvaluationContext context) throws StatementExecutionException {
                if (count++ % 10 == 0) {
                    // checkpoint will flush buffers, in the middle of the scan
                    try {
                        System.out.println("GO checkpoint !");
                        service.submit(checkPointPerformer).get();
                        done.incrementAndGet();
                    } catch (ExecutionException | InterruptedException err) {
                        throw new StatementExecutionException(err);
                    }
                }
                return true;
            }
        };
        try (DataScanner scan = manager.scan(new ScanStatement(table.tablespace, table, slowPredicate), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION)) {
            AtomicInteger count = new AtomicInteger();
            scan.forEach(tuple -> {
                count.incrementAndGet();
            });
            assertEquals(testSize, count.get());
        }
        assertEquals(testSize, stats.getTablesize());
        assertEquals(testSize / 10, done.get());
    }
}
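Stripped of the concurrent checkpointing, the scan pattern used above reduces to the following sketch: an accept-all Predicate evaluated under the default StatementEvaluationContext, with the DataScanner closed by try-with-resources. It assumes the same manager and table set up in the test:

// Minimal sketch of the scan pattern above, without the concurrent
// checkpoints; assumes the manager and table created in the test.
Predicate acceptAll = new Predicate() {

    @Override
    public boolean evaluate(Record record, StatementEvaluationContext context) throws StatementExecutionException {
        return true;
    }
};
try (DataScanner scan = manager.scan(new ScanStatement(table.tablespace, table, acceptAll),
        StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION)) {
    AtomicInteger count = new AtomicInteger();
    scan.forEach(tuple -> count.incrementAndGet());
    System.out.println("scanned " + count.get() + " records");
}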
Use of herddb.model.StatementEvaluationContext in project herddb by diennea.
The class ScanTest, method testTransaction.
@Test
public void testTransaction() throws Exception {
    for (int i = 0; i < 5; i++) {
        Map<String, Object> data = new HashMap<>();
        data.put("id", "key_" + i);
        data.put("number", i);
        Record record = RecordSerializer.toRecord(data, table);
        InsertStatement st = new InsertStatement(tableSpace, tableName, record);
        assertEquals(1, manager.executeUpdate(st, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION).getUpdateCount());
    }
    BeginTransactionStatement begin = new BeginTransactionStatement(tableSpace);
    long tx = ((TransactionResult) manager.executeStatement(begin, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION)).getTransactionId();
    for (int i = 5; i < 10; i++) {
        Map<String, Object> data = new HashMap<>();
        data.put("id", "key_" + i);
        data.put("number", i);
        Record record = RecordSerializer.toRecord(data, table);
        InsertStatement st = new InsertStatement(tableSpace, tableName, record);
        assertEquals(1, manager.executeUpdate(st, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), new TransactionContext(tx)).getUpdateCount());
    }
    {
        ScanStatement scan = new ScanStatement(tableSpace, table, new Predicate() {

            @Override
            public boolean evaluate(Record record, StatementEvaluationContext context) throws StatementExecutionException {
                return true;
            }
        });
        DataScanner scanner = manager.scan(scan, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), new TransactionContext(tx));
        List<?> result = scanner.consume();
        assertEquals(10, result.size());
    }
    for (int i = 0; i < 10; i++) {
        DeleteStatement st = new DeleteStatement(tableSpace, tableName, Bytes.from_string("key_" + i), null);
        assertEquals(1, manager.executeUpdate(st, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), new TransactionContext(tx)).getUpdateCount());
    }
    {
        ScanStatement scan = new ScanStatement(tableSpace, table, new Predicate() {

            @Override
            public boolean evaluate(Record record, StatementEvaluationContext context) throws StatementExecutionException {
                return true;
            }
        });
        DataScanner scanner = manager.scan(scan, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), new TransactionContext(tx));
        List<?> result = scanner.consume();
        assertEquals(0, result.size());
    }
}
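Note that the test scans inside the transaction (TransactionContext(tx)), so the five uncommitted inserts and the later deletes are visible to it. The test never closes the transaction; a short sketch of that final step, assuming herddb's CommitTransactionStatement (the counterpart of the BeginTransactionStatement used above):

// Assumption: CommitTransactionStatement(tableSpace, tx) is the commit
// counterpart of BeginTransactionStatement; only after this commit do the
// deletes above become visible to readers outside the transaction.
CommitTransactionStatement commit = new CommitTransactionStatement(tableSpace, tx);
manager.executeStatement(commit, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);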
Use of herddb.model.StatementEvaluationContext in project herddb by diennea.
The class TableManager, method scanWithStream.
private DataScanner scanWithStream(ScanStatement statement, StatementEvaluationContext context, Transaction transaction, boolean lockRequired, boolean forWrite) throws StatementExecutionException {
    final TupleComparator comparator = statement.getComparator();
    boolean sorted = comparator != null;
    boolean sortedByClusteredIndex = comparator != null && comparator.isOnlyPrimaryKeyAndAscending() && keyToPage.isSortedAscending();
    final Projection projection = statement.getProjection();
    final boolean applyProjectionDuringScan = projection != null && !sorted;
    ScanLimits limits = statement.getLimits();
    int maxRows = limits == null ? 0 : limits.computeMaxRows(context);
    int offset = limits == null ? 0 : limits.computeOffset(context);
    Stream<DataAccessor> result;
    Function<Record, DataAccessor> mapper = (Record record) -> {
        DataAccessor tuple;
        if (applyProjectionDuringScan) {
            tuple = projection.map(record.getDataAccessor(table), context);
        } else {
            tuple = record.getDataAccessor(table);
        }
        return tuple;
    };
    Stream<DataAccessor> fromTransactionSorted = streamTransactionData(transaction, statement.getPredicate(), context).map(mapper);
    if (comparator != null) {
        fromTransactionSorted = fromTransactionSorted.sorted(comparator);
    }
    Stream<DataAccessor> tableData = streamTableData(statement, context, transaction, lockRequired, forWrite).map(mapper);
    if (maxRows > 0) {
        if (sortedByClusteredIndex) {
            // already sorted from index
            tableData = tableData.limit(maxRows + offset);
            // already sorted if needed
            fromTransactionSorted = fromTransactionSorted.limit(maxRows + offset);
            // we need to re-sort
            result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
        } else if (sorted) {
            // need to sort
            tableData = tableData.sorted(comparator);
            tableData = tableData.limit(maxRows + offset);
            // already sorted if needed
            fromTransactionSorted = fromTransactionSorted.limit(maxRows + offset);
            // we need to re-sort
            result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
        } else {
            result = Stream.concat(fromTransactionSorted, tableData);
        }
    } else {
        if (sortedByClusteredIndex) {
            // already sorted from index
            tableData = tableData.sorted(comparator);
            // fromTransactionSorted is already sorted
            // we need to re-sort
            result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
        } else if (sorted) {
            // tableData already sorted from index
            // fromTransactionSorted is already sorted
            // we need to re-sort
            result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
        } else {
            // no need to sort
            result = Stream.concat(fromTransactionSorted, tableData);
        }
    }
    if (offset > 0) {
        result = result.skip(offset);
    }
    if (maxRows > 0) {
        result = result.limit(maxRows);
    }
    if (!applyProjectionDuringScan && projection != null) {
        result = result.map(r -> projection.map(r, context));
    }
    String[] fieldNames;
    Column[] columns;
    if (projection != null) {
        fieldNames = projection.getFieldNames();
        columns = projection.getColumns();
    } else {
        fieldNames = table.columnNames;
        columns = table.columns;
    }
    return new StreamDataScanner(transaction != null ? transaction.transactionId : 0, fieldNames, columns, result);
}
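The core of scanWithStream is that two individually sorted, limited streams cannot simply be concatenated: Stream.concat preserves encounter order, not sort order, so the merged stream must be re-sorted before the OFFSET and LIMIT are applied. The self-contained sketch below (plain java.util.stream, not herddb code) shows the same shape with integers standing in for records:

import java.util.Comparator;
import java.util.stream.Stream;

public class MergeSketch {

    public static void main(String[] args) {
        int maxRows = 3;
        int offset = 1;
        // Two sources, each already sorted: one plays the transaction data,
        // the other the table pages. Limiting each to maxRows + offset keeps
        // them small without losing any candidate row.
        Stream<Integer> fromTransactionSorted = Stream.of(1, 4, 7).limit(maxRows + offset);
        Stream<Integer> tableData = Stream.of(2, 3, 9).limit(maxRows + offset);
        // concat does not interleave, so re-sort the merged stream,
        // then apply OFFSET and LIMIT on the globally ordered result.
        Stream.concat(fromTransactionSorted, tableData)
                .sorted(Comparator.naturalOrder())
                .skip(offset)
                .limit(maxRows)
                .forEach(System.out::println); // prints 2, 3, 4
    }
}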
Use of herddb.model.StatementEvaluationContext in project herddb by diennea.
The class TableManager, method streamTableData.
private Stream<Record> streamTableData(ScanStatement statement, StatementEvaluationContext context, Transaction transaction, boolean lockRequired, boolean forWrite) throws StatementExecutionException {
    statement.validateContext(context);
    Predicate predicate = statement.getPredicate();
    boolean acquireLock = transaction != null || forWrite || lockRequired;
    LocalScanPageCache lastPageRead = acquireLock ? null : new LocalScanPageCache();
    IndexOperation indexOperation = predicate != null ? predicate.getIndexOperation() : null;
    boolean primaryIndexSeek = indexOperation instanceof PrimaryIndexSeek;
    AbstractIndexManager useIndex = getIndexForTbleAccess(indexOperation);
    Stream<Map.Entry<Bytes, Long>> scanner = keyToPage.scanner(indexOperation, context, tableContext, useIndex);
    Stream<Record> resultFromTable = scanner.map(entry -> {
        return accessRecord(entry, predicate, context, transaction, lastPageRead, primaryIndexSeek, forWrite, acquireLock);
    }).filter(r -> r != null);
    return resultFromTable;
}
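streamTableData relies on accessRecord returning null for entries that must not surface (for instance, when the predicate rejects the record), and then drops those nulls with a filter. The standalone sketch below isolates that map-then-filter pattern (not herddb code; access is a made-up stand-in for accessRecord):

import java.util.Objects;
import java.util.stream.Stream;

public class FilterNullSketch {

    // Hypothetical stand-in for accessRecord: returns null when a record
    // should not surface, e.g. because the predicate rejected it.
    static String access(int key) {
        return (key % 2 == 0) ? "record-" + key : null;
    }

    public static void main(String[] args) {
        Stream.of(1, 2, 3, 4)
                .map(FilterNullSketch::access)
                .filter(Objects::nonNull) // same effect as filter(r -> r != null)
                .forEach(System.out::println); // prints record-2 and record-4
    }
}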