Use of herddb.model.Record in the herddb project by Diennea.
From the class AbstractSystemTableManager, method scan:
@Override
public DataScanner scan(ScanStatement statement, StatementEvaluationContext context, Transaction transaction, boolean lockRequired, boolean forWrite) throws StatementExecutionException {
    // System tables are virtual: materialize the rows, filter them with the
    // statement predicate, then apply the usual scan post-processing
    // (sort, limits, projection) before handing back a scanner.
    final Predicate filter = statement.getPredicate();
    final MaterializedRecordSet recordSet = tableSpaceManager.getDbmanager().getRecordSetFactory().createRecordSet(table.columnNames, table.columns);
    final Iterable<Record> virtualRows = buildVirtualRecordList(transaction);
    StreamSupport.stream(virtualRows.spliterator(), false)
            .filter(row -> filter == null || filter.evaluate(row, context))
            // rows must always come out ordered by primary key
            .sorted(sortByPk)
            .map(row -> row.getDataAccessor(table))
            .forEach(recordSet::add);
    recordSet.writeFinished();
    recordSet.sort(statement.getComparator());
    recordSet.applyLimits(statement.getLimits(), context);
    recordSet.applyProjection(statement.getProjection(), context);
    return new SimpleDataScanner(transaction, recordSet);
}
Use of herddb.model.Record in the herddb project by Diennea.
From the class RoutedClientSideConnection, method requestReceived:
// Dispatches PDUs received from the server on this routed client-side connection.
// Only TYPE_TABLESPACE_DUMP_DATA is handled in this switch (hence the suppressed
// switch-without-default warning); the PDU is always released in the finally block.
@Override
@SuppressFBWarnings(value = "SF_SWITCH_NO_DEFAULT")
@SuppressWarnings("empty-statement")
public void requestReceived(Pdu message, Channel channel) {
try {
switch(message.type) {
case Pdu.TYPE_TABLESPACE_DUMP_DATA:
{
// Each dump stream is identified by a dumpId; look up the receiver
// that was registered for it on this connection.
String dumpId = PduCodec.TablespaceDumpData.readDumpId(message);
TableSpaceDumpReceiver receiver = dumpReceivers.get(dumpId);
LOGGER.log(Level.FINE, "receiver for {0}: {1}", new Object[] { dumpId, receiver });
if (receiver == null) {
// Unknown dump stream: report the error back to the server (if we
// still have a channel) and drop the message.
if (channel != null) {
ByteBuf resp = PduCodec.ErrorResponse.write(message.messageId, "no such dump receiver " + dumpId);
channel.sendReplyMessage(message.messageId, resp);
}
return;
}
try {
// The dump protocol is a sequence of commands; every command except
// "finish" is acknowledged so the server can keep streaming.
String command = PduCodec.TablespaceDumpData.readCommand(message);
boolean sendAck = true;
switch(command) {
case "start":
{
// Beginning of the dump: the log position at which the snapshot was taken.
long ledgerId = PduCodec.TablespaceDumpData.readLedgerId(message);
long offset = PduCodec.TablespaceDumpData.readOffset(message);
receiver.start(new LogSequenceNumber(ledgerId, offset));
break;
}
case "beginTable":
{
// Start of a new table in the dump: deserialize its schema and
// indexes, and pass size/position stats along to the receiver.
byte[] tableDefinition = PduCodec.TablespaceDumpData.readTableDefinition(message);
Table table = Table.deserialize(tableDefinition);
long estimatedSize = PduCodec.TablespaceDumpData.readEstimatedSize(message);
long dumpLedgerId = PduCodec.TablespaceDumpData.readLedgerId(message);
long dumpOffset = PduCodec.TablespaceDumpData.readOffset(message);
List<byte[]> indexesDef = PduCodec.TablespaceDumpData.readIndexesDefinition(message);
List<Index> indexes = indexesDef.stream().map(Index::deserialize).collect(Collectors.toList());
Map<String, Object> stats = new HashMap<>();
stats.put("estimatedSize", estimatedSize);
stats.put("dumpLedgerId", dumpLedgerId);
stats.put("dumpOffset", dumpOffset);
receiver.beginTable(new DumpedTableMetadata(table, new LogSequenceNumber(dumpLedgerId, dumpOffset), indexes), stats);
break;
}
case "endTable":
{
receiver.endTable();
break;
}
case "finish":
{
// End of the dump: the final log position. No ack is sent for this
// command — presumably the completion handshake happens elsewhere
// (NOTE(review): confirm against the server-side dump sender).
long ledgerId = PduCodec.TablespaceDumpData.readLedgerId(message);
long offset = PduCodec.TablespaceDumpData.readOffset(message);
receiver.finish(new LogSequenceNumber(ledgerId, offset));
sendAck = false;
break;
}
case "data":
{
// A chunk of table rows: each entry is a raw key/value pair.
List<Record> records = new ArrayList<>();
PduCodec.TablespaceDumpData.readRecords(message, (key, value) -> {
records.add(new Record(Bytes.from_array(key), Bytes.from_array(value)));
});
receiver.receiveTableDataChunk(records);
break;
}
case "txlog":
{
// A chunk of transaction-log entries: the key encodes the log position.
List<DumpedLogEntry> records = new ArrayList<>();
PduCodec.TablespaceDumpData.readRecords(message, (key, value) -> {
records.add(new DumpedLogEntry(LogSequenceNumber.deserialize(key), value));
});
receiver.receiveTransactionLogChunk(records);
break;
}
case "transactions":
{
// Transactions that were in flight at the time of the dump.
List<Transaction> transactions = new ArrayList<>();
PduCodec.TablespaceDumpData.readRecords(message, (key, value) -> {
transactions.add(Transaction.deserialize(null, value));
});
receiver.receiveTransactionsAtDump(transactions);
break;
}
default:
throw new DataStorageManagerException("invalid dump command:" + command);
}
if (channel != null && sendAck) {
ByteBuf res = PduCodec.AckResponse.write(message.messageId);
channel.sendReplyMessage(message.messageId, res);
}
} catch (RuntimeException error) {
// Notify the local receiver first, then report the failure to the server.
LOGGER.log(Level.SEVERE, "error while handling dump data", error);
receiver.onError(error);
if (channel != null) {
ByteBuf res = PduCodec.ErrorResponse.write(message.messageId, error);
channel.sendReplyMessage(message.messageId, res);
}
}
}
break;
}
} finally {
// Always release the PDU, whatever branch was taken above.
message.close();
}
}
Use of herddb.model.Record in the herddb project by Diennea.
From the class BookKeeperDataStorageManager, method rawReadDataPage:
/**
 * Deserializes the raw bytes of a data page into its list of records.
 * <p>
 * Page layout: vlong version (must be 1), vlong flags (must be 0), int record
 * count, then {@code count} key/value pairs, then a long XXHash64 of everything
 * that precedes it (the hash is placed right after the data, not at the end of
 * the buffer, so trailing bytes may be zeroes or garbage).
 *
 * @param dataPage the raw page contents
 * @return the decoded records, in page order
 * @throws IOException on read errors from the cursor
 * @throws DataStorageManagerException if the page is corrupted (bad
 *         version/flags, invalid record count, or hash mismatch)
 */
private static List<Record> rawReadDataPage(byte[] dataPage) throws IOException, DataStorageManagerException {
    try (ByteArrayCursor dataIn = ByteArrayCursor.wrap(dataPage)) {
        // version
        long version = dataIn.readVLong();
        // flags for future implementations
        long flags = dataIn.readVLong();
        if (version != 1 || flags != 0) {
            throw new DataStorageManagerException("corrupted data");
        }
        int numRecords = dataIn.readInt();
        if (numRecords < 0) {
            // the count comes from (possibly corrupted) storage: a negative value
            // would otherwise surface as an IllegalArgumentException from ArrayList
            // instead of the storage-level error callers expect
            throw new DataStorageManagerException("corrupted data, invalid record count " + numRecords);
        }
        List<Record> result = new ArrayList<>(numRecords);
        for (int i = 0; i < numRecords; i++) {
            Bytes key = dataIn.readBytesNoCopy();
            Bytes value = dataIn.readBytesNoCopy();
            result.add(new Record(key, value));
        }
        int pos = dataIn.getPosition();
        long hashFromFile = dataIn.readLong();
        // after the hash we will have zeroes or garbage
        // the hash is not at the end of file, but after data
        long hashFromDigest = XXHash64Utils.hash(dataPage, 0, pos);
        if (hashFromDigest != hashFromFile) {
            throw new DataStorageManagerException("Corrupted datafile. Bad hash " + hashFromFile + " <> " + hashFromDigest);
        }
        return result;
    }
}
Use of herddb.model.Record in the herddb project by Diennea.
From the class ServerSideConnectionPeer, method handlePushTableData:
// Receives a chunk of records pushed by a client during a table restore and
// writes it straight into the target table manager, replying with an ack on
// success or an error response on statement failure.
private void handlePushTableData(Pdu message, Channel channel) {
    try {
        final String tableSpace = PduCodec.PushTableData.readTablespace(message);
        final String table = PduCodec.PushTableData.readTablename(message);
        final long startTs = System.currentTimeMillis();
        final List<Record> chunk = new ArrayList<>();
        PduCodec.PushTableData.readRecords(message, (key, value) -> {
            chunk.add(new Record(Bytes.from_array(key), Bytes.from_array(value)));
        });
        LOGGER.log(Level.INFO, "Received {0} records for restore of table {1} in tableSpace {2}", new Object[] { chunk.size(), table, tableSpace });
        final TableManager tableManager = (TableManager) server.getManager().getTableSpaceManager(tableSpace).getTableManager(table);
        tableManager.writeFromDump(chunk);
        final long stopTs = System.currentTimeMillis();
        LOGGER.log(Level.INFO, "Time restore {0} records: data {1} ms", new Object[] { chunk.size(), stopTs - startTs });
        final ByteBuf ack = PduCodec.AckResponse.write(message.messageId);
        channel.sendReplyMessage(message.messageId, ack);
    } catch (StatementExecutionException err) {
        final ByteBuf error = composeErrorResponse(message.messageId, err);
        channel.sendReplyMessage(message.messageId, error);
    }
}
Use of herddb.model.Record in the herddb project by Diennea.
From the class ServerSideConnectionPeer, method executeUpdate:
/**
 * This method is like {@link #handleExecuteStatement(herddb.proto.Pdu, herddb.network.Channel) } but in "local" mode,
 * we do not want here to marshal/unmarshal values, in order to save resources.
 *
 * @param tablespace   target tablespace
 * @param query        SQL text to plan and execute
 * @param txId         transaction id (0 for auto-commit)
 * @param returnValues whether the new key/record must be returned for DML
 * @param parameters   query parameters (normalized to wire-level types)
 * @return the DML result (update count, optional new key/record, transaction id)
 * @throws HDBException wrapping the root cause of any planning or execution failure
 */
public DMLResult executeUpdate(String tablespace, String query, long txId, boolean returnValues, List<Object> parameters) throws HDBException {
    // ensure we are dealing with the same data types that we see when the request id coming from the wire
    parameters = PduCodec.normalizeParametersList(parameters);
    TransactionContext transactionContext = new TransactionContext(txId);
    TranslatedQuery translatedQuery;
    try {
        translatedQuery = server.getManager().getPlanner().translate(tablespace, query, parameters, false, true, returnValues, -1);
        Statement statement = translatedQuery.plan.mainStatement;
        CompletableFuture<StatementExecutionResult> res = server.getManager().executePlanAsync(translatedQuery.plan, translatedQuery.context, transactionContext);
        CompletableFuture<DMLResult> finalResult = res.handle((result, err) -> {
            if (err != null) {
                while (err instanceof CompletionException) {
                    err = err.getCause();
                }
                // map constraint violations to the dedicated JDBC exception type
                if (err instanceof DuplicatePrimaryKeyException) {
                    throw new CompletionException(new SQLIntegrityConstraintViolationException(err));
                } else {
                    throw new CompletionException(new SQLException(err));
                }
            }
            if (result instanceof DMLStatementExecutionResult) {
                DMLStatementExecutionResult dml = (DMLStatementExecutionResult) result;
                if (returnValues && dml.getKey() != null) {
                    // decode the new primary key (and full record, when available)
                    // without going through the wire serialization
                    TableAwareStatement tableStatement = statement.unwrap(TableAwareStatement.class);
                    Table table = server.getManager().getTableSpaceManager(statement.getTableSpace()).getTableManager(tableStatement.getTable()).getTable();
                    final Map<RawString, Object> newRecord = new HashMap<>();
                    Object newKey = RecordSerializer.deserializePrimaryKey(dml.getKey(), table);
                    newRecord.put(RAWSTRING_KEY, newKey);
                    if (dml.getNewvalue() != null) {
                        Map<String, Object> toBean = RecordSerializer.toBean(new Record(dml.getKey(), dml.getNewvalue()), table);
                        toBean.forEach((k, v) -> {
                            newRecord.put(RawString.of(k), v);
                        });
                    }
                    return new DMLResult(dml.getUpdateCount(), newKey, newRecord, dml.transactionId);
                } else {
                    return new DMLResult(dml.getUpdateCount(), null, null, dml.transactionId);
                }
            } else if (result instanceof DDLStatementExecutionResult) {
                return new DMLResult(1, null, null, result.transactionId);
            } else {
                throw new CompletionException(new SQLException("Unknown result type " + result.getClass() + ": " + result));
            }
        });
        return finalResult.get();
    } catch (Throwable err) {
        if (err instanceof InterruptedException) {
            // finalResult.get() is a blocking wait: restore the interrupt flag
            // so callers further up the stack can observe the interruption
            Thread.currentThread().interrupt();
        }
        // Future.get() wraps failures in ExecutionException (whose cause here is
        // the CompletionException thrown by the handler above): unwrap both
        // layers so HDBException carries the root cause, not the wrappers
        while ((err instanceof CompletionException || err instanceof java.util.concurrent.ExecutionException) && err.getCause() != null) {
            err = err.getCause();
        }
        throw new HDBException(err);
    }
}
Aggregations