Use of herddb.network.Channel in project herddb by diennea.
From class RoutedClientSideConnection, method restoreTableSpace.
@Override
public void restoreTableSpace(String tableSpace, TableSpaceRestoreSource source) throws HDBException, ClientSideMetadataProviderException {
    List<DumpedTableMetadata> tables = new ArrayList<>();
    try {
        while (true) {
            String entryType = source.nextEntryType();
            LOGGER.log(Level.FINEST, "restore, entryType:{0}", entryType);
            switch (entryType) {
                case BackupFileConstants.ENTRY_TYPE_START: {
                    break;
                }
                case BackupFileConstants.ENTRY_TYPE_TABLE: {
                    // announce the table to the server, then stream its data in chunks
                    DumpedTableMetadata table = source.nextTable();
                    Channel channel = ensureOpen();
                    long id = channel.generateRequestId();
                    ByteBuf message_create_table = PduCodec.RequestTableRestore.write(id, tableSpace,
                            table.table.serialize(), table.logSequenceNumber.ledgerId, table.logSequenceNumber.offset);
                    sendMessageAndCheckNoError(channel, id, message_create_table);
                    List<KeyValue> chunk = source.nextTableDataChunk();
                    while (chunk != null) {
                        id = channel.generateRequestId();
                        ByteBuf message = PduCodec.PushTableData.write(id, tableSpace, table.table.name, chunk);
                        sendMessageAndCheckNoError(channel, id, message);
                        chunk = source.nextTableDataChunk();
                    }
                    tables.add(table);
                    break;
                }
                case BackupFileConstants.ENTRY_TYPE_TXLOGCHUNK: {
                    // forward a chunk of the transaction log
                    Channel channel = ensureOpen();
                    List<KeyValue> chunk = source.nextTransactionLogChunk();
                    long id = channel.generateRequestId();
                    ByteBuf message = PduCodec.PushTxLogChunk.write(id, tableSpace, chunk);
                    sendMessageAndCheckNoError(channel, id, message);
                    break;
                }
                case BackupFileConstants.ENTRY_TYPE_TRANSACTIONS: {
                    // forward the block of in-flight transactions
                    Channel channel = ensureOpen();
                    List<byte[]> chunk = source.nextTransactionsBlock();
                    long id = channel.generateRequestId();
                    ByteBuf message = PduCodec.PushTransactionsBlock.write(id, tableSpace, chunk);
                    sendMessageAndCheckNoError(channel, id, message);
                    break;
                }
                case BackupFileConstants.ENTRY_TYPE_END: {
                    // send a 'table finished' event only at the end of the procedure:
                    // the stream of transaction log entries is finished, so the data
                    // contained in the tables is "final" and all indexes can be created now
                    Channel channel = ensureOpen();
                    for (DumpedTableMetadata table : tables) {
                        List<byte[]> indexes = table.indexes.stream().map(Index::serialize).collect(Collectors.toList());
                        long id = channel.generateRequestId();
                        ByteBuf message_table_finished = PduCodec.TableRestoreFinished.write(id, tableSpace, table.table.name, indexes);
                        sendMessageAndCheckNoError(channel, id, message_table_finished);
                    }
                    long id = channel.generateRequestId();
                    ByteBuf message_restore_finished = PduCodec.RestoreFinished.write(id, tableSpace);
                    sendMessageAndCheckNoError(channel, id, message_restore_finished);
                    return;
                }
                default:
                    throw new HDBException("bad entryType " + entryType);
            }
        }
    } catch (InterruptedException err) {
        Thread.currentThread().interrupt();
        throw new HDBException(err);
    } catch (TimeoutException err) {
        throw new HDBException(err);
    }
}
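The loop above implies a strict stream protocol: the source must open with ENTRY_TYPE_START, may then interleave TABLE, TXLOGCHUNK and TRANSACTIONS entries, and must close with ENTRY_TYPE_END, the only branch that returns. Below is a minimal sketch of a conforming source; it assumes TableSpaceRestoreSource can be subclassed with exactly the five methods the loop calls and that those methods may throw HDBException. Both details, and the class name EmptyRestoreSource, are assumptions for illustration, not the real herddb.backup API.

// Minimal conforming source: emits START, then END, with no tables and no
// transaction log. herddb imports omitted for brevity.
import java.util.Collections;
import java.util.List;

public class EmptyRestoreSource extends TableSpaceRestoreSource {

    private boolean started;

    @Override
    public String nextEntryType() throws HDBException {
        // minimal legal sequence: START once, then END
        if (!started) {
            started = true;
            return BackupFileConstants.ENTRY_TYPE_START;
        }
        return BackupFileConstants.ENTRY_TYPE_END;
    }

    @Override
    public DumpedTableMetadata nextTable() throws HDBException {
        throw new HDBException("this source contains no tables");
    }

    @Override
    public List<KeyValue> nextTableDataChunk() throws HDBException {
        return null; // null terminates a table's data stream in the loop above
    }

    @Override
    public List<KeyValue> nextTransactionLogChunk() throws HDBException {
        return Collections.emptyList();
    }

    @Override
    public List<byte[]> nextTransactionsBlock() throws HDBException {
        return Collections.emptyList();
    }
}

Feeding such a source to restoreTableSpace exercises only the START and END branches: since the tables list stays empty, the client sends no TableRestoreFinished messages and goes straight to RestoreFinished.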
Use of herddb.network.Channel in project herddb by diennea.
From class RoutedClientSideConnection, method executeScan.
@Override
public ScanResultSet executeScan(String tableSpace, String query, boolean usePreparedStatement, List<Object> params, long tx, int maxRows, int fetchSize, boolean keepReadLocks) throws HDBException, ClientSideMetadataProviderException {
    Channel channel = ensureOpen();
    Pdu reply = null;
    try {
        long scannerId = scannerIdGenerator.incrementAndGet();
        long requestId = channel.generateRequestId();
        // with prepared statements the query text travels only at prepare time;
        // afterwards the wire message carries just the statementId
        long statementId = usePreparedStatement ? prepareQuery(tableSpace, query) : 0;
        query = statementId > 0 ? "" : query;
        ByteBuf message = PduCodec.OpenScanner.write(requestId, tableSpace, query, scannerId, tx, params, statementId, fetchSize, maxRows, keepReadLocks);
        LOGGER.log(Level.FINEST, "open scanner {0} for query {1}, params {2}", new Object[]{scannerId, query, params});
        reply = channel.sendMessageWithPduReply(requestId, message, timeout);
        if (reply.type == Pdu.TYPE_ERROR) {
            handleGenericError(reply, statementId, true);
            // not reachable, handleGenericError always throws
            return null;
        } else if (reply.type != Pdu.TYPE_RESULTSET_CHUNK) {
            HDBException err = new HDBException(reply);
            reply.close();
            throw err;
        }
        boolean last = PduCodec.ResultSetChunk.readIsLast(reply);
        long transactionId = PduCodec.ResultSetChunk.readTx(reply);
        RecordsBatch data = PduCodec.ResultSetChunk.startReadingData(reply);
        ScanResultSetImpl impl = new ScanResultSetImpl(scannerId, data, fetchSize, last, transactionId, channel);
        return impl;
    } catch (InterruptedException err) {
        if (reply != null) {
            reply.close();
        }
        Thread.currentThread().interrupt();
        throw new HDBException(err);
    } catch (TimeoutException err) {
        if (reply != null) {
            reply.close();
        }
        throw new HDBException(err);
    }
}
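On success the method wraps the first TYPE_RESULTSET_CHUNK in a ScanResultSetImpl, which pulls further chunks over the same Channel as the caller iterates; the client-generated scannerId is what correlates those follow-up fetches with this scan. A hedged usage sketch follows: the connection variable, the tablespace and table names, and the hasNext()/next() iteration style on ScanResultSet are assumptions about the surrounding client API rather than confirmed signatures.

// 'connection' is assumed to be an open RoutedClientSideConnection
// obtained elsewhere; herddb imports omitted for brevity.
ScanResultSet scan = connection.executeScan(
        "mytablespace",                           // hypothetical tablespace name
        "SELECT * FROM customers WHERE num > ?",  // query
        true,                                     // usePreparedStatement
        Arrays.asList(100),                       // params bound to the '?'
        0,                                        // tx: assumed to mean "no transaction"
        0,                                        // maxRows: assumed to mean "no limit"
        100,                                      // fetchSize per chunk
        false);                                   // keepReadLocks
try {
    while (scan.hasNext()) {       // assumed iterator-style API
        System.out.println(scan.next());
    }
} finally {
    scan.close();                  // releases the server-side scanner
}

Closing the result set matters even on early exit: the server keeps scanner state for scannerId until it is released or the channel drops.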