Use of herddb.utils.Bytes in project herddb by diennea.
The class BLinkKeyToPageIndex, method start().
@Override
public void start(LogSequenceNumber sequenceNumber) throws DataStorageManagerException {
    LOGGER.log(Level.SEVERE, " start index {0}", new Object[] { indexName });
    /* Actually the same size */
    final long pageSize = memoryManager.getMaxLogicalPageSize();
    if (LogSequenceNumber.START_OF_TIME.equals(sequenceNumber)) {
        /* Empty index (booting from the start) */
        tree = new BLink<>(pageSize, SizeEvaluatorImpl.INSTANCE, memoryManager.getPKPageReplacementPolicy(), indexDataStorage);
        LOGGER.log(Level.SEVERE, "loaded empty index {0}", new Object[] { indexName });
    } else {
        IndexStatus status = dataStorageManager.getIndexStatus(tableSpace, indexName, sequenceNumber);
        try {
            BLinkMetadata<Bytes> metadata = MetadataSerializer.INSTANCE.read(status.indexData);
            tree = new BLink<>(pageSize, SizeEvaluatorImpl.INSTANCE, memoryManager.getPKPageReplacementPolicy(), indexDataStorage, metadata);
        } catch (IOException e) {
            throw new DataStorageManagerException(e);
        }
        newPageId.set(status.newPageId);
        LOGGER.log(Level.SEVERE, "loaded index {0}: {1} keys", new Object[] { indexName, tree.size() });
    }
}
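start() decides between a cold boot and a restore purely from the log sequence number it receives: LogSequenceNumber.START_OF_TIME means no checkpoint exists, so an empty BLink tree is created; any other value makes it fetch the IndexStatus for that checkpoint and deserialize the persisted BLinkMetadata. A minimal sketch of the calling side, with hypothetical names (recoverCheckpointLsn, keyToPage):

// Hypothetical driver code: pass START_OF_TIME for a brand-new index,
// or the checkpoint LSN to restore the persisted tree metadata.
LogSequenceNumber lsn = recoverCheckpointLsn(); // assume null when never checkpointed
if (lsn == null) {
    lsn = LogSequenceNumber.START_OF_TIME; // cold boot: empty tree
}
keyToPage.start(lsn); // otherwise restores BLinkMetadata for that checkpoint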
Use of herddb.utils.Bytes in project herddb by diennea.
The class BRINIndexManager, method recordUpdated().
@Override
public void recordUpdated(Bytes key, DataAccessor previousValues, DataAccessor newValues) {
    Bytes indexKeyRemoved = RecordSerializer.serializePrimaryKey(previousValues, index, index.columnNames);
    Bytes indexKeyAdded = RecordSerializer.serializePrimaryKey(newValues, index, index.columnNames);
    if (Objects.equals(indexKeyRemoved, indexKeyAdded)) {
        // indexed columns unchanged: nothing to do
        return;
    }
    // BEWARE that this operation is not atomic
    if (indexKeyAdded != null) {
        data.put(indexKeyAdded, key);
    }
    if (indexKeyRemoved != null) {
        data.delete(indexKeyRemoved, key);
    }
}
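The early return skips index maintenance when the update did not change any indexed column; otherwise the new entry is inserted before the old one is deleted. As the BEWARE comment notes, the two steps are not atomic: a concurrent reader can transiently see the row under both keys, though never under neither. A self-contained stand-in (plain JDK collections, not HerdDB code) demonstrating that ordering:

import java.util.*;

// Stand-in demo of the put-then-delete ordering used by recordUpdated:
// the new index entry becomes visible before the old one is removed.
public class PutThenDeleteDemo {

    static final Map<String, Set<Integer>> index = new TreeMap<>();

    static void put(String key, int pk) {
        index.computeIfAbsent(key, k -> new HashSet<>()).add(pk);
    }

    static void delete(String key, int pk) {
        Set<Integer> pks = index.get(key);
        if (pks != null) {
            pks.remove(pk);
        }
    }

    public static void main(String[] args) {
        put("alice", 42); // row 42 indexed under "alice"
        // UPDATE changes the indexed column from "alice" to "bob":
        put("bob", 42); // step 1: new entry visible
        // a reader here finds row 42 under BOTH keys (the non-atomic window)
        delete("alice", 42); // step 2: old entry removed
        System.out.println(index); // {alice=[], bob=[42]}
    }
}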
Use of herddb.utils.Bytes in project herddb by diennea.
The class BRINIndexManager, method recordInserted().
@Override
public void recordInserted(Bytes key, DataAccessor values) {
    Bytes indexKey = RecordSerializer.serializePrimaryKey(values, index, index.columnNames);
    data.put(indexKey, key);
}
Use of herddb.utils.Bytes in project herddb by diennea.
The class RecordSerializer, method serializePrimaryKey().
public static Bytes serializePrimaryKey(DataAccessor record, ColumnsList table, String[] columns) {
    ByteArrayOutputStream key = new ByteArrayOutputStream();
    String[] primaryKey = table.getPrimaryKey();
    if (primaryKey.length == 1) {
        String pkColumn = primaryKey[0];
        // the caller must ask for exactly the single PK column
        if (columns.length != 1 || !columns[0].equals(pkColumn)) {
            throw new IllegalArgumentException("SQLTranslator error, " + Arrays.toString(columns) + " != " + Arrays.asList(pkColumn));
        }
        Column c = table.getColumn(pkColumn);
        Object v = record.get(c.name);
        if (v == null) {
            throw new IllegalArgumentException("key field " + pkColumn + " cannot be null. Record data: " + record);
        }
        byte[] fieldValue = serialize(v, c.type);
        return new Bytes(fieldValue);
    } else {
        // beware that we can serialize even only a part of the PK, for instance for a prefix index scan
        try (ExtendedDataOutputStream doo_key = new ExtendedDataOutputStream(key)) {
            int i = 0;
            for (String pkColumn : columns) {
                if (!pkColumn.equals(primaryKey[i])) {
                    throw new IllegalArgumentException("SQLTranslator error, " + Arrays.toString(columns) + " != " + Arrays.asList(primaryKey));
                }
                Column c = table.getColumn(pkColumn);
                Object v = record.get(c.name);
                if (v == null) {
                    throw new IllegalArgumentException("key field " + pkColumn + " cannot be null. Record data: " + record);
                }
                byte[] fieldValue = serialize(v, c.type);
                doo_key.writeArray(fieldValue);
                i++;
            }
        } catch (IOException err) {
            throw new RuntimeException(err);
        }
        return new Bytes(key.toByteArray());
    }
}
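For a single-column primary key the serialized column value is used as the key directly; for a composite key each value is written length-prefixed through ExtendedDataOutputStream.writeArray, and the loop tolerates a strict prefix of the PK columns so that prefix index scans can build partial keys. A runnable analogue of that layout using plain java.io (the actual ExtendedDataOutputStream wire format may differ; this only illustrates the idea):

import java.io.*;

// Analogue of composite-key encoding: each column value is length-prefixed,
// so a key built from a prefix of the PK columns is itself a valid key.
// NOTE: the int-length prefix is an assumption for this demo, not
// necessarily what ExtendedDataOutputStream.writeArray emits.
public class CompositeKeyDemo {

    static byte[] encode(byte[]... columnValues) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (DataOutputStream dos = new DataOutputStream(out)) {
            for (byte[] value : columnValues) {
                dos.writeInt(value.length); // length prefix
                dos.write(value);           // raw column bytes
            }
        }
        return out.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        byte[] full = encode("tenant1".getBytes(), "user42".getBytes());
        byte[] prefix = encode("tenant1".getBytes()); // partial PK for a prefix scan
        System.out.println(full.length + " bytes vs " + prefix.length + " bytes");
    }
}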
Use of herddb.utils.Bytes in project herddb by diennea.
The class RoutedClientSideConnection, method messageReceived().
@Override
@SuppressFBWarnings(value = "SF_SWITCH_NO_DEFAULT")
public void messageReceived(Message message, Channel _channel) {
    switch (message.type) {
        case Message.TYPE_TABLESPACE_DUMP_DATA: {
            String dumpId = (String) message.parameters.get("dumpId");
            TableSpaceDumpReceiver receiver = dumpReceivers.get(dumpId);
            LOGGER.log(Level.FINE, "receiver for {0}: {1}", new Object[] { dumpId, receiver });
            if (receiver == null) {
                if (_channel != null) {
                    _channel.sendReplyMessage(message, Message.ERROR(clientId, new Exception("no such dump receiver " + dumpId)));
                }
                return;
            }
            try {
                Map<String, Object> values = (Map<String, Object>) message.parameters.get("values");
                String command = (String) values.get("command") + "";
                boolean sendAck = true;
                switch (command) {
                    case "start": {
                        long ledgerId = (long) values.get("ledgerid");
                        long offset = (long) values.get("offset");
                        receiver.start(new LogSequenceNumber(ledgerId, offset));
                        break;
                    }
                    case "beginTable": {
                        byte[] tableDefinition = (byte[]) values.get("table");
                        Table table = Table.deserialize(tableDefinition);
                        Long estimatedSize = (Long) values.get("estimatedSize");
                        long dumpLedgerId = (Long) values.get("dumpLedgerid");
                        long dumpOffset = (Long) values.get("dumpOffset");
                        List<byte[]> indexesDef = (List<byte[]>) values.get("indexes");
                        List<Index> indexes = indexesDef.stream().map(Index::deserialize).collect(Collectors.toList());
                        Map<String, Object> stats = new HashMap<>();
                        stats.put("estimatedSize", estimatedSize);
                        stats.put("dumpLedgerId", dumpLedgerId);
                        stats.put("dumpOffset", dumpOffset);
                        receiver.beginTable(new DumpedTableMetadata(table, new LogSequenceNumber(dumpLedgerId, dumpOffset), indexes), stats);
                        break;
                    }
                    case "endTable": {
                        receiver.endTable();
                        break;
                    }
                    case "finish": {
                        long ledgerId = (long) values.get("ledgerid");
                        long offset = (long) values.get("offset");
                        receiver.finish(new LogSequenceNumber(ledgerId, offset));
                        sendAck = false;
                        break;
                    }
                    case "data": {
                        List<KeyValue> data = (List<KeyValue>) values.get("records");
                        List<Record> records = new ArrayList<>(data.size());
                        for (KeyValue kv : data) {
                            records.add(new Record(new Bytes(kv.key), new Bytes(kv.value)));
                        }
                        receiver.receiveTableDataChunk(records);
                        break;
                    }
                    case "txlog": {
                        List<KeyValue> data = (List<KeyValue>) values.get("records");
                        List<DumpedLogEntry> records = new ArrayList<>(data.size());
                        for (KeyValue kv : data) {
                            records.add(new DumpedLogEntry(LogSequenceNumber.deserialize(kv.key), kv.value));
                        }
                        receiver.receiveTransactionLogChunk(records);
                        break;
                    }
                    case "transactions": {
                        String tableSpace = (String) values.get("tableSpace");
                        List<byte[]> data = (List<byte[]>) values.get("transactions");
                        List<Transaction> transactions = data.stream().map(array -> Transaction.deserialize(tableSpace, array)).collect(Collectors.toList());
                        receiver.receiveTransactionsAtDump(transactions);
                        break;
                    }
                    default:
                        throw new DataStorageManagerException("invalid dump command:" + command);
                }
                if (_channel != null && sendAck) {
                    _channel.sendReplyMessage(message, Message.ACK(clientId));
                }
            } catch (DataStorageManagerException error) {
                LOGGER.log(Level.SEVERE, "error while handling dump data", error);
                if (_channel != null) {
                    _channel.sendReplyMessage(message, Message.ERROR(clientId, error));
                }
            }
        }
        break;
    }
}
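Each TYPE_TABLESPACE_DUMP_DATA message carries a command string that drives one receiver callback: "start" and "finish" delimit the dump (only "finish" is not ACKed), "beginTable"/"data"/"endTable" stream each table, and "txlog"/"transactions" carry the log tail and the transactions open at dump time. A minimal receiver sketch mirroring that callback surface (a hypothetical stand-in: it deliberately does not extend HerdDB's real TableSpaceDumpReceiver, whose exact modifiers and checked exceptions are not shown here):

import java.util.List;
import java.util.Map;

// Hypothetical stand-in with the same callbacks messageReceived invokes above.
public class CountingDumpReceiver {

    private long records;

    public void start(LogSequenceNumber lsn) {
        System.out.println("dump starts at " + lsn);
    }

    public void beginTable(DumpedTableMetadata metadata, Map<String, Object> stats) {
        System.out.println("begin table, stats=" + stats);
    }

    public void receiveTableDataChunk(List<Record> chunk) {
        records += chunk.size(); // each "data" message is ACKed after this returns
    }

    public void endTable() {
    }

    public void receiveTransactionLogChunk(List<DumpedLogEntry> entries) {
    }

    public void receiveTransactionsAtDump(List<Transaction> transactions) {
    }

    public void finish(LogSequenceNumber lsn) {
        System.out.println(records + " records, dump ends at " + lsn); // "finish" sends no ACK
    }
}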