Use of herddb.backup.DumpedLogEntry in project herddb by diennea.
From class ServerSideConnectionPeer, method handlePushTxLogChunk:
private void handlePushTxLogChunk(Message message, Channel _channel) {
    try {
        String tableSpace = (String) message.parameters.get("tableSpace");
        List<KeyValue> data = (List<KeyValue>) message.parameters.get("data");
        LOGGER.log(Level.INFO, "Received " + data.size() + " records for restore of txlog in tableSpace " + tableSpace);
        // rebuild each DumpedLogEntry from its wire form: serialized log position as key, raw entry bytes as value
        List<DumpedLogEntry> entries = new ArrayList<>(data.size());
        for (KeyValue kv : data) {
            entries.add(new DumpedLogEntry(LogSequenceNumber.deserialize(kv.key), kv.value));
        }
        server.getManager().getTableSpaceManager(tableSpace).restoreRawDumpedEntryLogs(entries);
        _channel.sendReplyMessage(message, Message.ACK(null));
    } catch (Exception err) {
        Message error = Message.ERROR(null, err);
        if (err instanceof NotLeaderException) {
            // flag the failure so the client knows to retry against the current leader
            error.setParameter("notLeader", "true");
        }
        _channel.sendReplyMessage(message, error);
    }
}
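Taken together, the usages on this page pin down the shape of DumpedLogEntry itself. A minimal sketch inferred from this page alone: the field names match the accesses in sendDumpedCommitLog below, and the ordering matches the naturalOrder() sort in dumpTableSpace. This is an inferred sketch, not the verbatim herddb source.
// Sketch of herddb.backup.DumpedLogEntry as implied by its usages here
// (field names and Comparable ordering inferred, not copied from herddb).
public final class DumpedLogEntry implements Comparable<DumpedLogEntry> {

    public final LogSequenceNumber logSequenceNumber;
    public final byte[] entryData;

    public DumpedLogEntry(LogSequenceNumber logSequenceNumber, byte[] entryData) {
        this.logSequenceNumber = logSequenceNumber;
        this.entryData = entryData;
    }

    @Override
    public int compareTo(DumpedLogEntry other) {
        // natural order is the log position, so txlogentries.sort(Comparator.naturalOrder())
        // in dumpTableSpace replays captured entries in the order they were written
        int c = Long.compare(this.logSequenceNumber.ledgerId, other.logSequenceNumber.ledgerId);
        return c != 0 ? c : Long.compare(this.logSequenceNumber.offset, other.logSequenceNumber.offset);
    }
}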
Use of herddb.backup.DumpedLogEntry in project herddb by diennea.
From class TableSpaceManager, method dumpTableSpace:
void dumpTableSpace(String dumpId, Channel _channel, int fetchSize, boolean includeLog) throws DataStorageManagerException, LogNotAvailableException {
    LOGGER.log(Level.SEVERE, "dumpTableSpace dumpId:" + dumpId + " channel " + _channel + " fetchSize:" + fetchSize + ", includeLog:" + includeLog);
    TableSpaceCheckpoint checkpoint;
    List<DumpedLogEntry> txlogentries = new CopyOnWriteArrayList<>();
    CommitLogListener logDumpReceiver = new CommitLogListener() {
        @Override
        public void logEntry(LogSequenceNumber logPos, LogEntry data) {
            // capture every change applied to the tablespace while the dump is running,
            // so that changes missed by the table scans can be replayed on the receiver
            txlogentries.add(new DumpedLogEntry(logPos, data.serialize()));
            LOGGER.log(Level.SEVERE, "dumping entry " + logPos + ", " + data + " nentries: " + txlogentries.size());
        }
    };
    generalLock.writeLock().lock();
    try {
        if (includeLog) {
            log.attachCommitLogListener(logDumpReceiver);
        }
        checkpoint = checkpoint(true, true);
        /* Downgrade lock */
        generalLock.readLock().lock();
    } finally {
        generalLock.writeLock().unlock();
    }
    try {
        final int timeout = 60000;
        Map<String, Object> startData = new HashMap<>();
        startData.put("command", "start");
        LogSequenceNumber logSequenceNumber = log.getLastSequenceNumber();
        startData.put("ledgerid", logSequenceNumber.ledgerId);
        startData.put("offset", logSequenceNumber.offset);
        Message response_to_start = _channel.sendMessageWithReply(Message.TABLESPACE_DUMP_DATA(null, tableSpaceName, dumpId, startData), timeout);
        if (response_to_start.type != Message.TYPE_ACK) {
            LOGGER.log(Level.SEVERE, "error response at start command: " + response_to_start.parameters);
            return;
        }
        if (includeLog) {
            List<Transaction> transactionsSnapshot = new ArrayList<>();
            dataStorageManager.loadTransactions(logSequenceNumber, tableSpaceUUID, transactionsSnapshot::add);
            // ship pending transactions in batches of 10 (sendTransactionsDump is expected to drain the batch)
            List<Transaction> batch = new ArrayList<>();
            for (Transaction t : transactionsSnapshot) {
                batch.add(t);
                if (batch.size() == 10) {
                    sendTransactionsDump(batch, _channel, dumpId, timeout, response_to_start);
                }
            }
            sendTransactionsDump(batch, _channel, dumpId, timeout, response_to_start);
        }
        for (Entry<String, LogSequenceNumber> entry : checkpoint.tablesCheckpoints.entrySet()) {
            final AbstractTableManager tableManager = tables.get(entry.getKey());
            final LogSequenceNumber sequenceNumber = entry.getValue();
            if (tableManager.isSystemTable()) {
                continue;
            }
            try {
                FullTableScanConsumer sink = new SingleTableDumper(tableSpaceName, tableManager, _channel, dumpId, timeout, fetchSize);
                tableManager.dump(sequenceNumber, sink);
            } catch (DataStorageManagerException err) {
                Map<String, Object> errorOnData = new HashMap<>();
                errorOnData.put("command", "error");
                _channel.sendMessageWithReply(Message.TABLESPACE_DUMP_DATA(null, tableSpaceName, dumpId, errorOnData), timeout);
                LOGGER.log(Level.SEVERE, "error sending dump id " + dumpId, err);
                return;
            }
        }
        if (!txlogentries.isEmpty()) {
            // ship the entries captured during the dump, in log order
            txlogentries.sort(Comparator.naturalOrder());
            sendDumpedCommitLog(txlogentries, _channel, dumpId, timeout);
        }
        Map<String, Object> finishData = new HashMap<>();
        LogSequenceNumber finishLogSequenceNumber = log.getLastSequenceNumber();
        finishData.put("ledgerid", finishLogSequenceNumber.ledgerId);
        finishData.put("offset", finishLogSequenceNumber.offset);
        finishData.put("command", "finish");
        // fire-and-forget: the outcome of the final message is not awaited
        _channel.sendOneWayMessage(Message.TABLESPACE_DUMP_DATA(null, tableSpaceName, dumpId, finishData), new SendResultCallback() {
            @Override
            public void messageSent(Message originalMessage, Throwable error) {
            }
        });
    } catch (InterruptedException | TimeoutException error) {
        LOGGER.log(Level.SEVERE, "error sending dump id " + dumpId, error);
    } finally {
        generalLock.readLock().unlock();
        if (includeLog) {
            log.removeCommitLogListener(logDumpReceiver);
        }
        for (Entry<String, LogSequenceNumber> entry : checkpoint.tablesCheckpoints.entrySet()) {
            dataStorageManager.unPinTableCheckpoint(tableSpaceUUID, entry.getKey(), entry.getValue());
        }
    }
}
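The locking around the checkpoint is the standard ReentrantReadWriteLock downgrade idiom: the read lock is acquired before the write lock is released, so no writer can slip in between the checkpoint and the long-running dump. A self-contained illustration of the pattern (generic Java, not herddb code):
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Standalone illustration of the lock-downgrade idiom used in dumpTableSpace:
// mutate under the write lock, then take the read lock *before* releasing
// the write lock, so the long read phase sees exactly the state just written.
class DowngradeExample {

    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private long state;

    long snapshotAndRead() {
        lock.writeLock().lock();
        long snapshot;
        try {
            snapshot = ++state;        // mutation under the write lock (the "checkpoint")
            lock.readLock().lock();    // downgrade: read lock acquired while still writing
        } finally {
            lock.writeLock().unlock(); // writers stay blocked; we still hold the read lock
        }
        try {
            return snapshot + state;   // long-running read phase (the dump itself)
        } finally {
            lock.readLock().unlock();
        }
    }
}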
Use of herddb.backup.DumpedLogEntry in project herddb by diennea.
From class TableSpaceManager, method sendDumpedCommitLog:
private void sendDumpedCommitLog(List<DumpedLogEntry> txlogentries, Channel _channel, String dumpId, final int timeout) throws TimeoutException, InterruptedException {
    Map<String, Object> data = new HashMap<>();
    List<KeyValue> batch = new ArrayList<>();
    for (DumpedLogEntry e : txlogentries) {
        batch.add(new KeyValue(e.logSequenceNumber.serialize(), e.entryData));
    }
    data.put("command", "txlog");
    data.put("records", batch);
    _channel.sendMessageWithReply(Message.TABLESPACE_DUMP_DATA(null, tableSpaceName, dumpId, data), timeout);
}
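Note the symmetry with handlePushTxLogChunk above: each entry is flattened into a KeyValue (serialized log position as key, raw entry bytes as value) on the way out and rebuilt on the way back in. A test-style round-trip sketch, assuming serialize()/deserialize() are inverses as the two methods imply (imports omitted, as in the snippets above):
// Hypothetical round-trip check mirroring sendDumpedCommitLog and handlePushTxLogChunk
// (a sketch for illustration, not herddb code).
DumpedLogEntry original = new DumpedLogEntry(new LogSequenceNumber(1L, 42L), new byte[]{1, 2, 3});

// wire form, as built in sendDumpedCommitLog
KeyValue wire = new KeyValue(original.logSequenceNumber.serialize(), original.entryData);

// reconstruction, as done in handlePushTxLogChunk and in the "txlog" branch below
DumpedLogEntry restored = new DumpedLogEntry(LogSequenceNumber.deserialize(wire.key), wire.value);

assert restored.logSequenceNumber.equals(original.logSequenceNumber);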
Use of herddb.backup.DumpedLogEntry in project herddb by diennea.
From class RoutedClientSideConnection, method messageReceived:
@Override
@SuppressFBWarnings(value = "SF_SWITCH_NO_DEFAULT")
public void messageReceived(Message message, Channel _channel) {
    switch (message.type) {
        case Message.TYPE_TABLESPACE_DUMP_DATA: {
            String dumpId = (String) message.parameters.get("dumpId");
            TableSpaceDumpReceiver receiver = dumpReceivers.get(dumpId);
            LOGGER.log(Level.FINE, "receiver for {0}: {1}", new Object[]{dumpId, receiver});
            if (receiver == null) {
                if (_channel != null) {
                    _channel.sendReplyMessage(message, Message.ERROR(clientId, new Exception("no such dump receiver " + dumpId)));
                }
                return;
            }
            try {
                Map<String, Object> values = (Map<String, Object>) message.parameters.get("values");
                // the + "" guards against a missing command: null becomes "null" and falls through to the default branch
                String command = (String) values.get("command") + "";
                boolean sendAck = true;
                switch (command) {
                    case "start": {
                        long ledgerId = (long) values.get("ledgerid");
                        long offset = (long) values.get("offset");
                        receiver.start(new LogSequenceNumber(ledgerId, offset));
                        break;
                    }
                    case "beginTable": {
                        byte[] tableDefinition = (byte[]) values.get("table");
                        Table table = Table.deserialize(tableDefinition);
                        Long estimatedSize = (Long) values.get("estimatedSize");
                        long dumpLedgerId = (Long) values.get("dumpLedgerid");
                        long dumpOffset = (Long) values.get("dumpOffset");
                        List<byte[]> indexesDef = (List<byte[]>) values.get("indexes");
                        List<Index> indexes = indexesDef.stream().map(Index::deserialize).collect(Collectors.toList());
                        Map<String, Object> stats = new HashMap<>();
                        stats.put("estimatedSize", estimatedSize);
                        stats.put("dumpLedgerId", dumpLedgerId);
                        stats.put("dumpOffset", dumpOffset);
                        receiver.beginTable(new DumpedTableMetadata(table, new LogSequenceNumber(dumpLedgerId, dumpOffset), indexes), stats);
                        break;
                    }
                    case "endTable": {
                        receiver.endTable();
                        break;
                    }
                    case "finish": {
                        long ledgerId = (long) values.get("ledgerid");
                        long offset = (long) values.get("offset");
                        receiver.finish(new LogSequenceNumber(ledgerId, offset));
                        // "finish" is sent one-way by the server, so no ack is expected
                        sendAck = false;
                        break;
                    }
                    case "data": {
                        List<KeyValue> data = (List<KeyValue>) values.get("records");
                        List<Record> records = new ArrayList<>(data.size());
                        for (KeyValue kv : data) {
                            records.add(new Record(new Bytes(kv.key), new Bytes(kv.value)));
                        }
                        receiver.receiveTableDataChunk(records);
                        break;
                    }
                    case "txlog": {
                        List<KeyValue> data = (List<KeyValue>) values.get("records");
                        List<DumpedLogEntry> records = new ArrayList<>(data.size());
                        for (KeyValue kv : data) {
                            records.add(new DumpedLogEntry(LogSequenceNumber.deserialize(kv.key), kv.value));
                        }
                        receiver.receiveTransactionLogChunk(records);
                        break;
                    }
                    case "transactions": {
                        String tableSpace = (String) values.get("tableSpace");
                        List<byte[]> data = (List<byte[]>) values.get("transactions");
                        List<Transaction> transactions = data.stream().map(array -> Transaction.deserialize(tableSpace, array)).collect(Collectors.toList());
                        receiver.receiveTransactionsAtDump(transactions);
                        break;
                    }
                    default:
                        throw new DataStorageManagerException("invalid dump command: " + command);
                }
                if (_channel != null && sendAck) {
                    _channel.sendReplyMessage(message, Message.ACK(clientId));
                }
            } catch (DataStorageManagerException error) {
                LOGGER.log(Level.SEVERE, "error while handling dump data", error);
                if (_channel != null) {
                    _channel.sendReplyMessage(message, Message.ERROR(clientId, error));
                }
            }
        }
        break;
    }
}
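The switch above defines the receiver-side contract: start, beginTable, data chunks, endTable, txlog, transactions, finish. A minimal counting receiver as a sketch of that contract; it assumes TableSpaceDumpReceiver is an abstract class with no-op defaults and callback signatures matching the calls above, which this page does not show (imports omitted, as in the snippets above):
// Hypothetical receiver that only counts what arrives; a sketch of the callback
// sequence driven by the "data"/"txlog"/"finish" branches above, not herddb code.
class CountingDumpReceiver extends TableSpaceDumpReceiver {

    long records;
    long logEntries;
    boolean finished;

    @Override
    public void receiveTableDataChunk(List<Record> chunk) {
        records += chunk.size();          // "data" command
    }

    @Override
    public void receiveTransactionLogChunk(List<DumpedLogEntry> chunk) {
        logEntries += chunk.size();       // "txlog" command
    }

    @Override
    public void finish(LogSequenceNumber pos) {
        finished = true;                  // "finish" command: dump ended at this log position
    }
}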