Use of herddb.log.LogSequenceNumber in project herddb by diennea: class MemoryDataStorageManager, method indexCheckpoint.
@Override
public List<PostCheckpointAction> indexCheckpoint(String tableSpace, String indexName, IndexStatus indexStatus, boolean pin) throws DataStorageManagerException {
    /* Checkpoint pinning */
    final Map<Long, Integer> pins = pinIndexAndGetPages(tableSpace, indexName, indexStatus, pin);
    final Set<LogSequenceNumber> checkpoints = pinIndexAndGetCheckpoints(tableSpace, indexName, indexStatus, pin);
    List<Long> pagesForIndex = new ArrayList<>();
    String prefix = tableSpace + "." + indexName + "_";
    for (String key : indexpages.keySet()) {
        if (key.startsWith(prefix)) {
            long pageId = Long.parseLong(key.substring(prefix.length()));
            if (!pins.containsKey(pageId)) {
                pagesForIndex.add(pageId);
            }
        }
    }
    pagesForIndex.removeAll(indexStatus.activePages);
    List<PostCheckpointAction> result = new ArrayList<>();
    for (long pageId : pagesForIndex) {
        result.add(new PostCheckpointAction(indexName, "drop page " + pageId) {
            @Override
            public void run() {
                // remove only after the checkpoint has completed
                indexpages.remove(prefix + pageId);
            }
        });
    }
    for (String oldStatus : indexStatuses.keySet()) {
        if (oldStatus.startsWith(prefix)) {
            /* Check for checkpoint skip only if the name matches the expected structure */
            final LogSequenceNumber log = evaluateLogSequenceNumber(prefix.substring(0, prefix.length()));
            if (log != null) {
                /* If it is pinned, skip this status */
                if (checkpoints.contains(log)) {
                    continue;
                }
            }
            result.add(new PostCheckpointAction(indexName, "drop index checkpoint " + oldStatus) {
                @Override
                public void run() {
                    // remove only after the checkpoint has completed
                    indexStatuses.remove(oldStatus);
                }
            });
        }
    }
    VisibleByteArrayOutputStream oo = new VisibleByteArrayOutputStream(1024);
    try (ExtendedDataOutputStream dataOutputKeys = new ExtendedDataOutputStream(oo)) {
        indexStatus.serialize(dataOutputKeys);
        dataOutputKeys.flush();
        oo.write(oo.xxhash64());
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    /* Use a copy to limit the byte[] size to the minimum needed */
    indexStatuses.put(checkpointName(tableSpace, indexName, indexStatus.sequenceNumber), oo.toByteArray());
    return result;
}
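The actions returned here are deferred cleanup work: each one drops a page or an older index status, so they must run only once the checkpoint itself has completed. Below is a minimal sketch of a caller that drains the returned list from indexCheckpoint (or the tableCheckpoint variant shown next), using only the PostCheckpointAction.run() method visible above; the import path herddb.storage.PostCheckpointAction is an assumption.

import herddb.storage.PostCheckpointAction; // package path assumed
import java.util.List;

public final class CheckpointFinalizer {

    /**
     * Runs the deferred cleanup actions returned by indexCheckpoint/tableCheckpoint.
     * Each action removes pages or older status entries, so it must be executed
     * only after the new checkpoint has been fully completed.
     */
    public static void runPostCheckpointActions(List<PostCheckpointAction> actions) {
        for (PostCheckpointAction action : actions) {
            action.run();
        }
    }
}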
Use of herddb.log.LogSequenceNumber in project herddb by diennea: class MemoryDataStorageManager, method tableCheckpoint.
@Override
public List<PostCheckpointAction> tableCheckpoint(String tableSpace, String tableName, TableStatus tableStatus, boolean pin) throws DataStorageManagerException {
    /* Checkpoint pinning */
    final Map<Long, Integer> pins = pinTableAndGetPages(tableSpace, tableName, tableStatus, pin);
    final Set<LogSequenceNumber> checkpoints = pinTableAndGetCheckpoints(tableSpace, tableName, tableStatus, pin);
    List<Long> pagesForTable = new ArrayList<>();
    String prefix = tableSpace + "." + tableName + "_";
    for (String key : pages.keySet()) {
        if (key.startsWith(prefix)) {
            long pageId = Long.parseLong(key.substring(prefix.length()));
            if (!pins.containsKey(pageId)) {
                pagesForTable.add(pageId);
            }
        }
    }
    pagesForTable.removeAll(tableStatus.activePages.keySet());
    List<PostCheckpointAction> result = new ArrayList<>();
    for (long pageId : pagesForTable) {
        result.add(new PostCheckpointAction(tableName, "drop page " + pageId) {
            @Override
            public void run() {
                // remove only after the checkpoint has completed
                pages.remove(prefix + pageId);
                LOGGER.log(Level.SEVERE, "removing " + (prefix + pageId));
            }
        });
    }
    for (String oldStatus : tableStatuses.keySet()) {
        if (oldStatus.startsWith(prefix)) {
            /* Check for checkpoint skip only if the name matches the expected structure */
            final LogSequenceNumber log = evaluateLogSequenceNumber(prefix.substring(0, prefix.length()));
            if (log != null) {
                /* If it is pinned, skip this status */
                if (checkpoints.contains(log)) {
                    continue;
                }
            }
            result.add(new PostCheckpointAction(tableName, "drop table checkpoint " + oldStatus) {
                @Override
                public void run() {
                    // remove only after the checkpoint has completed
                    tableStatuses.remove(oldStatus);
                }
            });
        }
    }
    VisibleByteArrayOutputStream oo = new VisibleByteArrayOutputStream(1024);
    try (ExtendedDataOutputStream dataOutputKeys = new ExtendedDataOutputStream(oo)) {
        tableStatus.serialize(dataOutputKeys);
        dataOutputKeys.flush();
        oo.write(oo.xxhash64());
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    /* Use a copy to limit the byte[] size to the minimum needed */
    tableStatuses.put(checkpointName(tableSpace, tableName, tableStatus.sequenceNumber), oo.toByteArray());
    return result;
}
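Both checkpoint methods decide whether to keep an old status by calling checkpoints.contains(log) on a Set<LogSequenceNumber>, which only works if LogSequenceNumber behaves as a value object (equals/hashCode derived from ledger id and offset). A small sketch of that assumption, using only the (ledgerId, offset) constructor seen in these snippets:

import herddb.log.LogSequenceNumber;
import java.util.HashSet;
import java.util.Set;

public final class PinnedCheckpointLookup {

    public static void main(String[] args) {
        Set<LogSequenceNumber> pinnedCheckpoints = new HashSet<>();
        pinnedCheckpoints.add(new LogSequenceNumber(5, 100));

        // The lookup below relies on value equality of LogSequenceNumber,
        // the same property the checkpoints.contains(log) checks above depend on.
        boolean pinned = pinnedCheckpoints.contains(new LogSequenceNumber(5, 100));
        System.out.println("pinned = " + pinned); // expected: true
    }
}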
Use of herddb.log.LogSequenceNumber in project herddb by diennea: class ServerSideConnectionPeer, method handleRequestTableRestore.
private void handleRequestTableRestore(Message message, Channel _channel) {
    try {
        String tableSpace = (String) message.parameters.get("tableSpace");
        byte[] table = (byte[]) message.parameters.get("table");
        long dumpLedgerId = (long) message.parameters.get("dumpLedgerId");
        long dumpOffset = (long) message.parameters.get("dumpOffset");
        Table tableSchema = Table.deserialize(table);
        tableSchema = Table.builder().cloning(tableSchema).tablespace(tableSpace).build();
        server.getManager().getTableSpaceManager(tableSpace).beginRestoreTable(tableSchema.serialize(), new LogSequenceNumber(dumpLedgerId, dumpOffset));
        _channel.sendReplyMessage(message, Message.ACK(null));
    } catch (StatementExecutionException err) {
        Message error = Message.ERROR(null, err);
        if (err instanceof NotLeaderException) {
            error.setParameter("notLeader", "true");
        }
        _channel.sendReplyMessage(message, error);
    }
}
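The handler reads four typed entries from message.parameters and rebuilds the dump position as new LogSequenceNumber(dumpLedgerId, dumpOffset). The sketch below is a hypothetical illustration of the parameter map such a request carries; the Message factory that actually wraps and transmits it is not shown here, and the literal values are placeholders.

import java.util.HashMap;
import java.util.Map;

public final class TableRestoreRequestExample {

    // Hypothetical helper: builds the parameter map handleRequestTableRestore expects.
    public static Map<String, Object> buildParameters(String tableSpace, byte[] serializedTable) {
        Map<String, Object> parameters = new HashMap<>();
        parameters.put("tableSpace", tableSpace);   // target tablespace name
        parameters.put("table", serializedTable);   // output of Table.serialize()
        parameters.put("dumpLedgerId", 5L);         // ledger id of the dump position (placeholder)
        parameters.put("dumpOffset", 100L);         // offset within that ledger (placeholder)
        return parameters;
    }
}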
Use of herddb.log.LogSequenceNumber in project herddb by diennea: class TableStatus, method deserialize.
public static TableStatus deserialize(ExtendedDataInputStream in) throws IOException {
    // version
    long version = in.readVLong();
    // flags for future implementations
    long flags = in.readVLong();
    if (version != 1 || flags != 0) {
        throw new DataStorageManagerException("corrupted table status");
    }
    String tableName = in.readUTF();
    long ledgerId = in.readLong();
    long offset = in.readLong();
    long nextPageId = in.readLong();
    byte[] nextPrimaryKeyValue = in.readArray();
    int numActivePages = in.readVInt();
    Map<Long, DataPageMetaData> activePages = new HashMap<>(numActivePages);
    for (int i = 0; i < numActivePages; i++) {
        activePages.put(in.readVLong(), DataPageMetaData.deserialize(in));
    }
    return new TableStatus(tableName, new LogSequenceNumber(ledgerId, offset), nextPrimaryKeyValue, nextPageId, activePages);
}
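deserialize is the read side of the TableStatus.serialize call used in tableCheckpoint above, so a status written through an ExtendedDataOutputStream can be read back from the resulting bytes. Below is a minimal round-trip sketch, assuming the herddb.utils package for the stream classes and an ExtendedDataInputStream constructor that wraps a plain InputStream; it is an illustration, not the project's own test code.

import herddb.log.LogSequenceNumber;
import herddb.storage.TableStatus;               // package paths assumed
import herddb.utils.ExtendedDataInputStream;
import herddb.utils.ExtendedDataOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.HashMap;

public final class TableStatusRoundTrip {

    public static void main(String[] args) throws IOException {
        // Constructor argument order follows the deserialize method above:
        // name, sequence number, next primary key value, next page id, active pages.
        TableStatus original = new TableStatus("t1",
                new LogSequenceNumber(5, 100),
                new byte[0],
                1L,
                new HashMap<>());

        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        try (ExtendedDataOutputStream out = new ExtendedDataOutputStream(buffer)) {
            original.serialize(out);
        }

        TableStatus copy = TableStatus.deserialize(
                new ExtendedDataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
        System.out.println(copy.sequenceNumber); // should be the same (5, 100) position
    }
}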
Use of herddb.log.LogSequenceNumber in project herddb by diennea: class DownloadTableSpaceTest, method downloadTablespaceTest.
@Test
public void downloadTablespaceTest() throws Exception {
    ServerConfiguration serverconfig_1 = new ServerConfiguration(folder.newFolder().toPath());
    serverconfig_1.set(ServerConfiguration.PROPERTY_NODEID, "server1");
    serverconfig_1.set(ServerConfiguration.PROPERTY_PORT, 7867);
    serverconfig_1.set(ServerConfiguration.PROPERTY_MODE, ServerConfiguration.PROPERTY_MODE_CLUSTER);
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_ADDRESS, testEnv.getAddress());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_PATH, testEnv.getPath());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_SESSIONTIMEOUT, testEnv.getTimeout());
    try (Server server_1 = new Server(serverconfig_1)) {
        server_1.start();
        server_1.waitForStandaloneBoot();
        Table table = Table.builder().name("t1").column("c", ColumnTypes.INTEGER).primaryKey("c").build();
        server_1.getManager().executeStatement(new CreateTableStatement(table), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        for (int i = 0; i < 1000; i++) {
            server_1.getManager().executeUpdate(new InsertStatement(TableSpace.DEFAULT, "t1", RecordSerializer.makeRecord(table, "c", i)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        }
        List<Map<String, Object>> logical_data = new ArrayList<>();
        AtomicBoolean start = new AtomicBoolean();
        try (HDBClient client = new HDBClient(new ClientConfiguration(folder.newFolder().toPath()));
                HDBConnection con = client.openConnection()) {
            client.setClientSideMetadataProvider(new ZookeeperClientSideMetadataProvider(testEnv.getAddress(), testEnv.getTimeout(), testEnv.getPath()));
            CountDownLatch count = new CountDownLatch(1);
            con.dumpTableSpace(TableSpace.DEFAULT, new TableSpaceDumpReceiver() {

                Table table;

                @Override
                public void start(LogSequenceNumber logSequenceNumber) throws DataStorageManagerException {
                    System.out.println("start at " + logSequenceNumber);
                    start.set(true);
                }

                @Override
                public void finish(LogSequenceNumber logSequenceNumber) {
                    System.out.println("finish!");
                    count.countDown();
                }

                @Override
                public void endTable() {
                    System.out.println("endTable");
                    table = null;
                }

                @Override
                public void receiveTableDataChunk(List<Record> records) {
                    // System.out.println("receiveTableDataChunk " + records);
                    for (Record r : records) {
                        Map<String, Object> bean = r.toBean(table);
                        // System.out.println("received:" + bean);
                        logical_data.add(bean);
                    }
                }

                @Override
                public void beginTable(DumpedTableMetadata table, Map<String, Object> stats) {
                    System.out.println("beginTable " + table);
                    this.table = table.table;
                }
            }, 89, false);
            assertTrue(count.await(20, TimeUnit.SECONDS));
            assertEquals(1000, logical_data.size());
            assertTrue(start.get());
        }
    }
}
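The test implicitly relies on the receiver callbacks arriving in order: start delivers the log position of the dump, beginTable provides the table schema that receiveTableDataChunk needs to turn records into beans, endTable clears it, and finish releases the CountDownLatch so the assertions can run.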