
Example 1 with FullTableScanConsumer

Use of herddb.storage.FullTableScanConsumer in project herddb by diennea.

From class TableManager, method start().

@Override
public void start() throws DataStorageManagerException {
    Map<Long, DataPageMetaData> activePagesAtBoot = new HashMap<>();
    bootSequenceNumber = LogSequenceNumber.START_OF_TIME;
    boolean requireLoadAtStartup = keyToPage.requireLoadAtStartup();
    if (requireLoadAtStartup) {
        // non persistent primary key index, we need a full table scan
        LOGGER.log(Level.SEVERE, "loading in memory all the keys for table {0}", new Object[] { table.name });
        dataStorageManager.fullTableScan(tableSpaceUUID, table.uuid, new FullTableScanConsumer() {

            Long currentPage;

            @Override
            public void acceptTableStatus(TableStatus tableStatus) {
                LOGGER.log(Level.SEVERE, "recovery table at " + tableStatus.sequenceNumber);
                nextPrimaryKeyValue.set(Bytes.toLong(tableStatus.nextPrimaryKeyValue, 0));
                nextPageId = tableStatus.nextPageId;
                bootSequenceNumber = tableStatus.sequenceNumber;
                activePagesAtBoot.putAll(tableStatus.activePages);
            }

            @Override
            public void startPage(long pageId) {
                currentPage = pageId;
            }

            @Override
            public void acceptRecord(Record record) {
                if (currentPage == null) {
                    throw new IllegalStateException();
                }
                keyToPage.put(record.key, currentPage);
            }

            @Override
            public void endPage() {
                currentPage = null;
            }

            @Override
            public void endTable() {
            }
        });
    } else {
        LOGGER.log(Level.SEVERE, "loading table {0}, uuid {1}", new Object[] { table.name, table.uuid });
        TableStatus tableStatus = dataStorageManager.getLatestTableStatus(tableSpaceUUID, table.uuid);
        LOGGER.log(Level.SEVERE, "recovery table at " + tableStatus.sequenceNumber);
        nextPrimaryKeyValue.set(Bytes.toLong(tableStatus.nextPrimaryKeyValue, 0));
        nextPageId = tableStatus.nextPageId;
        bootSequenceNumber = tableStatus.sequenceNumber;
        activePagesAtBoot.putAll(tableStatus.activePages);
    }
    keyToPage.start(bootSequenceNumber);
    dataStorageManager.cleanupAfterBoot(tableSpaceUUID, table.uuid, activePagesAtBoot.keySet());
    pageSet.setActivePagesAtBoot(activePagesAtBoot);
    initNewPage();
    LOGGER.log(Level.SEVERE, "loaded {0} keys for table {1}, newPageId {2}, nextPrimaryKeyValue {3}, activePages {4}", new Object[] { keyToPage.size(), table.name, nextPageId, nextPrimaryKeyValue.get(), pageSet.getActivePages() + "" });
    started = true;
}
Also used: DataPageMetaData(herddb.core.PageSet.DataPageMetaData), FullTableScanConsumer(herddb.storage.FullTableScanConsumer), ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap), HashMap(java.util.HashMap), AtomicLong(java.util.concurrent.atomic.AtomicLong), TableStatus(herddb.storage.TableStatus), Record(herddb.model.Record)
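
The anonymous class in this example implies the shape of the FullTableScanConsumer callbacks: acceptTableStatus is invoked once with the table metadata, startPage / acceptRecord / endPage are invoked per page, and endTable closes the scan. As a minimal sketch (assuming FullTableScanConsumer is an interface with exactly these five callbacks, as the anonymous implementations suggest; the RecordCountingConsumer name and its per-page map are illustrative, not HerdDB code), a reusable consumer that counts records per page could look like this:

import herddb.model.Record;
import herddb.storage.FullTableScanConsumer;
import herddb.storage.TableStatus;
import java.util.HashMap;
import java.util.Map;

// Illustrative sketch, not part of HerdDB: counts how many records each page holds.
public class RecordCountingConsumer implements FullTableScanConsumer {

    private final Map<Long, Integer> recordsPerPage = new HashMap<>();
    private Long currentPage;

    @Override
    public void acceptTableStatus(TableStatus tableStatus) {
        // table-level metadata (sequence number, next page id, active pages) is available here
    }

    @Override
    public void startPage(long pageId) {
        currentPage = pageId;
        recordsPerPage.put(pageId, 0);
    }

    @Override
    public void acceptRecord(Record record) {
        if (currentPage == null) {
            throw new IllegalStateException("record received outside of a page");
        }
        recordsPerPage.merge(currentPage, 1, Integer::sum);
    }

    @Override
    public void endPage() {
        currentPage = null;
    }

    @Override
    public void endTable() {
        // scan finished: recordsPerPage now covers the whole table
    }

    public Map<Long, Integer> getRecordsPerPage() {
        return recordsPerPage;
    }
}

Such a consumer would be driven exactly like the anonymous one above, via dataStorageManager.fullTableScan(tableSpaceUUID, table.uuid, consumer).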

Example 2 with FullTableScanConsumer

Use of herddb.storage.FullTableScanConsumer in project herddb by diennea.

From class TableSpaceManager, method dumpTableSpace().

void dumpTableSpace(String dumpId, Channel _channel, int fetchSize, boolean includeLog) throws DataStorageManagerException, LogNotAvailableException {
    LOGGER.log(Level.SEVERE, "dumpTableSpace dumpId:" + dumpId + " channel " + _channel + " fetchSize:" + fetchSize + ", includeLog:" + includeLog);
    TableSpaceCheckpoint checkpoint;
    List<DumpedLogEntry> txlogentries = new CopyOnWriteArrayList<>();
    CommitLogListener logDumpReceiver = new CommitLogListener() {

        @Override
        public void logEntry(LogSequenceNumber logPos, LogEntry data) {
            // we are going to capture all the changes to the tablespace during the dump, in order to replay
            // eventually 'missed' changes during the dump
            txlogentries.add(new DumpedLogEntry(logPos, data.serialize()));
            LOGGER.log(Level.SEVERE, "dumping entry " + logPos + ", " + data + " nentries: " + txlogentries.size());
        }
    };
    generalLock.writeLock().lock();
    try {
        if (includeLog) {
            log.attachCommitLogListener(logDumpReceiver);
        }
        checkpoint = checkpoint(true, true);
        /* Downgrade lock */
        generalLock.readLock().lock();
    } finally {
        generalLock.writeLock().unlock();
    }
    try {
        final int timeout = 60000;
        Map<String, Object> startData = new HashMap<>();
        startData.put("command", "start");
        LogSequenceNumber logSequenceNumber = log.getLastSequenceNumber();
        startData.put("ledgerid", logSequenceNumber.ledgerId);
        startData.put("offset", logSequenceNumber.offset);
        Message response_to_start = _channel.sendMessageWithReply(Message.TABLESPACE_DUMP_DATA(null, tableSpaceName, dumpId, startData), timeout);
        if (response_to_start.type != Message.TYPE_ACK) {
            LOGGER.log(Level.SEVERE, "error response at start command: " + response_to_start.parameters);
            return;
        }
        if (includeLog) {
            List<Transaction> transactionsSnapshot = new ArrayList<>();
            dataStorageManager.loadTransactions(logSequenceNumber, tableSpaceUUID, transactionsSnapshot::add);
            List<Transaction> batch = new ArrayList<>();
            for (Transaction t : transactionsSnapshot) {
                batch.add(t);
                if (batch.size() == 10) {
                    sendTransactionsDump(batch, _channel, dumpId, timeout, response_to_start);
                }
            }
            sendTransactionsDump(batch, _channel, dumpId, timeout, response_to_start);
        }
        for (Entry<String, LogSequenceNumber> entry : checkpoint.tablesCheckpoints.entrySet()) {
            final AbstractTableManager tableManager = tables.get(entry.getKey());
            final LogSequenceNumber sequenceNumber = entry.getValue();
            if (tableManager.isSystemTable()) {
                continue;
            }
            try {
                FullTableScanConsumer sink = new SingleTableDumper(tableSpaceName, tableManager, _channel, dumpId, timeout, fetchSize);
                tableManager.dump(sequenceNumber, sink);
            } catch (DataStorageManagerException err) {
                Map<String, Object> errorOnData = new HashMap<>();
                errorOnData.put("command", "error");
                _channel.sendMessageWithReply(Message.TABLESPACE_DUMP_DATA(null, tableSpaceName, dumpId, errorOnData), timeout);
                LOGGER.log(Level.SEVERE, "error sending dump id " + dumpId, err);
                return;
            }
        }
        if (!txlogentries.isEmpty()) {
            txlogentries.sort(Comparator.naturalOrder());
            sendDumpedCommitLog(txlogentries, _channel, dumpId, timeout);
        }
        Map<String, Object> finishData = new HashMap<>();
        LogSequenceNumber finishLogSequenceNumber = log.getLastSequenceNumber();
        finishData.put("ledgerid", finishLogSequenceNumber.ledgerId);
        finishData.put("offset", finishLogSequenceNumber.offset);
        finishData.put("command", "finish");
        _channel.sendOneWayMessage(Message.TABLESPACE_DUMP_DATA(null, tableSpaceName, dumpId, finishData), new SendResultCallback() {

            @Override
            public void messageSent(Message originalMessage, Throwable error) {
            }
        });
    } catch (InterruptedException | TimeoutException error) {
        LOGGER.log(Level.SEVERE, "error sending dump id " + dumpId);
    } finally {
        generalLock.readLock().unlock();
        if (includeLog) {
            log.removeCommitLogListener(logDumpReceiver);
        }
        for (Entry<String, LogSequenceNumber> entry : checkpoint.tablesCheckpoints.entrySet()) {
            dataStorageManager.unPinTableCheckpoint(tableSpaceUUID, entry.getKey(), entry.getValue());
        }
    }
}
Also used: DataStorageManagerException(herddb.storage.DataStorageManagerException), DumpedLogEntry(herddb.backup.DumpedLogEntry), Message(herddb.network.Message), ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap), HashMap(java.util.HashMap), CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList), ArrayList(java.util.ArrayList), FullTableScanConsumer(herddb.storage.FullTableScanConsumer), SendResultCallback(herddb.network.SendResultCallback), LogEntry(herddb.log.LogEntry), TimeoutException(java.util.concurrent.TimeoutException), CommitLogListener(herddb.log.CommitLogListener), LogSequenceNumber(herddb.log.LogSequenceNumber), TableCheckpoint(herddb.core.AbstractTableManager.TableCheckpoint), Transaction(herddb.model.Transaction), Map(java.util.Map)
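
The /* Downgrade lock */ step in dumpTableSpace is the standard ReentrantReadWriteLock downgrade idiom: the read lock is acquired while the write lock is still held, so no writer can slip in between the checkpoint and the long-running dump that follows. A generic, self-contained sketch of the same idiom (not HerdDB code; class and method names are illustrative):

import java.util.concurrent.locks.ReentrantReadWriteLock;

// Generic illustration of the write-to-read lock downgrade used in dumpTableSpace.
public class LockDowngradeSketch {

    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    public void snapshotThenStream() {
        lock.writeLock().lock();
        try {
            // exclusive phase: in dumpTableSpace this is attaching the
            // commit-log listener and running the checkpoint
            lock.readLock().lock(); // downgrade: take the read lock before releasing the write lock
        } finally {
            lock.writeLock().unlock();
        }
        try {
            // shared phase: long-running read-only work (streaming the dump);
            // concurrent readers are allowed, writers are kept out
        } finally {
            lock.readLock().unlock();
        }
    }
}

Note that dumpTableSpace releases the read lock in its own finally block, together with removing the commit-log listener and unpinning the table checkpoints.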

Example 3 with FullTableScanConsumer

Use of herddb.storage.FullTableScanConsumer in project herddb by diennea.

From class SimpleClusterTest, method test().

@Test
public void test() throws Exception {
    {
        Record record = new Record(Bytes.from_string("key1"), Bytes.from_string("0"));
        InsertStatement st = new InsertStatement(tableSpace, tableName, record);
        assertEquals(1, manager.executeUpdate(st, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION).getUpdateCount());
    }
    assertEquals(0, dataStorageManager.getActualNumberOfPages(tableSpaceUUID, tableName));
    manager.checkpoint();
    String tableUuid = manager.getTableSpaceManager(tableSpace).getTableManager(tableName).getTable().uuid;
    assertNotNull(dataStorageManager.readPage(tableSpaceUUID, tableUuid, 1L));
    assertEquals(1, dataStorageManager.getActualNumberOfPages(tableSpaceUUID, tableUuid));
    {
        GetResult result = manager.get(new GetStatement(tableSpace, tableName, Bytes.from_string("key1"), null, false), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        assertTrue(result.found());
    }
    manager.checkpoint();
    assertEquals(1, dataStorageManager.getActualNumberOfPages(tableSpaceUUID, tableUuid));
    {
        Record record = new Record(Bytes.from_string("key1"), Bytes.from_string("5"));
        UpdateStatement st = new UpdateStatement(tableSpace, tableName, record, null);
        assertEquals(1, manager.executeUpdate(st, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION).getUpdateCount());
    }
    // a new page must be allocated
    manager.checkpoint();
    assertEquals(1, dataStorageManager.getActualNumberOfPages(tableSpaceUUID, tableUuid));
    {
        Record record = new Record(Bytes.from_string("key1"), Bytes.from_string("6"));
        UpdateStatement st = new UpdateStatement(tableSpace, tableName, record, null);
        assertEquals(1, manager.executeUpdate(st, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION).getUpdateCount());
    }
    {
        Record record = new Record(Bytes.from_string("key1"), Bytes.from_string("7"));
        UpdateStatement st = new UpdateStatement(tableSpace, tableName, record, null);
        assertEquals(1, manager.executeUpdate(st, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION).getUpdateCount());
    }
    // only one new page must be allocated, not two
    manager.checkpoint();
    assertEquals(1, dataStorageManager.getActualNumberOfPages(tableSpaceUUID, tableUuid));
    {
        DeleteStatement st = new DeleteStatement(tableSpace, tableName, Bytes.from_string("key1"), null);
        assertEquals(1, manager.executeUpdate(st, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION).getUpdateCount());
        GetResult result = manager.get(new GetStatement(tableSpace, tableName, Bytes.from_string("key1"), null, false), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        assertFalse(result.found());
    }
    // a delete does not trigger new pages in this case
    manager.checkpoint();
    assertEquals(0, dataStorageManager.getActualNumberOfPages(tableSpaceUUID, tableUuid));
    {
        assertEquals(1, manager.executeUpdate(new InsertStatement(tableSpace, tableName, new Record(Bytes.from_string("key2"), Bytes.from_string("50"))), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION).getUpdateCount());
        assertEquals(1, manager.executeUpdate(new InsertStatement(tableSpace, tableName, new Record(Bytes.from_string("key3"), Bytes.from_string("60"))), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION).getUpdateCount());
    }
    manager.checkpoint();
    assertEquals(1, dataStorageManager.getActualNumberOfPages(tableSpaceUUID, tableUuid));
    {
        DeleteStatement st = new DeleteStatement(tableSpace, tableName, Bytes.from_string("key2"), null);
        assertEquals(1, manager.executeUpdate(st, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION).getUpdateCount());
    }
    // a new page, containing the key3 record, is needed
    manager.checkpoint();
    assertEquals(1, dataStorageManager.getActualNumberOfPages(tableSpaceUUID, tableUuid));
    Holder<TableStatus> _tableStatus = new Holder<>();
    dataStorageManager.fullTableScan(tableSpaceUUID, tableUuid, new FullTableScanConsumer() {

        @Override
        public void acceptTableStatus(TableStatus tableStatus) {
            _tableStatus.value = tableStatus;
        }

        @Override
        public void startPage(long pageId) {
        }

        @Override
        public void acceptRecord(Record record) {
        }

        @Override
        public void endPage() {
        }

        @Override
        public void endTable() {
        }
    });
    for (long pageId : _tableStatus.value.activePages.keySet()) {
        List<Record> records = dataStorageManager.readPage(tableSpaceUUID, tableUuid, pageId);
        System.out.println("PAGE #" + pageId + " records :" + records);
    }
    assertEquals(1, _tableStatus.value.activePages.size());
}
Also used: UpdateStatement(herddb.model.commands.UpdateStatement), GetResult(herddb.model.GetResult), Holder(herddb.utils.Holder), DeleteStatement(herddb.model.commands.DeleteStatement), InsertStatement(herddb.model.commands.InsertStatement), FullTableScanConsumer(herddb.storage.FullTableScanConsumer), GetStatement(herddb.model.commands.GetStatement), TableStatus(herddb.storage.TableStatus), Record(herddb.model.Record), Test(org.junit.Test)
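
In tests like this one, only one callback usually matters and the rest are empty stubs. A small collecting helper (hypothetical, not part of HerdDB) keeps the TableStatus and the scanned records together so assertions can run after the scan:

import herddb.model.Record;
import herddb.storage.FullTableScanConsumer;
import herddb.storage.TableStatus;
import java.util.ArrayList;
import java.util.List;

// Hypothetical test helper: captures everything seen during a full table scan.
public class CollectingScanConsumer implements FullTableScanConsumer {

    public TableStatus tableStatus;
    public final List<Record> records = new ArrayList<>();

    @Override
    public void acceptTableStatus(TableStatus tableStatus) {
        this.tableStatus = tableStatus;
    }

    @Override
    public void startPage(long pageId) {
    }

    @Override
    public void acceptRecord(Record record) {
        records.add(record);
    }

    @Override
    public void endPage() {
    }

    @Override
    public void endTable() {
    }
}

With such a helper, the assertions above could read consumer.tableStatus.activePages.size() and consumer.records directly instead of going through a Holder.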

Aggregations

FullTableScanConsumer (herddb.storage.FullTableScanConsumer): 3 usages
Record (herddb.model.Record): 2 usages
TableStatus (herddb.storage.TableStatus): 2 usages
HashMap (java.util.HashMap): 2 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 2 usages
DumpedLogEntry (herddb.backup.DumpedLogEntry): 1 usage
TableCheckpoint (herddb.core.AbstractTableManager.TableCheckpoint): 1 usage
DataPageMetaData (herddb.core.PageSet.DataPageMetaData): 1 usage
CommitLogListener (herddb.log.CommitLogListener): 1 usage
LogEntry (herddb.log.LogEntry): 1 usage
LogSequenceNumber (herddb.log.LogSequenceNumber): 1 usage
GetResult (herddb.model.GetResult): 1 usage
Transaction (herddb.model.Transaction): 1 usage
DeleteStatement (herddb.model.commands.DeleteStatement): 1 usage
GetStatement (herddb.model.commands.GetStatement): 1 usage
InsertStatement (herddb.model.commands.InsertStatement): 1 usage
UpdateStatement (herddb.model.commands.UpdateStatement): 1 usage
Message (herddb.network.Message): 1 usage
SendResultCallback (herddb.network.SendResultCallback): 1 usage
DataStorageManagerException (herddb.storage.DataStorageManagerException): 1 usage