Example 26 with Index

Use of herddb.model.Index in project herddb by diennea.

The class UpgradeFrom050WithBrinIndexesTest, method test:

private void test(String file) throws Exception {
    File dbdatadir = folder.newFolder("dbdata050_" + file);
    try (InputStream in = UpgradeFrom050WithBrinIndexesTest.class.getResourceAsStream(file)) {
        ZIPUtils.unZip(in, dbdatadir);
    }
    final Path dbdata = dbdatadir.toPath().resolve("dbdata");
    Path metadataPath = dbdata.resolve("metadata");
    Path dataPath = dbdata.resolve("data");
    Path logsPath = dbdata.resolve("txlog");
    Path tmoDir = dbdata.resolve("tmp");
    assertTrue(Files.isDirectory(metadataPath));
    assertTrue(Files.isDirectory(dataPath));
    assertTrue(Files.isDirectory(logsPath));
    Path nodeid = dataPath.resolve("nodeid");
    assertTrue(Files.isRegularFile(nodeid));
    String id = new String(Files.readAllBytes(nodeid), StandardCharsets.UTF_8);
    System.out.println("id:" + id);
    String expectedNodeId = "capra";
    assertTrue(id.endsWith("\n" + expectedNodeId));
    try (DBManager manager = new DBManager(expectedNodeId, new FileMetadataStorageManager(metadataPath), new FileDataStorageManager(dataPath), new FileCommitLogManager(logsPath), tmoDir, null)) {
        manager.start();
        final String tableSpace = "herd";
        final String tableName = "testtable";
        assertEquals(expectedNodeId, manager.getNodeId());
        assertTrue(manager.waitForTablespace(tableSpace, 10000));
        TableSpaceManager tableSpaceManager = manager.getTableSpaceManager(tableSpace);
        AbstractTableManager tableManager = tableSpaceManager.getTableManager(tableName);
        List<Index> indexes = tableManager.getAvailableIndexes();
        for (Index e : indexes) {
            System.out.println("INDEX: " + e);
            assertEquals(Index.TYPE_BRIN, e.type);
        }
        assertEquals(4, indexes.size());
        for (Column c : tableManager.getTable().getColumns()) {
            System.out.println("COLUMN :" + c);
        }
        {
            TranslatedQuery translated = manager.getPlanner().translate(tableSpace, "SELECT * FROM " + tableName + " ORDER BY pk,n1,n2", Collections.emptyList(), true, true, false, -1);
            ScanStatement scan = translated.plan.mainStatement.unwrap(ScanStatement.class);
            System.out.println("TABLE CONTENTS");
            try (DataScanner scan1 = manager.scan(scan, translated.context, TransactionContext.NO_TRANSACTION)) {
                for (DataAccessor r : scan1.consume()) {
                    System.out.println("RECORD " + r.toMap());
                }
            }
        }
        test(new TestCase("SELECT * FROM " + tableSpace + "." + tableName + " WHERE n1=1", SecondaryIndexSeek.class, 4), manager, tableSpace);
        // this could be a SecondaryIndexSeek, but with several indexes available the planner is not smart enough to pick it
        test(new TestCase("SELECT * FROM " + tableSpace + "." + tableName + " WHERE n2=3", SecondaryIndexPrefixScan.class, 2), manager, tableSpace);
        test(new TestCase("SELECT * FROM " + tableSpace + "." + tableName + " WHERE n2>=3", SecondaryIndexRangeScan.class, 3), manager, tableSpace);
        test(new TestCase("SELECT * FROM " + tableSpace + "." + tableName + " WHERE n1=1 and n2=3", SecondaryIndexPrefixScan.class, 1), manager, tableSpace);
    }
}
Also used : Path(java.nio.file.Path) TranslatedQuery(herddb.sql.TranslatedQuery) SecondaryIndexRangeScan(herddb.index.SecondaryIndexRangeScan) InputStream(java.io.InputStream) FileMetadataStorageManager(herddb.file.FileMetadataStorageManager) DataAccessor(herddb.utils.DataAccessor) Index(herddb.model.Index) FileCommitLogManager(herddb.file.FileCommitLogManager) DBManager(herddb.core.DBManager) DataScanner(herddb.model.DataScanner) SecondaryIndexSeek(herddb.index.SecondaryIndexSeek) AbstractTableManager(herddb.core.AbstractTableManager) Column(herddb.model.Column) FileDataStorageManager(herddb.file.FileDataStorageManager) TableSpaceManager(herddb.core.TableSpaceManager) SecondaryIndexPrefixScan(herddb.index.SecondaryIndexPrefixScan) File(java.io.File) ScanStatement(herddb.model.commands.ScanStatement)
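The four BRIN indexes checked above were created by the 0.5.0 database that produced the zipped data directory, so the test only verifies that they survive the upgrade and are still picked by the planner. For reference, the sketch below shows roughly how such a secondary index is declared programmatically; it is a minimal sketch assuming the Index builder and CreateIndexStatement pattern listed in the Aggregations section at the end, and the builder method names are assumptions rather than verified signatures.

// A minimal sketch, assuming the Index builder API used elsewhere in the project;
// the builder method names below are assumptions, not verified signatures.
Table table = tableManager.getTable();
Index brinIndex = Index
        .builder()
        .onTable(table)
        .type(Index.TYPE_BRIN)
        // hypothetical indexed column, matching the n1 column queried above
        .column("n1", ColumnTypes.INTEGER)
        .build();
manager.executeStatement(
        new CreateIndexStatement(brinIndex),
        StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(),
        TransactionContext.NO_TRANSACTION);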

Example 27 with Index

Use of herddb.model.Index in project herddb by diennea.

The class FileDataStorageManager, method loadIndexes:

@Override
public List<Index> loadIndexes(LogSequenceNumber sequenceNumber, String tableSpace) throws DataStorageManagerException {
    try {
        Path tableSpaceDirectory = getTablespaceDirectory(tableSpace);
        Files.createDirectories(tableSpaceDirectory);
        Path file = getTablespaceIndexesMetadataFile(tableSpace, sequenceNumber);
        LOGGER.log(Level.INFO, "loadIndexes for tableSpace " + tableSpace + " from " + file.toAbsolutePath().toString() + ", sequenceNumber:" + sequenceNumber);
        if (!Files.isRegularFile(file)) {
            if (sequenceNumber.isStartOfTime()) {
                LOGGER.log(Level.INFO, "file " + file.toAbsolutePath().toString() + " not found");
                return Collections.emptyList();
            } else {
                throw new DataStorageManagerException("local index data not available for tableSpace " + tableSpace + ", recovering from sequenceNumber " + sequenceNumber);
            }
        }
        try (InputStream input = new BufferedInputStream(Files.newInputStream(file, StandardOpenOption.READ), 4 * 1024 * 1024);
            ExtendedDataInputStream din = new ExtendedDataInputStream(input)) {
            // version
            long version = din.readVLong();
            // flags for future implementations
            long flags = din.readVLong();
            if (version != 1 || flags != 0) {
                throw new DataStorageManagerException("corrupted index list file " + file.toAbsolutePath());
            }
            String readname = din.readUTF();
            if (!readname.equals(tableSpace)) {
                throw new DataStorageManagerException("file " + file.toAbsolutePath() + " is not for spablespace " + tableSpace);
            }
            long ledgerId = din.readZLong();
            long offset = din.readZLong();
            if (ledgerId != sequenceNumber.ledgerId || offset != sequenceNumber.offset) {
                throw new DataStorageManagerException("file " + file.toAbsolutePath() + " is not for sequence number " + sequenceNumber);
            }
            int numIndexes = din.readInt();
            List<Index> res = new ArrayList<>();
            for (int i = 0; i < numIndexes; i++) {
                byte[] indexData = din.readArray();
                Index index = Index.deserialize(indexData);
                res.add(index);
            }
            return Collections.unmodifiableList(res);
        }
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
}
Also used : Path(java.nio.file.Path) DataStorageManagerException(herddb.storage.DataStorageManagerException) BufferedInputStream(java.io.BufferedInputStream) ODirectFileInputStream(herddb.utils.ODirectFileInputStream) ExtendedDataInputStream(herddb.utils.ExtendedDataInputStream) SimpleByteArrayInputStream(herddb.utils.SimpleByteArrayInputStream) InputStream(java.io.InputStream) ArrayList(java.util.ArrayList) Index(herddb.model.Index) BLinkKeyToPageIndex(herddb.index.blink.BLinkKeyToPageIndex) KeyToPageIndex(herddb.index.KeyToPageIndex) IOException(java.io.IOException) ExtendedDataInputStream(herddb.utils.ExtendedDataInputStream) BufferedInputStream(java.io.BufferedInputStream)
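loadIndexes is the read side of the indexes metadata written at checkpoint time (the matching writer is writeTables in Example 28): version, flags, tablespace name, checkpoint position, then one serialized Index per entry. The fragment below is a hypothetical recovery-side sketch; only loadIndexes and its signature come from the method above, the surrounding method and variable names are invented.

// Hypothetical recovery-side sketch: only loadIndexes and its signature come
// from the method above; the method and variable names here are invented.
static List<Index> restoreIndexDefinitions(FileDataStorageManager dataStorageManager,
                                           LogSequenceNumber checkpointPosition,
                                           String tableSpace) throws DataStorageManagerException {
    List<Index> indexes = dataStorageManager.loadIndexes(checkpointPosition, tableSpace);
    for (Index index : indexes) {
        // each entry was materialized with Index.deserialize() inside loadIndexes
        System.out.println("restoring index definition: " + index);
    }
    return indexes;
}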

Example 28 with Index

Use of herddb.model.Index in project herddb by diennea.

The class FileDataStorageManager, method writeTables:

@Override
public Collection<PostCheckpointAction> writeTables(String tableSpace, LogSequenceNumber sequenceNumber, List<Table> tables, List<Index> indexlist, boolean prepareActions) throws DataStorageManagerException {
    if (sequenceNumber.isStartOfTime() && !tables.isEmpty()) {
        throw new DataStorageManagerException("impossible to write a non empty table list at start-of-time");
    }
    Path tableSpaceDirectory = getTablespaceDirectory(tableSpace);
    try {
        Files.createDirectories(tableSpaceDirectory);
        Path fileTables = getTablespaceTablesMetadataFile(tableSpace, sequenceNumber);
        Path fileIndexes = getTablespaceIndexesMetadataFile(tableSpace, sequenceNumber);
        Path parent = getParent(fileTables);
        Files.createDirectories(parent);
        LOGGER.log(Level.FINE, "writeTables for tableSpace " + tableSpace + " sequenceNumber " + sequenceNumber + " to " + fileTables.toAbsolutePath().toString());
        try (ManagedFile file = ManagedFile.open(fileTables, requirefsync);
            SimpleBufferedOutputStream buffer = new SimpleBufferedOutputStream(file.getOutputStream(), COPY_BUFFERS_SIZE);
            ExtendedDataOutputStream dout = new ExtendedDataOutputStream(buffer)) {
            // version
            dout.writeVLong(1);
            // flags for future implementations
            dout.writeVLong(0);
            dout.writeUTF(tableSpace);
            dout.writeZLong(sequenceNumber.ledgerId);
            dout.writeZLong(sequenceNumber.offset);
            dout.writeInt(tables.size());
            for (Table t : tables) {
                byte[] tableSerialized = t.serialize();
                dout.writeArray(tableSerialized);
            }
            dout.flush();
            file.sync();
        } catch (IOException err) {
            throw new DataStorageManagerException(err);
        }
        try (ManagedFile file = ManagedFile.open(fileIndexes, requirefsync);
            SimpleBufferedOutputStream buffer = new SimpleBufferedOutputStream(file.getOutputStream(), COPY_BUFFERS_SIZE);
            ExtendedDataOutputStream dout = new ExtendedDataOutputStream(buffer)) {
            // version
            dout.writeVLong(1);
            // flags for future implementations
            dout.writeVLong(0);
            dout.writeUTF(tableSpace);
            dout.writeZLong(sequenceNumber.ledgerId);
            dout.writeZLong(sequenceNumber.offset);
            if (indexlist != null) {
                dout.writeInt(indexlist.size());
                for (Index t : indexlist) {
                    byte[] indexSerialized = t.serialize();
                    dout.writeArray(indexSerialized);
                }
            } else {
                dout.writeInt(0);
            }
            dout.flush();
            file.sync();
        } catch (IOException err) {
            throw new DataStorageManagerException(err);
        }
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    Collection<PostCheckpointAction> result = new ArrayList<>();
    if (prepareActions) {
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(tableSpaceDirectory)) {
            for (Path p : stream) {
                if (isTablespaceIndexesMetadataFile(p)) {
                    try {
                        LogSequenceNumber logPositionInFile = readLogSequenceNumberFromIndexMetadataFile(tableSpace, p);
                        if (sequenceNumber.after(logPositionInFile)) {
                            LOGGER.log(Level.FINEST, "indexes metadata file " + p.toAbsolutePath() + ". will be deleted after checkpoint end");
                            result.add(new DeleteFileAction(tableSpace, "indexes", "delete indexesmetadata file " + p.toAbsolutePath(), p));
                        }
                    } catch (DataStorageManagerException ignore) {
                        LOGGER.log(Level.SEVERE, "Unparsable indexesmetadata file " + p.toAbsolutePath(), ignore);
                        result.add(new DeleteFileAction(tableSpace, "indexes", "delete unparsable indexesmetadata file " + p.toAbsolutePath(), p));
                    }
                } else if (isTablespaceTablesMetadataFile(p)) {
                    try {
                        LogSequenceNumber logPositionInFile = readLogSequenceNumberFromTablesMetadataFile(tableSpace, p);
                        if (sequenceNumber.after(logPositionInFile)) {
                            LOGGER.log(Level.FINEST, "tables metadata file " + p.toAbsolutePath() + ". will be deleted after checkpoint end");
                            result.add(new DeleteFileAction(tableSpace, "tables", "delete tablesmetadata file " + p.toAbsolutePath(), p));
                        }
                    } catch (DataStorageManagerException ignore) {
                        LOGGER.log(Level.SEVERE, "Unparsable tablesmetadata file " + p.toAbsolutePath(), ignore);
                        result.add(new DeleteFileAction(tableSpace, "transactions", "delete unparsable tablesmetadata file " + p.toAbsolutePath(), p));
                    }
                }
            }
        } catch (IOException err) {
            LOGGER.log(Level.SEVERE, "Could not list dir " + tableSpaceDirectory, err);
        }
    }
    return result;
}
Also used : Path(java.nio.file.Path) DataStorageManagerException(herddb.storage.DataStorageManagerException) Table(herddb.model.Table) ArrayList(java.util.ArrayList) LogSequenceNumber(herddb.log.LogSequenceNumber) Index(herddb.model.Index) BLinkKeyToPageIndex(herddb.index.blink.BLinkKeyToPageIndex) KeyToPageIndex(herddb.index.KeyToPageIndex) IOException(java.io.IOException) ManagedFile(herddb.utils.ManagedFile) ExtendedDataOutputStream(herddb.utils.ExtendedDataOutputStream) PostCheckpointAction(herddb.core.PostCheckpointAction) SimpleBufferedOutputStream(herddb.utils.SimpleBufferedOutputStream)
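Note that the DeleteFileActions collected here are not executed inside writeTables: older metadata files must only be removed once the whole checkpoint is durable, which is why they are returned as PostCheckpointActions for the caller to run later. A caller-side sketch of that contract follows; writeTables and its return type come from the code above, while the run() call on PostCheckpointAction and the surrounding variable names are assumptions.

// Hypothetical checkpoint driver: writeTables and its return type come from the
// code above; PostCheckpointAction.run() is an assumption, not a verified API.
Collection<PostCheckpointAction> deferredCleanup =
        dataStorageManager.writeTables(tableSpace, sequenceNumber, tables, indexes, true);
// ... persist the remaining checkpoint data and make it durable first ...
for (PostCheckpointAction action : deferredCleanup) {
    action.run(); // deletes the superseded tables/indexes metadata files
}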

Example 29 with Index

Use of herddb.model.Index in project herddb by diennea.

The class RoutedClientSideConnection, method requestReceived:

@Override
@SuppressFBWarnings(value = "SF_SWITCH_NO_DEFAULT")
@SuppressWarnings("empty-statement")
public void requestReceived(Pdu message, Channel channel) {
    try {
        switch(message.type) {
            case Pdu.TYPE_TABLESPACE_DUMP_DATA:
                {
                    String dumpId = PduCodec.TablespaceDumpData.readDumpId(message);
                    TableSpaceDumpReceiver receiver = dumpReceivers.get(dumpId);
                    LOGGER.log(Level.FINE, "receiver for {0}: {1}", new Object[] { dumpId, receiver });
                    if (receiver == null) {
                        if (channel != null) {
                            ByteBuf resp = PduCodec.ErrorResponse.write(message.messageId, "no such dump receiver " + dumpId);
                            channel.sendReplyMessage(message.messageId, resp);
                        }
                        return;
                    }
                    try {
                        String command = PduCodec.TablespaceDumpData.readCommand(message);
                        boolean sendAck = true;
                        switch(command) {
                            case "start":
                                {
                                    long ledgerId = PduCodec.TablespaceDumpData.readLedgerId(message);
                                    long offset = PduCodec.TablespaceDumpData.readOffset(message);
                                    receiver.start(new LogSequenceNumber(ledgerId, offset));
                                    break;
                                }
                            case "beginTable":
                                {
                                    byte[] tableDefinition = PduCodec.TablespaceDumpData.readTableDefinition(message);
                                    Table table = Table.deserialize(tableDefinition);
                                    long estimatedSize = PduCodec.TablespaceDumpData.readEstimatedSize(message);
                                    long dumpLedgerId = PduCodec.TablespaceDumpData.readLedgerId(message);
                                    long dumpOffset = PduCodec.TablespaceDumpData.readOffset(message);
                                    List<byte[]> indexesDef = PduCodec.TablespaceDumpData.readIndexesDefinition(message);
                                    List<Index> indexes = indexesDef.stream().map(Index::deserialize).collect(Collectors.toList());
                                    Map<String, Object> stats = new HashMap<>();
                                    stats.put("estimatedSize", estimatedSize);
                                    stats.put("dumpLedgerId", dumpLedgerId);
                                    stats.put("dumpOffset", dumpOffset);
                                    receiver.beginTable(new DumpedTableMetadata(table, new LogSequenceNumber(dumpLedgerId, dumpOffset), indexes), stats);
                                    break;
                                }
                            case "endTable":
                                {
                                    receiver.endTable();
                                    break;
                                }
                            case "finish":
                                {
                                    long ledgerId = PduCodec.TablespaceDumpData.readLedgerId(message);
                                    long offset = PduCodec.TablespaceDumpData.readOffset(message);
                                    receiver.finish(new LogSequenceNumber(ledgerId, offset));
                                    sendAck = false;
                                    break;
                                }
                            case "data":
                                {
                                    List<Record> records = new ArrayList<>();
                                    PduCodec.TablespaceDumpData.readRecords(message, (key, value) -> {
                                        records.add(new Record(Bytes.from_array(key), Bytes.from_array(value)));
                                    });
                                    receiver.receiveTableDataChunk(records);
                                    break;
                                }
                            case "txlog":
                                {
                                    List<DumpedLogEntry> records = new ArrayList<>();
                                    PduCodec.TablespaceDumpData.readRecords(message, (key, value) -> {
                                        records.add(new DumpedLogEntry(LogSequenceNumber.deserialize(key), value));
                                    });
                                    receiver.receiveTransactionLogChunk(records);
                                    break;
                                }
                            case "transactions":
                                {
                                    List<Transaction> transactions = new ArrayList<>();
                                    PduCodec.TablespaceDumpData.readRecords(message, (key, value) -> {
                                        transactions.add(Transaction.deserialize(null, value));
                                    });
                                    receiver.receiveTransactionsAtDump(transactions);
                                    break;
                                }
                            default:
                                throw new DataStorageManagerException("invalid dump command:" + command);
                        }
                        if (channel != null && sendAck) {
                            ByteBuf res = PduCodec.AckResponse.write(message.messageId);
                            channel.sendReplyMessage(message.messageId, res);
                        }
                    } catch (RuntimeException error) {
                        LOGGER.log(Level.SEVERE, "error while handling dump data", error);
                        receiver.onError(error);
                        if (channel != null) {
                            ByteBuf res = PduCodec.ErrorResponse.write(message.messageId, error);
                            channel.sendReplyMessage(message.messageId, res);
                        }
                    }
                }
                break;
        }
    } finally {
        message.close();
    }
}
Also used : DataStorageManagerException(herddb.storage.DataStorageManagerException) Table(herddb.model.Table) DumpedLogEntry(herddb.backup.DumpedLogEntry) HashMap(java.util.HashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ArrayList(java.util.ArrayList) LogSequenceNumber(herddb.log.LogSequenceNumber) Index(herddb.model.Index) RawString(herddb.utils.RawString) ByteBuf(io.netty.buffer.ByteBuf) DumpedTableMetadata(herddb.backup.DumpedTableMetadata) Transaction(herddb.model.Transaction) Record(herddb.model.Record) SuppressFBWarnings(edu.umd.cs.findbugs.annotations.SuppressFBWarnings)
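requestReceived drives a TableSpaceDumpReceiver through a fixed callback sequence: start, then per table beginTable / data chunks / endTable, interleaved txlog and transactions chunks, and a final finish (which suppresses the ack). Below is a minimal receiver sketch, assuming TableSpaceDumpReceiver can be subclassed by overriding exactly these callbacks; the callback names and argument types are taken from the calls above, while the base-class package and the absence of throws clauses are assumptions.

// Minimal sketch of a dump receiver; callback names and argument types mirror
// the calls in requestReceived above. The base-class package (herddb.client)
// and the omitted throws clauses are assumptions.
import herddb.backup.DumpedLogEntry;
import herddb.backup.DumpedTableMetadata;
import herddb.client.TableSpaceDumpReceiver;
import herddb.log.LogSequenceNumber;
import herddb.model.Record;
import herddb.model.Transaction;
import java.util.List;
import java.util.Map;

public class LoggingDumpReceiver extends TableSpaceDumpReceiver {

    @Override
    public void start(LogSequenceNumber position) {
        System.out.println("dump starts at " + position);
    }

    @Override
    public void beginTable(DumpedTableMetadata table, Map<String, Object> stats) {
        System.out.println("begin table, estimatedSize=" + stats.get("estimatedSize"));
    }

    @Override
    public void receiveTableDataChunk(List<Record> records) {
        System.out.println("data chunk: " + records.size() + " records");
    }

    @Override
    public void endTable() {
        System.out.println("table completed");
    }

    @Override
    public void receiveTransactionLogChunk(List<DumpedLogEntry> entries) {
        System.out.println("txlog chunk: " + entries.size() + " entries");
    }

    @Override
    public void receiveTransactionsAtDump(List<Transaction> transactions) {
        System.out.println("open transactions: " + transactions.size());
    }

    @Override
    public void finish(LogSequenceNumber position) {
        System.out.println("dump finished at " + position);
    }

    @Override
    public void onError(Throwable error) {
        error.printStackTrace();
    }
}

Such a receiver is registered in dumpReceivers under a dumpId before the dump is requested, which is how requestReceived resolves it at the top of the TYPE_TABLESPACE_DUMP_DATA branch.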

Example 30 with Index

Use of herddb.model.Index in project herddb by diennea.

The class BookKeeperDataStorageManager, method writeTables:

@Override
public Collection<PostCheckpointAction> writeTables(String tableSpace, LogSequenceNumber sequenceNumber, List<Table> tables, List<Index> indexlist, boolean prepareActions) throws DataStorageManagerException {
    if (sequenceNumber.isStartOfTime() && !tables.isEmpty()) {
        throw new DataStorageManagerException("impossible to write a non empty table list at start-of-time");
    }
    // we need to flush current mappings, because here we are flushing
    // the status of all of the tables and indexes
    persistTableSpaceMapping(tableSpace);
    String tableSpaceDirectory = getTableSpaceZNode(tableSpace);
    String fileTables = getTablespaceTablesMetadataFile(tableSpace, sequenceNumber);
    String fileIndexes = getTablespaceIndexesMetadataFile(tableSpace, sequenceNumber);
    LOGGER.log(Level.FINE, "writeTables for tableSpace " + tableSpace + " sequenceNumber " + sequenceNumber + " to " + fileTables);
    try (VisibleByteArrayOutputStream buffer = new VisibleByteArrayOutputStream();
        ExtendedDataOutputStream dout = new ExtendedDataOutputStream(buffer)) {
        // version
        dout.writeVLong(1);
        // flags for future implementations
        dout.writeVLong(0);
        dout.writeUTF(tableSpace);
        dout.writeZLong(sequenceNumber.ledgerId);
        dout.writeZLong(sequenceNumber.offset);
        dout.writeInt(tables.size());
        for (Table t : tables) {
            byte[] tableSerialized = t.serialize();
            dout.writeArray(tableSerialized);
        }
        dout.flush();
        writeZNodeEnforceOwnership(tableSpace, fileTables, buffer.toByteArray(), null);
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    try (VisibleByteArrayOutputStream buffer = new VisibleByteArrayOutputStream();
        ExtendedDataOutputStream dout = new ExtendedDataOutputStream(buffer)) {
        // version
        dout.writeVLong(1);
        // flags for future implementations
        dout.writeVLong(0);
        dout.writeUTF(tableSpace);
        dout.writeZLong(sequenceNumber.ledgerId);
        dout.writeZLong(sequenceNumber.offset);
        if (indexlist != null) {
            dout.writeInt(indexlist.size());
            for (Index t : indexlist) {
                byte[] indexSerialized = t.serialize();
                dout.writeArray(indexSerialized);
            }
        } else {
            dout.writeInt(0);
        }
        dout.flush();
        writeZNodeEnforceOwnership(tableSpace, fileIndexes, buffer.toByteArray(), null);
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    Collection<PostCheckpointAction> result = new ArrayList<>();
    if (prepareActions) {
        List<String> stream = zkGetChildren(tableSpaceDirectory);
        for (String p : stream) {
            if (isTablespaceIndexesMetadataFile(p)) {
                try {
                    byte[] content = readZNode(p, new Stat());
                    if (content != null) {
                        LogSequenceNumber logPositionInFile = readLogSequenceNumberFromIndexMetadataFile(tableSpace, content, p);
                        if (sequenceNumber.after(logPositionInFile)) {
                            LOGGER.log(Level.FINEST, "indexes metadata file " + p + ". will be deleted after checkpoint end");
                            result.add(new DeleteZNodeAction(tableSpace, "indexes", "delete indexesmetadata file " + p, p));
                        }
                    }
                } catch (DataStorageManagerException ignore) {
                    LOGGER.log(Level.SEVERE, "Unparsable indexesmetadata file " + p, ignore);
                    result.add(new DeleteZNodeAction(tableSpace, "indexes", "delete unparsable indexesmetadata file " + p, p));
                }
            } else if (isTablespaceTablesMetadataFile(p)) {
                try {
                    byte[] content = readZNode(p, new Stat());
                    if (content != null) {
                        LogSequenceNumber logPositionInFile = readLogSequenceNumberFromTablesMetadataFile(tableSpace, content, p);
                        if (sequenceNumber.after(logPositionInFile)) {
                            LOGGER.log(Level.FINEST, "tables metadata file " + p + ". will be deleted after checkpoint end");
                            result.add(new DeleteZNodeAction(tableSpace, "tables", "delete tablesmetadata file " + p, p));
                        }
                    }
                } catch (DataStorageManagerException ignore) {
                    LOGGER.log(Level.SEVERE, "Unparsable tablesmetadata file " + p, ignore);
                    result.add(new DeleteZNodeAction(tableSpace, "transactions", "delete unparsable tablesmetadata file " + p, p));
                }
            }
        }
    }
    return result;
}
Also used : DataStorageManagerException(herddb.storage.DataStorageManagerException) Table(herddb.model.Table) ArrayList(java.util.ArrayList) VisibleByteArrayOutputStream(herddb.utils.VisibleByteArrayOutputStream) LogSequenceNumber(herddb.log.LogSequenceNumber) Index(herddb.model.Index) BLinkKeyToPageIndex(herddb.index.blink.BLinkKeyToPageIndex) KeyToPageIndex(herddb.index.KeyToPageIndex) IOException(java.io.IOException) ExtendedDataOutputStream(herddb.utils.ExtendedDataOutputStream) PostCheckpointAction(herddb.core.PostCheckpointAction) Stat(org.apache.zookeeper.data.Stat)

Aggregations

Index (herddb.model.Index): 101
Table (herddb.model.Table): 92
CreateIndexStatement (herddb.model.commands.CreateIndexStatement): 71
CreateTableStatement (herddb.model.commands.CreateTableStatement): 71
DataScanner (herddb.model.DataScanner): 67
ScanStatement (herddb.model.commands.ScanStatement): 66
TranslatedQuery (herddb.sql.TranslatedQuery): 61
Test (org.junit.Test): 61
CreateTableSpaceStatement (herddb.model.commands.CreateTableSpaceStatement): 52
SecondaryIndexSeek (herddb.index.SecondaryIndexSeek): 51
MemoryCommitLogManager (herddb.mem.MemoryCommitLogManager): 39
MemoryDataStorageManager (herddb.mem.MemoryDataStorageManager): 39
MemoryMetadataStorageManager (herddb.mem.MemoryMetadataStorageManager): 39
InsertStatement (herddb.model.commands.InsertStatement): 31
DBManager (herddb.core.DBManager): 24
GetStatement (herddb.model.commands.GetStatement): 23
GetResult (herddb.model.GetResult): 22
TransactionContext (herddb.model.TransactionContext): 22
Bytes (herddb.utils.Bytes): 22
ServerConfiguration (herddb.server.ServerConfiguration): 21