Usage of herddb.core.stats.TableManagerStats in the herddb project (diennea): class ScanDuringCheckPointTest, method bigTableScan.
// Regression test: a full-table scan must return every record exactly once even
// while concurrent checkpoints flush and unload data pages in the middle of the scan.
@Test
public // @Ignore
void bigTableScan() throws Exception {
int testSize = 10000;
String nodeId = "localhost";
// fully in-memory DBManager: metadata, data and commit log are all volatile
try (DBManager manager = new DBManager("localhost", new MemoryMetadataStorageManager(), new MemoryDataStorageManager(), new MemoryCommitLogManager(), null, null)) {
// manager.setMaxLogicalPageSize(10);
// manager.setMaxPagesUsedMemory(Long.MAX_VALUE);
manager.start();
CreateTableSpaceStatement st1 = new CreateTableSpaceStatement("tblspace1", Collections.singleton(nodeId), nodeId, 1, 0, 0);
manager.executeStatement(st1, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
manager.waitForTablespace("tblspace1", 10000);
String tableSpaceUUID = manager.getTableSpaceManager("tblspace1").getTableSpaceUUID();
Table table = Table.builder().tablespace("tblspace1").name("t1").column("id", ColumnTypes.STRING).column("name", ColumnTypes.STRING).primaryKey("id").build();
CreateTableStatement st2 = new CreateTableStatement(table);
manager.executeStatement(st2, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
// load the data in two halves with a checkpoint in between, so the records
// end up spread over multiple persisted pages
for (int i = 0; i < testSize / 2; i++) {
InsertStatement insert = new InsertStatement(table.tablespace, table.name, RecordSerializer.makeRecord(table, "id", "k" + i, "name", RandomString.getInstance().nextString(50, new StringBuilder().append("testname").append(i).append("_")).toString()));
assertEquals(1, manager.executeUpdate(insert, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION).getUpdateCount());
}
manager.checkpoint();
for (int i = testSize / 2; i < testSize; i++) {
InsertStatement insert = new InsertStatement(table.tablespace, table.name, RecordSerializer.makeRecord(table, "id", "k" + i, "name", RandomString.getInstance().nextString(50, new StringBuilder().append("testname").append(i).append("_")).toString()));
assertEquals(1, manager.executeUpdate(insert, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION).getUpdateCount());
}
manager.checkpoint();
// sanity check: the table really spans more than one page, otherwise the
// "scan across page boundaries during checkpoint" scenario is vacuous
assertTrue(manager.getDataStorageManager().getActualNumberOfPages(tableSpaceUUID, table.uuid) > 1);
TableManagerStats stats = manager.getTableSpaceManager(table.tablespace).getTableManager(table.name).getStats();
assertEquals(testSize, stats.getTablesize());
assertTrue(testSize > 100);
// task that runs a checkpoint on a dedicated thread and blocks until it finishes
ExecutorService service = Executors.newFixedThreadPool(1);
Runnable checkPointPerformer = new Runnable() {
@Override
public void run() {
CountDownLatch checkpointDone = new CountDownLatch(1);
Thread fakeCheckpointThread = new Thread(new Runnable() {
@Override
public void run() {
try {
manager.checkpoint();
} catch (Throwable t) {
t.printStackTrace();
fail();
}
checkpointDone.countDown();
}
});
fakeCheckpointThread.setDaemon(true);
try {
fakeCheckpointThread.start();
checkpointDone.await();
fakeCheckpointThread.join();
} catch (InterruptedException err) {
throw new RuntimeException(err);
}
}
};
// counts how many checkpoints were actually performed during the scan
AtomicInteger done = new AtomicInteger();
// predicate that accepts every record, but fires a synchronous checkpoint
// every 10th evaluation so pages get flushed while the scan is in flight
Predicate slowPredicate = new Predicate() {
int count = 0;
@Override
public boolean evaluate(Record record, StatementEvaluationContext context) throws StatementExecutionException {
if (count++ % 10 == 0) {
// checkpoint will flush buffers, in the middle of the scan
try {
System.out.println("GO checkpoint !");
service.submit(checkPointPerformer).get();
done.incrementAndGet();
} catch (ExecutionException | InterruptedException err) {
throw new StatementExecutionException(err);
}
}
return true;
}
};
try (DataScanner scan = manager.scan(new ScanStatement(table.tablespace, table, slowPredicate), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION)) {
AtomicInteger count = new AtomicInteger();
scan.forEach(tuple -> {
count.incrementAndGet();
});
// every record must be observed despite the concurrent checkpoints
assertEquals(testSize, count.get());
}
assertEquals(testSize, stats.getTablesize());
// the predicate triggers one checkpoint per 10 records scanned
assertEquals(testSize / 10, done.get());
}
}
Usage of herddb.core.stats.TableManagerStats in the herddb project (diennea): class HistoryChangelogTest, method test.
// End-to-end stress test: many threads concurrently update rows of "mytable" and
// append one row per update to "history" in the same auto-transaction, then the
// server is restarted and every row is verified again after recovery.
@Test
@Ignore
public void test() throws Exception {
Path baseDir = folder.newFolder().toPath();
ServerConfiguration serverConfiguration = new ServerConfiguration(baseDir);
// small pages + tight memory limits force page load/unload churn under load
serverConfiguration.set(ServerConfiguration.PROPERTY_MAX_LOGICAL_PAGE_SIZE, 10 * 1024);
serverConfiguration.set(ServerConfiguration.PROPERTY_MAX_DATA_MEMORY, 1024 * 1024);
serverConfiguration.set(ServerConfiguration.PROPERTY_MAX_PK_MEMORY, 1024 * 1024);
serverConfiguration.set(ServerConfiguration.PROPERTY_CHECKPOINT_PERIOD, 0);
serverConfiguration.set(ServerConfiguration.PROPERTY_DATADIR, folder.newFolder().getAbsolutePath());
serverConfiguration.set(ServerConfiguration.PROPERTY_LOGDIR, folder.newFolder().getAbsolutePath());
// ids that were picked by at least one worker (so history rows must exist)
ConcurrentSkipListSet<Long> doneElements = new ConcurrentSkipListSet<>();
// expected in-memory state of each row; a row is removed from this map while
// a worker is updating it, which doubles as a per-row mutual-exclusion lock
ConcurrentHashMap<Long, Element> elements = new ConcurrentHashMap<>();
try (Server server = new Server(serverConfiguration)) {
server.start();
server.waitForStandaloneBoot();
ClientConfiguration clientConfiguration = new ClientConfiguration(folder.newFolder().toPath());
try (HDBClient client = new HDBClient(clientConfiguration);
HDBConnection connection = client.openConnection()) {
client.setClientSideMetadataProvider(new StaticClientSideMetadataProvider(server));
long resultCreateTable = connection.executeUpdate(TableSpace.DEFAULT, "CREATE TABLE mytable (id long primary key, hid long, status integer)", 0, false, Collections.emptyList()).updateCount;
Assert.assertEquals(1, resultCreateTable);
long resultCreateTableHistory = connection.executeUpdate(TableSpace.DEFAULT, "CREATE TABLE history (id long, hid long, status integer, primary key (id,hid) )", 0, false, Collections.emptyList()).updateCount;
Assert.assertEquals(1, resultCreateTableHistory);
// seed the table with TABLESIZE rows, all at hid=0/status=0, in one transaction
long tx = connection.beginTransaction(TableSpace.DEFAULT);
for (long i = 0; i < TABLESIZE; i++) {
int status = 0;
connection.executeUpdate(TableSpace.DEFAULT, "INSERT INTO mytable (id,hid,status) values(?,?,?)", tx, false, Arrays.asList(i, 0, status));
elements.put(i, new Element(0, status));
}
connection.commitTransaction(TableSpace.DEFAULT, tx);
ExecutorService threadPool = Executors.newFixedThreadPool(THREADPOLSIZE);
try {
List<Future> futures = new ArrayList<>();
AtomicLong updates = new AtomicLong();
for (int i = 0; i < TABLESIZE * MULTIPLIER; i++) {
futures.add(threadPool.submit(new Runnable() {
@Override
public void run() {
try {
long id = ThreadLocalRandom.current().nextInt(TABLESIZE);
doneElements.add(id);
// remove() acts as a try-lock: if another worker holds this
// row right now, skip it instead of blocking
Element element = elements.remove(id);
if (element == null) {
return;
}
int new_status = element.status + 1;
long next_hid = element.hid + 1;
long transactionId;
updates.incrementAndGet();
// AUTOTRANSACTION_ID starts a transaction implicitly; the
// history insert below joins it and both commit atomically
DMLResult updateResult = connection.executeUpdate(TableSpace.DEFAULT, "UPDATE mytable set hid=?,status=? WHERE id=?", TransactionContext.AUTOTRANSACTION_ID, false, Arrays.asList(next_hid, new_status, id));
transactionId = updateResult.transactionId;
if (updateResult.updateCount <= 0) {
throw new RuntimeException("not updated ?");
}
DMLResult insertResult = connection.executeUpdate(TableSpace.DEFAULT, "INSERT INTO history (id,hid,status) values (?,?,?)", transactionId, false, Arrays.asList(id, next_hid, new_status));
if (insertResult.updateCount <= 0) {
throw new RuntimeException("not inserted ?");
}
connection.commitTransaction(TableSpace.DEFAULT, transactionId);
// make the element available again
// NOTE(review): the seed loop calls new Element(0, status) while this
// call passes (new_status, next_hid) — the argument order looks swapped.
// It is invisible here only because status and hid advance in lockstep
// from 0; confirm against Element's constructor signature.
elements.put(id, new Element(new_status, next_hid));
} catch (Exception err) {
err.printStackTrace();
throw new RuntimeException(err);
}
}
}));
}
for (Future f : futures) {
f.get();
}
System.out.println("stats::updates:" + updates);
assertTrue(updates.get() > 0);
TableManagerStats stats = server.getManager().getTableSpaceManager(TableSpace.DEFAULT).getTableManager("mytable").getStats();
System.out.println("stats::tablesize:" + stats.getTablesize());
System.out.println("stats::dirty records:" + stats.getDirtyrecords());
System.out.println("stats::unload count:" + stats.getUnloadedPagesCount());
System.out.println("stats::load count:" + stats.getLoadedPagesCount());
System.out.println("stats::buffers used mem:" + stats.getBuffersUsedMemory());
assertEquals(TABLESIZE, stats.getTablesize());
// verify the live server: every row matches the expected in-memory state,
// and for every touched row the newest history entry matches the row
for (Map.Entry<Long, Element> entry : elements.entrySet()) {
{
GetResult res = connection.executeGet(TableSpace.DEFAULT, "SELECT status,hid FROM mytable where id=?", TransactionContext.NOTRANSACTION_ID, Arrays.asList(entry.getKey()));
assertNotNull(res.data);
assertEquals(entry.getValue().status, res.data.get("status"));
assertEquals(entry.getValue().hid, res.data.get("hid"));
}
if (doneElements.contains(entry.getKey())) {
ScanResultSet res = connection.executeScan(TableSpace.DEFAULT, "SELECT id, status, hid, (SELECT MAX(hid) as mm from history where history.id=mytable.id) as maxhid " + "FROM mytable where id=?", Arrays.asList(entry.getKey()), TransactionContext.NOTRANSACTION_ID, -1, 10000);
List<Map<String, Object>> consume = res.consume();
assertEquals(1, consume.size());
Map<String, Object> data = consume.get(0);
System.out.println("data:" + data);
assertEquals(entry.getValue().status, data.get("status"));
assertEquals(entry.getValue().hid, data.get("hid"));
assertEquals(entry.getValue().hid, data.get("maxhid"));
assertEquals(entry.getKey(), data.get("id"));
}
}
} finally {
threadPool.shutdown();
threadPool.awaitTermination(1, TimeUnit.MINUTES);
}
}
}
// restart and recovery
try (Server server = new Server(serverConfiguration)) {
server.start();
server.waitForStandaloneBoot();
ClientConfiguration clientConfiguration = new ClientConfiguration(folder.newFolder().toPath());
try (HDBClient client = new HDBClient(clientConfiguration);
HDBConnection connection = client.openConnection()) {
client.setClientSideMetadataProvider(new StaticClientSideMetadataProvider(server));
// same verification as above, but after a full restart: proves the
// committed state survived log replay / recovery
for (Map.Entry<Long, Element> entry : elements.entrySet()) {
{
GetResult res = connection.executeGet(TableSpace.DEFAULT, "SELECT status,hid FROM mytable where id=?", TransactionContext.NOTRANSACTION_ID, Arrays.asList(entry.getKey()));
assertNotNull(res.data);
assertEquals(entry.getValue().status, res.data.get("status"));
assertEquals(entry.getValue().hid, res.data.get("hid"));
}
if (doneElements.contains(entry.getKey())) {
ScanResultSet res = connection.executeScan(TableSpace.DEFAULT, "SELECT id, status, hid, (SELECT MAX(hid) as mm from history where history.id=mytable.id) as maxhid " + "FROM mytable where id=?", Arrays.asList(entry.getKey()), TransactionContext.NOTRANSACTION_ID, -1, 10000);
List<Map<String, Object>> consume = res.consume();
assertEquals(1, consume.size());
Map<String, Object> data = consume.get(0);
System.out.println("data:" + data);
assertEquals(entry.getValue().status, data.get("status"));
assertEquals(entry.getValue().hid, data.get("hid"));
assertEquals(entry.getValue().hid, data.get("maxhid"));
assertEquals(entry.getKey(), data.get("id"));
}
}
}
}
}
Usage of herddb.core.stats.TableManagerStats in the herddb project (diennea): class SingleTableDumper, method acceptTableStatus.
@Override
public void acceptTableStatus(TableStatus tableStatus) {
    try {
        Table table = tableManager.getTable();
        byte[] serialized = table.serialize();
        // "beginTable" announces the table to the dump receiver before any data
        // pages are streamed: schema, estimated size, log position and indexes.
        Map<String, Object> beginTableData = new HashMap<>();
        TableManagerStats stats = tableManager.getStats();
        beginTableData.put("command", "beginTable");
        beginTableData.put("table", serialized);
        // estimate only: the table may change while the dump is running
        beginTableData.put("estimatedSize", stats.getTablesize());
        beginTableData.put("dumpLedgerid", tableStatus.sequenceNumber.ledgerId);
        beginTableData.put("dumpOffset", tableStatus.sequenceNumber.offset);
        List<byte[]> indexes = tableManager.getAvailableIndexes().stream().map(Index::serialize).collect(Collectors.toList());
        beginTableData.put("indexes", indexes);
        // synchronous send: the dump must not proceed until the receiver acks
        _channel.sendMessageWithReply(Message.TABLESPACE_DUMP_DATA(null, tableSpaceName, dumpId, beginTableData), timeout);
    } catch (InterruptedException err) {
        // restore the interrupt flag so callers up the stack can still observe
        // the interruption after we wrap it (it was being swallowed before)
        Thread.currentThread().interrupt();
        throw new HerdDBInternalException(err);
    } catch (TimeoutException err) {
        throw new HerdDBInternalException(err);
    }
}
Usage of herddb.core.stats.TableManagerStats in the herddb project (diennea): class TableSpaceManager, method handleLocalMemoryUsage.
long handleLocalMemoryUsage() {
    // Total memory accounted to this tablespace: for every table manager, sum
    // the buffers, primary-key and dirty-records memory reported by its stats.
    return tables.values().stream()
            .map(AbstractTableManager::getStats)
            .mapToLong(stats -> stats.getBuffersUsedMemory()
                    + stats.getKeysUsedMemory()
                    + stats.getDirtyUsedMemory())
            .sum();
}
Usage of herddb.core.stats.TableManagerStats in the herddb project (diennea): class SystablestatsTableManager, method buildVirtualRecordList.
@Override
protected Iterable<Record> buildVirtualRecordList() {
    // Builds one virtual record per committed non-system table, exposing the
    // runtime statistics of its table manager as columns of this system table.
    List<Record> records = new ArrayList<>();
    for (Table committed : tableSpaceManager.getAllCommittedTables()) {
        AbstractTableManager manager = tableSpaceManager.getTableManager(committed.name);
        if (manager == null || manager.isSystemTable()) {
            // no manager (table being dropped) or a system table: not reported
            continue;
        }
        TableManagerStats stats = manager.getStats();
        records.add(RecordSerializer.makeRecord(table,
                "tablespace", committed.tablespace,
                "table_name", committed.name,
                "systemtable", committed.name.startsWith("sys") ? "true" : "false",
                "tablesize", stats.getTablesize(),
                "loadedpages", stats.getLoadedpages(),
                "loadedpagescount", stats.getLoadedPagesCount(),
                "unloadedpagescount", stats.getUnloadedPagesCount(),
                "dirtypages", stats.getDirtypages(),
                "dirtyrecords", stats.getDirtyrecords(),
                "maxlogicalpagesize", stats.getMaxLogicalPageSize(),
                "keysmemory", stats.getKeysUsedMemory(),
                "buffersmemory", stats.getBuffersUsedMemory()));
    }
    return records;
}
Aggregations