Use of org.apache.bookkeeper.test.TestStatsProvider in project bookkeeper by apache.
Class CompactionTest, method testSuspendGarbageCollection.
private void testSuspendGarbageCollection(ServerConfiguration conf, LedgerManager lm) throws Exception {
    LedgerDirsManager dirManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    CheckpointSource cp = new CheckpointSource() {

        @Override
        public Checkpoint newCheckpoint() {
            // Do nothing.
            return null;
        }

        @Override
        public void checkpointComplete(Checkpoint checkPoint, boolean compact) throws IOException {
            // Do nothing.
        }
    };
    for (File journalDir : conf.getJournalDirs()) {
        Bookie.checkDirectoryStructure(journalDir);
    }
    for (File dir : dirManager.getAllLedgerDirs()) {
        Bookie.checkDirectoryStructure(dir);
    }
    InterleavedLedgerStorage storage = new InterleavedLedgerStorage();
    TestStatsProvider stats = new TestStatsProvider();
    storage.initialize(conf, lm, dirManager, dirManager, null, cp, Checkpointer.NULL,
            stats.getStatsLogger("storage"));
    storage.start();
    int majorCompactions = stats.getCounter("storage.gc." + MAJOR_COMPACTION_COUNT).get().intValue();
    int minorCompactions = stats.getCounter("storage.gc." + MINOR_COMPACTION_COUNT).get().intValue();
    Thread.sleep(conf.getMajorCompactionInterval() * 1000 + conf.getGcWaitTime());
    assertTrue("Major compaction should have happened",
            stats.getCounter("storage.gc." + MAJOR_COMPACTION_COUNT).get() > majorCompactions);
    // test suspending major GC
    storage.gcThread.suspendMajorGC();
    Thread.sleep(1000);
    long startTime = MathUtils.now();
    majorCompactions = stats.getCounter("storage.gc." + MAJOR_COMPACTION_COUNT).get().intValue();
    Thread.sleep(conf.getMajorCompactionInterval() * 1000 + conf.getGcWaitTime());
    assertTrue("major compaction triggered while suspended",
            storage.gcThread.lastMajorCompactionTime < startTime);
    assertTrue("major compaction triggered while suspended",
            stats.getCounter("storage.gc." + MAJOR_COMPACTION_COUNT).get() == majorCompactions);
    // minor compaction should still run while major GC is suspended
    Thread.sleep(conf.getMinorCompactionInterval() * 1000 + conf.getGcWaitTime());
    assertTrue("Minor compaction should have happened",
            stats.getCounter("storage.gc." + MINOR_COMPACTION_COUNT).get() > minorCompactions);
    // test suspending minor GC
    storage.gcThread.suspendMinorGC();
    Thread.sleep(1000);
    startTime = MathUtils.now();
    minorCompactions = stats.getCounter("storage.gc." + MINOR_COMPACTION_COUNT).get().intValue();
    Thread.sleep(conf.getMajorCompactionInterval() * 1000 + conf.getGcWaitTime());
    assertTrue("minor compaction triggered while suspended",
            storage.gcThread.lastMinorCompactionTime < startTime);
    assertTrue("minor compaction triggered while suspended",
            stats.getCounter("storage.gc." + MINOR_COMPACTION_COUNT).get() == minorCompactions);
    // test resuming both
    storage.gcThread.resumeMinorGC();
    storage.gcThread.resumeMajorGC();
    Thread.sleep((conf.getMajorCompactionInterval() + conf.getMinorCompactionInterval()) * 1000
            + (conf.getGcWaitTime() * 2));
    assertTrue("Major compaction should have happened",
            stats.getCounter("storage.gc." + MAJOR_COMPACTION_COUNT).get() > majorCompactions);
    assertTrue("Minor compaction should have happened",
            stats.getCounter("storage.gc." + MINOR_COMPACTION_COUNT).get() > minorCompactions);
    assertTrue("gcThreadRuntime should be non-zero",
            stats.getOpStatsLogger("storage.gc." + THREAD_RUNTIME).getSuccessCount() > 0);
}
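The pattern worth noting: the test drives a real InterleavedLedgerStorage but observes garbage collection only through TestStatsProvider's in-memory counters, never through internal state. The fixed Thread.sleep calls make the test timing-sensitive; below is a minimal sketch of a polling alternative, assuming only the getCounter/get API used above (the class and method names are hypothetical, not part of the BookKeeper test):

import org.apache.bookkeeper.test.TestStatsProvider;

final class StatsTestUtil {

    // Hypothetical helper: poll a TestStatsProvider counter until it exceeds
    // a threshold, instead of sleeping a fixed interval and hoping the GC
    // thread has run in the meantime.
    static void waitForCounterAbove(TestStatsProvider stats, String name,
                                    long threshold, long timeoutMillis) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (stats.getCounter(name).get() <= threshold) {
            if (System.currentTimeMillis() > deadline) {
                throw new AssertionError("counter " + name + " stayed at or below " + threshold);
            }
            Thread.sleep(100);
        }
    }
}

With such a helper, the "should have happened" assertions above become bounded waits rather than fixed sleeps, which makes the test less flaky on slow machines.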
Use of org.apache.bookkeeper.test.TestStatsProvider in project herddb by diennea.
Class FileCommitLogTest, method testMaxBatchSize.
@Test
public void testMaxBatchSize() throws Exception {
    TestStatsProvider testStatsProvider = new TestStatsProvider();
    TestStatsProvider.TestStatsLogger statsLogger = testStatsProvider.getStatsLogger("test");
    try (FileCommitLogManager manager = new FileCommitLogManager(
            folder.newFolder().toPath(),
            ServerConfiguration.PROPERTY_MAX_LOG_FILE_SIZE_DEFAULT,
            2, // flush only when we have 2 entries in the queue
            Integer.MAX_VALUE, // no flush by size
            Integer.MAX_VALUE, // no flush by time
            true, /* require fsync */
            false, /* O_DIRECT */
            ServerConfiguration.PROPERTY_DEFERRED_SYNC_PERIOD_DEFAULT,
            statsLogger)) {
        manager.start();
        int writeCount = 0;
        final long _startWrite = System.currentTimeMillis();
        try (FileCommitLog log = manager.createCommitLog("tt", "aa", "nodeid")) {
            log.startWriting(1);
            CopyOnWriteArrayList<LogSequenceNumber> completed = new CopyOnWriteArrayList<>();
            CommitLogResult future = log.log(LogEntryFactory.beginTransaction(0), true);
            future.logSequenceNumber.thenAccept(completed::add);
            assertFalse(future.logSequenceNumber.isDone());
            CommitLogResult future2 = log.log(LogEntryFactory.beginTransaction(0), true);
            future2.logSequenceNumber.thenAccept(completed::add);
            future.logSequenceNumber.get(10, TimeUnit.SECONDS);
            future2.logSequenceNumber.get(10, TimeUnit.SECONDS);
            TestUtils.waitForCondition(() -> completed.size() == 2, NOOP, 100);
            writeCount = completed.size();
            assertTrue(completed.get(1).after(completed.get(0)));
        }
        final long _endWrite = System.currentTimeMillis();
        AtomicInteger readCount = new AtomicInteger();
        try (CommitLog log = manager.createCommitLog("tt", "aa", "nodeid")) {
            log.recovery(LogSequenceNumber.START_OF_TIME, new BiConsumer<LogSequenceNumber, LogEntry>() {

                @Override
                public void accept(LogSequenceNumber t, LogEntry u) {
                    readCount.incrementAndGet();
                }
            }, true);
        }
        final long _endRead = System.currentTimeMillis();
        assertEquals(writeCount, readCount.get());
        System.out.println("Write time: " + (_endWrite - _startWrite) + " ms");
        System.out.println("Read time: " + (_endRead - _endWrite) + " ms");
    }
}
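The behavior under test: with a batch size of 2 and size- and time-based flushes disabled, the first log() call's future cannot complete until a second entry joins the batch, which is exactly what assertFalse(future.logSequenceNumber.isDone()) checks. A minimal sketch of that contract in plain Java, with no herddb types (the Batcher class is hypothetical, for illustration only):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.concurrent.CompletableFuture;

final class Batcher {

    private final int batchSize;
    private final Deque<CompletableFuture<Long>> pending = new ArrayDeque<>();
    private long sequence = 0;

    Batcher(int batchSize) {
        this.batchSize = batchSize;
    }

    // Returns a future that completes only when the whole batch is flushed.
    synchronized CompletableFuture<Long> log() {
        CompletableFuture<Long> future = new CompletableFuture<>();
        pending.add(future);
        if (pending.size() == batchSize) {
            // flush: assign sequence numbers and complete every queued future
            while (!pending.isEmpty()) {
                pending.poll().complete(sequence++);
            }
        }
        return future;
    }
}

With batchSize = 2, the first future stays incomplete until the second log() call arrives, mirroring the two-writes-then-both-complete sequence in the test above.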
Use of org.apache.bookkeeper.test.TestStatsProvider in project herddb by diennea.
Class FileCommitLogTest, method testLogMultiFiles_O_DIRECT.
@Test
public void testLogMultiFiles_O_DIRECT() throws Exception {
    TestStatsProvider testStatsProvider = new TestStatsProvider();
    TestStatsProvider.TestStatsLogger statsLogger = testStatsProvider.getStatsLogger("test");
    try (FileCommitLogManager manager = new FileCommitLogManager(
            folder.newFolder().toPath(),
            1024 * 2, // 2 KB files
            ServerConfiguration.PROPERTY_MAX_UNSYNCHED_BATCH_DEFAULT,
            ServerConfiguration.PROPERTY_MAX_UNSYNCHED_BATCH_BYTES_DEFAULT,
            ServerConfiguration.PROPERTY_MAX_SYNC_TIME_DEFAULT,
            false,
            true, /* O_DIRECT */
            ServerConfiguration.PROPERTY_DEFERRED_SYNC_PERIOD_DEFAULT,
            statsLogger)) {
        manager.start();
        int writeCount = 0;
        final long _startWrite = System.currentTimeMillis();
        try (FileCommitLog log = manager.createCommitLog("tt", "aa", "nodeid")) {
            log.startWriting(1);
            for (int i = 0; i < 10_000; i++) {
                log.log(LogEntryFactory.beginTransaction(0), false);
                writeCount++;
            }
            TestUtils.waitForCondition(() -> log.getQueueSize() == 0, TestUtils.NOOP, 100);
        }
        final long _endWrite = System.currentTimeMillis();
        AtomicInteger readCount = new AtomicInteger();
        try (CommitLog log = manager.createCommitLog("tt", "aa", "nodeid")) {
            log.recovery(LogSequenceNumber.START_OF_TIME, new BiConsumer<LogSequenceNumber, LogEntry>() {

                @Override
                public void accept(LogSequenceNumber t, LogEntry u) {
                    readCount.incrementAndGet();
                }
            }, true);
        }
        final long _endRead = System.currentTimeMillis();
        assertEquals(writeCount, readCount.get());
        System.out.println("Write time: " + (_endWrite - _startWrite) + " ms");
        System.out.println("Read time: " + (_endRead - _endWrite) + " ms");
        // this number depends on the on-disk format;
        // this test will be updated when the format changes
        assertEquals(145L, statsLogger.scope("aa").getCounter("newfiles").get().longValue());
    }
}
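The final assertion works because FileCommitLog increments its counters on the StatsLogger injected through the manager, and TestStatsProvider keeps those counters in memory, addressable by scope path ("test" / "aa" / "newfiles"). A minimal sketch of the producer side of that pattern, using only the generic org.apache.bookkeeper.stats API (the RollingWriter class is hypothetical, not herddb code):

import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.StatsLogger;

final class RollingWriter {

    private final Counter newFiles;
    private final long maxFileSize;
    private long bytesInCurrentFile = 0;

    RollingWriter(StatsLogger statsLogger, long maxFileSize) {
        // registered under the logger's scope, so a test that owns the
        // TestStatsProvider can read it back via scope(...).getCounter(...)
        this.newFiles = statsLogger.getCounter("newfiles");
        this.maxFileSize = maxFileSize;
    }

    void append(byte[] entry) {
        if (bytesInCurrentFile + entry.length > maxFileSize) {
            // roll to a new file and record it in the injected stats
            bytesInCurrentFile = 0;
            newFiles.inc();
        }
        bytesInCurrentFile += entry.length;
    }
}

The hard-coded 145 is, as the comment says, a property of the on-disk entry format: it is simply how many 2 KB files 10,000 begin-transaction entries happen to fill.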
Use of org.apache.bookkeeper.test.TestStatsProvider in project herddb by diennea.
Class RetryOnLeaderChangedTest, method testExpectedReplicaCount.
@Test
public void testExpectedReplicaCount() throws Exception {
    TestStatsProvider statsProvider = new TestStatsProvider();
    ServerConfiguration serverconfig_1 = newServerConfigurationWithAutoPort(folder.newFolder().toPath());
    serverconfig_1.set(ServerConfiguration.PROPERTY_NODEID, "server1");
    serverconfig_1.set(ServerConfiguration.PROPERTY_MODE, ServerConfiguration.PROPERTY_MODE_CLUSTER);
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_ADDRESS, testEnv.getAddress());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_PATH, testEnv.getPath());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_SESSIONTIMEOUT, testEnv.getTimeout());
    ServerConfiguration serverconfig_2 = serverconfig_1.copy()
            .set(ServerConfiguration.PROPERTY_NODEID, "server2")
            .set(ServerConfiguration.PROPERTY_BASEDIR, folder.newFolder().toPath().toAbsolutePath());
    ServerConfiguration serverconfig_3 = serverconfig_1.copy()
            .set(ServerConfiguration.PROPERTY_NODEID, "server3")
            .set(ServerConfiguration.PROPERTY_BASEDIR, folder.newFolder().toPath().toAbsolutePath());
    try (Server server_1 = new Server(serverconfig_1);
            Server server_2 = new Server(serverconfig_2);
            Server server_3 = new Server(serverconfig_3)) {
        server_1.start();
        server_1.waitForStandaloneBoot();
        server_2.start();
        server_3.start();
        // wait for all three nodes to announce themselves
        herddb.utils.TestUtils.waitForCondition(() -> {
            List<NodeMetadata> listNodes = server_3.getMetadataStorageManager().listNodes();
            System.out.println("NODES: " + listNodes);
            return listNodes.size() == 3;
        }, herddb.utils.TestUtils.NOOP, 100);
        // create the tablespace
        TestUtils.execute(server_1.getManager(),
                "CREATE TABLESPACE 'ttt','leader:" + server_1.getNodeId() + "','expectedreplicacount:2'",
                Collections.emptyList());
        server_2.getManager().triggerActivator(ActivatorRunRequest.FULL);
        // wait for the cluster to settle on 2 replicas
        herddb.utils.TestUtils.waitForCondition(() -> {
            TableSpace ts = server_3.getMetadataStorageManager().describeTableSpace("ttt");
            System.out.println("TS: " + ts);
            assertTrue(ts.replicas.size() <= 2);
            return ts.replicas.size() == 2;
        }, herddb.utils.TestUtils.NOOP, 100);
    }
}
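Both waits rely on TestUtils.waitForCondition, a poll-until-true helper that keeps cluster tests from racing against asynchronous node registration. A minimal sketch of what such a helper can look like (the signature is assumed for illustration; herddb's actual helper may differ):

import java.util.concurrent.Callable;

final class WaitUtil {

    // Evaluate the condition repeatedly, failing if it never becomes true
    // within the allowed number of attempts.
    static void waitForCondition(Callable<Boolean> condition, Runnable betweenAttempts,
                                 int maxAttempts) throws Exception {
        for (int i = 0; i < maxAttempts; i++) {
            if (condition.call()) {
                return;
            }
            betweenAttempts.run();
            Thread.sleep(100);
        }
        throw new AssertionError("condition not met after " + maxAttempts + " attempts");
    }
}

Note also that the second wait asserts ts.replicas.size() <= 2 inside the condition, so an over-replicated tablespace fails the test immediately instead of timing out.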
Use of org.apache.bookkeeper.test.TestStatsProvider in project herddb by diennea.
Class RetryOnLeaderChangedTest, method testKillLeader.
private void testKillLeader(Consumer<HDBConnection> operation) throws Exception {
    TestStatsProvider statsProvider = new TestStatsProvider();
    ServerConfiguration serverconfig_1 = newServerConfigurationWithAutoPort(folder.newFolder().toPath());
    serverconfig_1.set(ServerConfiguration.PROPERTY_NODEID, "server1");
    serverconfig_1.set(ServerConfiguration.PROPERTY_MODE, ServerConfiguration.PROPERTY_MODE_CLUSTER);
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_ADDRESS, testEnv.getAddress());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_PATH, testEnv.getPath());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_SESSIONTIMEOUT, testEnv.getTimeout());
    ServerConfiguration serverconfig_2 = serverconfig_1.copy()
            .set(ServerConfiguration.PROPERTY_NODEID, "server2")
            .set(ServerConfiguration.PROPERTY_BASEDIR, folder.newFolder().toPath().toAbsolutePath());
    try (Server server_1 = new Server(serverconfig_1)) {
        server_1.start();
        server_1.waitForStandaloneBoot();
        try (Server server_2 = new Server(serverconfig_2)) {
            server_2.start();
            TestUtils.execute(server_1.getManager(),
                    "CREATE TABLESPACE 'ttt','leader:" + server_2.getNodeId() + "','expectedreplicacount:2'",
                    Collections.emptyList());
            // wait for server_2 to become leader of the tablespace
            for (int i = 0; i < 40; i++) {
                TableSpaceManager tableSpaceManager2 = server_2.getManager().getTableSpaceManager("ttt");
                if (tableSpaceManager2 != null && tableSpaceManager2.isLeader()) {
                    break;
                }
                Thread.sleep(500);
            }
            assertTrue(server_2.getManager().getTableSpaceManager("ttt") != null
                    && server_2.getManager().getTableSpaceManager("ttt").isLeader());
            // wait for server_1 to announce itself as a follower
            waitClusterStatus(server_1.getManager(), server_1.getNodeId(), "follower");
            ClientConfiguration clientConfiguration = new ClientConfiguration();
            clientConfiguration.set(ClientConfiguration.PROPERTY_MODE, ClientConfiguration.PROPERTY_MODE_CLUSTER);
            clientConfiguration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_ADDRESS, testEnv.getAddress());
            clientConfiguration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_PATH, testEnv.getPath());
            clientConfiguration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_SESSIONTIMEOUT, testEnv.getTimeout());
            clientConfiguration.set(ClientConfiguration.PROPERTY_MAX_OPERATION_RETRY_COUNT, 1000);
            StatsLogger logger = statsProvider.getStatsLogger("ds");
            try (HDBClient client1 = new HDBClient(clientConfiguration, logger)) {
                try (HDBConnection connection = client1.openConnection()) {
                    // create table and insert data
                    connection.executeUpdate(TableSpace.DEFAULT, "CREATE TABLE ttt.t1(k1 int primary key, n1 int)",
                            TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
                    connection.executeUpdate(TableSpace.DEFAULT, "INSERT INTO ttt.t1(k1,n1) values(1,1)",
                            TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
                    connection.executeUpdate(TableSpace.DEFAULT, "INSERT INTO ttt.t1(k1,n1) values(2,1)",
                            TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
                    connection.executeUpdate(TableSpace.DEFAULT, "INSERT INTO ttt.t1(k1,n1) values(3,1)",
                            TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
                    // scan, so that the client opens a connection to the current leader
                    try (ScanResultSet scan = connection.executeScan(TableSpace.DEFAULT, "SELECT * FROM ttt.t1",
                            false, Collections.emptyList(), TransactionContext.NOTRANSACTION_ID, 0, 0, true)) {
                        assertEquals(3, scan.consume().size());
                    }
                    // change leader
                    switchLeader(server_1.getNodeId(), null, server_1.getManager());
                    // make server_2 (the current leader) die
                    server_2.close();
                    // perform the operation: it will eventually succeed, because
                    // the client automatically waits for the new leader to come up
                    operation.accept(connection);
                }
            }
        }
    }
}
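Two details make this test work: PROPERTY_MAX_OPERATION_RETRY_COUNT is raised to 1000 so the final operation survives the leader switch, and the client receives statsProvider.getStatsLogger("ds"), so its internal metrics accumulate in the in-memory TestStatsProvider where assertions could inspect them. A minimal sketch of the retry behavior the test depends on, in generic Java rather than herddb's actual client internals (the RetryingExecutor class is hypothetical):

import java.util.function.Supplier;

final class RetryingExecutor {

    // Retry the operation until it succeeds or retries are exhausted,
    // sleeping between attempts while a new leader is elected.
    static <T> T executeWithRetries(Supplier<T> operation, int maxRetries) throws InterruptedException {
        RuntimeException lastFailure = null;
        for (int attempt = 0; attempt < maxRetries; attempt++) {
            try {
                return operation.get();
            } catch (RuntimeException leaderNotAvailable) {
                lastFailure = leaderNotAvailable;
                Thread.sleep(100);
            }
        }
        throw lastFailure != null ? lastFailure : new IllegalStateException("no attempts made");
    }
}

This is why operation.accept(connection) can be called immediately after server_2.close(): the retry budget absorbs the window during which the tablespace has no live leader.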