Use of org.apache.bookkeeper.common.util.OrderedScheduler in project bookkeeper by apache.
Class TestBKDistributedLogManager, method testTruncationValidation:
@Test(timeout = 60000)
public void testTruncationValidation() throws Exception {
    String name = "distrlog-truncation-validation";
    URI uri = createDLMURI("/" + name);
    ZooKeeperClient zookeeperClient = TestZooKeeperClientBuilder.newBuilder().uri(uri).build();
    OrderedScheduler scheduler = OrderedScheduler.newSchedulerBuilder()
            .name("test-truncation-validation")
            .numThreads(1)
            .build();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setDLLedgerMetadataLayoutVersion(LogSegmentMetadata.LEDGER_METADATA_CURRENT_LAYOUT_VERSION);
    confLocal.setOutputBufferSize(0);
    confLocal.setLogSegmentCacheEnabled(false);
    LogSegmentMetadataStore metadataStore = new ZKLogSegmentMetadataStore(confLocal, zookeeperClient, scheduler);
    BKDistributedLogManager dlm = createNewDLM(confLocal, name);

    // Write 3 log segments of 10 records each, remembering a DLSN inside the
    // second segment (the truncation point) and one inside the third (beyond it).
    DLSN truncDLSN = DLSN.InitialDLSN;
    DLSN beyondTruncDLSN = DLSN.InitialDLSN;
    long beyondTruncTxId = 1;
    long txid = 1;
    for (long i = 0; i < 3; i++) {
        long start = txid;
        BKAsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
        for (long j = 1; j <= 10; j++) {
            LogRecord record = DLMTestUtil.getLargeLogRecordInstance(txid++);
            CompletableFuture<DLSN> dlsn = writer.write(record);
            if (i == 1 && j == 2) {
                truncDLSN = Utils.ioResult(dlsn);
            } else if (i == 2 && j == 3) {
                beyondTruncDLSN = Utils.ioResult(dlsn);
                beyondTruncTxId = record.getTransactionId();
            } else if (j == 10) {
                Utils.ioResult(dlsn);
            }
        }
        writer.close();
    }

    {
        LogReader reader = dlm.getInputStream(DLSN.InitialDLSN);
        LogRecordWithDLSN record = reader.readNext(false);
        assertTrue((record != null) && (record.getDlsn().compareTo(DLSN.InitialDLSN) == 0));
        reader.close();
    }

    // Mark the first segment truncated: readers should now start at segment 2.
    Map<Long, LogSegmentMetadata> segmentList = DLMTestUtil.readLogSegments(zookeeperClient,
            LogMetadata.getLogSegmentsPath(uri, name, confLocal.getUnpartitionedStreamName()));
    LOG.info("Read segments before truncating first segment : {}", segmentList);
    MetadataUpdater updater = LogSegmentMetadataStoreUpdater.createMetadataUpdater(confLocal, metadataStore);
    Utils.ioResult(updater.setLogSegmentTruncated(segmentList.get(1L)));
    segmentList = DLMTestUtil.readLogSegments(zookeeperClient,
            LogMetadata.getLogSegmentsPath(uri, name, confLocal.getUnpartitionedStreamName()));
    LOG.info("Read segments after truncated first segment : {}", segmentList);
    {
        LogReader reader = dlm.getInputStream(DLSN.InitialDLSN);
        LogRecordWithDLSN record = reader.readNext(false);
        assertTrue("Unexpected record : " + record,
                (record != null) && (record.getDlsn().compareTo(new DLSN(2, 0, 0)) == 0));
        reader.close();
    }
    {
        LogReader reader = dlm.getInputStream(1);
        LogRecordWithDLSN record = reader.readNext(false);
        assertTrue((record != null) && (record.getDlsn().compareTo(new DLSN(2, 0, 0)) == 0));
        reader.close();
    }

    // Restore the first segment, then truncate the second: an async reader
    // crossing the truncated segment must hit AlreadyTruncatedTransactionException.
    updater = LogSegmentMetadataStoreUpdater.createMetadataUpdater(confLocal, metadataStore);
    Utils.ioResult(updater.setLogSegmentActive(segmentList.get(1L)));
    segmentList = DLMTestUtil.readLogSegments(zookeeperClient,
            LogMetadata.getLogSegmentsPath(uri, name, confLocal.getUnpartitionedStreamName()));
    LOG.info("Read segments after marked first segment as active : {}", segmentList);
    updater = LogSegmentMetadataStoreUpdater.createMetadataUpdater(confLocal, metadataStore);
    Utils.ioResult(updater.setLogSegmentTruncated(segmentList.get(2L)));
    segmentList = DLMTestUtil.readLogSegments(zookeeperClient,
            LogMetadata.getLogSegmentsPath(uri, name, confLocal.getUnpartitionedStreamName()));
    LOG.info("Read segments after truncated second segment : {}", segmentList);
    {
        AsyncLogReader reader = dlm.getAsyncLogReader(DLSN.InitialDLSN);
        long expectedTxId = 1L;
        boolean exceptionEncountered = false;
        try {
            for (int i = 0; i < 3 * 10; i++) {
                LogRecordWithDLSN record = Utils.ioResult(reader.readNext());
                DLMTestUtil.verifyLargeLogRecord(record);
                assertEquals(expectedTxId, record.getTransactionId());
                expectedTxId++;
            }
        } catch (AlreadyTruncatedTransactionException exc) {
            exceptionEncountered = true;
        }
        assertTrue(exceptionEncountered);
        Utils.close(reader);
    }

    // Restore the second segment, truncate through the writer API up to truncDLSN,
    // and verify each segment's truncation state relative to the truncation point.
    updater = LogSegmentMetadataStoreUpdater.createMetadataUpdater(conf, metadataStore);
    Utils.ioResult(updater.setLogSegmentActive(segmentList.get(2L)));
    BKAsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
    assertTrue(Utils.ioResult(writer.truncate(truncDLSN)));
    BKLogWriteHandler handler = writer.getCachedWriteHandler();
    List<LogSegmentMetadata> cachedSegments = handler.getCachedLogSegments(LogSegmentMetadata.COMPARATOR);
    for (LogSegmentMetadata segment : cachedSegments) {
        if (segment.getLastDLSN().compareTo(truncDLSN) < 0) {
            // segment lies entirely before the truncation point: fully truncated
            assertTrue(segment.isTruncated());
            assertTrue(!segment.isPartiallyTruncated());
        } else if (segment.getFirstDLSN().compareTo(truncDLSN) < 0) {
            // segment straddles the truncation point: partially truncated
            assertTrue(!segment.isTruncated());
            assertTrue(segment.isPartiallyTruncated());
        } else {
            assertTrue(!segment.isTruncated());
            assertTrue(!segment.isPartiallyTruncated());
        }
    }
    segmentList = DLMTestUtil.readLogSegments(zookeeperClient,
            LogMetadata.getLogSegmentsPath(uri, name, conf.getUnpartitionedStreamName()));
    assertTrue(segmentList.get(truncDLSN.getLogSegmentSequenceNo()).getMinActiveDLSN().compareTo(truncDLSN) == 0);

    // All readers, sync and async, should now start exactly at truncDLSN ...
    {
        LogReader reader = dlm.getInputStream(DLSN.InitialDLSN);
        LogRecordWithDLSN record = reader.readNext(false);
        assertTrue(record != null);
        assertEquals(truncDLSN, record.getDlsn());
        reader.close();
    }
    {
        LogReader reader = dlm.getInputStream(1);
        LogRecordWithDLSN record = reader.readNext(false);
        assertTrue(record != null);
        assertEquals(truncDLSN, record.getDlsn());
        reader.close();
    }
    {
        AsyncLogReader reader = dlm.getAsyncLogReader(DLSN.InitialDLSN);
        LogRecordWithDLSN record = Utils.ioResult(reader.readNext());
        assertTrue(record != null);
        assertEquals(truncDLSN, record.getDlsn());
        Utils.close(reader);
    }
    // ... while readers positioned beyond the truncation point are unaffected.
    {
        LogReader reader = dlm.getInputStream(beyondTruncDLSN);
        LogRecordWithDLSN record = reader.readNext(false);
        assertTrue(record != null);
        assertEquals(beyondTruncDLSN, record.getDlsn());
        reader.close();
    }
    {
        LogReader reader = dlm.getInputStream(beyondTruncTxId);
        LogRecordWithDLSN record = reader.readNext(false);
        assertTrue(record != null);
        assertEquals(beyondTruncDLSN, record.getDlsn());
        assertEquals(beyondTruncTxId, record.getTransactionId());
        reader.close();
    }
    {
        AsyncLogReader reader = dlm.getAsyncLogReader(beyondTruncDLSN);
        LogRecordWithDLSN record = Utils.ioResult(reader.readNext());
        assertTrue(record != null);
        assertEquals(beyondTruncDLSN, record.getDlsn());
        Utils.close(reader);
    }
    zookeeperClient.close();
}
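The OrderedScheduler usage above is the builder pattern that recurs throughout these examples: a small named pool whose threads are picked deterministically per key, here driving the ZKLogSegmentMetadataStore callbacks. A minimal sketch of just that lifecycle, built only from calls that appear in these tests (the explicit shutdown is an assumption; the test above leaves cleanup to teardown):

// Minimal sketch, not the test's code: build, use, and shut down an OrderedScheduler.
OrderedScheduler scheduler = OrderedScheduler.newSchedulerBuilder()
        .name("metadata-scheduler")
        .numThreads(1)
        .build();
try {
    // Tasks routed through the thread chosen for a given key execute in submission order.
    scheduler.chooseThread(1L).submit(() -> System.out.println("ordered task"));
} finally {
    scheduler.shutdown();
}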
Use of org.apache.bookkeeper.common.util.OrderedScheduler in project bookkeeper by apache.
Class AuditorLedgerCheckerTest, method testTriggerAuditorWithNoPendingAuditTask:
@Test
public void testTriggerAuditorWithNoPendingAuditTask() throws Exception {
    // wait for a second so that the initial periodic check finishes
    Thread.sleep(1000);
    int lostBookieRecoveryDelayConfValue = baseConf.getLostBookieRecoveryDelay();
    Auditor auditorBookiesAuditor = getAuditorBookiesAuditor();
    Future<?> auditTask = auditorBookiesAuditor.getAuditTask();
    int lostBookieRecoveryDelayBeforeChange = auditorBookiesAuditor.getLostBookieRecoveryDelayBeforeChange();
    Assert.assertEquals("auditTask is supposed to be null", null, auditTask);
    Assert.assertEquals("lostBookieRecoveryDelayBeforeChange of Auditor should be equal to BaseConf's lostBookieRecoveryDelay",
            lostBookieRecoveryDelayConfValue, lostBookieRecoveryDelayBeforeChange);
    @Cleanup("shutdown")
    OrderedScheduler scheduler = OrderedScheduler.newSchedulerBuilder()
            .name("test-scheduler")
            .numThreads(1)
            .build();
    @Cleanup
    MetadataClientDriver driver = MetadataDrivers.getClientDriver(URI.create(baseClientConf.getMetadataServiceUri()));
    driver.initialize(baseClientConf, scheduler, NullStatsLogger.INSTANCE, Optional.of(zkc));
    // There is no easy way to validate that the Auditor has executed the audit process
    // (Auditor.startAudit) without shutting down a bookie. To test that resetting
    // lostBookieRecoveryDelay triggers auditing even when there is no pending audit task,
    // we create a few ledgers whose metadata lists non-existent bookies as the ensemble.
    // When the Auditor runs its audit, it recognizes these ledgers as under-replicated
    // and marks them accordingly, since those bookies are unavailable.
    int numofledgers = 5;
    Random rand = new Random();
    for (int i = 0; i < numofledgers; i++) {
        LedgerMetadata metadata = new LedgerMetadata(3, 2, 2, DigestType.CRC32, "passwd".getBytes());
        ArrayList<BookieSocketAddress> ensemble = new ArrayList<BookieSocketAddress>();
        ensemble.add(new BookieSocketAddress("99.99.99.99:9999"));
        ensemble.add(new BookieSocketAddress("11.11.11.11:1111"));
        ensemble.add(new BookieSocketAddress("88.88.88.88:8888"));
        metadata.addEnsemble(0, ensemble);
        MutableInt ledgerCreateRC = new MutableInt(-1);
        CountDownLatch latch = new CountDownLatch(1);
        long ledgerId = (Math.abs(rand.nextLong())) % 100000000;
        try (LedgerManager lm = driver.getLedgerManagerFactory().newLedgerManager()) {
            lm.createLedgerMetadata(ledgerId, metadata, (rc, result) -> {
                ledgerCreateRC.setValue(rc);
                latch.countDown();
            });
        }
        Assert.assertTrue("Ledger creation should complete within 2 secs",
                latch.await(2000, TimeUnit.MILLISECONDS));
        Assert.assertEquals("LedgerCreate should succeed and return OK rc value",
                BKException.Code.OK, ledgerCreateRC.getValue());
        ledgerList.add(ledgerId);
    }
    // Resetting lostBookieRecoveryDelay should trigger an audit, which marks the
    // ledgers created above as under-replicated.
    final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList.size());
    urLedgerMgr.setLostBookieRecoveryDelay(lostBookieRecoveryDelayBeforeChange);
    assertTrue("Audit should be triggered and created ledgers should be marked as underreplicated",
            underReplicaLatch.await(2, TimeUnit.SECONDS));
    assertEquals("All the ledgers should be marked as underreplicated", ledgerList.size(), urLedgerList.size());
    auditTask = auditorBookiesAuditor.getAuditTask();
    Assert.assertEquals("auditTask is supposed to be null", null, auditTask);
    Assert.assertEquals("lostBookieRecoveryDelayBeforeChange of Auditor should be equal to BaseConf's lostBookieRecoveryDelay",
            lostBookieRecoveryDelayBeforeChange, auditorBookiesAuditor.getLostBookieRecoveryDelayBeforeChange());
}
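One OrderedScheduler detail worth noting here: the test uses Lombok's @Cleanup("shutdown") rather than plain @Cleanup, since the scheduler exposes shutdown() rather than a close() method. Without Lombok, the equivalent lifecycle looks roughly like this (a sketch assuming the test's baseClientConf and zkc fixtures):

OrderedScheduler scheduler = OrderedScheduler.newSchedulerBuilder()
        .name("test-scheduler")
        .numThreads(1)
        .build();
try (MetadataClientDriver driver = MetadataDrivers.getClientDriver(
        URI.create(baseClientConf.getMetadataServiceUri()))) {
    driver.initialize(baseClientConf, scheduler, NullStatsLogger.INSTANCE, Optional.of(zkc));
    // ... create ledger metadata through driver.getLedgerManagerFactory() ...
} finally {
    scheduler.shutdown();
}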
Use of org.apache.bookkeeper.common.util.OrderedScheduler in project bookkeeper by apache.
Class TestDefaultStorageContainerFactory, method testCreate:
@Test
public void testCreate() throws Exception {
    OrderedScheduler scheduler = mock(OrderedScheduler.class);
    OrderedScheduler snapshotScheduler = mock(OrderedScheduler.class);
    MVCCStoreFactory storeFactory = mock(MVCCStoreFactory.class);
    ListeningScheduledExecutorService snapshotExecutor = mock(ListeningScheduledExecutorService.class);
    when(snapshotScheduler.chooseThread(anyLong())).thenReturn(snapshotExecutor);
    Mockito.doReturn(mock(ListenableScheduledFuture.class))
            .when(snapshotExecutor)
            .scheduleWithFixedDelay(any(Runnable.class), anyInt(), anyInt(), any(TimeUnit.class));
    DefaultStorageContainerFactory factory = new DefaultStorageContainerFactory(
            new StorageConfiguration(new CompositeConfiguration()),
            (streamId, rangeId) -> streamId,
            scheduler,
            storeFactory,
            URI.create("distributedlog://127.0.0.1/stream/storage"));
    StorageContainer sc = factory.createStorageContainer(1234L);
    assertTrue(sc instanceof StorageContainerImpl);
    assertEquals(1234L, sc.getId());
}
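The stubbing works because the snapshot path only touches the scheduler through chooseThread(key) and then schedules a repeating task on the returned executor. A hypothetical sketch of that interaction pattern (the snapshot body and the delay values are placeholders, not the factory's real internals):

long containerId = 1234L;
snapshotScheduler.chooseThread(containerId).scheduleWithFixedDelay(
        () -> { /* snapshot the container's MVCC store */ },
        60, 60, TimeUnit.SECONDS);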
Use of org.apache.bookkeeper.common.util.OrderedScheduler in project incubator-pulsar by apache.
Class ZookeeperCacheTest, method testZkCallbackThreadStuck:
/**
 * Verifies that a blocking call in a zkCache callback will not introduce a deadlock,
 * because zkCache completes the future-result on a different thread than the
 * zookeeper-client thread.
 *
 * @throws Exception
 */
@Test(timeOut = 2000)
void testZkCallbackThreadStuck() throws Exception {
    OrderedScheduler executor = OrderedScheduler.newSchedulerBuilder().build();
    ScheduledExecutorService scheduledExecutor = Executors.newScheduledThreadPool(2);
    ExecutorService zkExecutor = Executors.newSingleThreadExecutor(new DefaultThreadFactory("mockZk"));
    // Add readOpDelayMs so the main thread will not serve the zkCache-returned future,
    // letting the zkExecutor thread handle the callback-result processing.
    MockZooKeeper zkClient = MockZooKeeper.newInstance(zkExecutor, 100);
    ZooKeeperCache zkCacheService = new LocalZooKeeperCache(zkClient, executor, scheduledExecutor);
    ZooKeeperDataCache<String> zkCache = new ZooKeeperDataCache<String>(zkCacheService) {
        @Override
        public String deserialize(String key, byte[] content) throws Exception {
            return new String(content);
        }
    };
    String value = "test";
    String key = "/" + UUID.randomUUID().toString().substring(0, 8);
    String key1 = "/" + UUID.randomUUID().toString().substring(0, 8);
    String key2 = "/" + UUID.randomUUID().toString().substring(0, 8);
    zkClient.create(key, value.getBytes(), null, null);
    zkClient.create(key1, value.getBytes(), null, null);
    zkClient.create(key2, value.getBytes(), null, null);
    CountDownLatch latch = new CountDownLatch(1);
    zkCache.getAsync(key).thenAccept(val -> {
        try {
            // blocking read inside the callback: must not deadlock
            zkCache.get(key1);
        } catch (Exception e) {
            fail("failed to get " + key1, e);
        }
        latch.countDown();
    });
    latch.await();
    executor.shutdown();
    zkExecutor.shutdown();
    scheduledExecutor.shutdown();
}
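The deadlock this test guards against would occur if the cache completed its future on the single zookeeper-client thread and the callback then issued a blocking read served by that same thread. A generic illustration of the safe pattern, independent of Pulsar's internals (the executor choice is the point, not the specific names):

zkCache.getAsync(key).thenAcceptAsync(val -> {
    try {
        // Blocking read is safe here: this callback runs on the cache's own
        // scheduler, not on the mockZk callback thread.
        zkCache.get(key1);
    } catch (Exception e) {
        fail("failed to get " + key1, e);
    }
}, scheduledExecutor);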
Use of org.apache.bookkeeper.common.util.OrderedScheduler in project incubator-pulsar by apache.
Class ZooKeeperClientAspectJTest, method testZkClientAspectJTrigger:
/**
 * Verifies that the aspect advice records the latency of each zk-operation.
 *
 * @throws Exception
 */
@Test(enabled = false, timeOut = 7000)
void testZkClientAspectJTrigger() throws Exception {
    OrderedScheduler executor = OrderedScheduler.newSchedulerBuilder().build();
    ZooKeeperClientFactory zkf = new ZookeeperBkClientFactoryImpl(executor);
    CompletableFuture<ZooKeeper> zkFuture = zkf.create("127.0.0.1:" + LOCAL_ZOOKEEPER_PORT,
            SessionType.ReadWrite, (int) ZOOKEEPER_SESSION_TIMEOUT_MILLIS);
    localZkc = zkFuture.get(ZOOKEEPER_SESSION_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    try {
        assertTrue(localZkc.getState().isConnected());
        assertNotEquals(localZkc.getState(), States.CONNECTEDREADONLY);
        final AtomicInteger writeCount = new AtomicInteger(0);
        final AtomicInteger readCount = new AtomicInteger(0);
        EventListner listener = new EventListner() {
            @Override
            public void recordLatency(EventType eventType, long latencyMiliSecond) {
                if (eventType.equals(EventType.write)) {
                    writeCount.incrementAndGet();
                } else if (eventType.equals(EventType.read)) {
                    readCount.incrementAndGet();
                }
            }
        };
        ClientCnxnAspect.addListener(listener);
        // Issue two writes (create, delete) and two reads (exists, getData), then
        // verify the aspect counted each category.
        CountDownLatch createLatch = new CountDownLatch(1);
        CountDownLatch deleteLatch = new CountDownLatch(1);
        CountDownLatch readLatch = new CountDownLatch(1);
        CountDownLatch existLatch = new CountDownLatch(1);
        localZkc.create("/createTest", "data".getBytes(), Acl, CreateMode.EPHEMERAL, (rc, path, ctx, name) -> {
            createLatch.countDown();
        }, "create");
        localZkc.delete("/deleteTest", -1, (rc, path, ctx) -> {
            deleteLatch.countDown();
        }, "delete");
        localZkc.exists("/createTest", null, (int rc, String path, Object ctx, Stat stat) -> {
            existLatch.countDown();
        }, null);
        localZkc.getData("/createTest", null, (int rc, String path, Object ctx, byte[] data, Stat stat) -> {
            readLatch.countDown();
        }, null);
        createLatch.await();
        deleteLatch.await();
        existLatch.await();
        readLatch.await();
        // give the aspect's listener callbacks time to fire
        Thread.sleep(500);
        Assert.assertEquals(writeCount.get(), 2);
        Assert.assertEquals(readCount.get(), 2);
        ClientCnxnAspect.removeListener(listener);
    } finally {
        if (localZkc != null) {
            localZkc.close();
        }
        executor.shutdown();
    }
}
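Both Pulsar examples follow the same OrderedScheduler lifecycle: build with the default builder, hand the scheduler to the ZooKeeper client factory, and shut it down in a finally block. Condensed into one sketch (the host, port, and timeout constants are the test's own fixtures):

OrderedScheduler executor = OrderedScheduler.newSchedulerBuilder().build();
try {
    ZooKeeperClientFactory zkf = new ZookeeperBkClientFactoryImpl(executor);
    ZooKeeper zk = zkf.create("127.0.0.1:" + LOCAL_ZOOKEEPER_PORT,
            SessionType.ReadWrite, (int) ZOOKEEPER_SESSION_TIMEOUT_MILLIS)
            .get(ZOOKEEPER_SESSION_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    // ... issue reads/writes through zk ...
    zk.close();
} finally {
    executor.shutdown();
}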