Use of org.apache.bookkeeper.client.LedgerHandle in project distributedlog by twitter.
The class TestLedgerHandleCache, method testOpenLedgerWhenZkClosed.
@Test(timeout = 60000, expected = BKException.ZKException.class)
public void testOpenLedgerWhenZkClosed() throws Exception {
    ZooKeeperClient newZkc = TestZooKeeperClientBuilder.newBuilder()
            .name("zkc-openledger-when-zk-closed")
            .zkServers(zkServers)
            .build();
    BookKeeperClient newBkc = BookKeeperClientBuilder.newBuilder()
            .name("bkc-openledger-when-zk-closed")
            .zkc(newZkc)
            .ledgersPath(ledgersPath)
            .dlConfig(conf)
            .build();
    try {
        LedgerHandle lh = newBkc.get().createLedger(BookKeeper.DigestType.CRC32, "zkcClosed".getBytes(UTF_8));
        lh.close();
        newZkc.close();
        LedgerHandleCache cache = LedgerHandleCache.newBuilder().bkc(newBkc).conf(conf).build();
        // open ledger after zkc closed
        cache.openLedger(new LogSegmentMetadata.LogSegmentMetadataBuilder("", 2, lh.getId(), 1)
                .setLogSegmentSequenceNo(lh.getId())
                .build(), false);
    } finally {
        newBkc.close();
    }
}
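For context, a minimal sketch of the raw BookKeeper LedgerHandle lifecycle that the DistributedLog wrappers above build on; the ZooKeeper address, ensemble settings, and payload are illustrative assumptions, not values from the project:

import java.nio.charset.StandardCharsets;
import java.util.Enumeration;
import org.apache.bookkeeper.client.BookKeeper;
import org.apache.bookkeeper.client.LedgerEntry;
import org.apache.bookkeeper.client.LedgerHandle;

public class LedgerHandleLifecycleSketch {
    public static void main(String[] args) throws Exception {
        // "127.0.0.1:2181" is an assumed local ZooKeeper ensemble.
        BookKeeper bk = new BookKeeper("127.0.0.1:2181");
        byte[] passwd = "secret".getBytes(StandardCharsets.UTF_8);
        // Create a ledger; the digest type and password must match on every later open.
        LedgerHandle lh = bk.createLedger(1, 1, BookKeeper.DigestType.CRC32, passwd);
        lh.addEntry("hello".getBytes(StandardCharsets.UTF_8));
        lh.close();
        // Re-open for reading; openLedger() (with recovery) would also fence further writes.
        LedgerHandle readLh = bk.openLedgerNoRecovery(lh.getId(), BookKeeper.DigestType.CRC32, passwd);
        Enumeration<LedgerEntry> entries = readLh.readEntries(0, readLh.getLastAddConfirmed());
        while (entries.hasMoreElements()) {
            System.out.println(new String(entries.nextElement().getEntry(), StandardCharsets.UTF_8));
        }
        readLh.close();
        bk.close();
    }
}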
Use of org.apache.bookkeeper.client.LedgerHandle in project distributedlog by twitter.
The class TestLedgerHandleCache, method testOpenAndCloseLedger.
@Test(timeout = 60000)
public void testOpenAndCloseLedger() throws Exception {
    LedgerHandle lh = bkc.get().createLedger(1, 1, 1, BookKeeper.DigestType.CRC32,
            conf.getBKDigestPW().getBytes(UTF_8));
    LedgerHandleCache cache = LedgerHandleCache.newBuilder().bkc(bkc).conf(conf).build();
    LogSegmentMetadata segment = new LogSegmentMetadata.LogSegmentMetadataBuilder(
            "/data", LogSegmentMetadata.LogSegmentMetadataVersion.VERSION_V5_SEQUENCE_ID, lh.getId(), 0L).build();
    // first open: a ref-counted handle is cached with count 1
    LedgerDescriptor desc1 = cache.openLedger(segment, false);
    assertTrue(cache.handlesMap.containsKey(desc1));
    LedgerHandleCache.RefCountedLedgerHandle refLh = cache.handlesMap.get(desc1);
    assertEquals(1, refLh.getRefCount());
    // opening the same segment again reuses the cached handle and bumps the count
    cache.openLedger(segment, false);
    assertTrue(cache.handlesMap.containsKey(desc1));
    assertEquals(2, refLh.getRefCount());
    // close the ledger: the count drops but the handle stays cached
    cache.closeLedger(desc1);
    assertTrue(cache.handlesMap.containsKey(desc1));
    assertEquals(1, refLh.getRefCount());
    // the second close drops the last reference and evicts the handle
    cache.closeLedger(desc1);
    assertFalse(cache.handlesMap.containsKey(desc1));
    assertEquals(0, refLh.getRefCount());
}
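The assertions above pin down the cache's reference-counting contract. As an illustration only (the names below are hypothetical, not the actual LedgerHandleCache internals), the pattern looks roughly like this:

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch of the ref-counting contract exercised above:
// each open bumps a counter, each close decrements it, and the cached
// handle is only dropped when the count reaches zero.
class RefCountedCache<K, H> {
    private final Map<K, Holder<H>> handles = new HashMap<>();

    private static class Holder<H> {
        final H handle;
        int refCount;
        Holder(H handle) { this.handle = handle; }
    }

    synchronized H open(K key, H newHandleIfAbsent) {
        Holder<H> holder = handles.get(key);
        if (holder == null) {
            holder = new Holder<>(newHandleIfAbsent);
            handles.put(key, holder);
        }
        holder.refCount++;
        return holder.handle;
    }

    synchronized void close(K key) {
        Holder<H> holder = handles.get(key);
        if (holder != null && --holder.refCount == 0) {
            handles.remove(key);  // last reference released
        }
    }
}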
Use of org.apache.bookkeeper.client.LedgerHandle in project distributedlog by twitter.
The class LedgerReadBenchmark, method benchmark.
@Override
protected void benchmark(DistributedLogNamespace namespace, String logName, StatsLogger statsLogger) {
    DistributedLogManager dlm = null;
    while (null == dlm) {
        try {
            dlm = namespace.openLog(streamName);
        } catch (IOException ioe) {
            logger.warn("Failed to create dlm for stream {} : ", streamName, ioe);
        }
        if (null == dlm) {
            try {
                TimeUnit.MILLISECONDS.sleep(conf.getZKSessionTimeoutMilliseconds());
            } catch (InterruptedException e) {
                // ignore and retry opening the log
            }
        }
    }
    logger.info("Created dlm for stream {}.", streamName);
    List<LogSegmentMetadata> segments = null;
    while (null == segments) {
        try {
            segments = dlm.getLogSegments();
        } catch (IOException ioe) {
            logger.warn("Failed to get log segments for stream {} : ", streamName, ioe);
        }
        if (null == segments) {
            try {
                TimeUnit.MILLISECONDS.sleep(conf.getZKSessionTimeoutMilliseconds());
            } catch (InterruptedException e) {
                // ignore and retry fetching the segment list
            }
        }
    }
    final Counter readCounter = statsLogger.getCounter("reads");
    logger.info("Reading from log segments : {}", segments);
    ZooKeeperClient zkc = ZooKeeperClientBuilder.newBuilder()
            .uri(uri)
            .name("benchmark-zkc")
            .sessionTimeoutMs(conf.getZKSessionTimeoutMilliseconds())
            .zkAclId(null)
            .build();
    BKDLConfig bkdlConfig;
    try {
        bkdlConfig = BKDLConfig.resolveDLConfig(zkc, uri);
    } catch (IOException e) {
        return;
    }
    BookKeeper bk;
    try {
        bk = BookKeeperClientBuilder.newBuilder()
                .name("benchmark-bkc")
                .dlConfig(conf)
                .zkServers(bkdlConfig.getBkZkServersForReader())
                .ledgersPath(bkdlConfig.getBkLedgersPath())
                .build()
                .get();
    } catch (IOException e) {
        return;
    }
    final int readConcurrency = conf.getInt("ledger_read_concurrency", 1000);
    boolean streamRead = conf.getBoolean("ledger_stream_read", true);
    try {
        for (LogSegmentMetadata segment : segments) {
            Stopwatch stopwatch = Stopwatch.createStarted();
            long lid = segment.getLedgerId();
            LedgerHandle lh = bk.openLedgerNoRecovery(lid, BookKeeper.DigestType.CRC32,
                    conf.getBKDigestPW().getBytes(UTF_8));
            logger.info("It took {} ms to open log segment with {} entries : {}",
                    new Object[] { stopwatch.elapsed(TimeUnit.MILLISECONDS), (lh.getLastAddConfirmed() + 1), segment });
            stopwatch.reset().start();
            // Note: both branches below currently construct the same streaming reader;
            // the non-stream path is not differentiated in this benchmark.
            Runnable reader;
            if (streamRead) {
                reader = new LedgerStreamReader(lh, new BookkeeperInternalCallbacks.ReadEntryListener() {
                    @Override
                    public void onEntryComplete(int rc, LedgerHandle lh, LedgerEntry entry, Object ctx) {
                        readCounter.inc();
                    }
                }, readConcurrency);
            } else {
                reader = new LedgerStreamReader(lh, new BookkeeperInternalCallbacks.ReadEntryListener() {
                    @Override
                    public void onEntryComplete(int rc, LedgerHandle lh, LedgerEntry entry, Object ctx) {
                        readCounter.inc();
                    }
                }, readConcurrency);
            }
            reader.run();
            logger.info("It took {} ms to complete reading {} entries from log segment {}",
                    new Object[] { stopwatch.elapsed(TimeUnit.MILLISECONDS), (lh.getLastAddConfirmed() + 1), segment });
        }
    } catch (Exception e) {
        logger.error("Error on reading bk ", e);
    }
}
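For comparison with the asynchronous LedgerStreamReader used above, a minimal sketch of reading a log segment's ledger with the synchronous BookKeeper API; the class and method names and the batch size are assumptions for illustration:

import java.util.Enumeration;
import org.apache.bookkeeper.client.BookKeeper;
import org.apache.bookkeeper.client.LedgerEntry;
import org.apache.bookkeeper.client.LedgerHandle;

// Illustrative sketch: read all entries of a ledger in fixed-size batches
// and count them, roughly what the asynchronous benchmark above measures.
public class SimpleLedgerReadSketch {
    public static long readAll(BookKeeper bk, long ledgerId, byte[] passwd, int batchSize) throws Exception {
        LedgerHandle lh = bk.openLedgerNoRecovery(ledgerId, BookKeeper.DigestType.CRC32, passwd);
        long count = 0;
        long lac = lh.getLastAddConfirmed();
        for (long first = 0; first <= lac; first += batchSize) {
            long last = Math.min(first + batchSize - 1, lac);
            Enumeration<LedgerEntry> entries = lh.readEntries(first, last);
            while (entries.hasMoreElements()) {
                entries.nextElement();
                count++;
            }
        }
        lh.close();
        return count;
    }
}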
Use of org.apache.bookkeeper.client.LedgerHandle in project distributedlog by twitter.
The class DLMTestUtil, method fenceStream.
public static void fenceStream(DistributedLogConfiguration conf, URI uri, String name) throws Exception {
    BKDistributedLogManager dlm = (BKDistributedLogManager) createNewDLM(name, conf, uri);
    try {
        BKLogReadHandler readHandler = dlm.createReadHandler();
        List<LogSegmentMetadata> ledgerList = readHandler.getFullLedgerList(true, true);
        LogSegmentMetadata lastSegment = ledgerList.get(ledgerList.size() - 1);
        BookKeeperClient bkc = dlm.getWriterBKC();
        // Opening the last segment's ledger with recovery fences it, so the
        // current writer can no longer append to the stream.
        LedgerHandle lh = bkc.get().openLedger(lastSegment.getLedgerId(), BookKeeper.DigestType.CRC32,
                conf.getBKDigestPW().getBytes(UTF_8));
        lh.close();
    } finally {
        dlm.close();
    }
}
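A small illustrative sketch of the fencing effect this utility relies on; the two pre-built clients and the password are assumptions, while BKLedgerFencedException is the standard BookKeeper error a fenced writer sees:

import java.nio.charset.StandardCharsets;
import org.apache.bookkeeper.client.BKException;
import org.apache.bookkeeper.client.BookKeeper;
import org.apache.bookkeeper.client.LedgerHandle;

// Illustrative sketch: opening a ledger with recovery from a second client
// fences it, so the original writer's next addEntry fails.
public class FencingSketch {
    public static void demo(BookKeeper writerClient, BookKeeper readerClient, byte[] passwd) throws Exception {
        LedgerHandle writer = writerClient.createLedger(1, 1, BookKeeper.DigestType.CRC32, passwd);
        writer.addEntry("before-fence".getBytes(StandardCharsets.UTF_8));

        // Recovery open fences the ledger against further writes.
        LedgerHandle fencer = readerClient.openLedger(writer.getId(), BookKeeper.DigestType.CRC32, passwd);

        try {
            writer.addEntry("after-fence".getBytes(StandardCharsets.UTF_8));
        } catch (BKException.BKLedgerFencedException expected) {
            // The writer has been fenced off, which is what fenceStream() above intends.
        }
        fencer.close();
    }
}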
Use of org.apache.bookkeeper.client.LedgerHandle in project distributedlog by twitter.
The class TestBKLogSegmentWriter, method testCloseShouldFlush.
/**
 * Closing a log segment writer should flush buffered data.
 *
 * @throws Exception
 */
@Test(timeout = 60000)
public void testCloseShouldFlush() throws Exception {
    DistributedLogConfiguration confLocal = newLocalConf();
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(Integer.MAX_VALUE);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    ZKDistributedLock lock = createLock("/test/lock-" + runtime.getMethodName(), zkc, true);
    BKLogSegmentWriter writer = createLogSegmentWriter(confLocal, 0L, -1L, lock);
    // Use another lock to wait for the writer to release its lock
    ZKDistributedLock lock0 = createLock("/test/lock-" + runtime.getMethodName(), zkc0, false);
    Future<ZKDistributedLock> lockFuture0 = lock0.asyncAcquire();
    // add 10 records
    int numRecords = 10;
    List<Future<DLSN>> futureList = new ArrayList<Future<DLSN>>(numRecords);
    for (int i = 0; i < numRecords; i++) {
        futureList.add(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(i)));
    }
    assertEquals("Last tx id should be " + (numRecords - 1), numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should be -1", -1L, writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should be " + DLSN.InvalidDLSN, DLSN.InvalidDLSN, writer.getLastDLSN());
    assertEquals("Position should be " + numRecords, numRecords, writer.getPositionWithinLogSegment());
    // closing the writer should flush buffered data and release the lock
    closeWriterAndLock(writer, lock);
    Await.result(lockFuture0);
    lock0.checkOwnership();
    assertEquals("Last tx id should still be " + (numRecords - 1), numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should become " + (numRecords - 1), numRecords - 1, writer.getLastTxIdAcknowledged());
    assertEquals("Position should still be " + numRecords, numRecords, writer.getPositionWithinLogSegment());
    List<DLSN> dlsns = Await.result(Future.collect(futureList));
    assertEquals("All records should be written", numRecords, dlsns.size());
    for (int i = 0; i < numRecords; i++) {
        DLSN dlsn = dlsns.get(i);
        assertEquals("Incorrect ledger sequence number", 0L, dlsn.getLogSegmentSequenceNo());
        assertEquals("Incorrect entry id", 0L, dlsn.getEntryId());
        assertEquals("Inconsistent slot id", i, dlsn.getSlotId());
    }
    assertEquals("Last DLSN should be " + dlsns.get(dlsns.size() - 1), dlsns.get(dlsns.size() - 1), writer.getLastDLSN());
    LedgerHandle lh = getLedgerHandle(writer);
    LedgerHandle readLh = openLedgerNoRecovery(lh);
    assertTrue("Ledger " + lh.getId() + " should be closed", readLh.isClosed());
    assertEquals("There should be two entries in ledger " + lh.getId(), 1L, readLh.getLastAddConfirmed());
}
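The per-record assertions above rely on DLSN addressing: with the output buffer unbounded, all ten records are flushed into a single ledger entry on close, so they share entry id 0 and differ only in slot id. A rough illustrative sketch of that three-part address (the class is hypothetical, not the project's DLSN implementation):

// Hypothetical sketch of the (log segment sequence number, entry id, slot id)
// triple checked above; records batched into one ledger entry share an entry id
// and are ordered by slot id.
final class DlsnSketch implements Comparable<DlsnSketch> {
    final long logSegmentSequenceNo;
    final long entryId;
    final long slotId;

    DlsnSketch(long logSegmentSequenceNo, long entryId, long slotId) {
        this.logSegmentSequenceNo = logSegmentSequenceNo;
        this.entryId = entryId;
        this.slotId = slotId;
    }

    @Override
    public int compareTo(DlsnSketch other) {
        if (logSegmentSequenceNo != other.logSegmentSequenceNo) {
            return Long.compare(logSegmentSequenceNo, other.logSegmentSequenceNo);
        }
        if (entryId != other.entryId) {
            return Long.compare(entryId, other.entryId);
        }
        return Long.compare(slotId, other.slotId);
    }
}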