use of org.apache.bookkeeper.client.BookKeeper in project bookkeeper by apache.
the class TestHttpService method testGetLastLogMarkService.
@Test
public void testGetLastLogMarkService() throws Exception {
    baseConf.setZkServers(zkUtil.getZooKeeperConnectString());
    BookKeeper.DigestType digestType = BookKeeper.DigestType.CRC32;
    int numLedgers = 4;
    int numMsgs = 100;
    LedgerHandle[] lh = new LedgerHandle[numLedgers];
    // create ledgers
    for (int i = 0; i < numLedgers; i++) {
        lh[i] = bkc.createLedger(digestType, "".getBytes());
    }
    String content = "Apache BookKeeper is cool!";
    // add entries
    for (int i = 0; i < numMsgs; i++) {
        for (int j = 0; j < numLedgers; j++) {
            lh[j].addEntry(content.getBytes());
        }
    }
    // close ledgers
    for (int i = 0; i < numLedgers; i++) {
        lh[i].close();
    }
    HttpEndpointService getLastLogMarkService =
        bkHttpServiceProvider.provideHttpEndpointService(HttpServer.ApiType.LAST_LOG_MARK);
    // 1, null parameters of PUT, should fail
    HttpServiceRequest request1 = new HttpServiceRequest(null, HttpServer.Method.PUT, null);
    HttpServiceResponse response1 = getLastLogMarkService.handle(request1);
    assertEquals(HttpServer.StatusCode.NOT_FOUND.getValue(), response1.getStatusCode());
    // 2, null parameters of GET, should return 1 file
    HttpServiceRequest request2 = new HttpServiceRequest(null, HttpServer.Method.GET, null);
    HttpServiceResponse response2 = getLastLogMarkService.handle(request2);
    assertEquals(HttpServer.StatusCode.OK.getValue(), response2.getStatusCode());
    @SuppressWarnings("unchecked")
    HashMap<String, String> respBody = JsonUtil.fromJson(response2.getBody(), HashMap.class);
    assertEquals(1, respBody.size());
}
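For reference, a minimal sketch of how a caller might inspect the body returned by the GET above. The test only asserts that exactly one entry comes back; the key/value format printed here is an assumption, not something the test verifies, and the helper name is hypothetical.

// Hypothetical helper: dump whatever the LAST_LOG_MARK endpoint returned.
// The test above only guarantees the map has exactly one entry.
static void printLastLogMark(HttpServiceResponse response) throws Exception {
    @SuppressWarnings("unchecked")
    HashMap<String, String> body = JsonUtil.fromJson(response.getBody(), HashMap.class);
    body.forEach((journalDir, mark) -> System.out.println(journalDir + " -> " + mark));
}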
use of org.apache.bookkeeper.client.BookKeeper in project bookkeeper by apache.
the class TestSmoke method testBootWriteReadShutdown.
@Test
public void testBootWriteReadShutdown() throws Exception {
    Assert.assertTrue(BookKeeperClusterUtils.startAllBookiesWithVersion(docker, currentVersion));
    String zookeeper = BookKeeperClusterUtils.zookeeperConnectString(docker);
    BookKeeper bk = new BookKeeper(zookeeper);
    long ledgerId;
    try (LedgerHandle writelh = bk.createLedger(BookKeeper.DigestType.CRC32, PASSWD)) {
        ledgerId = writelh.getId();
        for (int i = 0; i < 100; i++) {
            writelh.addEntry(("entry-" + i).getBytes());
        }
    }
    try (LedgerHandle readlh = bk.openLedger(ledgerId, BookKeeper.DigestType.CRC32, PASSWD)) {
        long lac = readlh.getLastAddConfirmed();
        int i = 0;
        Enumeration<LedgerEntry> entries = readlh.readEntries(0, lac);
        while (entries.hasMoreElements()) {
            LedgerEntry e = entries.nextElement();
            String readBack = new String(e.getEntry());
            Assert.assertEquals(readBack, "entry-" + i++);
        }
        Assert.assertEquals(i, 100);
    }
    bk.close();
    Assert.assertTrue(BookKeeperClusterUtils.stopAllBookies(docker));
}
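For comparison, a minimal sketch of the same boot/write/read flow against the newer org.apache.bookkeeper.client.api handles (WriteHandle, ReadHandle, LedgerEntries, DigestType). This assumes the client version under test ships that API and reuses the test's ZooKeeper connect string and password; it is not part of the original smoke test.

// Sketch only: the same write-then-read flow via the WriteHandle/ReadHandle API.
static void writeThenRead(String zookeeper, byte[] passwd) throws Exception {
    ClientConfiguration conf = new ClientConfiguration().setZkServers(zookeeper);
    try (org.apache.bookkeeper.client.api.BookKeeper bk =
            org.apache.bookkeeper.client.api.BookKeeper.newBuilder(conf).build()) {
        long ledgerId;
        try (WriteHandle wh = bk.newCreateLedgerOp()
                .withDigestType(DigestType.CRC32)
                .withPassword(passwd)
                .execute()
                .get()) {
            ledgerId = wh.getId();
            for (int i = 0; i < 100; i++) {
                wh.append(("entry-" + i).getBytes());
            }
        }
        try (ReadHandle rh = bk.newOpenLedgerOp()
                .withLedgerId(ledgerId)
                .withDigestType(DigestType.CRC32)
                .withPassword(passwd)
                .execute()
                .get();
             LedgerEntries entries = rh.read(0, rh.getLastAddConfirmed())) {
            for (org.apache.bookkeeper.client.api.LedgerEntry e : entries) {
                System.out.println(new String(e.getEntryBytes()));
            }
        }
    }
}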
use of org.apache.bookkeeper.client.BookKeeper in project pravega by pravega.
the class BookKeeperListAllLedgersCommand method execute.
@Override
public void execute() throws Exception {
    ensureArgCount(0);
    @Cleanup val context = createContext();
    ClientConfiguration config = new ClientConfiguration()
        .setMetadataServiceUri("zk://" + this.getServiceConfig().getZkURL() + context.bookKeeperConfig.getBkLedgerPath());
    @Cleanup BookKeeper bkClient = BookKeeper.forConfig(config).build();
    @Cleanup LedgerManager manager = bkClient.getLedgerManager();
    LedgerManager.LedgerRangeIterator ledgerRangeIterator = manager.getLedgerRanges(Long.MAX_VALUE);
    List<ReadHandle> candidateLedgers = new ArrayList<>();
    try {
        while (ledgerRangeIterator.hasNext()) {
            LedgerManager.LedgerRange lr = ledgerRangeIterator.next();
            for (long ledgerId : lr.getLedgers()) {
                candidateLedgers.add(Ledgers.openRead(ledgerId, bkClient, context.bookKeeperConfig));
            }
        }
        // Output all the ledgers found.
        output("List of ledgers in the system: ");
        for (ReadHandle rh : candidateLedgers) {
            output("%s, length: %d, lastEntryConfirmed: %d, ledgerMetadata: %s, bookieLogID: %d",
                rh.toString(), rh.getLength(), rh.readLastAddConfirmed(),
                rh.getLedgerMetadata().toSafeString(), Ledgers.getBookKeeperLogId(rh));
        }
    } finally {
        // Closing opened ledgers.
        closeBookkeeperReadHandles(candidateLedgers);
    }
}
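If only the ledger ids are needed, the iterator can be consumed without opening a read handle per ledger; a lighter-weight sketch using only the LedgerManager calls already shown above:

// Sketch: list ledger ids only, without the cost of opening each ledger for read.
LedgerManager.LedgerRangeIterator it = manager.getLedgerRanges(Long.MAX_VALUE);
while (it.hasNext()) {
    for (long ledgerId : it.next().getLedgers()) {
        output("ledger %d", ledgerId);
    }
}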
use of org.apache.bookkeeper.client.BookKeeper in project distributedlog by twitter.
the class BookKeeperClient method createLedger.
// Util functions
public Future<LedgerHandle> createLedger(int ensembleSize, int writeQuorumSize, int ackQuorumSize) {
    BookKeeper bk;
    try {
        bk = get();
    } catch (IOException ioe) {
        return Future.exception(ioe);
    }
    final Promise<LedgerHandle> promise = new Promise<LedgerHandle>();
    bk.asyncCreateLedger(ensembleSize, writeQuorumSize, ackQuorumSize, BookKeeper.DigestType.CRC32, passwd,
        new AsyncCallback.CreateCallback() {
            @Override
            public void createComplete(int rc, LedgerHandle lh, Object ctx) {
                if (BKException.Code.OK == rc) {
                    promise.updateIfEmpty(new Return<LedgerHandle>(lh));
                } else {
                    promise.updateIfEmpty(new Throw<LedgerHandle>(BKException.create(rc)));
                }
            }
        }, null);
    return promise;
}
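A hedged usage sketch for the helper above: FutureEventListener comes from the same com.twitter.util package as Promise and Future, while the client instance name and the 3/3/2 quorum values are placeholders.

// bkClient is a BookKeeperClient instance; 3/3/2 are illustrative quorum settings.
bkClient.createLedger(3, 3, 2).addEventListener(new FutureEventListener<LedgerHandle>() {
    @Override
    public void onSuccess(LedgerHandle lh) {
        // ledger created and ready for writes
    }

    @Override
    public void onFailure(Throwable cause) {
        // creation failed; cause is typically a BKException carrying the return code
    }
});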
use of org.apache.bookkeeper.client.BookKeeper in project distributedlog by twitter.
the class DistributedLogInputFormat method getSplits.
@Override
public List<InputSplit> getSplits(JobContext jobContext) throws IOException, InterruptedException {
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    List<InputSplit> inputSplits = Lists.newArrayListWithCapacity(segments.size());
    BookKeeper bk = namespace.getReaderBKC().get();
    LedgerManager lm = BookKeeperAccessor.getLedgerManager(bk);
    final AtomicInteger rcHolder = new AtomicInteger(0);
    final AtomicReference<LedgerMetadata> metadataHolder = new AtomicReference<LedgerMetadata>(null);
    for (LogSegmentMetadata segment : segments) {
        final CountDownLatch latch = new CountDownLatch(1);
        lm.readLedgerMetadata(segment.getLedgerId(), new BookkeeperInternalCallbacks.GenericCallback<LedgerMetadata>() {
            @Override
            public void operationComplete(int rc, LedgerMetadata ledgerMetadata) {
                metadataHolder.set(ledgerMetadata);
                rcHolder.set(rc);
                latch.countDown();
            }
        });
        latch.await();
        if (BKException.Code.OK != rcHolder.get()) {
            throw new IOException("Failed to get log segment metadata for " + segment
                + " : " + BKException.getMessage(rcHolder.get()));
        }
        inputSplits.add(new LogSegmentSplit(segment, metadataHolder.get()));
    }
    return inputSplits;
}