Use of org.apache.distributedlog.DistributedLogConfiguration in project bookkeeper by apache.
Class TestBKLogSegmentEntryReader, method testReadEntriesFromInprogressSegment.
@Test(timeout = 60000)
public void testReadEntriesFromInprogressSegment() throws Exception {
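    // local configuration: no output buffering and up to 20 prefetched entries per log segment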
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setOutputBufferSize(0);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setNumPrefetchEntriesPerLogSegment(20);
    confLocal.setMaxPrefetchEntriesPerLogSegment(20);
    DistributedLogManager dlm = createNewDLM(confLocal, runtime.getMethodName());
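    // open an inprogress (not yet completed) log segment; the helper appends 5 records (txId 1..5)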
    AsyncLogWriter writer = createInprogressLogSegment(dlm, confLocal, 5);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    assertEquals(segments.size() + " log segments found, expected to be only one", 1, segments.size());
    BKLogSegmentEntryReader reader = createEntryReader(segments.get(0), 0, confLocal);
    reader.start();
    long expectedLastAddConfirmed = 8L;
    // wait until all prefetch requests have been sent out
    while (reader.readAheadEntries.size() < expectedLastAddConfirmed + 2) {
        TimeUnit.MILLISECONDS.sleep(10);
    }
    assertEquals(expectedLastAddConfirmed + 2, reader.getNextEntryId());
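    // read entries one at a time and verify each data record and its DLSN in order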
    long txId = 1L;
    long entryId = 0L;
    while (true) {
        Entry.Reader entryReader = Utils.ioResult(reader.readNext(1)).get(0);
        LogRecordWithDLSN record = entryReader.nextRecord();
        while (null != record) {
            if (!record.isControl()) {
                DLMTestUtil.verifyLogRecord(record);
                assertEquals(txId, record.getTransactionId());
                ++txId;
            }
            DLSN dlsn = record.getDlsn();
            assertEquals(1L, dlsn.getLogSegmentSequenceNo());
            assertEquals(entryId, dlsn.getEntryId());
            record = entryReader.nextRecord();
        }
        ++entryId;
        if (entryId == expectedLastAddConfirmed + 1) {
            break;
        }
    }
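    // 5 data records were read from the inprogress segment, so txId advanced from 1 to 6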
    assertEquals(6L, txId);
    CompletableFuture<List<Entry.Reader>> nextReadFuture = reader.readNext(1);
    // write another record to commit previous writes
    Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(txId)));
    // the long poll will be satisfied
    List<Entry.Reader> nextReadEntries = Utils.ioResult(nextReadFuture);
    assertEquals(1, nextReadEntries.size());
    assertTrue(reader.hasCaughtUpOnInprogress());
    Entry.Reader entryReader = nextReadEntries.get(0);
    LogRecordWithDLSN record = entryReader.nextRecord();
    assertNotNull(record);
    assertTrue(record.isControl());
    assertNull(entryReader.nextRecord());
    // once the read position advances, the next entry is prefetched
    while (reader.getNextEntryId() <= entryId) {
        TimeUnit.MILLISECONDS.sleep(10);
    }
    assertEquals(entryId + 2, reader.getNextEntryId());
    assertEquals(1, reader.readAheadEntries.size());
    Utils.close(reader);
    Utils.close(writer);
}
Use of org.apache.distributedlog.DistributedLogConfiguration in project bookkeeper by apache.
Class TestBKLogSegmentEntryReader, method testMaxPrefetchEntriesLargeBatch.
@Test(timeout = 60000)
public void testMaxPrefetchEntriesLargeBatch() throws Exception {
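    // request 10 prefetched entries per segment but cap prefetching at 5; the smaller cap should win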
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setOutputBufferSize(0);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setNumPrefetchEntriesPerLogSegment(10);
    confLocal.setMaxPrefetchEntriesPerLogSegment(5);
    DistributedLogManager dlm = createNewDLM(confLocal, runtime.getMethodName());
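    // generate a single completed log segment; per the helper arguments it holds 20 records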
    generateCompletedLogSegments(dlm, confLocal, 1, 20);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    assertEquals(segments.size() + " log segments found, expected to be only one", 1, segments.size());
    BKLogSegmentEntryReader reader = createEntryReader(segments.get(0), 0, confLocal);
    reader.start();
    // wait for the read ahead entries to become available
    while (reader.readAheadEntries.size() < 5) {
        TimeUnit.MILLISECONDS.sleep(10);
    }
    long txId = 1L;
    long entryId = 0L;
    assertEquals(5, reader.readAheadEntries.size());
    assertEquals(5, reader.getNextEntryId());
    // read first entry
    Entry.Reader entryReader = Utils.ioResult(reader.readNext(1)).get(0);
    LogRecordWithDLSN record = entryReader.nextRecord();
    while (null != record) {
        if (!record.isControl()) {
            DLMTestUtil.verifyLogRecord(record);
            assertEquals(txId, record.getTransactionId());
            ++txId;
        }
        DLSN dlsn = record.getDlsn();
        assertEquals(1L, dlsn.getLogSegmentSequenceNo());
        assertEquals(entryId, dlsn.getEntryId());
        record = entryReader.nextRecord();
    }
    ++entryId;
    assertEquals(2L, txId);
    // wait for the read ahead cache to refill up to its cap of 5 entries
    while (reader.readAheadEntries.size() < 5) {
        TimeUnit.MILLISECONDS.sleep(10);
    }
    assertEquals(5, reader.readAheadEntries.size());
    assertEquals(6, reader.getNextEntryId());
    assertFalse(reader.hasCaughtUpOnInprogress());
    Utils.close(reader);
}
Use of org.apache.distributedlog.DistributedLogConfiguration in project bookkeeper by apache.
Class TestZKLogStreamMetadataStore, method testCreateLogMetadataWithCustomMetadata.
@SuppressWarnings("deprecation")
@Test(timeout = 60000)
public void testCreateLogMetadataWithCustomMetadata() throws Exception {
    String logName = testName.getMethodName();
    String logIdentifier = "<default>";
    List<String> pathsToDelete = Lists.newArrayList();
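    // bind the namespace to a local bookkeeper ledgers path, then attach custom metadata to the log before its metadata paths are created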
    DLMetadata.create(new BKDLConfig(zkServers, "/ledgers")).update(uri);
    Namespace namespace = NamespaceBuilder.newBuilder().conf(new DistributedLogConfiguration()).uri(uri).build();
    org.apache.distributedlog.api.MetadataAccessor accessor = namespace.getNamespaceDriver().getMetadataAccessor(logName);
    accessor.createOrUpdateMetadata(logName.getBytes("UTF-8"));
    accessor.close();
    testCreateLogMetadataWithMissingPaths(uri, logName, logIdentifier, pathsToDelete, true, false);
}
Use of org.apache.distributedlog.DistributedLogConfiguration in project bookkeeper by apache.
Class TestZkMetadataResolver, method testFirstLogSegmentSequenceNumber.
@Test(timeout = 60000)
public void testFirstLogSegmentSequenceNumber() throws Exception {
    DistributedLogConfiguration dlConf = new DistributedLogConfiguration();
    URI uri = createURI("/messaging/distributedlog-testfirstledgerseqno/dl1");
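    // a binding created without an explicit first log segment sequence number propagates the default constant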
    DLMetadata meta1 = DLMetadata.create(new BKDLConfig("127.0.0.1:7000", "ledgers"));
    meta1.create(uri);
    BKDLConfig read1 = BKDLConfig.resolveDLConfig(zkc, uri);
    BKDLConfig.propagateConfiguration(read1, dlConf);
    assertEquals(DistributedLogConstants.FIRST_LOGSEGMENT_SEQNO, dlConf.getFirstLogSegmentSequenceNumber());
    BKDLConfig.clearCachedDLConfigs();
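    // an explicit first log segment sequence number (9999) overrides the default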
    DLMetadata meta2 = DLMetadata.create(new BKDLConfig("127.0.0.1:7000", "ledgers").setFirstLogSegmentSeqNo(9999L));
    meta2.update(uri);
    BKDLConfig read2 = BKDLConfig.resolveDLConfig(zkc, uri);
    BKDLConfig.propagateConfiguration(read2, dlConf);
    assertEquals(9999L, dlConf.getFirstLogSegmentSequenceNumber());
    BKDLConfig.clearCachedDLConfigs();
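    // updating the binding again replaces the previously propagated value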
    DLMetadata meta3 = DLMetadata.create(new BKDLConfig("127.0.0.1:7000", "ledgers").setFirstLogSegmentSeqNo(99L));
    meta3.update(uri);
    BKDLConfig read3 = BKDLConfig.resolveDLConfig(zkc, uri);
    BKDLConfig.propagateConfiguration(read3, dlConf);
    assertEquals(99L, dlConf.getFirstLogSegmentSequenceNumber());
    BKDLConfig.clearCachedDLConfigs();
}
Use of org.apache.distributedlog.DistributedLogConfiguration in project bookkeeper by apache.
Class TestFederatedZKLogMetadataStore, method testCreateLog.
@Test(timeout = 60000)
public void testCreateLog() throws Exception {
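    // two metadata store instances, backed by separate ZooKeeper clients, operate on the same federated namespace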
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    conf.addConfiguration(baseConf);
    ZooKeeperClient anotherZkc = TestZooKeeperClientBuilder.newBuilder().uri(uri).sessionTimeoutMs(zkSessionTimeoutMs).build();
    FederatedZKLogMetadataStore anotherMetadataStore = new FederatedZKLogMetadataStore(conf, uri, anotherZkc, scheduler);
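    // alternate which store creates each log and verify that the other store can locate it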
    for (int i = 0; i < 2 * maxLogsPerSubnamespace; i++) {
        LogMetadataStore createStore, checkStore;
        if (i % 2 == 0) {
            createStore = metadataStore;
            checkStore = anotherMetadataStore;
        } else {
            createStore = anotherMetadataStore;
            checkStore = metadataStore;
        }
        String logName = "test-create-log-" + i;
        URI logUri = Utils.ioResult(createStore.createLog(logName));
        Optional<URI> logLocation = Utils.ioResult(checkStore.getLogLocation(logName));
        assertTrue("Log " + logName + " doesn't exist", logLocation.isPresent());
        assertEquals("Different log location " + logLocation.get() + " is found", logUri, logLocation.get());
    }
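    // creating 2 * maxLogsPerSubnamespace logs should leave both stores seeing exactly two sub-namespaces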
    assertEquals(2, metadataStore.getSubnamespaces().size());
    assertEquals(2, anotherMetadataStore.getSubnamespaces().size());
}