Use of org.apache.distributedlog.LogSegmentMetadata in project bookkeeper by apache.
From the class TestZKLogSegmentMetadataStore, method testLogSegmentNamesListenerOnDeletion: verifies that a registered LogSegmentNamesListener receives an updated (empty) segment list when all log segments are deleted, and that the listener is removed once the log's root path is deleted.
@Test(timeout = 60000)
public void testLogSegmentNamesListenerOnDeletion() throws Exception {
    int numSegments = 3;
    Transaction<Object> createTxn = lsmStore.transaction();
    for (int i = 0; i < numSegments; i++) {
        LogSegmentMetadata segment = createLogSegment(i);
        lsmStore.createLogSegment(createTxn, segment, null);
    }
    Utils.ioResult(createTxn.execute());
    String rootPath = "/" + runtime.getMethodName();
    List<String> children = zkc.get().getChildren(rootPath, false);
    Collections.sort(children);
    final AtomicInteger numNotifications = new AtomicInteger(0);
    final List<List<String>> segmentLists = Lists.newArrayListWithExpectedSize(2);
    LogSegmentNamesListener listener = new LogSegmentNamesListener() {

        @Override
        public void onSegmentsUpdated(Versioned<List<String>> segments) {
            logger.info("Received segments : {}", segments);
            segmentLists.add(segments.getValue());
            numNotifications.incrementAndGet();
        }

        @Override
        public void onLogStreamDeleted() {
            // no-op
        }
    };
    lsmStore.getLogSegmentNames(rootPath, listener);
    assertEquals(1, lsmStore.listeners.size());
    assertTrue("Should contain listener", lsmStore.listeners.containsKey(rootPath));
    assertTrue("Should contain listener", lsmStore.listeners.get(rootPath).containsKey(listener));
    while (numNotifications.get() < 1) {
        TimeUnit.MILLISECONDS.sleep(10);
    }
    assertEquals("Should receive one segment list update", 1, numNotifications.get());
    List<String> firstSegmentList = segmentLists.get(0);
    Collections.sort(firstSegmentList);
    assertEquals("List of segments should be same", children, firstSegmentList);
    // delete all log segments; this should trigger a segment list update
    Transaction<Object> deleteTxn = lsmStore.transaction();
    for (int i = 0; i < numSegments; i++) {
        LogSegmentMetadata segment = createLogSegment(i);
        lsmStore.deleteLogSegment(deleteTxn, segment, null);
    }
    Utils.ioResult(deleteTxn.execute());
    List<String> newChildren = zkc.get().getChildren(rootPath, false);
    Collections.sort(newChildren);
    while (numNotifications.get() < 2) {
        TimeUnit.MILLISECONDS.sleep(10);
    }
    assertEquals("Should receive second segment list update", 2, numNotifications.get());
    List<String> secondSegmentList = segmentLists.get(1);
    Collections.sort(secondSegmentList);
    assertEquals("List of segments should be updated", 0, secondSegmentList.size());
    assertEquals("List of segments should be updated", newChildren, secondSegmentList);
    // delete the root path
    zkc.get().delete(rootPath, -1);
    while (!lsmStore.listeners.isEmpty()) {
        TimeUnit.MILLISECONDS.sleep(10);
    }
    assertTrue("listener should be removed after root path is deleted", lsmStore.listeners.isEmpty());
}
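For context, a minimal sketch of registering a LogSegmentNamesListener outside of the test, using only the calls exercised above; the store and logSegmentsPath variables are illustrative placeholders for an already-initialized LogSegmentMetadataStore and the log's segments path.

// Sketch only: register a listener for segment-name changes on a log's segments path.
// `store` and `logSegmentsPath` are assumed to be provided by the surrounding application.
LogSegmentNamesListener listener = new LogSegmentNamesListener() {

    @Override
    public void onSegmentsUpdated(Versioned<List<String>> segments) {
        // invoked whenever the set of log segment names under the path changes
        logger.info("Segments updated : {}", segments.getValue());
    }

    @Override
    public void onLogStreamDeleted() {
        // invoked once the log stream itself is deleted
        logger.info("Log stream deleted");
    }
};
store.getLogSegmentNames(logSegmentsPath, listener);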
Use of org.apache.distributedlog.LogSegmentMetadata in project bookkeeper by apache.
From the class DistributedLogAdmin, method repairStream: repairs a stream by rewriting the last-record metadata of each candidate log segment through the MetadataUpdater, optionally printing the old and new metadata and prompting for confirmation.
private static boolean repairStream(MetadataUpdater metadataUpdater,
                                    StreamCandidate streamCandidate,
                                    boolean verbose,
                                    boolean interactive) throws Exception {
    if (verbose) {
        System.out.println("Stream " + streamCandidate.streamName + " : ");
        for (LogSegmentCandidate segmentCandidate : streamCandidate.segmentCandidates) {
            System.out.println(" " + segmentCandidate.metadata.getLogSegmentSequenceNumber()
                    + " : metadata = " + segmentCandidate.metadata
                    + ", last dlsn = " + segmentCandidate.lastRecord.getDlsn());
        }
        System.out.println("-------------------------------------------");
    }
    if (interactive && !IOUtils.confirmPrompt("Do you want to fix the stream "
            + streamCandidate.streamName + " (Y/N) : ")) {
        return false;
    }
    for (LogSegmentCandidate segmentCandidate : streamCandidate.segmentCandidates) {
        LogSegmentMetadata newMetadata = FutureUtils.result(
                metadataUpdater.updateLastRecord(segmentCandidate.metadata, segmentCandidate.lastRecord));
        if (verbose) {
            System.out.println(" Fixed segment " + segmentCandidate.metadata.getLogSegmentSequenceNumber() + " : ");
            System.out.println(" old metadata : " + segmentCandidate.metadata);
            System.out.println(" new metadata : " + newMetadata);
        }
    }
    if (verbose) {
        System.out.println("-------------------------------------------");
    }
    return true;
}
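Shown in isolation as a sketch, the core repair step: updateLastRecord writes the corrected last-record metadata for one segment and returns the new LogSegmentMetadata. The metadataUpdater, oldMetadata, and lastRecord variables are assumed inputs (in the method above they come from a StreamCandidate).

// Sketch only: fix a single segment's last-record metadata.
// `metadataUpdater`, `oldMetadata` and `lastRecord` are assumed to be supplied by the caller.
LogSegmentMetadata newMetadata =
        FutureUtils.result(metadataUpdater.updateLastRecord(oldMetadata, lastRecord));
System.out.println("old metadata : " + oldMetadata);
System.out.println("new metadata : " + newMetadata);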
Use of org.apache.distributedlog.LogSegmentMetadata in project bookkeeper by apache.
From the class TestBKLogSegmentEntryReader, method testReadEntriesOnStateChange: verifies that the reader keeps prefetching and serving entries from an in-progress log segment, and signals end of log segment once the segment metadata is updated to completed.
@Test(timeout = 60000)
public void testReadEntriesOnStateChange() throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setOutputBufferSize(0);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setNumPrefetchEntriesPerLogSegment(20);
    confLocal.setMaxPrefetchEntriesPerLogSegment(20);
    DistributedLogManager dlm = createNewDLM(confLocal, runtime.getMethodName());
    AsyncLogWriter writer = createInprogressLogSegment(dlm, confLocal, 5);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    assertEquals(segments.size() + " log segments found, expected to be only one", 1, segments.size());
    BKLogSegmentEntryReader reader = createEntryReader(segments.get(0), 0, confLocal);
    reader.start();
    long expectedLastAddConfirmed = 8L;
    // wait until sending out all prefetch requests
    while (reader.readAheadEntries.size() < expectedLastAddConfirmed + 2) {
        TimeUnit.MILLISECONDS.sleep(10);
    }
    assertEquals(expectedLastAddConfirmed + 2, reader.getNextEntryId());
    long txId = 1L;
    long entryId = 0L;
    while (true) {
        Entry.Reader entryReader = Utils.ioResult(reader.readNext(1)).get(0);
        LogRecordWithDLSN record = entryReader.nextRecord();
        while (null != record) {
            if (!record.isControl()) {
                DLMTestUtil.verifyLogRecord(record);
                assertEquals(txId, record.getTransactionId());
                ++txId;
            }
            DLSN dlsn = record.getDlsn();
            assertEquals(1L, dlsn.getLogSegmentSequenceNo());
            assertEquals(entryId, dlsn.getEntryId());
            record = entryReader.nextRecord();
        }
        ++entryId;
        if (entryId == expectedLastAddConfirmed + 1) {
            break;
        }
    }
    assertEquals(6L, txId);
    CompletableFuture<List<Entry.Reader>> nextReadFuture = reader.readNext(1);
    // write another record to commit previous writes
    Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(txId)));
    // the long poll will be satisfied
    List<Entry.Reader> nextReadEntries = Utils.ioResult(nextReadFuture);
    assertEquals(1, nextReadEntries.size());
    Entry.Reader entryReader = nextReadEntries.get(0);
    LogRecordWithDLSN record = entryReader.nextRecord();
    assertNotNull(record);
    assertTrue(record.isControl());
    assertNull(entryReader.nextRecord());
    // once the read is advanced, we will prefetch next record
    while (reader.getNextEntryId() <= entryId) {
        TimeUnit.MILLISECONDS.sleep(10);
    }
    assertEquals(entryId + 2, reader.getNextEntryId());
    assertEquals(1, reader.readAheadEntries.size());
    // advance the entry id
    ++entryId;
    // close the writer, the write will be committed
    Utils.close(writer);
    entryReader = Utils.ioResult(reader.readNext(1)).get(0);
    record = entryReader.nextRecord();
    assertNotNull(record);
    assertFalse(record.isControl());
    assertNull(entryReader.nextRecord());
    while (reader.getNextEntryId() <= entryId + 1) {
        TimeUnit.MILLISECONDS.sleep(10);
    }
    assertEquals(entryId + 2, reader.getNextEntryId());
    assertEquals(1, reader.readAheadEntries.size());
    // get the new log segment
    List<LogSegmentMetadata> newSegments = dlm.getLogSegments();
    assertEquals(1, newSegments.size());
    assertFalse(newSegments.get(0).isInProgress());
    reader.onLogSegmentMetadataUpdated(newSegments.get(0));
    // should be cancelled and end of log segment should be signaled correctly
    try {
        // when we closed the log segment, another control record will be
        // written, so we loop over the reader until we reach end of log segment.
        Utils.ioResult(reader.readNext(1));
        Utils.ioResult(reader.readNext(1));
        fail("Should reach end of log segment");
    } catch (EndOfLogSegmentException eol) {
        // expected
    }
    Utils.close(reader);
}
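The read loop above follows a common pattern; here it is extracted as a standalone sketch that drains one entry from the reader and skips control records. The reader variable is assumed to be a started BKLogSegmentEntryReader, as in the test.

// Sketch only: read the next entry and iterate its records, ignoring control records.
// `reader` is assumed to be a started BKLogSegmentEntryReader.
Entry.Reader entryReader = Utils.ioResult(reader.readNext(1)).get(0);
LogRecordWithDLSN record = entryReader.nextRecord();
while (null != record) {
    if (!record.isControl()) {
        // a user record, identified by its DLSN and transaction id
        System.out.println("Read record " + record.getDlsn() + " txid=" + record.getTransactionId());
    }
    record = entryReader.nextRecord();
}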
Use of org.apache.distributedlog.LogSegmentMetadata in project bookkeeper by apache.
From the class TestBKLogSegmentEntryReader, method testCloseReaderToCancelPendingReads: verifies that closing the reader cancels all outstanding read requests with ReadCancelledException.
@Test(timeout = 60000)
public void testCloseReaderToCancelPendingReads() throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setNumPrefetchEntriesPerLogSegment(10);
    confLocal.setMaxPrefetchEntriesPerLogSegment(10);
    DistributedLogManager dlm = createNewDLM(confLocal, runtime.getMethodName());
    DLMTestUtil.generateCompletedLogSegments(dlm, confLocal, 1, 20);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    assertEquals(segments.size() + " log segments found, expected to be only one", 1, segments.size());
    BKLogSegmentEntryReader reader = createEntryReader(segments.get(0), 0, confLocal);
    List<CompletableFuture<List<Entry.Reader>>> futures = Lists.newArrayList();
    for (int i = 0; i < 5; i++) {
        futures.add(reader.readNext(1));
    }
    assertFalse("Reader should not be closed yet", reader.isClosed());
    Utils.close(reader);
    for (CompletableFuture<List<Entry.Reader>> future : futures) {
        try {
            Utils.ioResult(future);
            fail("The read request should be cancelled");
        } catch (ReadCancelledException rce) {
            // expected
        }
    }
    assertFalse(reader.hasCaughtUpOnInprogress());
    assertTrue("Reader should be closed now", reader.isClosed());
}
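The behavior verified here, as a compact sketch: once the reader is closed, any outstanding readNext future completes exceptionally with ReadCancelledException. The reader variable is an assumed BKLogSegmentEntryReader with no more entries available to satisfy the read.

// Sketch only: a pending read is cancelled when the reader is closed.
// `reader` is an assumed BKLogSegmentEntryReader with nothing left to read.
CompletableFuture<List<Entry.Reader>> pendingRead = reader.readNext(1);
Utils.close(reader);
try {
    Utils.ioResult(pendingRead);
} catch (ReadCancelledException rce) {
    // expected: close cancels outstanding read requests
}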
Use of org.apache.distributedlog.LogSegmentMetadata in project bookkeeper by apache.
From the class TestBKLogSegmentEntryReader, method testMaxPrefetchEntriesSmallSegment: verifies that the reader prefetches no more entries than a small, completed segment actually contains, even when the configured prefetch limits are larger.
@Test(timeout = 60000)
public void testMaxPrefetchEntriesSmallSegment() throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setOutputBufferSize(0);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setNumPrefetchEntriesPerLogSegment(10);
    confLocal.setMaxPrefetchEntriesPerLogSegment(20);
    DistributedLogManager dlm = createNewDLM(confLocal, runtime.getMethodName());
    generateCompletedLogSegments(dlm, confLocal, 1, 5);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    assertEquals(segments.size() + " log segments found, expected to be only one", 1, segments.size());
    BKLogSegmentEntryReader reader = createEntryReader(segments.get(0), 0, confLocal);
    reader.start();
    // wait for the read ahead entries to become available
    while (reader.readAheadEntries.size() < (reader.getLastAddConfirmed() + 1)) {
        TimeUnit.MILLISECONDS.sleep(10);
    }
    long txId = 1L;
    long entryId = 0L;
    assertEquals((reader.getLastAddConfirmed() + 1), reader.readAheadEntries.size());
    assertEquals((reader.getLastAddConfirmed() + 1), reader.getNextEntryId());
    // read first entry
    Entry.Reader entryReader = Utils.ioResult(reader.readNext(1)).get(0);
    LogRecordWithDLSN record = entryReader.nextRecord();
    while (null != record) {
        if (!record.isControl()) {
            DLMTestUtil.verifyLogRecord(record);
            assertEquals(txId, record.getTransactionId());
            ++txId;
        }
        DLSN dlsn = record.getDlsn();
        assertEquals(1L, dlsn.getLogSegmentSequenceNo());
        assertEquals(entryId, dlsn.getEntryId());
        record = entryReader.nextRecord();
    }
    ++entryId;
    assertEquals(2L, txId);
    assertEquals(reader.getLastAddConfirmed(), reader.readAheadEntries.size());
    assertEquals((reader.getLastAddConfirmed() + 1), reader.getNextEntryId());
    assertFalse(reader.hasCaughtUpOnInprogress());
    Utils.close(reader);
}
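The prefetch settings these tests exercise, shown in isolation as a sketch; base stands in for an existing DistributedLogConfiguration to inherit from, and the values are the ones used above.

// Sketch only: configure per-log-segment read-ahead prefetching.
// `base` is an assumed existing DistributedLogConfiguration.
DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
confLocal.addConfiguration(base);
// target number of entries to prefetch per log segment
confLocal.setNumPrefetchEntriesPerLogSegment(10);
// upper bound on prefetched entries per log segment
confLocal.setMaxPrefetchEntriesPerLogSegment(20);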