Use of org.apache.distributedlog.api.DistributedLogManager in project bookkeeper by apache.
Class TestRollLogSegments, method testLastDLSNInRollingLogSegments.
@Test(timeout = 600000)
public void testLastDLSNInRollingLogSegments() throws Exception {
    final Map<Long, DLSN> lastDLSNs = new HashMap<Long, DLSN>();
    String name = "distrlog-lastdlsn-in-rolling-log-segments";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setImmediateFlushEnabled(true);
    confLocal.setOutputBufferSize(0);
    confLocal.setLogSegmentRollingIntervalMinutes(0);
    confLocal.setMaxLogSegmentBytes(40);
    int numEntries = 100;
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) dlm.startAsyncLogSegmentNonPartitioned();
    final CountDownLatch latch = new CountDownLatch(numEntries);
    // send requests in parallel to have outstanding requests
    for (int i = 1; i <= numEntries; i++) {
        final int entryId = i;
        CompletableFuture<DLSN> writeFuture = writer.write(DLMTestUtil.getLogRecordInstance(entryId))
                .whenComplete(new FutureEventListener<DLSN>() {

                    @Override
                    public void onSuccess(DLSN value) {
                        logger.info("Completed entry {} : {}.", entryId, value);
                        synchronized (lastDLSNs) {
                            DLSN lastDLSN = lastDLSNs.get(value.getLogSegmentSequenceNo());
                            if (null == lastDLSN || lastDLSN.compareTo(value) < 0) {
                                lastDLSNs.put(value.getLogSegmentSequenceNo(), value);
                            }
                        }
                        latch.countDown();
                    }

                    @Override
                    public void onFailure(Throwable cause) {
                    }
                });
        if (i == 1) {
            // wait for the first log segment to be created
            Utils.ioResult(writeFuture);
        }
    }
    latch.await();
    // make sure all ensure blocks were executed
    writer.closeAndComplete();
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    logger.info("lastDLSNs after writes {} {}", lastDLSNs.size(), lastDLSNs);
    logger.info("segments after writes {} {}", segments.size(), segments);
    assertTrue(segments.size() >= 2);
    assertTrue(lastDLSNs.size() >= 2);
    assertEquals(lastDLSNs.size(), segments.size());
    for (LogSegmentMetadata segment : segments) {
        DLSN dlsnInMetadata = segment.getLastDLSN();
        DLSN dlsnSeen = lastDLSNs.get(segment.getLogSegmentSequenceNumber());
        assertNotNull(dlsnInMetadata);
        assertNotNull(dlsnSeen);
        if (dlsnInMetadata.compareTo(dlsnSeen) != 0) {
            logger.error("Last dlsn recorded in log segment {} is different from the one already seen {}.",
                    dlsnInMetadata, dlsnSeen);
        }
        assertEquals(0, dlsnInMetadata.compareTo(dlsnSeen));
    }
    dlm.close();
}
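The test obtains its DistributedLogManager through the harness helper createNewDLM and forces aggressive segment rolling by capping setMaxLogSegmentBytes at 40 bytes. Outside a test, a manager is normally opened through a Namespace. The following is a minimal sketch, not taken from the test, assuming a ZooKeeper-backed namespace at a hypothetical URI and the same rolling configuration; the namespace URI, stream name, and class name are illustrative only.

import java.net.URI;
import org.apache.distributedlog.DLSN;
import org.apache.distributedlog.DistributedLogConfiguration;
import org.apache.distributedlog.LogRecord;
import org.apache.distributedlog.api.AsyncLogWriter;
import org.apache.distributedlog.api.DistributedLogManager;
import org.apache.distributedlog.api.namespace.Namespace;
import org.apache.distributedlog.api.namespace.NamespaceBuilder;

public class RollingWriteSketch {
    public static void main(String[] args) throws Exception {
        DistributedLogConfiguration conf = new DistributedLogConfiguration();
        conf.setImmediateFlushEnabled(true);
        conf.setOutputBufferSize(0);
        // roll a new log segment as soon as roughly 40 bytes are written, as in the test
        conf.setLogSegmentRollingIntervalMinutes(0);
        conf.setMaxLogSegmentBytes(40);
        Namespace namespace = NamespaceBuilder.newBuilder()
                .conf(conf)
                .uri(URI.create("distributedlog://127.0.0.1:2181/messaging/my-namespace")) // hypothetical
                .build();
        DistributedLogManager dlm = namespace.openLog("rolling-log-sketch");
        AsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
        for (long txid = 1; txid <= 10; txid++) {
            // each write returns the DLSN assigned to the record
            DLSN dlsn = writer.write(new LogRecord(txid, ("entry-" + txid).getBytes("UTF-8"))).get();
            System.out.println("wrote txid " + txid + " at " + dlsn);
        }
        writer.asyncClose().get();
        dlm.close();
        namespace.close();
    }
}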
Use of org.apache.distributedlog.api.DistributedLogManager in project bookkeeper by apache.
Class TestTruncate, method testOnlyPurgeSegmentsBeforeNoneFullyTruncatedSegment.
@Test(timeout = 60000)
public void testOnlyPurgeSegmentsBeforeNoneFullyTruncatedSegment() throws Exception {
    String name = "distrlog-only-purge-segments-before-none-fully-truncated-segment";
    URI uri = createDLMURI("/" + name);
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setExplicitTruncationByApplication(true);
    // populate data
    populateData(new HashMap<Long, DLSN>(), confLocal, name, 4, 10, false);
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    LOG.info("Segments before modifying segment status : {}", segments);
    ZooKeeperClient zkc = TestZooKeeperClientBuilder.newBuilder(conf).uri(uri).build();
    setTruncationStatus(zkc, segments.get(0), TruncationStatus.PARTIALLY_TRUNCATED);
    for (int i = 1; i < 4; i++) {
        LogSegmentMetadata segment = segments.get(i);
        setTruncationStatus(zkc, segment, TruncationStatus.TRUNCATED);
    }
    List<LogSegmentMetadata> segmentsAfterTruncated = dlm.getLogSegments();
    dlm.purgeLogsOlderThan(999999);
    List<LogSegmentMetadata> newSegments = dlm.getLogSegments();
    LOG.info("Segments after purge segments older than 999999 : {}", newSegments);
    assertArrayEquals(segmentsAfterTruncated.toArray(new LogSegmentMetadata[segmentsAfterTruncated.size()]),
            newSegments.toArray(new LogSegmentMetadata[newSegments.size()]));
    dlm.close();
    // Update completion time of all 4 segments
    long newTimeMs = System.currentTimeMillis() - 60 * 60 * 1000 * 10;
    for (int i = 0; i < 4; i++) {
        LogSegmentMetadata segment = newSegments.get(i);
        updateCompletionTime(zkc, segment, newTimeMs + i);
    }
    DistributedLogConfiguration newConf = new DistributedLogConfiguration();
    newConf.addConfiguration(confLocal);
    newConf.setRetentionPeriodHours(1);
    DistributedLogManager newDLM = createNewDLM(newConf, name);
    AsyncLogWriter newWriter = newDLM.startAsyncLogSegmentNonPartitioned();
    long txid = 1 + 4 * 10;
    for (int j = 1; j <= 10; j++) {
        Utils.ioResult(newWriter.write(DLMTestUtil.getLogRecordInstance(txid++)));
    }
    // to make sure the truncation task is executed
    DLSN lastDLSN = Utils.ioResult(newDLM.getLastDLSNAsync());
    LOG.info("Get last dlsn of stream {} : {}", name, lastDLSN);
    assertEquals(5, newDLM.getLogSegments().size());
    Utils.close(newWriter);
    newDLM.close();
    zkc.close();
}
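In the test the truncation status is flipped directly in ZooKeeper through the setTruncationStatus helper. With setExplicitTruncationByApplication(true), an application would normally drive truncation through the writer instead. A hedged sketch follows, assuming the same AsyncLogWriter, DLSN, and Utils types used above and that AsyncLogWriter.truncate behaves as its interface describes; truncateUpTo is a hypothetical helper name, not part of the test.

// Hypothetical helper: request truncation of the stream up to truncationPoint.
// With explicit truncation enabled, purging stops at the first segment that is not
// fully truncated, which is why the PARTIALLY_TRUNCATED first segment in the test
// prevents purgeLogsOlderThan from removing anything.
static boolean truncateUpTo(AsyncLogWriter writer, DLSN truncationPoint) throws Exception {
    return Utils.ioResult(writer.truncate(truncationPoint));
}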
Use of org.apache.distributedlog.api.DistributedLogManager in project bookkeeper by apache.
Class TestTruncate, method testPurgeLogs.
@Test(timeout = 60000)
public void testPurgeLogs() throws Exception {
    String name = "distrlog-purge-logs";
    URI uri = createDLMURI("/" + name);
    populateData(new HashMap<Long, DLSN>(), conf, name, 10, 10, false);
    DistributedLogManager distributedLogManager = createNewDLM(conf, name);
    List<LogSegmentMetadata> segments = distributedLogManager.getLogSegments();
    LOG.info("Segments before modifying completion time : {}", segments);
    ZooKeeperClient zkc = TestZooKeeperClientBuilder.newBuilder(conf).uri(uri).build();
    // Update completion time of first 5 segments
    long newTimeMs = System.currentTimeMillis() - 60 * 60 * 1000 * 2;
    for (int i = 0; i < 5; i++) {
        LogSegmentMetadata segment = segments.get(i);
        updateCompletionTime(zkc, segment, newTimeMs + i);
    }
    zkc.close();
    segments = distributedLogManager.getLogSegments();
    LOG.info("Segments after modifying completion time : {}", segments);
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setRetentionPeriodHours(1);
    confLocal.setExplicitTruncationByApplication(false);
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    AsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
    long txid = 1 + 10 * 10;
    for (int j = 1; j <= 10; j++) {
        Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(txid++)));
    }
    // wait until the truncation task completes
    BKAsyncLogWriter bkLogWriter = (BKAsyncLogWriter) writer;
    CompletableFuture<List<LogSegmentMetadata>> truncationAttempt = bkLogWriter.getLastTruncationAttempt();
    while (truncationAttempt == null || !truncationAttempt.isDone()) {
        TimeUnit.MILLISECONDS.sleep(20);
        truncationAttempt = bkLogWriter.getLastTruncationAttempt();
    }
    assertEquals(6, distributedLogManager.getLogSegments().size());
    Utils.close(writer);
    dlm.close();
    distributedLogManager.close();
}
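testPurgeLogs exercises the time-based retention path: explicit truncation is disabled, the retention period is one hour, and the completion times of the first five segments are pushed two hours into the past, so the background truncation task triggered by the new writer purges them (10 old segments minus 5 purged, plus the new writer's segment, leaves 6). A hedged configuration sketch contrasting the two retention modes used by these tests; both setters appear in the code above.

// Hedged sketch of the two retention modes exercised by these tests.
DistributedLogConfiguration timeBased = new DistributedLogConfiguration();
// completed segments older than the retention period are purged by a background task
timeBased.setExplicitTruncationByApplication(false);
timeBased.setRetentionPeriodHours(1);

DistributedLogConfiguration appDriven = new DistributedLogConfiguration();
// segments are only purged once the application has explicitly truncated past them
appDriven.setExplicitTruncationByApplication(true);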
Use of org.apache.distributedlog.api.DistributedLogManager in project bookkeeper by apache.
Class TestTruncate, method verifyEntries.
private void verifyEntries(String name, long readFromTxId, long startTxId, int numEntries) throws Exception {
    DistributedLogManager dlm = createNewDLM(conf, name);
    LogReader reader = dlm.getInputStream(readFromTxId);
    long txid = startTxId;
    int numRead = 0;
    LogRecord r = reader.readNext(false);
    while (null != r) {
        DLMTestUtil.verifyLogRecord(r);
        assertEquals(txid++, r.getTransactionId());
        ++numRead;
        r = reader.readNext(false);
    }
    assertEquals(numEntries, numRead);
    reader.close();
    dlm.close();
}
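verifyEntries reads back synchronously by transaction id through getInputStream and non-blocking readNext(false). For comparison, here is a hedged sketch of the DLSN-based asynchronous counterpart, assuming the AsyncLogReader API of the same library; readAllFromBeginning is a hypothetical helper, not part of the test class.

// Hypothetical helper: read a known number of records asynchronously, starting from
// the beginning of the stream (DLSN.InitialDLSN) rather than from a transaction id.
static void readAllFromBeginning(DistributedLogManager dlm, int expectedRecords) throws Exception {
    AsyncLogReader reader = dlm.getAsyncLogReader(DLSN.InitialDLSN);
    try {
        for (int i = 0; i < expectedRecords; i++) {
            LogRecordWithDLSN record = Utils.ioResult(reader.readNext());
            System.out.println("read txid " + record.getTransactionId() + " at " + record.getDlsn());
        }
    } finally {
        Utils.ioResult(reader.asyncClose());
    }
}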
Use of org.apache.distributedlog.api.DistributedLogManager in project bookkeeper by apache.
Class TestTruncate, method populateData.
private Pair<DistributedLogManager, AsyncLogWriter> populateData(Map<Long, DLSN> txid2DLSN,
        DistributedLogConfiguration confLocal, String name, int numLogSegments, int numEntriesPerLogSegment,
        boolean createInprogressLogSegment) throws Exception {
    long txid = 1;
    for (long i = 1; i <= numLogSegments; i++) {
        LOG.info("Writing Log Segment {}.", i);
        DistributedLogManager dlm = createNewDLM(confLocal, name);
        AsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
        for (int j = 1; j <= numEntriesPerLogSegment; j++) {
            long curTxId = txid++;
            DLSN dlsn = Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(curTxId)));
            txid2DLSN.put(curTxId, dlsn);
        }
        Utils.close(writer);
        dlm.close();
    }
    if (createInprogressLogSegment) {
        DistributedLogManager dlm = createNewDLM(confLocal, name);
        AsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
        for (int j = 1; j <= 10; j++) {
            long curTxId = txid++;
            DLSN dlsn = Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(curTxId)));
            txid2DLSN.put(curTxId, dlsn);
        }
        return new ImmutablePair<DistributedLogManager, AsyncLogWriter>(dlm, writer);
    } else {
        return null;
    }
}
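When createInprogressLogSegment is true the helper leaves the last segment open and hands back the live manager and writer, so the caller owns their lifecycle. A hedged usage sketch, assuming a confLocal configuration and stream name set up as in the tests above:

// Hedged usage sketch: the caller must close the returned handles once it is done
// exercising truncation/purge behaviour against the open (in-progress) segment.
Map<Long, DLSN> txid2DLSN = new HashMap<Long, DLSN>();
Pair<DistributedLogManager, AsyncLogWriter> handles =
        populateData(txid2DLSN, confLocal, name, 4, 10, true);
// ... run assertions against the stream while the last segment is still in progress ...
Utils.close(handles.getRight());
handles.getLeft().close();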
Aggregations