Use of com.twitter.distributedlog.util.OrderedScheduler in project distributedlog by Twitter.
The class TestBKDistributedLogManager, method testTruncationValidation. The test builds a single-threaded OrderedScheduler to back a ZKLogSegmentMetadataStore, writes three log segments, and verifies reader behavior as segments are marked truncated, marked active again, and finally truncated up to a specific DLSN.
@Test(timeout = 60000)
public void testTruncationValidation() throws Exception {
    String name = "distrlog-truncation-validation";
    URI uri = createDLMURI("/" + name);
    ZooKeeperClient zookeeperClient = TestZooKeeperClientBuilder.newBuilder().uri(uri).build();
    OrderedScheduler scheduler = OrderedScheduler.newBuilder().name("test-truncation-validation").corePoolSize(1).build();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setDLLedgerMetadataLayoutVersion(LogSegmentMetadata.LEDGER_METADATA_CURRENT_LAYOUT_VERSION);
    confLocal.setOutputBufferSize(0);
    LogSegmentMetadataStore metadataStore = new ZKLogSegmentMetadataStore(confLocal, zookeeperClient, scheduler);
    BKDistributedLogManager dlm = createNewDLM(confLocal, name);
    DLSN truncDLSN = DLSN.InitialDLSN;
    DLSN beyondTruncDLSN = DLSN.InitialDLSN;
    long beyondTruncTxId = 1;
    long txid = 1;
    for (long i = 0; i < 3; i++) {
        long start = txid;
        BKAsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
        for (long j = 1; j <= 10; j++) {
            LogRecord record = DLMTestUtil.getLargeLogRecordInstance(txid++);
            Future<DLSN> dlsn = writer.write(record);
            if (i == 1 && j == 2) {
                truncDLSN = Await.result(dlsn);
            } else if (i == 2 && j == 3) {
                beyondTruncDLSN = Await.result(dlsn);
                beyondTruncTxId = record.getTransactionId();
            } else if (j == 10) {
                Await.ready(dlsn);
            }
        }
        writer.close();
    }
    {
        LogReader reader = dlm.getInputStream(DLSN.InitialDLSN);
        LogRecordWithDLSN record = reader.readNext(false);
        assertTrue((record != null) && (record.getDlsn().compareTo(DLSN.InitialDLSN) == 0));
        reader.close();
    }
    Map<Long, LogSegmentMetadata> segmentList = DLMTestUtil.readLogSegments(zookeeperClient, ZKLogMetadata.getLogSegmentsPath(uri, name, confLocal.getUnpartitionedStreamName()));
    LOG.info("Read segments before truncating first segment : {}", segmentList);
    MetadataUpdater updater = LogSegmentMetadataStoreUpdater.createMetadataUpdater(confLocal, metadataStore);
    FutureUtils.result(updater.setLogSegmentTruncated(segmentList.get(1L)));
    segmentList = DLMTestUtil.readLogSegments(zookeeperClient, ZKLogMetadata.getLogSegmentsPath(uri, name, confLocal.getUnpartitionedStreamName()));
    LOG.info("Read segments after truncated first segment : {}", segmentList);
    {
        LogReader reader = dlm.getInputStream(DLSN.InitialDLSN);
        LogRecordWithDLSN record = reader.readNext(false);
        assertTrue((record != null) && (record.getDlsn().compareTo(new DLSN(2, 0, 0)) == 0));
        reader.close();
    }
    {
        LogReader reader = dlm.getInputStream(1);
        LogRecordWithDLSN record = reader.readNext(false);
        assertTrue((record != null) && (record.getDlsn().compareTo(new DLSN(2, 0, 0)) == 0));
        reader.close();
    }
    updater = LogSegmentMetadataStoreUpdater.createMetadataUpdater(confLocal, metadataStore);
    FutureUtils.result(updater.setLogSegmentActive(segmentList.get(1L)));
    segmentList = DLMTestUtil.readLogSegments(zookeeperClient, ZKLogMetadata.getLogSegmentsPath(uri, name, confLocal.getUnpartitionedStreamName()));
    LOG.info("Read segments after marked first segment as active : {}", segmentList);
    updater = LogSegmentMetadataStoreUpdater.createMetadataUpdater(confLocal, metadataStore);
    FutureUtils.result(updater.setLogSegmentTruncated(segmentList.get(2L)));
    segmentList = DLMTestUtil.readLogSegments(zookeeperClient, ZKLogMetadata.getLogSegmentsPath(uri, name, confLocal.getUnpartitionedStreamName()));
    LOG.info("Read segments after truncated second segment : {}", segmentList);
    {
        AsyncLogReader reader = dlm.getAsyncLogReader(DLSN.InitialDLSN);
        long expectedTxId = 1L;
        boolean exceptionEncountered = false;
        try {
            for (int i = 0; i < 3 * 10; i++) {
                LogRecordWithDLSN record = Await.result(reader.readNext());
                DLMTestUtil.verifyLargeLogRecord(record);
                assertEquals(expectedTxId, record.getTransactionId());
                expectedTxId++;
            }
        } catch (AlreadyTruncatedTransactionException exc) {
            exceptionEncountered = true;
        }
        assertTrue(exceptionEncountered);
        Utils.close(reader);
    }
    updater = LogSegmentMetadataStoreUpdater.createMetadataUpdater(conf, metadataStore);
    FutureUtils.result(updater.setLogSegmentActive(segmentList.get(2L)));
    BKAsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
    Assert.assertTrue(Await.result(writer.truncate(truncDLSN)));
    BKLogWriteHandler handler = writer.getCachedWriteHandler();
    List<LogSegmentMetadata> cachedSegments = handler.getFullLedgerList(false, false);
    for (LogSegmentMetadata segment : cachedSegments) {
        if (segment.getLastDLSN().compareTo(truncDLSN) < 0) {
            Assert.assertTrue(segment.isTruncated());
            Assert.assertTrue(!segment.isPartiallyTruncated());
        } else if (segment.getFirstDLSN().compareTo(truncDLSN) < 0) {
            Assert.assertTrue(!segment.isTruncated());
            Assert.assertTrue(segment.isPartiallyTruncated());
        } else {
            Assert.assertTrue(!segment.isTruncated());
            Assert.assertTrue(!segment.isPartiallyTruncated());
        }
    }
    segmentList = DLMTestUtil.readLogSegments(zookeeperClient, ZKLogMetadata.getLogSegmentsPath(uri, name, conf.getUnpartitionedStreamName()));
    Assert.assertTrue(segmentList.get(truncDLSN.getLogSegmentSequenceNo()).getMinActiveDLSN().compareTo(truncDLSN) == 0);
    {
        LogReader reader = dlm.getInputStream(DLSN.InitialDLSN);
        LogRecordWithDLSN record = reader.readNext(false);
        assertTrue(record != null);
        assertEquals(truncDLSN, record.getDlsn());
        reader.close();
    }
    {
        LogReader reader = dlm.getInputStream(1);
        LogRecordWithDLSN record = reader.readNext(false);
        assertTrue(record != null);
        assertEquals(truncDLSN, record.getDlsn());
        reader.close();
    }
    {
        AsyncLogReader reader = dlm.getAsyncLogReader(DLSN.InitialDLSN);
        LogRecordWithDLSN record = Await.result(reader.readNext());
        assertTrue(record != null);
        assertEquals(truncDLSN, record.getDlsn());
        Utils.close(reader);
    }
    {
        LogReader reader = dlm.getInputStream(beyondTruncDLSN);
        LogRecordWithDLSN record = reader.readNext(false);
        assertTrue(record != null);
        assertEquals(beyondTruncDLSN, record.getDlsn());
        reader.close();
    }
    {
        LogReader reader = dlm.getInputStream(beyondTruncTxId);
        LogRecordWithDLSN record = reader.readNext(false);
        assertTrue(record != null);
        assertEquals(beyondTruncDLSN, record.getDlsn());
        assertEquals(beyondTruncTxId, record.getTransactionId());
        reader.close();
    }
    {
        AsyncLogReader reader = dlm.getAsyncLogReader(beyondTruncDLSN);
        LogRecordWithDLSN record = Await.result(reader.readNext());
        assertTrue(record != null);
        assertEquals(beyondTruncDLSN, record.getDlsn());
        Utils.close(reader);
    }
    zookeeperClient.close();
}
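For context, the OrderedScheduler wiring the test depends on reduces to a few lines: build the scheduler, hand it to the ZKLogSegmentMetadataStore, and tear it down afterwards. A minimal sketch, assuming conf and zookeeperClient are already in scope as in the test above, and assuming OrderedScheduler exposes a ScheduledExecutorService-style shutdown():

OrderedScheduler scheduler = OrderedScheduler.newBuilder()
        .name("log-segment-metadata")   // thread name prefix (any label works)
        .corePoolSize(1)                // one ordered executor thread
        .build();
try {
    // The store uses the scheduler for its asynchronous ZooKeeper callbacks.
    LogSegmentMetadataStore metadataStore =
            new ZKLogSegmentMetadataStore(conf, zookeeperClient, scheduler);
    // ... drive it through LogSegmentMetadataStoreUpdater as the test does ...
} finally {
    scheduler.shutdown();               // assumed lifecycle method
}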
Use of com.twitter.distributedlog.util.OrderedScheduler in project distributedlog by Twitter.
The class BKDistributedLogManager, method createWriteHandler. Here the OrderedScheduler returned by getLockStateExecutor(true) drives the ZKDistributedLock that protects the write handler.
private void createWriteHandler(ZKLogMetadataForWriter logMetadata, boolean lockHandler, final Promise<BKLogWriteHandler> createPromise) {
    OrderedScheduler lockStateExecutor = getLockStateExecutor(true);
    // Build the locks
    DistributedLock lock;
    if (conf.isWriteLockEnabled()) {
        lock = new ZKDistributedLock(lockStateExecutor, getLockFactory(true), logMetadata.getLockPath(), conf.getLockTimeoutMilliSeconds(), statsLogger);
    } else {
        lock = NopDistributedLock.INSTANCE;
    }
    // Build the ledger allocator
    LedgerAllocator allocator;
    try {
        allocator = createLedgerAllocator(logMetadata);
    } catch (IOException e) {
        FutureUtils.setException(createPromise, e);
        return;
    }
    // Make sure writer handler created before resources are initialized
    final BKLogWriteHandler writeHandler = new BKLogWriteHandler(logMetadata, conf, writerZKCBuilder, writerBKCBuilder, writerMetadataStore, scheduler, allocator, statsLogger, perLogStatsLogger, alertStatsLogger, clientId, regionId, writeLimiter, featureProvider, dynConf, lock);
    PermitManager manager = getLogSegmentRollingPermitManager();
    if (manager instanceof Watcher) {
        writeHandler.register((Watcher) manager);
    }
    if (lockHandler) {
        writeHandler.lockHandler().addEventListener(new FutureEventListener<DistributedLock>() {
            @Override
            public void onSuccess(DistributedLock lock) {
                FutureUtils.setValue(createPromise, writeHandler);
            }

            @Override
            public void onFailure(final Throwable cause) {
                writeHandler.asyncClose().ensure(new AbstractFunction0<BoxedUnit>() {
                    @Override
                    public BoxedUnit apply() {
                        FutureUtils.setException(createPromise, cause);
                        return BoxedUnit.UNIT;
                    }
                });
            }
        });
    } else {
        FutureUtils.setValue(createPromise, writeHandler);
    }
}
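As a usage note, createWriteHandler() completes the supplied Promise rather than returning the handler directly. A minimal sketch of how a caller inside the manager might consume it, using the same com.twitter.util Promise/FutureEventListener types as above; the surrounding variables and error handling are illustrative assumptions, not the project's actual calling code:

final Promise<BKLogWriteHandler> createPromise = new Promise<BKLogWriteHandler>();
createWriteHandler(logMetadata, true /* lockHandler */, createPromise);
createPromise.addEventListener(new FutureEventListener<BKLogWriteHandler>() {
    @Override
    public void onSuccess(BKLogWriteHandler handler) {
        // Lock acquired (lockHandler == true); the handler is ready for writes.
    }

    @Override
    public void onFailure(Throwable cause) {
        // Either lock acquisition failed (the handler was already closed above)
        // or the ledger allocator could not be created.
    }
});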