Use of com.twitter.distributedlog.lock.DistributedLock in the distributedlog project by Twitter.
Example: the createWriteHandler method of class BKDistributedLogManager.
private void createWriteHandler(ZKLogMetadataForWriter logMetadata,
                                boolean lockHandler,
                                final Promise<BKLogWriteHandler> createPromise) {
    OrderedScheduler lockExecutor = getLockStateExecutor(true);
    // Choose the write lock implementation: a real ZK lock when write locking
    // is enabled, otherwise a no-op placeholder.
    final DistributedLock writeLock;
    if (conf.isWriteLockEnabled()) {
        writeLock = new ZKDistributedLock(lockExecutor, getLockFactory(true),
                logMetadata.getLockPath(), conf.getLockTimeoutMilliSeconds(), statsLogger);
    } else {
        writeLock = NopDistributedLock.INSTANCE;
    }
    // Create the ledger allocator; an IO failure aborts handler creation.
    LedgerAllocator ledgerAllocator;
    try {
        ledgerAllocator = createLedgerAllocator(logMetadata);
    } catch (IOException ioe) {
        FutureUtils.setException(createPromise, ioe);
        return;
    }
    // Construct the write handler before the remaining resources are initialized.
    final BKLogWriteHandler writeHandler = new BKLogWriteHandler(
            logMetadata, conf, writerZKCBuilder, writerBKCBuilder, writerMetadataStore,
            scheduler, ledgerAllocator, statsLogger, perLogStatsLogger, alertStatsLogger,
            clientId, regionId, writeLimiter, featureProvider, dynConf, writeLock);
    PermitManager permitManager = getLogSegmentRollingPermitManager();
    if (permitManager instanceof Watcher) {
        writeHandler.register((Watcher) permitManager);
    }
    if (!lockHandler) {
        // Caller does not require the lock to be held up front; return the handler now.
        FutureUtils.setValue(createPromise, writeHandler);
        return;
    }
    // Acquire the lock asynchronously: complete the promise only once it is held,
    // and tear the handler back down if acquisition fails.
    writeHandler.lockHandler().addEventListener(new FutureEventListener<DistributedLock>() {
        @Override
        public void onSuccess(DistributedLock acquiredLock) {
            FutureUtils.setValue(createPromise, writeHandler);
        }

        @Override
        public void onFailure(final Throwable cause) {
            writeHandler.asyncClose().ensure(new AbstractFunction0<BoxedUnit>() {
                @Override
                public BoxedUnit apply() {
                    FutureUtils.setException(createPromise, cause);
                    return BoxedUnit.UNIT;
                }
            });
        }
    });
}
Use of com.twitter.distributedlog.lock.DistributedLock in the distributedlog project by Twitter.
Example: the lockStream method of class BKLogReadHandler.
/**
 * Elective stream lock--readers are not required to acquire the lock before using the stream.
 */
synchronized Future<Void> lockStream() {
    if (null != lockAcquireFuture) {
        // Lock acquisition already started (or finished); reuse the same future.
        return lockAcquireFuture;
    }
    // Constructing the lock involves a blocking call, so it is deferred to the
    // scheduler instead of running on the ZK completion thread.
    final Function0<DistributedLock> buildLock = new ExceptionalFunction0<DistributedLock>() {
        @Override
        public DistributedLock applyE() throws IOException {
            BKLogReadHandler.this.readLock = new ZKDistributedLock(
                    lockStateExecutor, lockFactory, readLockPath,
                    conf.getLockTimeoutMilliSeconds(), statsLogger.scope("read_lock"));
            LOG.info("acquiring readlock {} at {}", getLockClientId(), readLockPath);
            return BKLogReadHandler.this.readLock;
        }
    };
    // Ensure the lock path exists, build the lock on the scheduler, then acquire it.
    lockAcquireFuture = ensureReadLockPathExist().flatMap(
            new ExceptionalFunction<Void, Future<Void>>() {
                @Override
                public Future<Void> applyE(Void in) throws Throwable {
                    return scheduler.apply(buildLock).flatMap(
                            new ExceptionalFunction<DistributedLock, Future<Void>>() {
                                @Override
                                public Future<Void> applyE(DistributedLock acquiredLock)
                                        throws IOException {
                                    return acquireLockOnExecutorThread(acquiredLock);
                                }
                            });
                }
            });
    return lockAcquireFuture;
}
Use of com.twitter.distributedlog.lock.DistributedLock in the distributedlog project by Twitter.
Example: the testAsyncWriteWithMinDelayBetweenFlushesFlushFailure method of class TestAsyncReaderWriter.
@Test(timeout = 60000)
public void testAsyncWriteWithMinDelayBetweenFlushesFlushFailure() throws Exception {
    String name = runtime.getMethodName();
    // Immediate flush with a minimum delay between flushes, so a write's
    // transmit is scheduled rather than executed inline.
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setOutputBufferSize(0);
    confLocal.setImmediateFlushEnabled(true);
    confLocal.setMinDelayBetweenImmediateFlushMs(1);
    URI uri = createDLMURI("/" + name);
    ensureURICreated(uri);
    // Two namespaces with distinct client ids so a second writer can steal the lock.
    DistributedLogNamespace namespace = DistributedLogNamespaceBuilder.newBuilder()
            .conf(confLocal).uri(uri).clientId("gabbagoo").build();
    DistributedLogManager dlm = namespace.openLog(name);
    DistributedLogNamespace namespace1 = DistributedLogNamespaceBuilder.newBuilder()
            .conf(confLocal).uri(uri).clientId("tortellini").build();
    DistributedLogManager dlm1 = namespace1.openLog(name);
    try {
        int txid = 1;
        BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());
        // First write succeeds since lock isnt checked until transmit, which is scheduled
        Await.result(writer.write(DLMTestUtil.getLogRecordInstance(txid++)));
        writer.flushAndCommit();
        BKLogSegmentWriter perStreamWriter = writer.getCachedLogWriter();
        DistributedLock lock = perStreamWriter.getLock();
        FutureUtils.result(lock.asyncClose());
        // Get second writer, steal lock
        BKAsyncLogWriter writer2 = (BKAsyncLogWriter) (dlm1.startAsyncLogSegmentNonPartitioned());
        try {
            // Succeeds, kicks off a scheduled flush
            writer.write(DLMTestUtil.getLogRecordInstance(txid++));
            // Give the scheduled flush time to observe the lost lock before the next write.
            Thread.sleep(100);
            Await.result(writer.write(DLMTestUtil.getLogRecordInstance(txid++)));
            fail("should have thrown");
        } catch (LockingException ex) {
            LOG.debug("caught exception ", ex);
        }
        writer.close();
        // Close the lock-stealing writer as well so the segment is released cleanly.
        writer2.close();
    } finally {
        // Always release both log managers, even if an assertion above fails.
        dlm.close();
        dlm1.close();
    }
}
Aggregations