Use of com.twitter.distributedlog.lock.ZKDistributedLock in project distributedlog by twitter.
The class TestBKLogSegmentWriter, method createLock.
private ZKDistributedLock createLock(String path, ZooKeeperClient zkClient, boolean acquireLock)
        throws Exception {
    try {
        Await.result(Utils.zkAsyncCreateFullPathOptimistic(zkClient, path, new byte[0],
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT));
    } catch (KeeperException.NodeExistsException nee) {
        // node already exists
    }
    SessionLockFactory lockFactory = new ZKSessionLockFactory(
            zkClient,
            "test-lock",
            lockStateExecutor,
            0,
            Long.MAX_VALUE,
            conf.getZKSessionTimeoutMilliseconds(),
            NullStatsLogger.INSTANCE);
    ZKDistributedLock lock = new ZKDistributedLock(
            lockStateExecutor,
            lockFactory,
            path,
            Long.MAX_VALUE,
            NullStatsLogger.INSTANCE);
    if (acquireLock) {
        return FutureUtils.result(lock.asyncAcquire());
    } else {
        return lock;
    }
}
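The helper above acquires the lock only when asked; callers remain responsible for releasing it. A minimal usage sketch follows, assuming Utils.closeQuietly(lock) releases the lock via its AsyncCloseable contract, as the teardown helpers in this test class suggest; the lock path and zkClient here are illustrative.

ZKDistributedLock lock = createLock("/test/lock-example", zkClient, false);
try {
    FutureUtils.result(lock.asyncAcquire());   // block until the ZK lock is held
    // ... operate on the resource guarded by the lock ...
} finally {
    Utils.closeQuietly(lock);                  // release the lock and its ZK session state
}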
Use of com.twitter.distributedlog.lock.ZKDistributedLock in project distributedlog by twitter.
The class TestBKLogSegmentWriter, method testUpdateLastTxIdForUserRecords.
/**
 * Log segment writer should update the last tx id only for user records.
 */
@Test(timeout = 60000)
public void testUpdateLastTxIdForUserRecords() throws Exception {
    DistributedLogConfiguration confLocal = newLocalConf();
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(Integer.MAX_VALUE);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    ZKDistributedLock lock = createLock("/test/lock-" + runtime.getMethodName(), zkc, true);
    BKLogSegmentWriter writer = createLogSegmentWriter(confLocal, 0L, -1L, lock);
    // add 10 records
    int numRecords = 10;
    List<Future<DLSN>> futureList = new ArrayList<Future<DLSN>>(numRecords);
    for (int i = 0; i < numRecords; i++) {
        futureList.add(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(i)));
    }
    LogRecord controlRecord = DLMTestUtil.getLogRecordInstance(9999L);
    controlRecord.setControl();
    futureList.add(writer.asyncWrite(controlRecord));
    assertEquals("Last tx id should be " + (numRecords - 1), numRecords - 1, writer.getLastTxId());
    assertEquals("Last DLSN should be " + DLSN.InvalidDLSN, DLSN.InvalidDLSN, writer.getLastDLSN());
    assertEquals("Position should be " + numRecords, numRecords, writer.getPositionWithinLogSegment());
    // close the writer to flush the output buffer
    closeWriterAndLock(writer, lock);
    List<DLSN> dlsns = Await.result(Future.collect(futureList));
    assertEquals("All " + (numRecords + 1) + " records should be written", numRecords + 1, dlsns.size());
    for (int i = 0; i < numRecords; i++) {
        DLSN dlsn = dlsns.get(i);
        assertEquals("Incorrect ledger sequence number", 0L, dlsn.getLogSegmentSequenceNo());
        assertEquals("Incorrect entry id", 0L, dlsn.getEntryId());
        assertEquals("Inconsistent slot id", i, dlsn.getSlotId());
    }
    DLSN dlsn = dlsns.get(numRecords);
    assertEquals("Incorrect ledger sequence number", 0L, dlsn.getLogSegmentSequenceNo());
    assertEquals("Incorrect entry id", 1L, dlsn.getEntryId());
    assertEquals("Inconsistent slot id", 0L, dlsn.getSlotId());
    assertEquals("Last tx id should be " + (numRecords - 1), numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should be " + (numRecords - 1), numRecords - 1, writer.getLastTxIdAcknowledged());
    assertEquals("Position should be " + numRecords, numRecords, writer.getPositionWithinLogSegment());
    assertEquals("Last DLSN should be " + dlsns.get(numRecords - 1), dlsns.get(numRecords - 1), writer.getLastDLSN());
}
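The assertions above rely on how DLSNs are laid out: the ten buffered user records were transmitted as a single BookKeeper entry (entry 0 of log segment 0), distinguished only by slot id, while the control record was flushed in a separate transmit and landed in entry 1. A small sketch of that ordering, assuming the public DLSN(logSegmentSequenceNo, entryId, slotId) constructor and its Comparable ordering:

DLSN firstUserRecord = new DLSN(0L, 0L, 0L); // first record of the batched entry
DLSN lastUserRecord = new DLSN(0L, 0L, 9L);  // same entry, slot 9
DLSN controlRecord = new DLSN(0L, 1L, 0L);   // separate transmit, next entry
// DLSNs compare by log segment sequence number, then entry id, then slot id
assertTrue(firstUserRecord.compareTo(lastUserRecord) < 0);
assertTrue(lastUserRecord.compareTo(controlRecord) < 0);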
Use of com.twitter.distributedlog.lock.ZKDistributedLock in project distributedlog by twitter.
The class TestBKLogSegmentWriter, method testAbortShouldFailAllWrites.
/**
 * Abort should wait for outstanding transmits to complete and cancel buffered data.
 *
 * @throws Exception
 */
@Test(timeout = 60000)
public void testAbortShouldFailAllWrites() throws Exception {
    DistributedLogConfiguration confLocal = newLocalConf();
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(Integer.MAX_VALUE);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    ZKDistributedLock lock = createLock("/test/lock-" + runtime.getMethodName(), zkc, true);
    BKLogSegmentWriter writer = createLogSegmentWriter(confLocal, 0L, -1L, lock);
    // Use another lock to wait for the writer releasing its lock
    ZKDistributedLock lock0 = createLock("/test/lock-" + runtime.getMethodName(), zkc0, false);
    Future<ZKDistributedLock> lockFuture0 = lock0.asyncAcquire();
    // add 10 records
    int numRecords = 10;
    List<Future<DLSN>> futureList = new ArrayList<Future<DLSN>>(numRecords);
    for (int i = 0; i < numRecords; i++) {
        futureList.add(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(i)));
    }
    assertEquals("Last tx id should be " + (numRecords - 1), numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should be -1", -1L, writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should be " + DLSN.InvalidDLSN, DLSN.InvalidDLSN, writer.getLastDLSN());
    assertEquals("Position should be " + numRecords, numRecords, writer.getPositionWithinLogSegment());
    final CountDownLatch deferLatch = new CountDownLatch(1);
    writer.getFuturePool().apply(new AbstractFunction0<Object>() {
        @Override
        public Object apply() {
            try {
                deferLatch.await();
            } catch (InterruptedException e) {
                LOG.warn("Interrupted on deferring completion : ", e);
            }
            return null;
        }
    });
    // transmit the buffered data
    FutureUtils.result(writer.flush());
    // add another 10 records
    List<Future<DLSN>> anotherFutureList = new ArrayList<Future<DLSN>>(numRecords);
    for (int i = numRecords; i < 2 * numRecords; i++) {
        anotherFutureList.add(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(i)));
    }
    assertEquals("Last tx id should become " + (2 * numRecords - 1), 2 * numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should become " + (numRecords - 1), (long) (numRecords - 1), writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should still be " + DLSN.InvalidDLSN, DLSN.InvalidDLSN, writer.getLastDLSN());
    assertEquals("Position should become " + (2 * numRecords), 2 * numRecords, writer.getPositionWithinLogSegment());
    // abort the writer: it waits for outstanding transmits and aborts buffered data
    abortWriterAndLock(writer, lock);
    Await.result(lockFuture0);
    lock0.checkOwnership();
    // release the defer latch so completions can go through
    deferLatch.countDown();
    List<DLSN> dlsns = Await.result(Future.collect(futureList));
    assertEquals("All first " + numRecords + " records should be written", numRecords, dlsns.size());
    for (int i = 0; i < numRecords; i++) {
        DLSN dlsn = dlsns.get(i);
        assertEquals("Incorrect ledger sequence number", 0L, dlsn.getLogSegmentSequenceNo());
        assertEquals("Incorrect entry id", 0L, dlsn.getEntryId());
        assertEquals("Inconsistent slot id", i, dlsn.getSlotId());
    }
    for (int i = 0; i < numRecords; i++) {
        try {
            Await.result(anotherFutureList.get(i));
            fail("Record " + (numRecords + i) + " should have been cancelled by the abort");
        } catch (WriteCancelledException wce) {
            // writes should be cancelled
        }
    }
    assertEquals("Last tx id should still be " + (2 * numRecords - 1), 2 * numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should still be " + (numRecords - 1), (long) (numRecords - 1), writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should become " + dlsns.get(dlsns.size() - 1), dlsns.get(dlsns.size() - 1), writer.getLastDLSN());
    assertEquals("Position should become " + (2 * numRecords), 2 * numRecords, writer.getPositionWithinLogSegment());
    // check that only one entry was written
    LedgerHandle lh = getLedgerHandle(writer);
    LedgerHandle readLh = openLedgerNoRecovery(lh);
    assertTrue("Ledger " + lh.getId() + " should be closed", readLh.isClosed());
    assertEquals("Only one entry should be written for ledger " + lh.getId(), 0L, lh.getLastAddPushed());
    assertEquals("Only one entry should be written for ledger " + lh.getId(), 0L, readLh.getLastAddConfirmed());
}
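The final assertions lean on BookKeeper's 0-based entry ids: a lastAddPushed or lastAddConfirmed of 0 means exactly one entry was persisted. A rough sketch of the distinction between the two counters, reusing the test helpers from above:

LedgerHandle lh = getLedgerHandle(writer);        // writer-side handle
LedgerHandle readLh = openLedgerNoRecovery(lh);   // reader-side view, opened without recovery
long entriesPushed = lh.getLastAddPushed() + 1;          // highest entry id sent by the writer, plus one
long entriesConfirmed = readLh.getLastAddConfirmed() + 1; // highest entry id durably acknowledged, plus one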
Use of com.twitter.distributedlog.lock.ZKDistributedLock in project distributedlog by twitter.
The class BKLogReadHandler, method lockStream.
/**
 * Elective stream lock: readers are not required to acquire the lock before using the stream.
 */
synchronized Future<Void> lockStream() {
    if (null == lockAcquireFuture) {
        final Function0<DistributedLock> lockFunction = new ExceptionalFunction0<DistributedLock>() {
            @Override
            public DistributedLock applyE() throws IOException {
                // Unfortunately this has a blocking call which we should not execute on the
                // ZK completion thread
                BKLogReadHandler.this.readLock = new ZKDistributedLock(
                        lockStateExecutor,
                        lockFactory,
                        readLockPath,
                        conf.getLockTimeoutMilliSeconds(),
                        statsLogger.scope("read_lock"));
                LOG.info("acquiring readlock {} at {}", getLockClientId(), readLockPath);
                return BKLogReadHandler.this.readLock;
            }
        };
        lockAcquireFuture = ensureReadLockPathExist().flatMap(new ExceptionalFunction<Void, Future<Void>>() {
            @Override
            public Future<Void> applyE(Void in) throws Throwable {
                return scheduler.apply(lockFunction).flatMap(new ExceptionalFunction<DistributedLock, Future<Void>>() {
                    @Override
                    public Future<Void> applyE(DistributedLock lock) throws IOException {
                        return acquireLockOnExecutorThread(lock);
                    }
                });
            }
        });
    }
    return lockAcquireFuture;
}
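Because the lock is elective, a reader typically calls lockStream() and reacts to the outcome rather than failing hard. A hedged usage sketch, where readHandler is a hypothetical BKLogReadHandler instance and the failure handling is illustrative:

Future<Void> lockFuture = readHandler.lockStream();
lockFuture.addEventListener(new FutureEventListener<Void>() {
    @Override
    public void onSuccess(Void value) {
        // lock acquired: this reader now holds the exclusive read lock
    }
    @Override
    public void onFailure(Throwable cause) {
        // lock not acquired: reading can continue, just without exclusivity
    }
});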
Use of com.twitter.distributedlog.lock.ZKDistributedLock in project distributedlog by twitter.
The class TestBKLogSegmentWriter, method testCloseShouldFlush.
/**
 * Closing a log segment writer should flush buffered data.
 *
 * @throws Exception
 */
@Test(timeout = 60000)
public void testCloseShouldFlush() throws Exception {
    DistributedLogConfiguration confLocal = newLocalConf();
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(Integer.MAX_VALUE);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    ZKDistributedLock lock = createLock("/test/lock-" + runtime.getMethodName(), zkc, true);
    BKLogSegmentWriter writer = createLogSegmentWriter(confLocal, 0L, -1L, lock);
    // Use another lock to wait for the writer releasing its lock
    ZKDistributedLock lock0 = createLock("/test/lock-" + runtime.getMethodName(), zkc0, false);
    Future<ZKDistributedLock> lockFuture0 = lock0.asyncAcquire();
    // add 10 records
    int numRecords = 10;
    List<Future<DLSN>> futureList = new ArrayList<Future<DLSN>>(numRecords);
    for (int i = 0; i < numRecords; i++) {
        futureList.add(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(i)));
    }
    assertEquals("Last tx id should be " + (numRecords - 1), numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should be -1", -1L, writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should be " + DLSN.InvalidDLSN, DLSN.InvalidDLSN, writer.getLastDLSN());
    assertEquals("Position should be " + numRecords, numRecords, writer.getPositionWithinLogSegment());
    // closing the writer should flush buffered data and release the lock
    closeWriterAndLock(writer, lock);
    Await.result(lockFuture0);
    lock0.checkOwnership();
    assertEquals("Last tx id should still be " + (numRecords - 1), numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should become " + (numRecords - 1), numRecords - 1, writer.getLastTxIdAcknowledged());
    assertEquals("Position should still be " + numRecords, numRecords, writer.getPositionWithinLogSegment());
    List<DLSN> dlsns = Await.result(Future.collect(futureList));
    assertEquals("All records should be written", numRecords, dlsns.size());
    for (int i = 0; i < numRecords; i++) {
        DLSN dlsn = dlsns.get(i);
        assertEquals("Incorrect ledger sequence number", 0L, dlsn.getLogSegmentSequenceNo());
        assertEquals("Incorrect entry id", 0L, dlsn.getEntryId());
        assertEquals("Inconsistent slot id", i, dlsn.getSlotId());
    }
    assertEquals("Last DLSN should be " + dlsns.get(dlsns.size() - 1), dlsns.get(dlsns.size() - 1), writer.getLastDLSN());
    LedgerHandle lh = getLedgerHandle(writer);
    LedgerHandle readLh = openLedgerNoRecovery(lh);
    assertTrue("Ledger " + lh.getId() + " should be closed", readLh.isClosed());
    assertEquals("There should be two entries in ledger " + lh.getId(), 1L, readLh.getLastAddConfirmed());
}
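closeWriterAndLock is a helper of this test class and is not shown in this section. A plausible sketch of it, assuming BKLogSegmentWriter exposes asyncClose() per its AsyncCloseable contract and that Utils.closeQuietly releases the lock:

private void closeWriterAndLock(BKLogSegmentWriter writer, ZKDistributedLock lock)
        throws Exception {
    FutureUtils.result(writer.asyncClose()); // flushes buffered records, then closes the segment
    Utils.closeQuietly(lock);                // releases the distributed lock
}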