use of org.apache.bookkeeper.client.LedgerHandle in project distributedlog by twitter.
the class TestBKLogSegmentWriter method testAbortShouldFailAllWrites.
/**
* Abort should wait for outstanding transmits to be completed and cancel buffered data.
*
* @throws Exception
*/
@Test(timeout = 60000)
public void testAbortShouldFailAllWrites() throws Exception {
    DistributedLogConfiguration confLocal = newLocalConf();
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(Integer.MAX_VALUE);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    ZKDistributedLock lock = createLock("/test/lock-" + runtime.getMethodName(), zkc, true);
    BKLogSegmentWriter writer = createLogSegmentWriter(confLocal, 0L, -1L, lock);
    // Use another lock to wait for writer releasing lock
    ZKDistributedLock lock0 = createLock("/test/lock-" + runtime.getMethodName(), zkc0, false);
    Future<ZKDistributedLock> lockFuture0 = lock0.asyncAcquire();
    // add 10 records
    int numRecords = 10;
    List<Future<DLSN>> futureList = new ArrayList<Future<DLSN>>(numRecords);
    for (int i = 0; i < numRecords; i++) {
        futureList.add(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(i)));
    }
    assertEquals("Last tx id should be " + (numRecords - 1), numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should be -1", -1L, writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should be " + DLSN.InvalidDLSN, DLSN.InvalidDLSN, writer.getLastDLSN());
    assertEquals("Position should be " + numRecords, numRecords, writer.getPositionWithinLogSegment());
    final CountDownLatch deferLatch = new CountDownLatch(1);
    writer.getFuturePool().apply(new AbstractFunction0<Object>() {
        @Override
        public Object apply() {
            try {
                deferLatch.await();
            } catch (InterruptedException e) {
                LOG.warn("Interrupted on deferring completion : ", e);
            }
            return null;
        }
    });
    // transmit the buffered data
    FutureUtils.result(writer.flush());
    // add another 10 records
    List<Future<DLSN>> anotherFutureList = new ArrayList<Future<DLSN>>(numRecords);
    for (int i = numRecords; i < 2 * numRecords; i++) {
        anotherFutureList.add(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(i)));
    }
    assertEquals("Last tx id should become " + (2 * numRecords - 1), 2 * numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should become " + (numRecords - 1), (long) (numRecords - 1), writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should still be " + DLSN.InvalidDLSN, DLSN.InvalidDLSN, writer.getLastDLSN());
    assertEquals("Position should become " + (2 * numRecords), 2 * numRecords, writer.getPositionWithinLogSegment());
    // abort the writer: it waits for outstanding transmits and cancels buffered data
    abortWriterAndLock(writer, lock);
    Await.result(lockFuture0);
    lock0.checkOwnership();
    // release defer latch so completion would go through
    deferLatch.countDown();
    List<DLSN> dlsns = Await.result(Future.collect(futureList));
    assertEquals("All first 10 records should be written", numRecords, dlsns.size());
    for (int i = 0; i < numRecords; i++) {
        DLSN dlsn = dlsns.get(i);
assertEquals("Incorrent ledger sequence number", 0L, dlsn.getLogSegmentSequenceNo());
assertEquals("Incorrent entry id", 0L, dlsn.getEntryId());
assertEquals("Inconsistent slot id", i, dlsn.getSlotId());
}
for (int i = 0; i < numRecords; i++) {
try {
Await.result(anotherFutureList.get(i));
fail("Should be aborted record " + (numRecords + i) + " with transmit exception");
} catch (WriteCancelledException wce) {
// writes should be cancelled.
}
}
assertEquals("Last tx id should still be " + (2 * numRecords - 1), 2 * numRecords - 1, writer.getLastTxId());
assertEquals("Last acked tx id should be still " + (numRecords - 1), (long) (numRecords - 1), writer.getLastTxIdAcknowledged());
assertEquals("Last DLSN should become " + futureList.get(futureList.size() - 1), dlsns.get(futureList.size() - 1), writer.getLastDLSN());
assertEquals("Position should become " + 2 * numRecords, 2 * numRecords, writer.getPositionWithinLogSegment());
    // check that only one entry was written
    LedgerHandle lh = getLedgerHandle(writer);
    LedgerHandle readLh = openLedgerNoRecovery(lh);
assertTrue("Ledger " + lh.getId() + " should not be closed", readLh.isClosed());
assertEquals("Only one entry is written for ledger " + lh.getId(), 0L, lh.getLastAddPushed());
assertEquals("Only one entry is written for ledger " + lh.getId(), 0L, readLh.getLastAddConfirmed());
}
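The read-back at the end of this test uses BookKeeper's plain client API rather than anything DistributedLog-specific. A minimal standalone sketch of that verification pattern, assuming a reachable ZooKeeper ensemble at the placeholder address below and a hypothetical ledger id and password, might look like:

import org.apache.bookkeeper.client.BookKeeper;
import org.apache.bookkeeper.client.LedgerHandle;

public class LedgerReadBackCheck {
    public static void main(String[] args) throws Exception {
        // Placeholder ZooKeeper address, ledger id and password for this sketch.
        BookKeeper bk = new BookKeeper("127.0.0.1:2181");
        long ledgerId = 42L;
        byte[] password = "test-password".getBytes("UTF-8");
        // Open without recovery so the check itself neither fences nor seals the ledger.
        LedgerHandle readLh = bk.openLedgerNoRecovery(ledgerId, BookKeeper.DigestType.CRC32, password);
        System.out.println("ledger " + readLh.getId()
                + " closed=" + readLh.isClosed()
                + " lastAddConfirmed=" + readLh.getLastAddConfirmed());
        readLh.close();
        bk.close();
    }
}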
use of org.apache.bookkeeper.client.LedgerHandle in project distributedlog by twitter.
the class TestLedgerAllocator method testObtainMultipleLedgers.
@Test(timeout = 60000)
public void testObtainMultipleLedgers() throws Exception {
    String allocationPath = "/" + runtime.getMethodName();
    SimpleLedgerAllocator allocator = createAllocator(allocationPath);
    int numLedgers = 10;
    Set<LedgerHandle> allocatedLedgers = new HashSet<LedgerHandle>();
    for (int i = 0; i < numLedgers; i++) {
        allocator.allocate();
        ZKTransaction txn = newTxn();
        LedgerHandle lh = FutureUtils.result(allocator.tryObtain(txn, NULL_LISTENER));
        FutureUtils.result(txn.execute());
        allocatedLedgers.add(lh);
    }
    assertEquals(numLedgers, allocatedLedgers.size());
}
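Each handle returned by tryObtain is an ordinary org.apache.bookkeeper.client.LedgerHandle, so once txn.execute() has committed, the caller can write to it and seal it with the standard client calls. A hedged follow-up sketch for the allocatedLedgers set built above (the payload is illustrative only):

    for (LedgerHandle lh : allocatedLedgers) {
        // addEntry appends synchronously and returns the id of the new entry
        long entryId = lh.addEntry("payload".getBytes(java.nio.charset.StandardCharsets.UTF_8));
        // close seals the ledger so readers observe a fixed lastAddConfirmed
        lh.close();
    }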
use of org.apache.bookkeeper.client.LedgerHandle in project distributedlog by twitter.
the class TestLedgerAllocator method testSuccessAllocatorShouldDeleteUnusedledger.
@Test(timeout = 60000)
public void testSuccessAllocatorShouldDeleteUnusedledger() throws Exception {
    String allocationPath = "/allocation-delete-unused-ledger";
    zkc.get().create(allocationPath, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    Stat stat = new Stat();
    byte[] data = zkc.get().getData(allocationPath, false, stat);
    Versioned<byte[]> allocationData = new Versioned<byte[]>(data, new ZkVersion(stat.getVersion()));
    SimpleLedgerAllocator allocator1 = new SimpleLedgerAllocator(allocationPath, allocationData, newQuorumConfigProvider(dlConf), zkc, bkc);
    allocator1.allocate();
    // wait until allocated
    ZKTransaction txn1 = newTxn();
    LedgerHandle lh1 = FutureUtils.result(allocator1.tryObtain(txn1, NULL_LISTENER));
    // Second allocator kicks in
    stat = new Stat();
    data = zkc.get().getData(allocationPath, false, stat);
    allocationData = new Versioned<byte[]>(data, new ZkVersion(stat.getVersion()));
    SimpleLedgerAllocator allocator2 = new SimpleLedgerAllocator(allocationPath, allocationData, newQuorumConfigProvider(dlConf), zkc, bkc);
    allocator2.allocate();
    // wait until allocated
    ZKTransaction txn2 = newTxn();
    LedgerHandle lh2 = FutureUtils.result(allocator2.tryObtain(txn2, NULL_LISTENER));
    // should fail to commit txn1 as version is changed by second allocator
    try {
        FutureUtils.result(txn1.execute());
        fail("Should fail commit obtaining ledger handle from first allocator as allocator is modified by second allocator.");
    } catch (ZKException ke) {
        // as expected
    }
    FutureUtils.result(txn2.execute());
    Utils.close(allocator1);
    Utils.close(allocator2);
    // ledger handle should be deleted
    try {
        lh1.close();
        fail("LedgerHandle allocated by allocator1 should be deleted.");
    } catch (BKException bke) {
        // as expected
    }
    try {
        bkc.get().openLedger(lh1.getId(), BookKeeper.DigestType.CRC32, dlConf.getBKDigestPW().getBytes());
        fail("LedgerHandle allocated by allocator1 should be deleted.");
    } catch (BKException.BKNoSuchLedgerExistsException nslee) {
        // as expected
    }
    long eid = lh2.addEntry("hello world".getBytes());
    lh2.close();
    LedgerHandle readLh = bkc.get().openLedger(lh2.getId(), BookKeeper.DigestType.CRC32, dlConf.getBKDigestPW().getBytes());
    Enumeration<LedgerEntry> entries = readLh.readEntries(eid, eid);
    int i = 0;
    while (entries.hasMoreElements()) {
        LedgerEntry entry = entries.nextElement();
        assertEquals("hello world", new String(entry.getEntry(), UTF_8));
        ++i;
    }
    assertEquals(1, i);
}
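txn1 fails because allocator2 re-reads the allocation znode and advances its version, so the conditional write carried in txn1 no longer matches the version allocator1 captured. The same compare-and-set behaviour can be reproduced with the raw ZooKeeper API; a hedged sketch using a placeholder connect string and the test's allocation path:

    ZooKeeper zk1 = new ZooKeeper("127.0.0.1:2181", 30000, null);
    ZooKeeper zk2 = new ZooKeeper("127.0.0.1:2181", 30000, null);
    Stat stat = new Stat();
    zk1.getData("/allocation-delete-unused-ledger", false, stat);      // zk1 captures version N
    zk2.setData("/allocation-delete-unused-ledger", new byte[0], -1);  // unconditional write bumps the version
    try {
        zk1.setData("/allocation-delete-unused-ledger", new byte[0], stat.getVersion()); // still expects N
        fail("conditional write against a stale version should not succeed");
    } catch (KeeperException.BadVersionException expected) {
        // rejected, mirroring the ZKException thrown when txn1.execute() runs
    }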
use of org.apache.bookkeeper.client.LedgerHandle in project distributedlog by twitter.
the class TestLedgerAllocatorPool method testConcurrentAllocation.
@Test
public void testConcurrentAllocation() throws Exception {
    final int numAllocators = 5;
    String allocationPath = "/concurrentAllocation";
    final LedgerAllocatorPool pool = new LedgerAllocatorPool(allocationPath, numAllocators, dlConf, zkc, bkc, allocationExecutor);
    final ConcurrentMap<Long, LedgerHandle> allocatedLedgers = new ConcurrentHashMap<Long, LedgerHandle>();
    final AtomicInteger numFailures = new AtomicInteger(0);
    Thread[] allocationThreads = new Thread[numAllocators];
    for (int i = 0; i < numAllocators; i++) {
        final int tid = i;
        allocationThreads[i] = new Thread() {
            int numLedgers = 50;
            @Override
            public void run() {
                try {
                    for (int i = 0; i < numLedgers; i++) {
                        pool.allocate();
                        ZKTransaction txn = newTxn();
                        LedgerHandle lh = FutureUtils.result(pool.tryObtain(txn, NULL_LISTENER));
                        FutureUtils.result(txn.execute());
                        lh.close();
                        allocatedLedgers.putIfAbsent(lh.getId(), lh);
                        logger.info("[thread {}] allocate {}th ledger {}", new Object[] { tid, i, lh.getId() });
                    }
                } catch (Exception ioe) {
                    numFailures.incrementAndGet();
                }
            }
        };
    }
    for (Thread t : allocationThreads) {
        t.start();
    }
    for (Thread t : allocationThreads) {
        t.join();
    }
    assertEquals(0, numFailures.get());
    assertEquals(50 * numAllocators, allocatedLedgers.size());
    Utils.close(pool);
}
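The final size assertion holds only because BookKeeper assigns globally unique ledger ids, so putIfAbsent never merges two distinct allocations under one key. A hedged variant of the same dedup check using a concurrent set (assumes Java 8 for ConcurrentHashMap.newKeySet()):

    // Shared across the allocation threads.
    final Set<Long> ledgerIds = ConcurrentHashMap.newKeySet();
    // Inside each thread, after txn.execute() succeeds for a handle lh:
    if (!ledgerIds.add(lh.getId())) {
        // A duplicate id would mean the pool handed the same ledger to two callers.
        numFailures.incrementAndGet();
    }
    // After joining all threads:
    assertEquals(50 * numAllocators, ledgerIds.size());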
use of org.apache.bookkeeper.client.LedgerHandle in project distributedlog by twitter.
the class TestLedgerAllocatorPool method testAllocateMultipleLedgers.
@Test(timeout = 60000)
public void testAllocateMultipleLedgers() throws Exception {
    String allocationPath = "/" + runtime.getMethodName();
    int numAllocators = 5;
    final LedgerAllocatorPool pool = new LedgerAllocatorPool(allocationPath, numAllocators, dlConf, zkc, bkc, allocationExecutor);
    int numLedgers = 20;
    Set<LedgerHandle> allocatedLedgers = new HashSet<LedgerHandle>();
    for (int i = 0; i < numLedgers; i++) {
        pool.allocate();
        ZKTransaction txn = newTxn();
        LedgerHandle lh = FutureUtils.result(pool.tryObtain(txn, NULL_LISTENER));
        FutureUtils.result(txn.execute());
        allocatedLedgers.add(lh);
    }
    assertEquals(numLedgers, allocatedLedgers.size());
}
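Unlike testConcurrentAllocation above, this snippet leaves the pool and the obtained handles open. A hedged teardown sketch for the same variables, mirroring the Utils.close(pool) call used in the concurrent test:

    for (LedgerHandle lh : allocatedLedgers) {
        lh.close(); // seal each obtained ledger
    }
    Utils.close(pool); // release the pool's underlying allocators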