
Example 1 with ZKDistributedLock

Use of org.apache.distributedlog.lock.ZKDistributedLock in project bookkeeper by apache.

From class TestBKLogSegmentWriter, method testNondurableWrite.

/**
 * A non-durable write should be acknowledged immediately with an invalid DLSN,
 * without any data being pushed to the ledger.
 *
 * @throws Exception
 */
@Test(timeout = 60000)
public void testNondurableWrite() throws Exception {
    DistributedLogConfiguration confLocal = newLocalConf();
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(Integer.MAX_VALUE);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setDurableWriteEnabled(false);
    ZKDistributedLock lock = createLock("/test/lock-" + runtime.getMethodName(), zkc, true);
    BKLogSegmentWriter writer = createLogSegmentWriter(confLocal, 0L, -1L, lock);
    assertEquals(DLSN.InvalidDLSN, Utils.ioResult(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(2))));
    assertEquals(-1L, ((BKLogSegmentEntryWriter) writer.getEntryWriter()).getLedgerHandle().getLastAddPushed());
    closeWriterAndLock(writer, lock);
}
Also used: BKLogSegmentEntryWriter (org.apache.distributedlog.impl.logsegment.BKLogSegmentEntryWriter), ZKDistributedLock (org.apache.distributedlog.lock.ZKDistributedLock), Test (org.junit.Test)
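
The key point above is that with setDurableWriteEnabled(false) the write future completes with the DLSN.InvalidDLSN sentinel rather than a real position, and nothing has been pushed to the ledger yet (getLastAddPushed() is still -1). Below is a minimal sketch of how caller code might test for that sentinel; the class and method names are made up for illustration, and the Utils package is assumed from the snippet's own Utils.ioResult helper.

import java.util.concurrent.CompletableFuture;

import org.apache.distributedlog.DLSN;
import org.apache.distributedlog.util.Utils;   // package name assumed

final class NonDurableAckCheck {

    // Wait for a write future and report whether the ack carries the
    // DLSN.InvalidDLSN sentinel that non-durable writes complete with.
    static boolean isFastAck(CompletableFuture<DLSN> writeFuture) throws Exception {
        DLSN dlsn = Utils.ioResult(writeFuture);
        return DLSN.InvalidDLSN.equals(dlsn);
    }
}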

Example 2 with ZKDistributedLock

Use of org.apache.distributedlog.lock.ZKDistributedLock in project bookkeeper by apache.

From class TestBKLogSegmentWriter, method testNondurableWriteAfterLedgerIsFenced.

/**
 * Non durable write should fail if the log segment is fenced.
 *
 * @throws Exception
 */
@Test(timeout = 60000)
public void testNondurableWriteAfterLedgerIsFenced() throws Exception {
    DistributedLogConfiguration confLocal = newLocalConf();
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(Integer.MAX_VALUE);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setDurableWriteEnabled(false);
    ZKDistributedLock lock = createLock("/test/lock-" + runtime.getMethodName(), zkc, true);
    BKLogSegmentWriter writer = createLogSegmentWriter(confLocal, 0L, -1L, lock);
    // fence the ledger
    fenceLedger(getLedgerHandle(writer));
    LogRecord record = DLMTestUtil.getLogRecordInstance(1);
    record.setControl();
    try {
        Utils.ioResult(writer.asyncWrite(record));
        fail("Should fail the writer if the log segment is already fenced");
    } catch (BKTransmitException bkte) {
        // expected
        assertEquals(BKException.Code.LedgerFencedException, bkte.getBKResultCode());
    }
    try {
        Utils.ioResult(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(2)));
        fail("Should fail the writer if the log segment is already fenced");
    } catch (WriteException we) {
        // expected
    }
    abortWriterAndLock(writer, lock);
}
Also used: WriteException (org.apache.distributedlog.exceptions.WriteException), BKTransmitException (org.apache.distributedlog.exceptions.BKTransmitException), ZKDistributedLock (org.apache.distributedlog.lock.ZKDistributedLock), Test (org.junit.Test)
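
In the fenced case the failure surfaces as a BKTransmitException whose result code is BKException.Code.LedgerFencedException, and once the writer has seen that error it rejects later writes up front with a WriteException. Below is a minimal sketch of distinguishing those two outcomes around a single write future; the class and method names are hypothetical, and only the exception types and getBKResultCode() come from the example above.

import java.util.concurrent.CompletableFuture;

import org.apache.bookkeeper.client.BKException;
import org.apache.distributedlog.DLSN;
import org.apache.distributedlog.exceptions.BKTransmitException;
import org.apache.distributedlog.exceptions.WriteException;
import org.apache.distributedlog.util.Utils;   // package name assumed

final class FencedWriteCheck {

    // Returns true if the write failed because the underlying ledger was fenced,
    // i.e. another writer took over the log segment.
    static boolean failedBecauseFenced(CompletableFuture<DLSN> writeFuture) throws Exception {
        try {
            Utils.ioResult(writeFuture);
            return false;                      // write succeeded
        } catch (BKTransmitException bkte) {
            return BKException.Code.LedgerFencedException == bkte.getBKResultCode();
        } catch (WriteException we) {
            // The writer already saw a transmit error and now fails writes up front.
            return false;
        }
    }
}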

Example 3 with ZKDistributedLock

Use of org.apache.distributedlog.lock.ZKDistributedLock in project bookkeeper by apache.

From class TestBKLogSegmentWriter, method testUpdateLastTxIdForUserRecords.

/**
 * Log Segment Writer should update the last tx id only for user records.
 */
@Test(timeout = 60000)
public void testUpdateLastTxIdForUserRecords() throws Exception {
    DistributedLogConfiguration confLocal = newLocalConf();
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(Integer.MAX_VALUE);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    ZKDistributedLock lock = createLock("/test/lock-" + runtime.getMethodName(), zkc, true);
    BKLogSegmentWriter writer = createLogSegmentWriter(confLocal, 0L, -1L, lock);
    // add 10 records
    int numRecords = 10;
    List<CompletableFuture<DLSN>> futureList = new ArrayList<CompletableFuture<DLSN>>(numRecords);
    for (int i = 0; i < numRecords; i++) {
        futureList.add(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(i)));
    }
    LogRecord controlRecord = DLMTestUtil.getLogRecordInstance(9999L);
    controlRecord.setControl();
    futureList.add(writer.asyncWrite(controlRecord));
    assertEquals("Last tx id should be " + (numRecords - 1), numRecords - 1, writer.getLastTxId());
    assertEquals("Last DLSN should be " + DLSN.InvalidDLSN, DLSN.InvalidDLSN, writer.getLastDLSN());
    assertEquals("Position should be " + numRecords, numRecords, writer.getPositionWithinLogSegment());
    // close the writer to flush the output buffer
    closeWriterAndLock(writer, lock);
    List<DLSN> dlsns = Utils.ioResult(FutureUtils.collect(futureList));
    assertEquals("All 11 records should be written", numRecords + 1, dlsns.size());
    for (int i = 0; i < numRecords; i++) {
        DLSN dlsn = dlsns.get(i);
        assertEquals("Incorrent ledger sequence number", 0L, dlsn.getLogSegmentSequenceNo());
        assertEquals("Incorrent entry id", 0L, dlsn.getEntryId());
        assertEquals("Inconsistent slot id", i, dlsn.getSlotId());
    }
    DLSN dlsn = dlsns.get(numRecords);
    assertEquals("Incorrent ledger sequence number", 0L, dlsn.getLogSegmentSequenceNo());
    assertEquals("Incorrent entry id", 1L, dlsn.getEntryId());
    assertEquals("Inconsistent slot id", 0L, dlsn.getSlotId());
    assertEquals("Last tx id should be " + (numRecords - 1), numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should be " + (numRecords - 1), numRecords - 1, writer.getLastTxIdAcknowledged());
    assertEquals("Position should be " + numRecords, numRecords, writer.getPositionWithinLogSegment());
    assertEquals("Last DLSN should be " + dlsn, dlsns.get(numRecords - 1), writer.getLastDLSN());
}
Also used: CompletableFuture (java.util.concurrent.CompletableFuture), ArrayList (java.util.ArrayList), ZKDistributedLock (org.apache.distributedlog.lock.ZKDistributedLock), Test (org.junit.Test)
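
The only thing that makes the extra record special above is the record.setControl() call before asyncWrite(): the control record is still transmitted and receives its own DLSN (entry id 1, slot 0), but the writer does not count it toward getLastTxId() or getPositionWithinLogSegment(). Below is a minimal sketch of that pattern, assuming the same DLMTestUtil helper as the snippet; the package names in the imports are assumptions.

import java.util.concurrent.CompletableFuture;

import org.apache.distributedlog.BKLogSegmentWriter;   // package name assumed
import org.apache.distributedlog.DLMTestUtil;          // test utility from the snippet, package assumed
import org.apache.distributedlog.DLSN;
import org.apache.distributedlog.LogRecord;

final class ControlRecordSketch {

    // Write a control record: it is transmitted and assigned a DLSN, but the
    // writer does not advance the last tx id or the segment position for it.
    static CompletableFuture<DLSN> writeControlRecord(BKLogSegmentWriter writer, long txId) {
        LogRecord record = DLMTestUtil.getLogRecordInstance(txId);
        record.setControl();
        return writer.asyncWrite(record);
    }
}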

Example 4 with ZKDistributedLock

Use of org.apache.distributedlog.lock.ZKDistributedLock in project bookkeeper by apache.

From class TestBKLogSegmentWriter, method testAbortShouldFailAllWrites.

/**
 * Abort should wait for outstanding transmits to be completed and cancel buffered data.
 *
 * @throws Exception
 */
@Test(timeout = 60000)
public void testAbortShouldFailAllWrites() throws Exception {
    DistributedLogConfiguration confLocal = newLocalConf();
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(Integer.MAX_VALUE);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    ZKDistributedLock lock = createLock("/test/lock-" + runtime.getMethodName(), zkc, true);
    BKLogSegmentWriter writer = createLogSegmentWriter(confLocal, 0L, -1L, lock);
    // Use another lock to wait for writer releasing lock
    ZKDistributedLock lock0 = createLock("/test/lock-" + runtime.getMethodName(), zkc0, false);
    CompletableFuture<ZKDistributedLock> lockFuture0 = lock0.asyncAcquire();
    // add 10 records
    int numRecords = 10;
    List<CompletableFuture<DLSN>> futureList = new ArrayList<CompletableFuture<DLSN>>(numRecords);
    for (int i = 0; i < numRecords; i++) {
        futureList.add(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(i)));
    }
    assertEquals("Last tx id should be " + (numRecords - 1), numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should be -1", -1L, writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should be " + DLSN.InvalidDLSN, DLSN.InvalidDLSN, writer.getLastDLSN());
    assertEquals("Position should be " + numRecords, numRecords, writer.getPositionWithinLogSegment());
    final CountDownLatch deferLatch = new CountDownLatch(1);
    writer.getFuturePool().submit(() -> {
        try {
            deferLatch.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            LOG.warn("Interrupted on deferring completion : ", e);
        }
    });
    // transmit the buffered data
    Utils.ioResult(writer.flush());
    // add another 10 records
    List<CompletableFuture<DLSN>> anotherFutureList = new ArrayList<CompletableFuture<DLSN>>(numRecords);
    for (int i = numRecords; i < 2 * numRecords; i++) {
        anotherFutureList.add(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(i)));
    }
    assertEquals("Last tx id should become " + (2 * numRecords - 1), 2 * numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should become " + (numRecords - 1), (long) (numRecords - 1), writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should still be " + DLSN.InvalidDLSN, DLSN.InvalidDLSN, writer.getLastDLSN());
    assertEquals("Position should become " + (2 * numRecords), 2 * numRecords, writer.getPositionWithinLogSegment());
    // abort the writer: it waits for outstanding transmits and aborts buffered data
    abortWriterAndLock(writer, lock);
    Utils.ioResult(lockFuture0);
    lock0.checkOwnership();
    // release defer latch so completion would go through
    deferLatch.countDown();
    List<DLSN> dlsns = Utils.ioResult(FutureUtils.collect(futureList));
    assertEquals("All first 10 records should be written", numRecords, dlsns.size());
    for (int i = 0; i < numRecords; i++) {
        DLSN dlsn = dlsns.get(i);
        assertEquals("Incorrent ledger sequence number", 0L, dlsn.getLogSegmentSequenceNo());
        assertEquals("Incorrent entry id", 0L, dlsn.getEntryId());
        assertEquals("Inconsistent slot id", i, dlsn.getSlotId());
    }
    for (int i = 0; i < numRecords; i++) {
        try {
            Utils.ioResult(anotherFutureList.get(i));
            fail("Should be aborted record " + (numRecords + i) + " with transmit exception");
        } catch (WriteCancelledException wce) {
        // writes should be cancelled.
        }
    }
    assertEquals("Last tx id should still be " + (2 * numRecords - 1), 2 * numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should be still " + (numRecords - 1), (long) (numRecords - 1), writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should become " + futureList.get(futureList.size() - 1), dlsns.get(futureList.size() - 1), writer.getLastDLSN());
    assertEquals("Position should become " + 2 * numRecords, 2 * numRecords, writer.getPositionWithinLogSegment());
    // check that only one entry was written
    LedgerHandle lh = getLedgerHandle(writer);
    LedgerHandle readLh = openLedgerNoRecovery(lh);
    assertTrue("Ledger " + lh.getId() + " should not be closed", readLh.isClosed());
    assertEquals("Only one entry is written for ledger " + lh.getId(), 0L, lh.getLastAddPushed());
    assertEquals("Only one entry is written for ledger " + lh.getId(), 0L, readLh.getLastAddConfirmed());
}
Also used: LedgerHandle (org.apache.bookkeeper.client.LedgerHandle), ArrayList (java.util.ArrayList), CountDownLatch (java.util.concurrent.CountDownLatch), ZKDistributedLock (org.apache.distributedlog.lock.ZKDistributedLock), CompletableFuture (java.util.concurrent.CompletableFuture), WriteCancelledException (org.apache.distributedlog.exceptions.WriteCancelledException), Test (org.junit.Test)
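
A pattern worth pulling out of this test is the second ZKDistributedLock on the same lock path, created from a different ZooKeeper session: its asyncAcquire() future completes only once the writer releases the lock, which is how the test observes that abort actually let go of it. Below is a minimal sketch of that probe, using only the asyncAcquire()/checkOwnership() calls shown above; the class and method names are made up, and the Utils package is assumed.

import java.util.concurrent.CompletableFuture;

import org.apache.distributedlog.lock.ZKDistributedLock;
import org.apache.distributedlog.util.Utils;   // package name assumed

final class LockReleaseProbe {

    // 'waiter' is a second ZKDistributedLock on the same lock path, created from a
    // different ZooKeeper session (as createLock(..., zkc0, false) does in the test).
    static void awaitOwnership(ZKDistributedLock waiter) throws Exception {
        CompletableFuture<ZKDistributedLock> acquired = waiter.asyncAcquire();
        // Completes only once the previous owner releases the lock, e.g. when
        // closeWriterAndLock(...) or abortWriterAndLock(...) runs on the writer side.
        Utils.ioResult(acquired);
        waiter.checkOwnership();   // throws if this session does not own the lock
    }
}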

Example 5 with ZKDistributedLock

Use of org.apache.distributedlog.lock.ZKDistributedLock in project bookkeeper by apache.

From class TestBKLogSegmentWriter, method testAbortShouldNotFlush.

/**
 * Aborting a log segment writer should just abort pending writes and not flush buffered data.
 *
 * @throws Exception
 */
@Test(timeout = 60000)
public void testAbortShouldNotFlush() throws Exception {
    DistributedLogConfiguration confLocal = newLocalConf();
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(Integer.MAX_VALUE);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    ZKDistributedLock lock = createLock("/test/lock-" + runtime.getMethodName(), zkc, true);
    BKLogSegmentWriter writer = createLogSegmentWriter(confLocal, 0L, -1L, lock);
    // Use another lock to wait for writer releasing lock
    ZKDistributedLock lock0 = createLock("/test/lock-" + runtime.getMethodName(), zkc0, false);
    CompletableFuture<ZKDistributedLock> lockFuture0 = lock0.asyncAcquire();
    // add 10 records
    int numRecords = 10;
    List<CompletableFuture<DLSN>> futureList = new ArrayList<CompletableFuture<DLSN>>(numRecords);
    for (int i = 0; i < numRecords; i++) {
        futureList.add(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(i)));
    }
    assertEquals("Last tx id should be " + (numRecords - 1), numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should be -1", -1L, writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should be " + DLSN.InvalidDLSN, DLSN.InvalidDLSN, writer.getLastDLSN());
    assertEquals("Position should be " + numRecords, 10, writer.getPositionWithinLogSegment());
    // abort the writer: buffered data should be dropped, not flushed, and the lock released
    abortWriterAndLock(writer, lock);
    Utils.ioResult(lockFuture0);
    lock0.checkOwnership();
    assertEquals("Last tx id should still be " + (numRecords - 1), numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should still be " + (numRecords - 1), -1L, writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should still be " + DLSN.InvalidDLSN, DLSN.InvalidDLSN, writer.getLastDLSN());
    assertEquals("Position should still be " + numRecords, 10, writer.getPositionWithinLogSegment());
    for (int i = 0; i < numRecords; i++) {
        try {
            Utils.ioResult(futureList.get(i));
            fail("Should be aborted record " + i + " with transmit exception");
        } catch (WriteCancelledException wce) {
            assertTrue("Record " + i + " should be aborted because of ledger fenced", wce.getCause() instanceof BKTransmitException);
            BKTransmitException bkte = (BKTransmitException) wce.getCause();
            assertEquals("Record " + i + " should be aborted", BKException.Code.InterruptedException, bkte.getBKResultCode());
        }
    }
    // check no entries were written
    LedgerHandle lh = getLedgerHandle(writer);
    LedgerHandle readLh = openLedgerNoRecovery(lh);
    assertTrue("Ledger " + lh.getId() + " should not be closed", readLh.isClosed());
    assertEquals("There should be no entries in ledger " + lh.getId(), LedgerHandle.INVALID_ENTRY_ID, readLh.getLastAddConfirmed());
}
Also used: CompletableFuture (java.util.concurrent.CompletableFuture), LedgerHandle (org.apache.bookkeeper.client.LedgerHandle), WriteCancelledException (org.apache.distributedlog.exceptions.WriteCancelledException), BKTransmitException (org.apache.distributedlog.exceptions.BKTransmitException), ArrayList (java.util.ArrayList), ZKDistributedLock (org.apache.distributedlog.lock.ZKDistributedLock), Test (org.junit.Test)
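
The contrast with Example 4 is that nothing was transmitted before the abort, so every buffered write fails with a WriteCancelledException (wrapping a BKTransmitException) and the ledger ends up with no entries at all. Below is a minimal sketch of draining a batch of write futures and separating completed writes from cancelled ones; the class and method names are hypothetical, while the types come from the snippet.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;

import org.apache.distributedlog.DLSN;
import org.apache.distributedlog.exceptions.WriteCancelledException;
import org.apache.distributedlog.util.Utils;   // package name assumed

final class DrainWrites {

    // Wait for every write future, collect the DLSNs of successful writes, and
    // count the ones that were cancelled because the writer was aborted.
    static List<DLSN> drain(List<CompletableFuture<DLSN>> writeFutures) throws Exception {
        List<DLSN> completed = new ArrayList<>();
        int cancelled = 0;
        for (CompletableFuture<DLSN> future : writeFutures) {
            try {
                completed.add(Utils.ioResult(future));
            } catch (WriteCancelledException wce) {
                // Buffered or in-flight write dropped by abort; it never reached the ledger.
                cancelled++;
            }
        }
        System.out.println(cancelled + " writes were cancelled by abort");
        return completed;
    }
}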

Aggregations

ZKDistributedLock (org.apache.distributedlog.lock.ZKDistributedLock): 11
Test (org.junit.Test): 9
ArrayList (java.util.ArrayList): 6
CompletableFuture (java.util.concurrent.CompletableFuture): 6
LedgerHandle (org.apache.bookkeeper.client.LedgerHandle): 5
BKTransmitException (org.apache.distributedlog.exceptions.BKTransmitException): 4
WriteCancelledException (org.apache.distributedlog.exceptions.WriteCancelledException): 3
WriteException (org.apache.distributedlog.exceptions.WriteException): 2
CountDownLatch (java.util.concurrent.CountDownLatch): 1
EndOfStreamException (org.apache.distributedlog.exceptions.EndOfStreamException): 1
ZKException (org.apache.distributedlog.exceptions.ZKException): 1
BKLogSegmentEntryWriter (org.apache.distributedlog.impl.logsegment.BKLogSegmentEntryWriter): 1
SessionLockFactory (org.apache.distributedlog.lock.SessionLockFactory): 1
ZKSessionLockFactory (org.apache.distributedlog.lock.ZKSessionLockFactory): 1
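
Taken together, the examples follow one lifecycle for ZKDistributedLock: acquire it asynchronously, verify ownership before critical operations, and release it when the writer is closed or aborted (the tests do the release inside the closeWriterAndLock/abortWriterAndLock helpers). Below is a minimal sketch of that lifecycle, assuming an already constructed lock (construction goes through a SessionLockFactory such as ZKSessionLockFactory, per the list above) and assuming asyncClose() from AsyncCloseable is the release call.

import java.util.concurrent.CompletableFuture;

import org.apache.distributedlog.lock.ZKDistributedLock;
import org.apache.distributedlog.util.Utils;   // package name assumed

final class LockLifecycleSketch {

    // doWork stands in for whatever must run while the lock is held,
    // e.g. writing to a BKLogSegmentWriter as in the tests above.
    static void runUnderLock(ZKDistributedLock lock, Runnable doWork) throws Exception {
        CompletableFuture<ZKDistributedLock> acquireFuture = lock.asyncAcquire();
        Utils.ioResult(acquireFuture);              // wait until this session owns the lock
        try {
            lock.checkOwnership();                  // throws if ownership was lost (e.g. session expiry)
            doWork.run();
        } finally {
            Utils.ioResult(lock.asyncClose());      // release the lock (asyncClose() assumed from AsyncCloseable)
        }
    }
}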