use of org.apache.distributedlog.exceptions.WriteCancelledException in project bookkeeper by apache.
the class BKLogSegmentWriter method addCompleteDeferredProcessing.
private void addCompleteDeferredProcessing(final BKTransmitPacket transmitPacket, final long entryId, final int rc) {
    boolean cancelPendingPromises = false;
    EntryBuffer recordSet = transmitPacket.getRecordSet();
    synchronized (this) {
        if (transmitResultUpdater.compareAndSet(this, BKException.Code.OK, rc)) {
            // If this is the first time we are setting an error code in the transmitResult then
            // we must cancel pending promises; once this error has been set, more records will not
            // be enqueued; they will be failed with WriteException
            cancelPendingPromises = (BKException.Code.OK != rc);
        } else {
            LOG.warn("Log segment {} entryId {}: Tried to set transmit result to ({}) but is already ({})",
                new Object[] { fullyQualifiedLogSegment, entryId, rc, transmitResultUpdater.get(this) });
        }
        if (transmitResultUpdater.get(this) != BKException.Code.OK) {
            if (recordSet.hasUserRecords()) {
                transmitDataPacketSize.registerFailedEvent(recordSet.getNumBytes(), TimeUnit.MICROSECONDS);
            }
        } else {
            // If we transmitted user data, request a control flush so the next pass makes
            // these writes visible by advancing the lastAck
            if (recordSet.hasUserRecords()) {
                transmitDataPacketSize.registerSuccessfulEvent(recordSet.getNumBytes(), TimeUnit.MICROSECONDS);
                controlFlushNeeded = true;
                if (immediateFlushEnabled) {
                    if (0 == minDelayBetweenImmediateFlushMs) {
                        backgroundFlush(true);
                    } else {
                        scheduleFlushWithDelayIfNeeded(new Callable<Void>() {
                            @Override
                            public Void call() throws Exception {
                                backgroundFlush(true);
                                return null;
                            }
                        }, immFlushSchedFutureRefUpdater);
                    }
                }
            }
        }
        // update last dlsn before satisfying future
        if (BKException.Code.OK == transmitResultUpdater.get(this)) {
            DLSN lastDLSNInPacket = recordSet.finalizeTransmit(logSegmentSequenceNumber, entryId);
            if (recordSet.hasUserRecords()) {
                if (null != lastDLSNInPacket && lastDLSN.compareTo(lastDLSNInPacket) < 0) {
                    lastDLSN = lastDLSNInPacket;
                }
            }
        }
    }
    if (BKException.Code.OK == transmitResultUpdater.get(this)) {
        recordSet.completeTransmit(logSegmentSequenceNumber, entryId);
    } else {
        recordSet.abortTransmit(Utils.transmitException(transmitResultUpdater.get(this)));
    }
    if (cancelPendingPromises) {
        // Since the writer is in a bad state no more packets will be transmitted, and it's safe to
        // assign a new empty packet. This is to avoid a race with closeInternal which may also
        // try to cancel the current packet.
        final BKTransmitPacket packetCurrentSaved;
        synchronized (this) {
            packetCurrentSaved = new BKTransmitPacket(recordSetWriter);
            recordSetWriter = newRecordSetWriter();
        }
        packetCurrentSaved.getRecordSet().abortTransmit(
            new WriteCancelledException(streamName, Utils.transmitException(transmitResultUpdater.get(this))));
    }
}
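The net effect for callers is that once the transmit result holds a non-OK code, the record set that was in flight is aborted with a BKTransmitException, while anything still buffered is aborted with a WriteCancelledException wrapping that same transmit failure. A minimal, hypothetical sketch of unwrapping that cause chain on the caller side (the class and method names are illustrative, not part of the project):

import org.apache.bookkeeper.client.BKException;
import org.apache.distributedlog.exceptions.BKTransmitException;
import org.apache.distributedlog.exceptions.WriteCancelledException;

// Hypothetical helper: recover the bookkeeper result code behind a failed write future.
final class TransmitFailures {

    static int bkResultCodeOf(Throwable cause) {
        // Buffered records that never reached the failing transmit are cancelled;
        // the original transmit failure (if any) rides along as the cause.
        if (cause instanceof WriteCancelledException
                && cause.getCause() instanceof BKTransmitException) {
            return ((BKTransmitException) cause.getCause()).getBKResultCode();
        }
        // Records in the failing transmit itself carry the code directly.
        if (cause instanceof BKTransmitException) {
            return ((BKTransmitException) cause).getBKResultCode();
        }
        return BKException.Code.OK; // not a transmit-related failure
    }
}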
use of org.apache.distributedlog.exceptions.WriteCancelledException in project bookkeeper by apache.
the class TestBKLogSegmentWriter method testAbortShouldFailAllWrites.
/**
* Abort should wait for outstanding transmits to be completed and cancel buffered data.
*
* @throws Exception
*/
@Test(timeout = 60000)
public void testAbortShouldFailAllWrites() throws Exception {
    DistributedLogConfiguration confLocal = newLocalConf();
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(Integer.MAX_VALUE);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    ZKDistributedLock lock = createLock("/test/lock-" + runtime.getMethodName(), zkc, true);
    BKLogSegmentWriter writer = createLogSegmentWriter(confLocal, 0L, -1L, lock);
    // Use another lock to wait for the writer to release its lock
    ZKDistributedLock lock0 = createLock("/test/lock-" + runtime.getMethodName(), zkc0, false);
    CompletableFuture<ZKDistributedLock> lockFuture0 = lock0.asyncAcquire();
    // add 10 records
    int numRecords = 10;
    List<CompletableFuture<DLSN>> futureList = new ArrayList<CompletableFuture<DLSN>>(numRecords);
    for (int i = 0; i < numRecords; i++) {
        futureList.add(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(i)));
    }
    assertEquals("Last tx id should be " + (numRecords - 1), numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should be -1", -1L, writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should be " + DLSN.InvalidDLSN, DLSN.InvalidDLSN, writer.getLastDLSN());
    assertEquals("Position should be " + numRecords, numRecords, writer.getPositionWithinLogSegment());
    final CountDownLatch deferLatch = new CountDownLatch(1);
    writer.getFuturePool().submit(() -> {
        try {
            deferLatch.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            LOG.warn("Interrupted on deferring completion : ", e);
        }
    });
    // transmit the buffered data
    Utils.ioResult(writer.flush());
    // add another 10 records
    List<CompletableFuture<DLSN>> anotherFutureList = new ArrayList<CompletableFuture<DLSN>>(numRecords);
    for (int i = numRecords; i < 2 * numRecords; i++) {
        anotherFutureList.add(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(i)));
    }
    assertEquals("Last tx id should become " + (2 * numRecords - 1), 2 * numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should become " + (numRecords - 1), (long) (numRecords - 1), writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should still be " + DLSN.InvalidDLSN, DLSN.InvalidDLSN, writer.getLastDLSN());
    assertEquals("Position should become " + (2 * numRecords), 2 * numRecords, writer.getPositionWithinLogSegment());
    // abort the writer: it waits for outstanding transmits and aborts buffered data
    abortWriterAndLock(writer, lock);
    Utils.ioResult(lockFuture0);
    lock0.checkOwnership();
    // release the defer latch so completions can go through
    deferLatch.countDown();
    List<DLSN> dlsns = Utils.ioResult(FutureUtils.collect(futureList));
    assertEquals("All first 10 records should be written", numRecords, dlsns.size());
    for (int i = 0; i < numRecords; i++) {
        DLSN dlsn = dlsns.get(i);
        assertEquals("Incorrect ledger sequence number", 0L, dlsn.getLogSegmentSequenceNo());
        assertEquals("Incorrect entry id", 0L, dlsn.getEntryId());
        assertEquals("Inconsistent slot id", i, dlsn.getSlotId());
    }
    for (int i = 0; i < numRecords; i++) {
        try {
            Utils.ioResult(anotherFutureList.get(i));
            fail("Record " + (numRecords + i) + " should have been aborted with a transmit exception");
        } catch (WriteCancelledException wce) {
            // writes should be cancelled.
        }
    }
    assertEquals("Last tx id should still be " + (2 * numRecords - 1), 2 * numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should still be " + (numRecords - 1), (long) (numRecords - 1), writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should become " + dlsns.get(futureList.size() - 1), dlsns.get(futureList.size() - 1), writer.getLastDLSN());
    assertEquals("Position should become " + 2 * numRecords, 2 * numRecords, writer.getPositionWithinLogSegment());
    // check that only one entry was written
    LedgerHandle lh = getLedgerHandle(writer);
    LedgerHandle readLh = openLedgerNoRecovery(lh);
    assertTrue("Ledger " + lh.getId() + " should be closed", readLh.isClosed());
    assertEquals("Only one entry is written for ledger " + lh.getId(), 0L, lh.getLastAddPushed());
    assertEquals("Only one entry is written for ledger " + lh.getId(), 0L, readLh.getLastAddConfirmed());
}
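After an abort like the one exercised above, an application holding futures from both sides of the last successful flush sees a mix of acknowledged DLSNs and cancellations. A small, hypothetical way to partition them without throwing (the class and method names are illustrative, not part of the project):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import org.apache.distributedlog.DLSN;
import org.apache.distributedlog.exceptions.WriteCancelledException;

// Hypothetical helper: split completed write futures into acknowledged and cancelled.
final class WriteOutcomes {
    final List<DLSN> acknowledged = new ArrayList<>();
    final List<Throwable> cancelled = new ArrayList<>();

    static WriteOutcomes partition(List<CompletableFuture<DLSN>> writes) {
        WriteOutcomes outcomes = new WriteOutcomes();
        for (CompletableFuture<DLSN> f : writes) {
            f.handle((dlsn, cause) -> {
                // Dependent stages may wrap the original failure in a CompletionException.
                Throwable c = (cause instanceof CompletionException && null != cause.getCause())
                        ? cause.getCause() : cause;
                if (null == c) {
                    outcomes.acknowledged.add(dlsn);
                } else if (c instanceof WriteCancelledException) {
                    outcomes.cancelled.add(c);
                }
                return null;
            }).join();
        }
        return outcomes;
    }
}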
use of org.apache.distributedlog.exceptions.WriteCancelledException in project bookkeeper by apache.
the class TestBKLogSegmentWriter method testAbortShouldNotFlush.
/**
* Aborting a log segment writer should just abort pending writes and not flush buffered data.
*
* @throws Exception
*/
@Test(timeout = 60000)
public void testAbortShouldNotFlush() throws Exception {
    DistributedLogConfiguration confLocal = newLocalConf();
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(Integer.MAX_VALUE);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    ZKDistributedLock lock = createLock("/test/lock-" + runtime.getMethodName(), zkc, true);
    BKLogSegmentWriter writer = createLogSegmentWriter(confLocal, 0L, -1L, lock);
    // Use another lock to wait for the writer to release its lock
    ZKDistributedLock lock0 = createLock("/test/lock-" + runtime.getMethodName(), zkc0, false);
    CompletableFuture<ZKDistributedLock> lockFuture0 = lock0.asyncAcquire();
    // add 10 records
    int numRecords = 10;
    List<CompletableFuture<DLSN>> futureList = new ArrayList<CompletableFuture<DLSN>>(numRecords);
    for (int i = 0; i < numRecords; i++) {
        futureList.add(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(i)));
    }
    assertEquals("Last tx id should be " + (numRecords - 1), numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should be -1", -1L, writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should be " + DLSN.InvalidDLSN, DLSN.InvalidDLSN, writer.getLastDLSN());
    assertEquals("Position should be " + numRecords, numRecords, writer.getPositionWithinLogSegment());
    // abort the writer: it should not flush buffered data, but it should release the lock
    abortWriterAndLock(writer, lock);
    Utils.ioResult(lockFuture0);
    lock0.checkOwnership();
    assertEquals("Last tx id should still be " + (numRecords - 1), numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should still be -1", -1L, writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should still be " + DLSN.InvalidDLSN, DLSN.InvalidDLSN, writer.getLastDLSN());
    assertEquals("Position should still be " + numRecords, numRecords, writer.getPositionWithinLogSegment());
    for (int i = 0; i < numRecords; i++) {
        try {
            Utils.ioResult(futureList.get(i));
            fail("Record " + i + " should have been aborted with a transmit exception");
        } catch (WriteCancelledException wce) {
            assertTrue("Record " + i + " should be aborted with a BKTransmitException cause", wce.getCause() instanceof BKTransmitException);
            BKTransmitException bkte = (BKTransmitException) wce.getCause();
            assertEquals("Record " + i + " should be aborted", BKException.Code.InterruptedException, bkte.getBKResultCode());
        }
    }
    // check no entries were written
    LedgerHandle lh = getLedgerHandle(writer);
    LedgerHandle readLh = openLedgerNoRecovery(lh);
    assertTrue("Ledger " + lh.getId() + " should be closed", readLh.isClosed());
    assertEquals("There should be no entries in ledger " + lh.getId(), LedgerHandle.INVALID_ENTRY_ID, readLh.getLastAddConfirmed());
}
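The final check relies on opening the ledger without recovery so that the reader does not advance or seal anything itself. A standalone, hypothetical version of that emptiness check against a BookKeeper client is sketched below (the digest type and empty password are assumptions and must match whatever created the ledger):

import java.nio.charset.StandardCharsets;
import org.apache.bookkeeper.client.BookKeeper;
import org.apache.bookkeeper.client.LedgerHandle;

// Hypothetical helper: verify a ledger has no confirmed entries without triggering recovery.
final class LedgerChecks {

    static boolean hasNoConfirmedEntries(BookKeeper bk, long ledgerId) throws Exception {
        LedgerHandle readLh = bk.openLedgerNoRecovery(
                ledgerId, BookKeeper.DigestType.CRC32, "".getBytes(StandardCharsets.UTF_8));
        try {
            // LAC stays at INVALID_ENTRY_ID (-1) when no entry was ever acknowledged.
            return LedgerHandle.INVALID_ENTRY_ID == readLh.getLastAddConfirmed();
        } finally {
            readLh.close();
        }
    }
}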
use of org.apache.distributedlog.exceptions.WriteCancelledException in project bookkeeper by apache.
the class BKAsyncLogWriter method appendCancelledFutures.
private void appendCancelledFutures(List<CompletableFuture<DLSN>> futures, int numToAdd) {
    final WriteCancelledException cre = new WriteCancelledException(getStreamName());
    for (int i = 0; i < numToAdd; i++) {
        CompletableFuture<DLSN> cancelledFuture = FutureUtils.exception(cre);
        futures.add(cancelledFuture);
    }
}
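These futures are already completed exceptionally when they are handed back, so a caller that joins one sees the WriteCancelledException immediately rather than waiting on any I/O. A tiny, hypothetical illustration using plain CompletableFuture (the stream name and demo class are made up for the example):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import org.apache.distributedlog.DLSN;
import org.apache.distributedlog.exceptions.WriteCancelledException;

// Hypothetical demo: pre-failed futures surface their exception synchronously on join().
final class CancelledFutureDemo {
    public static void main(String[] args) {
        WriteCancelledException cre = new WriteCancelledException("demo-stream");
        List<CompletableFuture<DLSN>> futures = new ArrayList<>();
        for (int i = 0; i < 3; i++) {
            CompletableFuture<DLSN> cancelled = new CompletableFuture<>();
            cancelled.completeExceptionally(cre);
            futures.add(cancelled);
        }
        for (CompletableFuture<DLSN> f : futures) {
            try {
                f.join();
            } catch (CompletionException ce) {
                // join() wraps the original failure in a CompletionException.
                System.out.println("cancelled: " + ce.getCause());
            }
        }
    }
}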
use of org.apache.distributedlog.exceptions.WriteCancelledException in project bookkeeper by apache.
the class TestBKLogSegmentWriter method testCloseShouldNotFlushIfInErrorState.
/**
* Closing a log segment writer that is already in an error state should not flush buffered data.
*
* @throws Exception
*/
void testCloseShouldNotFlushIfInErrorState(int rcToFailComplete) throws Exception {
    DistributedLogConfiguration confLocal = newLocalConf();
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(Integer.MAX_VALUE);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    ZKDistributedLock lock = createLock("/test/lock-" + runtime.getMethodName(), zkc, true);
    BKLogSegmentWriter writer = createLogSegmentWriter(confLocal, 0L, -1L, lock);
    // Use another lock to wait for the writer to release its lock
    ZKDistributedLock lock0 = createLock("/test/lock-" + runtime.getMethodName(), zkc0, false);
    CompletableFuture<ZKDistributedLock> lockFuture0 = lock0.asyncAcquire();
    // add 10 records
    int numRecords = 10;
    List<CompletableFuture<DLSN>> futureList = new ArrayList<CompletableFuture<DLSN>>(numRecords);
    for (int i = 0; i < numRecords; i++) {
        futureList.add(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(i)));
    }
    assertEquals("Last tx id should be " + (numRecords - 1), numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should be -1", -1L, writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should be " + DLSN.InvalidDLSN, DLSN.InvalidDLSN, writer.getLastDLSN());
    assertEquals("Position should be " + numRecords, numRecords, writer.getPositionWithinLogSegment());
    writer.setTransmitResult(rcToFailComplete);
    // closing the writer should release the lock but not flush data
    try {
        closeWriterAndLock(writer, lock);
        fail("Closing a log segment writer in error state should throw an exception");
    } catch (BKTransmitException bkte) {
        assertEquals("Inconsistent rc is thrown", rcToFailComplete, bkte.getBKResultCode());
    }
    Utils.ioResult(lockFuture0);
    lock0.checkOwnership();
    assertEquals("Last tx id should still be " + (numRecords - 1), numRecords - 1, writer.getLastTxId());
    assertEquals("Last acked tx id should still be -1", -1L, writer.getLastTxIdAcknowledged());
    assertEquals("Last DLSN should still be " + DLSN.InvalidDLSN, DLSN.InvalidDLSN, writer.getLastDLSN());
    assertEquals("Position should still be " + numRecords, numRecords, writer.getPositionWithinLogSegment());
    for (int i = 0; i < numRecords; i++) {
        try {
            Utils.ioResult(futureList.get(i));
            fail("Record " + i + " should have been aborted with a transmit exception");
        } catch (WriteCancelledException wce) {
            assertTrue("Record " + i + " should be aborted with a BKTransmitException cause", wce.getCause() instanceof BKTransmitException);
            BKTransmitException bkte = (BKTransmitException) wce.getCause();
            assertEquals("Record " + i + " should be aborted", rcToFailComplete, bkte.getBKResultCode());
        }
    }
    // check no entries were written
    LedgerHandle lh = getLedgerHandle(writer);
    LedgerHandle readLh = openLedgerNoRecovery(lh);
    assertFalse("Ledger " + lh.getId() + " should not be closed", readLh.isClosed());
    assertEquals("There should be no entries in ledger " + lh.getId(), LedgerHandle.INVALID_ENTRY_ID, readLh.getLastAddConfirmed());
}
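This helper is parameterized by the result code used to poison the writer. A hypothetical driver for it might look like the following (the test name and the choice of LedgerFencedException are assumptions for illustration):

@Test(timeout = 60000)
public void testCloseShouldNotFlushIfFenced() throws Exception {
    // Hypothetical driver: poison the writer with a fenced-ledger result code
    // and verify that close fails without flushing buffered data.
    testCloseShouldNotFlushIfInErrorState(BKException.Code.LedgerFencedException);
}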