use of org.apache.distributedlog.exceptions.WriteException in project bookkeeper by apache.
the class BKLogSegmentWriter method flushIfNeeded.
// Based on transmit buffer size, immediate flush, etc., should we flush the current
// packet now.
void flushIfNeeded() throws BKTransmitException, WriteException, InvalidEnvelopedEntryException, LockingException, FlushException {
    if (outstandingBytes > transmissionThreshold) {
        // If flush delay is disabled, flush immediately, else schedule appropriately.
        if (0 == minDelayBetweenImmediateFlushMs) {
            checkStateAndTransmit();
        } else {
            scheduleFlushWithDelayIfNeeded(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    checkStateAndTransmit();
                    return null;
                }
            }, transmitSchedFutureRefUpdater);
            // Timing here is not very important -- the last flush failed and we should
            // indicate this to the caller. The next flush may succeed and unset the
            // scheduledFlushException, in which case the next write will succeed (if the caller
            // hasn't already closed the writer).
            Exception exec = scheduledFlushExceptionUpdater.get(this);
            if (exec != null) {
                throw new FlushException("Last flush encountered an error while writing data to the backend",
                        getLastTxId(), getLastTxIdAcknowledged(), exec);
            }
        }
    }
}
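The interesting part of this method is that a failure from a previously scheduled background flush is parked in scheduledFlushException and rethrown on the caller's next flush, rather than being lost. A minimal, self-contained sketch of that report-the-last-scheduled-failure idea follows; DeferredFlushErrorSketch and its method names are hypothetical, not DistributedLog APIs.

import java.util.concurrent.atomic.AtomicReference;

// Hypothetical sketch: a background flush records its failure; the next
// foreground flush surfaces that failure to the caller instead of dropping it.
public class DeferredFlushErrorSketch {

    private final AtomicReference<Exception> scheduledFlushException = new AtomicReference<>();

    // Called from the scheduler when an asynchronous flush fails.
    void onScheduledFlushFailure(Exception cause) {
        // Keep the first error; a later successful flush clears it.
        scheduledFlushException.compareAndSet(null, cause);
    }

    // Called from the scheduler when an asynchronous flush succeeds.
    void onScheduledFlushSuccess() {
        scheduledFlushException.set(null);
    }

    // Called on the write path, mirroring flushIfNeeded() above.
    void flushIfNeeded() throws Exception {
        Exception exec = scheduledFlushException.get();
        if (exec != null) {
            throw new Exception("Last flush encountered an error while writing data to the backend", exec);
        }
    }
}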
use of org.apache.distributedlog.exceptions.WriteException in project bookkeeper by apache.
the class BKLogSegmentWriter method addCompleteDeferredProcessing.
private void addCompleteDeferredProcessing(final BKTransmitPacket transmitPacket, final long entryId, final int rc) {
    boolean cancelPendingPromises = false;
    EntryBuffer recordSet = transmitPacket.getRecordSet();
    synchronized (this) {
        if (transmitResultUpdater.compareAndSet(this, BKException.Code.OK, rc)) {
            // If this is the first time we are setting an error code in the transmitResult then
            // we must cancel pending promises; once this error has been set, more records will not
            // be enqueued; they will be failed with WriteException.
            cancelPendingPromises = (BKException.Code.OK != rc);
        } else {
            LOG.warn("Log segment {} entryId {}: Tried to set transmit result to ({}) but is already ({})",
                    new Object[] { fullyQualifiedLogSegment, entryId, rc, transmitResultUpdater.get(this) });
        }
        if (transmitResultUpdater.get(this) != BKException.Code.OK) {
            if (recordSet.hasUserRecords()) {
                transmitDataPacketSize.registerFailedEvent(recordSet.getNumBytes(), TimeUnit.MICROSECONDS);
            }
        } else {
            // If we flushed user data, schedule a control flush so the next pass
            // makes the previous writes visible by advancing the lastAck.
            if (recordSet.hasUserRecords()) {
                transmitDataPacketSize.registerSuccessfulEvent(recordSet.getNumBytes(), TimeUnit.MICROSECONDS);
                controlFlushNeeded = true;
                if (immediateFlushEnabled) {
                    if (0 == minDelayBetweenImmediateFlushMs) {
                        backgroundFlush(true);
                    } else {
                        scheduleFlushWithDelayIfNeeded(new Callable<Void>() {
                            @Override
                            public Void call() throws Exception {
                                backgroundFlush(true);
                                return null;
                            }
                        }, immFlushSchedFutureRefUpdater);
                    }
                }
            }
        }
        // Update the last DLSN before satisfying the future.
        if (BKException.Code.OK == transmitResultUpdater.get(this)) {
            DLSN lastDLSNInPacket = recordSet.finalizeTransmit(logSegmentSequenceNumber, entryId);
            if (recordSet.hasUserRecords()) {
                if (null != lastDLSNInPacket && lastDLSN.compareTo(lastDLSNInPacket) < 0) {
                    lastDLSN = lastDLSNInPacket;
                }
            }
        }
    }
    if (BKException.Code.OK == transmitResultUpdater.get(this)) {
        recordSet.completeTransmit(logSegmentSequenceNumber, entryId);
    } else {
        recordSet.abortTransmit(Utils.transmitException(transmitResultUpdater.get(this)));
    }
    if (cancelPendingPromises) {
        // Since the writer is in a bad state, no more packets will be transmitted, and it is safe to
        // assign a new empty packet. This avoids a race with closeInternal, which may also
        // try to cancel the current packet.
        final BKTransmitPacket packetCurrentSaved;
        synchronized (this) {
            packetCurrentSaved = new BKTransmitPacket(recordSetWriter);
            recordSetWriter = newRecordSetWriter();
        }
        packetCurrentSaved.getRecordSet().abortTransmit(
                new WriteCancelledException(streamName, Utils.transmitException(transmitResultUpdater.get(this))));
    }
}
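The guard at the top relies on an atomic compare-and-set from OK to the first failing result code, so exactly one callback wins the right to cancel pending promises and later callbacks only log a warning. A small, self-contained sketch of that first-error-wins pattern follows; TransmitResultSketch and recordFirstFailure are hypothetical names, not part of BookKeeper.

import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;

// Hypothetical sketch of the first-error-wins pattern behind transmitResultUpdater.
public class TransmitResultSketch {

    static final int OK = 0; // stand-in for BKException.Code.OK

    volatile int transmitResult = OK;

    private static final AtomicIntegerFieldUpdater<TransmitResultSketch> RESULT_UPDATER =
            AtomicIntegerFieldUpdater.newUpdater(TransmitResultSketch.class, "transmitResult");

    // Returns true only for the callback that records the first failure;
    // that caller alone is responsible for cancelling pending promises.
    // Later failures lose the CAS and the original error code is preserved.
    boolean recordFirstFailure(int rc) {
        return RESULT_UPDATER.compareAndSet(this, OK, rc) && rc != OK;
    }
}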
use of org.apache.distributedlog.exceptions.WriteException in project bookkeeper by apache.
the class EnvelopedEntryWriter method writeRecord.
@Override
public synchronized void writeRecord(LogRecord record, CompletableFuture<DLSN> transmitPromise)
        throws LogRecordTooLongException, WriteException {
    int logRecordSize = record.getPersistentSize();
    if (logRecordSize > MAX_LOGRECORD_SIZE) {
        throw new LogRecordTooLongException("Log Record of size " + logRecordSize
                + " written when only " + MAX_LOGRECORD_SIZE + " is allowed");
    }
    try {
        this.writer.writeOp(record);
        int numRecords = 1;
        if (!record.isControl()) {
            hasUserData = true;
        }
        if (record.isRecordSet()) {
            numRecords = LogRecordSet.numRecords(record);
        }
        count += numRecords;
        writeRequests.add(new WriteRequest(numRecords, transmitPromise));
        maxTxId = Math.max(maxTxId, record.getTransactionId());
    } catch (IOException e) {
        logger.error("Failed to append record to record set of {} : ", logName, e);
        throw new WriteException(logName, "Failed to append record to record set of " + logName);
    }
}
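The method validates the record size before touching the buffer, so an oversized record is rejected with LogRecordTooLongException while the record set stays intact, and only a failed append is converted into a WriteException. A minimal sketch of that validate-then-buffer ordering; RecordBufferSketch and the MAX_RECORD_SIZE value are assumptions, not taken from DistributedLog.

import java.io.ByteArrayOutputStream;
import java.io.IOException;

// Hypothetical sketch: reject oversized records before buffering so a bad
// record cannot leave the record set half-written.
public class RecordBufferSketch {

    static final int MAX_RECORD_SIZE = 1024 * 1024; // assumed 1 MB cap

    private final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    private int count = 0;

    public void append(byte[] record) throws IOException {
        // Size check first: the buffer is untouched if the record is too large.
        if (record.length > MAX_RECORD_SIZE) {
            throw new IllegalArgumentException(
                    "Record of size " + record.length + " written when only " + MAX_RECORD_SIZE + " is allowed");
        }
        buffer.write(record); // a real backing stream may throw IOException here
        count++;
    }
}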
use of org.apache.distributedlog.exceptions.WriteException in project bookkeeper by apache.
the class TestAppendOnlyStreamWriter method writeRecordsAndReadThemBackAfterInjectingAFailedTransmit.
long writeRecordsAndReadThemBackAfterInjectingAFailedTransmit(DistributedLogConfiguration conf, String name, int writeLen, int sectionWrites) throws Exception {
    BKDistributedLogManager dlm = (BKDistributedLogManager) createNewDLM(conf, name);
    URI uri = createDLMURI("/" + name);
    Utils.ioResult(dlm.getWriterMetadataStore().getLog(uri, name, true, true));
    // Log exists but is empty, better not throw.
    AppendOnlyStreamWriter writer = dlm.getAppendOnlyStreamWriter();
    byte[] byteStream = DLMTestUtil.repeatString("A", writeLen).getBytes();
    // Log a hundred entries. Offset is advanced accordingly.
    for (int i = 0; i < sectionWrites; i++) {
        writer.write(byteStream);
    }
    writer.force(false);
    long read = read(dlm, 1 * sectionWrites * writeLen);
    assertEquals(1 * sectionWrites * writeLen, read);
    // Now write another 100, but trigger failure during transmit.
    for (int i = 0; i < sectionWrites; i++) {
        writer.write(byteStream);
    }
    try {
        FailpointUtils.setFailpoint(FailpointUtils.FailPointName.FP_TransmitFailGetBuffer, FailpointUtils.FailPointActions.FailPointAction_Throw);
        writer.force(false);
        fail("should have thrown");
    } catch (WriteException we) {
    } finally {
        FailpointUtils.removeFailpoint(FailpointUtils.FailPointName.FP_TransmitFailGetBuffer);
    }
    // This actually fails because we try to close an errored-out stream.
    writer.write(byteStream);
    // Writing another 100 triggers the offset gap.
    for (int i = 0; i < sectionWrites; i++) {
        writer.write(byteStream);
    }
    writer.force(false);
    writer.markEndOfStream();
    writer.close();
    long length = dlm.getLastTxId();
    assertEquals(3 * sectionWrites * writeLen + 5, length);
    read = read(dlm, length);
    dlm.close();
    return read;
}
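The test arms the fault injection with setFailpoint just before the code under test and disarms it in a finally block, so a failing assertion cannot leak the failpoint into later tests. A hypothetical helper capturing that pattern is sketched below; withFailpoint is not a real DistributedLog API, only FailpointUtils.setFailpoint and removeFailpoint (used above) are.

import java.util.concurrent.Callable;

// Hypothetical helper: run a body with a failpoint armed, always disarming it.
static <T> T withFailpoint(FailpointUtils.FailPointName name,
                           FailpointUtils.FailPointActions action,
                           Callable<T> body) throws Exception {
    FailpointUtils.setFailpoint(name, action);
    try {
        return body.call();
    } finally {
        // Remove the failpoint even if the body throws, so later tests run clean.
        FailpointUtils.removeFailpoint(name);
    }
}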
use of org.apache.distributedlog.exceptions.WriteException in project bookkeeper by apache.
the class TestBKLogSegmentWriter method testNondurableWriteAfterLedgerIsFenced.
/**
 * A non-durable write should fail if the log segment is fenced.
 *
 * @throws Exception
 */
@Test(timeout = 60000)
public void testNondurableWriteAfterLedgerIsFenced() throws Exception {
    DistributedLogConfiguration confLocal = newLocalConf();
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(Integer.MAX_VALUE);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setDurableWriteEnabled(false);
    ZKDistributedLock lock = createLock("/test/lock-" + runtime.getMethodName(), zkc, true);
    BKLogSegmentWriter writer = createLogSegmentWriter(confLocal, 0L, -1L, lock);
    // fence the ledger
    fenceLedger(getLedgerHandle(writer));
    LogRecord record = DLMTestUtil.getLogRecordInstance(1);
    record.setControl();
    try {
        Utils.ioResult(writer.asyncWrite(record));
        fail("Should fail the writer if the log segment is already fenced");
    } catch (BKTransmitException bkte) {
        // expected
        assertEquals(BKException.Code.LedgerFencedException, bkte.getBKResultCode());
    }
    try {
        Utils.ioResult(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(2)));
        fail("Should fail the writer if the log segment is already fenced");
    } catch (WriteException we) {
        // expected
    }
    abortWriterAndLock(writer, lock);
}
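Note the two distinct failure modes the test asserts: the write that hits the fenced ledger surfaces a BKTransmitException carrying the BookKeeper result code, while every later write fails fast with WriteException because the writer's transmit result is already non-OK. A hypothetical caller-side sketch of handling both; writeOrReport and its recovery comments are illustrative, not a BookKeeper API.

// Hypothetical caller-side handling of the two failure modes above.
void writeOrReport(BKLogSegmentWriter writer, LogRecord record) throws Exception {
    try {
        Utils.ioResult(writer.asyncWrite(record));
    } catch (BKTransmitException bkte) {
        // First failure: inspect the underlying bookkeeper code,
        // e.g. BKException.Code.LedgerFencedException.
        int rc = bkte.getBKResultCode();
        throw bkte;
    } catch (WriteException we) {
        // Later writes: the writer is already errored; abort it and
        // roll to a new log segment instead of retrying on this one.
        throw we;
    }
}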