use of com.twitter.distributedlog.exceptions.WriteException in project distributedlog by twitter.
the class TestBKLogSegmentWriter method testNondurableWriteAfterWriterIsClosed.
/**
 * Non durable write should fail if writer is closed.
 *
 * @throws Exception
 */
@Test(timeout = 60000)
public void testNondurableWriteAfterWriterIsClosed() throws Exception {
    DistributedLogConfiguration confLocal = newLocalConf();
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(Integer.MAX_VALUE);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setDurableWriteEnabled(false);
    ZKDistributedLock lock = createLock("/test/lock-" + runtime.getMethodName(), zkc, true);
    BKLogSegmentWriter writer = createLogSegmentWriter(confLocal, 0L, -1L, lock);
    // close the writer
    closeWriterAndLock(writer, lock);
    FutureUtils.result(writer.asyncClose());
    try {
        Await.result(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(1)));
        fail("Should fail the write if the writer is closed");
    } catch (WriteException we) {
        // expected
    }
}
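The test exercises the internal BKLogSegmentWriter directly; the same WriteException also surfaces through the public AsyncLogWriter API once a writer is closed. Below is a hedged caller-side sketch, not taken from the project: appendOnce is a hypothetical helper, and the writer is assumed to have been opened (and possibly closed concurrently) elsewhere.

// A minimal caller-side sketch (not from the test): `writer` is assumed to be
// an already-opened AsyncLogWriter; how it is obtained is omitted here.
private boolean appendOnce(AsyncLogWriter writer, LogRecord record) throws Exception {
    try {
        DLSN dlsn = Await.result(writer.write(record));
        // acknowledged at `dlsn`
        return true;
    } catch (WriteException we) {
        // the writer was closed (or marked in error) before transmit; the
        // record was not persisted and must be retried on a fresh writer
        return false;
    }
}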
use of com.twitter.distributedlog.exceptions.WriteException in project distributedlog by twitter.
the class EnvelopedEntryWriter method writeRecord.
@Override
public synchronized void writeRecord(LogRecord record, Promise<DLSN> transmitPromise)
        throws LogRecordTooLongException, WriteException {
    int logRecordSize = record.getPersistentSize();
    if (logRecordSize > MAX_LOGRECORD_SIZE) {
        throw new LogRecordTooLongException("Log Record of size " + logRecordSize
                + " written when only " + MAX_LOGRECORD_SIZE + " is allowed");
    }
    try {
        this.writer.writeOp(record);
        int numRecords = 1;
        if (!record.isControl()) {
            hasUserData = true;
        }
        if (record.isRecordSet()) {
            numRecords = LogRecordSet.numRecords(record);
        }
        count += numRecords;
        writeRequests.add(new WriteRequest(numRecords, transmitPromise));
        maxTxId = Math.max(maxTxId, record.getTransactionId());
    } catch (IOException e) {
        logger.error("Failed to append record to record set of {} : ", logName, e);
        throw new WriteException(logName, "Failed to append record to record set of " + logName);
    }
}
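The transmitPromise handed to writeRecord(...) is how success or failure eventually reaches the application: it is queued in writeRequests and completed when the buffered entry is transmitted or aborted. A rough sketch of that contract follows; bufferRecord is a hypothetical helper, and entryWriter is assumed to be the internal EnvelopedEntryWriter instance owned by the segment writer.

// Hedged sketch of the promise contract: success delivers the record's DLSN,
// failure delivers the abort cause (e.g. a WriteException).
void bufferRecord(EnvelopedEntryWriter entryWriter, LogRecord record)
        throws LogRecordTooLongException, WriteException {
    Promise<DLSN> transmitPromise = new Promise<DLSN>();
    entryWriter.writeRecord(record, transmitPromise);
    transmitPromise.addEventListener(new FutureEventListener<DLSN>() {
        @Override
        public void onSuccess(DLSN dlsn) {
            // record durably written at `dlsn`
        }
        @Override
        public void onFailure(Throwable cause) {
            // transmit failed or was aborted; `cause` carries the reason
        }
    });
}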
use of com.twitter.distributedlog.exceptions.WriteException in project distributedlog by twitter.
the class DistributedLogMultiStreamWriter method write.
public synchronized Future<DLSN> write(ByteBuffer buffer) {
    int logRecordSize = buffer.remaining();
    if (logRecordSize > MAX_LOGRECORD_SIZE) {
        return Future.exception(new LogRecordTooLongException("Log record of size " + logRecordSize
                + " written when only " + MAX_LOGRECORD_SIZE + " is allowed"));
    }
    // if exceed max number of bytes
    if ((recordSetWriter.getNumBytes() + logRecordSize) > MAX_LOGRECORDSET_SIZE) {
        flush();
    }
    Promise<DLSN> writePromise = new Promise<DLSN>();
    try {
        recordSetWriter.writeRecord(buffer, writePromise);
    } catch (LogRecordTooLongException e) {
        return Future.exception(e);
    } catch (WriteException e) {
        recordSetWriter.abortTransmit(e);
        recordSetWriter = newRecordSetWriter();
        return Future.exception(e);
    }
    if (recordSetWriter.getNumBytes() >= bufferSize) {
        flush();
    }
    return writePromise;
}
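Because an oversized payload comes back as a failed Future rather than a thrown exception, a producer can pre-check the payload size before handing it to write(...). A small hypothetical sketch, assuming the LogRecord.MAX_LOGRECORD_SIZE constant (the same limit the writer checks) and an already-built DistributedLogMultiStreamWriter named multiWriter:

// Hedged sketch: reject (or split) oversized payloads up front instead of
// letting the writer bounce them. On a WriteException the writer has already
// aborted and replaced its buffered record set (see the catch block above),
// so only this payload needs to be retried by the caller.
Future<DLSN> writeChecked(DistributedLogMultiStreamWriter multiWriter, ByteBuffer payload) {
    if (payload.remaining() > LogRecord.MAX_LOGRECORD_SIZE) {
        return Future.exception(new LogRecordTooLongException(
                "Payload of " + payload.remaining() + " bytes exceeds MAX_LOGRECORD_SIZE"));
    }
    return multiWriter.write(payload);
}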
use of com.twitter.distributedlog.exceptions.WriteException in project distributedlog by twitter.
the class EnvelopedRecordSetWriter method writeRecord.
@Override
public synchronized void writeRecord(ByteBuffer record, Promise<DLSN> transmitPromise)
        throws LogRecordTooLongException, WriteException {
    int logRecordSize = record.remaining();
    if (logRecordSize > MAX_LOGRECORD_SIZE) {
        throw new LogRecordTooLongException("Log Record of size " + logRecordSize
                + " written when only " + MAX_LOGRECORD_SIZE + " is allowed");
    }
    try {
        writer.writeInt(record.remaining());
        writeChannel.write(record);
        ++count;
        promiseList.add(transmitPromise);
    } catch (IOException e) {
        logger.error("Failed to append record to record set", e);
        throw new WriteException("", "Failed to append record to record set");
    }
}
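A record set writer like this one is normally created via LogRecordSet.newWriter(...), which is how DistributedLogMultiStreamWriter builds the buffer used in the previous example. A rough sketch follows; the newWriter(...) signature, the CompressionCodec.Type.NONE codec, and the payloadBytes/flushThresholdBytes names are assumptions for illustration.

// Hedged sketch: buffer a payload with its transmit promise and use the byte
// counter to decide when to flush, mirroring the multi-stream writer above.
void bufferPayload(byte[] payloadBytes, int flushThresholdBytes)
        throws LogRecordTooLongException, WriteException {
    LogRecordSet.Writer recordSetWriter =
            LogRecordSet.newWriter(16 * 1024, CompressionCodec.Type.NONE); // assumed signature
    Promise<DLSN> promise = new Promise<DLSN>();
    recordSetWriter.writeRecord(ByteBuffer.wrap(payloadBytes), promise);
    if (recordSetWriter.getNumBytes() >= flushThresholdBytes) {
        // hand the record set off to a transmit path (as the multi-stream
        // writer's flush() does) and start a new writer
    }
}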
use of com.twitter.distributedlog.exceptions.WriteException in project distributedlog by twitter.
the class TestAppendOnlyStreamWriter method writeRecordsAndReadThemBackAfterInjectingAFailedTransmit.
long writeRecordsAndReadThemBackAfterInjectingAFailedTransmit(DistributedLogConfiguration conf, String name,
        int writeLen, int sectionWrites) throws Exception {
    BKDistributedLogManager dlm = (BKDistributedLogManager) createNewDLM(conf, name);
    URI uri = createDLMURI("/" + name);
    BKDistributedLogManager.createLog(conf, dlm.getReaderZKC(), uri, name);
    // Log exists but is empty, better not throw.
    AppendOnlyStreamWriter writer = dlm.getAppendOnlyStreamWriter();
    byte[] byteStream = DLMTestUtil.repeatString("A", writeLen).getBytes();
    // Log a hundred entries. Offset is advanced accordingly.
    for (int i = 0; i < sectionWrites; i++) {
        writer.write(byteStream);
    }
    writer.force(false);
    long read = read(dlm, 1 * sectionWrites * writeLen);
    assertEquals(1 * sectionWrites * writeLen, read);
    // Now write another 100, but trigger failure during transmit.
    for (int i = 0; i < sectionWrites; i++) {
        writer.write(byteStream);
    }
    try {
        FailpointUtils.setFailpoint(FailpointUtils.FailPointName.FP_TransmitFailGetBuffer,
                FailpointUtils.FailPointActions.FailPointAction_Throw);
        writer.force(false);
        fail("should have thrown");
    } catch (WriteException we) {
        // expected
    } finally {
        FailpointUtils.removeFailpoint(FailpointUtils.FailPointName.FP_TransmitFailGetBuffer);
    }
    // This actually fails because we try to close an errored out stream.
    writer.write(byteStream);
    // Writing another 100 triggers offset gap.
    for (int i = 0; i < sectionWrites; i++) {
        writer.write(byteStream);
    }
    writer.force(false);
    writer.markEndOfStream();
    writer.close();
    long length = dlm.getLastTxId();
    assertEquals(3 * sectionWrites * writeLen + 5, length);
    read = read(dlm, length);
    dlm.close();
    return read;
}