Use of com.twitter.distributedlog.exceptions.LogRecordTooLongException in project distributedlog by Twitter.
From the class TestLogRecordSet, method testWriteRecords:
void testWriteRecords(Type codec) throws Exception {
    Writer writer = LogRecordSet.newWriter(1024, codec);
    assertEquals("zero user bytes", HEADER_LEN, writer.getNumBytes());
    assertEquals("zero records", 0, writer.getNumRecords());
    List<Future<DLSN>> writePromiseList = Lists.newArrayList();
    /// write first 5 records
    for (int i = 0; i < 5; i++) {
        ByteBuffer record = ByteBuffer.wrap(("record-" + i).getBytes(UTF_8));
        Promise<DLSN> writePromise = new Promise<DLSN>();
        writer.writeRecord(record, writePromise);
        writePromiseList.add(writePromise);
        assertEquals((i + 1) + " records", (i + 1), writer.getNumRecords());
    }
    ByteBuffer dataBuf = ByteBuffer.allocate(MAX_LOGRECORD_SIZE + 1);
    try {
        writer.writeRecord(dataBuf, new Promise<DLSN>());
        fail("Should fail on writing large record");
    } catch (LogRecordTooLongException lrtle) {
        // expected
    }
    assertEquals("5 records", 5, writer.getNumRecords());
    /// write another 5 records
    for (int i = 0; i < 5; i++) {
        ByteBuffer record = ByteBuffer.wrap(("record-" + (i + 5)).getBytes(UTF_8));
        Promise<DLSN> writePromise = new Promise<DLSN>();
        writer.writeRecord(record, writePromise);
        writePromiseList.add(writePromise);
        assertEquals((i + 6) + " records", (i + 6), writer.getNumRecords());
    }
    ByteBuffer buffer = writer.getBuffer();
    assertEquals("10 records", 10, writer.getNumRecords());
    // Test transmit complete
    writer.completeTransmit(1L, 1L, 10L);
    List<DLSN> writeResults = Await.result(Future.collect(writePromiseList));
    for (int i = 0; i < 10; i++) {
        assertEquals(new DLSN(1L, 1L, 10L + i), writeResults.get(i));
    }
    // Test reading from buffer
    byte[] data = new byte[buffer.remaining()];
    buffer.get(data);
    LogRecordWithDLSN record = new LogRecordWithDLSN(new DLSN(1L, 1L, 10L), 99L, data, 999L);
    record.setPositionWithinLogSegment(888);
    record.setRecordSet();
    Reader reader = LogRecordSet.of(record);
    LogRecordWithDLSN readRecord = reader.nextRecord();
    int numReads = 0;
    while (null != readRecord) {
        assertEquals(new DLSN(1L, 1L, 10L + numReads), readRecord.getDlsn());
        assertEquals(99L, readRecord.getTransactionId());
        assertEquals(888 + numReads, readRecord.getPositionWithinLogSegment());
        assertEquals(999L, readRecord.getStartSequenceIdOfCurrentSegment());
        assertEquals(999L + 888 + numReads - 1, readRecord.getSequenceId());
        // read next
        ++numReads;
        readRecord = reader.nextRecord();
    }
    assertEquals(10, numReads);
}
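The sketch below (not part of the project) distills the size guard that testWriteRecords exercises, using only the LogRecordSet APIs visible in the excerpt above (newWriter, writeRecord, completeTransmit) and assuming the same imports and static imports (Writer, MAX_LOGRECORD_SIZE, UTF_8). A payload larger than MAX_LOGRECORD_SIZE is rejected with LogRecordTooLongException before anything is buffered, while an in-range payload is buffered and acknowledged with a DLSN once the transmit completes.

void recordSetSizeGuardSketch() throws Exception {
    // a small record-set writer; LZ4 matches the codec used elsewhere in these excerpts
    Writer writer = LogRecordSet.newWriter(1024, CompressionCodec.Type.LZ4);

    // oversized payload: rejected synchronously, the record set still holds zero records
    ByteBuffer tooLarge = ByteBuffer.allocate(MAX_LOGRECORD_SIZE + 1);
    try {
        writer.writeRecord(tooLarge, new Promise<DLSN>());
    } catch (LogRecordTooLongException expected) {
        // nothing was buffered
    }

    // in-range payload: buffered, then acknowledged when the transmit completes
    Promise<DLSN> writePromise = new Promise<DLSN>();
    writer.writeRecord(ByteBuffer.wrap("small-record".getBytes(UTF_8)), writePromise);
    writer.completeTransmit(1L, 1L, 0L);
    DLSN dlsn = Await.result(writePromise); // resolves to DLSN(1L, 1L, 0L)
}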
Use of com.twitter.distributedlog.exceptions.LogRecordTooLongException in project distributedlog by Twitter.
From the class DistributedLogMultiStreamWriter, method write:
public synchronized Future<DLSN> write(ByteBuffer buffer) {
    int logRecordSize = buffer.remaining();
    if (logRecordSize > MAX_LOGRECORD_SIZE) {
        return Future.exception(new LogRecordTooLongException(
                "Log record of size " + logRecordSize + " written when only "
                        + MAX_LOGRECORD_SIZE + " is allowed"));
    }
    // flush the current record set first if adding this record would exceed
    // the maximum record-set size
    if ((recordSetWriter.getNumBytes() + logRecordSize) > MAX_LOGRECORDSET_SIZE) {
        flush();
    }
    Promise<DLSN> writePromise = new Promise<DLSN>();
    try {
        recordSetWriter.writeRecord(buffer, writePromise);
    } catch (LogRecordTooLongException e) {
        return Future.exception(e);
    } catch (WriteException e) {
        recordSetWriter.abortTransmit(e);
        recordSetWriter = newRecordSetWriter();
        return Future.exception(e);
    }
    // flush eagerly once the buffered record set reaches the configured buffer size
    if (recordSetWriter.getNumBytes() >= bufferSize) {
        flush();
    }
    return writePromise;
}
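Because write() reports an oversized record only through a failed Future, a caller that wants to branch before submitting could pre-check the payload against the same limit. The helper below is a hypothetical sketch (safeWrite is not part of the library); it assumes only LogRecord.MAX_LOGRECORD_SIZE and the write(ByteBuffer) method shown above.

Future<DLSN> safeWrite(DistributedLogMultiStreamWriter writer, ByteBuffer buffer) {
    // mirror the size check write() performs, so oversized payloads can be
    // split or rejected up front instead of surfacing as a failed future
    if (buffer.remaining() > LogRecord.MAX_LOGRECORD_SIZE) {
        return Future.exception(new LogRecordTooLongException(
                "Log record of size " + buffer.remaining() + " exceeds the "
                        + LogRecord.MAX_LOGRECORD_SIZE + " byte limit"));
    }
    return writer.write(buffer);
}

This duplicates the guard inside write(); its only benefit is that the caller can decide how to handle the oversize case without inspecting the returned Future.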
Use of com.twitter.distributedlog.exceptions.LogRecordTooLongException in project distributedlog by Twitter.
From the class TestDistributedLogMultiStreamWriter, method testWriteTooLargeRecord:
@Test(timeout = 20000)
public void testWriteTooLargeRecord() throws Exception {
    DistributedLogClient client = mock(DistributedLogClient.class);
    DistributedLogMultiStreamWriter writer = DistributedLogMultiStreamWriter.newBuilder()
            .streams(Lists.newArrayList("stream1", "stream2"))
            .client(client)
            .compressionCodec(CompressionCodec.Type.LZ4)
            .firstSpeculativeTimeoutMs(100000)
            .maxSpeculativeTimeoutMs(200000)
            .speculativeBackoffMultiplier(2)
            .requestTimeoutMs(5000000)
            .flushIntervalMs(0)
            .bufferSize(0)
            .build();
    byte[] data = new byte[LogRecord.MAX_LOGRECORD_SIZE + 10];
    ByteBuffer buffer = ByteBuffer.wrap(data);
    Future<DLSN> writeFuture = writer.write(buffer);
    assertTrue(writeFuture.isDefined());
    try {
        Await.result(writeFuture);
        fail("Should fail on writing too long record");
    } catch (LogRecordTooLongException lrtle) {
        // expected
    }
    writer.close();
}