Use of org.apache.distributedlog.api.DistributedLogManager in project bookkeeper by apache.
Class TestAppendOnlyStreamReader, method skipForwardThenSkipBack.
// Simple test subroutine: writes some records, reads some back, skips ahead, skips back.
public void skipForwardThenSkipBack(String name, DistributedLogConfiguration conf) throws Exception {
    DistributedLogManager dlmwrite = createNewDLM(conf, name);
    DistributedLogManager dlmreader = createNewDLM(conf, name);
    long txid = 1;

    // Write six 15-byte records (90 bytes total): "abc" x 10, "def" x 10, "ghi" x 10.
    AppendOnlyStreamWriter writer = dlmwrite.getAppendOnlyStreamWriter();
    writer.write(DLMTestUtil.repeatString("abc", 5).getBytes());
    writer.write(DLMTestUtil.repeatString("abc", 5).getBytes());
    writer.write(DLMTestUtil.repeatString("def", 5).getBytes());
    writer.write(DLMTestUtil.repeatString("def", 5).getBytes());
    writer.write(DLMTestUtil.repeatString("ghi", 5).getBytes());
    writer.write(DLMTestUtil.repeatString("ghi", 5).getBytes());
    writer.force(false);
    writer.close();

    AppendOnlyStreamReader reader = dlmreader.getAppendOnlyStreamReader();
    byte[] bytesIn = new byte[30];
    byte[] bytes1 = DLMTestUtil.repeatString("abc", 10).getBytes();
    byte[] bytes2 = DLMTestUtil.repeatString("def", 10).getBytes();
    byte[] bytes3 = DLMTestUtil.repeatString("ghi", 10).getBytes();

    // Read the first 30 bytes (the "abc" region).
    int read = reader.read(bytesIn, 0, 30);
    assertEquals(30, read);
    assertTrue(Arrays.equals(bytes1, bytesIn));

    // Skip forward to byte offset 60 (the "ghi" region), then back to offset 30 (the "def" region).
    reader.skipTo(60);
    read = reader.read(bytesIn, 0, 30);
    assertEquals(30, read);
    assertTrue(Arrays.equals(bytes3, bytesIn));

    reader.skipTo(30);
    read = reader.read(bytesIn, 0, 30);
    assertEquals(30, read);
    assertTrue(Arrays.equals(bytes2, bytesIn));
}
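The tests obtain their DistributedLogManager instances through the test helper createNewDLM. Outside the test harness, a manager is normally opened through a Namespace. The following is a minimal sketch of that path, assuming a ZooKeeper-backed namespace; the URI value and the stream name are placeholders, not taken from the tests.

import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.distributedlog.AppendOnlyStreamWriter;
import org.apache.distributedlog.DistributedLogConfiguration;
import org.apache.distributedlog.api.DistributedLogManager;
import org.apache.distributedlog.api.namespace.Namespace;
import org.apache.distributedlog.api.namespace.NamespaceBuilder;

public class AppendOnlyExample {
    public static void main(String[] args) throws Exception {
        DistributedLogConfiguration conf = new DistributedLogConfiguration();
        // Placeholder URI; point it at a real DistributedLog namespace in ZooKeeper.
        URI uri = URI.create("distributedlog://127.0.0.1:2181/messaging/my-namespace");
        Namespace namespace = NamespaceBuilder.newBuilder()
                .conf(conf)
                .uri(uri)
                .build();
        // openLog returns the same DistributedLogManager type the tests use.
        DistributedLogManager dlm = namespace.openLog("my-append-only-stream");
        AppendOnlyStreamWriter writer = dlm.getAppendOnlyStreamWriter();
        writer.write("hello".getBytes(StandardCharsets.UTF_8));
        writer.force(false);   // flush and sync, as in the tests above
        writer.close();
        dlm.close();
        namespace.close();
    }
}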
Use of org.apache.distributedlog.api.DistributedLogManager in project bookkeeper by apache.
Class TestAppendOnlyStreamReader, method testSkipToForNoPositionChange.
@Test(timeout = 60000)
public void testSkipToForNoPositionChange() throws Exception {
    String name = testNames.getMethodName();
    DistributedLogManager dlmwrite = createNewDLM(conf, name);
    DistributedLogManager dlmreader = createNewDLM(conf, name);
    long txid = 1;

    AppendOnlyStreamWriter writer = dlmwrite.getAppendOnlyStreamWriter();
    writer.write(DLMTestUtil.repeatString("abc", 5).getBytes());
    writer.close();

    final AppendOnlyStreamReader reader = dlmreader.getAppendOnlyStreamReader();
    assertTrue(reader.skipTo(0));

    byte[] bytesIn = new byte[4];
    int read = reader.read(bytesIn, 0, 4);
    assertEquals(4, read);
    assertEquals("abca", new String(bytesIn));

    // Skipping to the current position and then to offset 1 should both succeed;
    // the next 4 bytes therefore start one byte into the stream.
    assertTrue(reader.skipTo(reader.position()));
    assertTrue(reader.skipTo(1));

    read = reader.read(bytesIn, 0, 4);
    assertEquals(4, read);
    assertEquals("bcab", new String(bytesIn));
}
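Because skipTo takes an absolute byte offset and position() reports the current offset, a common pattern is to checkpoint the reader's position and resume from it later. The helper below is a hypothetical sketch, not part of the test class; it assumes a false return from skipTo means the requested offset is not reachable, and it simplifies error and end-of-stream handling.

import java.io.IOException;
import org.apache.distributedlog.AppendOnlyStreamReader;
import org.apache.distributedlog.api.DistributedLogManager;

final class ResumeExample {
    // Hypothetical helper: resume reading at a previously checkpointed byte offset
    // and return the new offset to checkpoint.
    static long readFrom(DistributedLogManager dlm, long checkpointedOffset, byte[] buf) throws IOException {
        AppendOnlyStreamReader reader = dlm.getAppendOnlyStreamReader();
        try {
            if (!reader.skipTo(checkpointedOffset)) {
                // Assumption: false means the offset is not (yet) reachable; keep the old checkpoint.
                return checkpointedOffset;
            }
            int n = reader.read(buf, 0, buf.length);
            if (n < 0) {
                return checkpointedOffset;
            }
            // position() reflects the bytes consumed so far.
            return reader.position();
        } finally {
            reader.close();
        }
    }
}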
Use of org.apache.distributedlog.api.DistributedLogManager in project bookkeeper by apache.
Class TestAppendOnlyStreamReader, method testSkipToSkipsBytesUntilEndOfStream.
@Test(timeout = 60000)
public void testSkipToSkipsBytesUntilEndOfStream() throws Exception {
    String name = testNames.getMethodName();
    DistributedLogManager dlmwrite = createNewDLM(conf, name);
    DistributedLogManager dlmreader = createNewDLM(conf, name);
    long txid = 1;

    // Write 15 bytes, then seal the stream.
    AppendOnlyStreamWriter writer = dlmwrite.getAppendOnlyStreamWriter();
    writer.write(DLMTestUtil.repeatString("abc", 5).getBytes());
    writer.markEndOfStream();
    writer.force(false);
    writer.close();

    AppendOnlyStreamReader reader = dlmreader.getAppendOnlyStreamReader();
    byte[] bytesIn = new byte[9];
    int read = reader.read(bytesIn, 0, 9);
    assertEquals(9, read);
    assertTrue(Arrays.equals(DLMTestUtil.repeatString("abc", 3).getBytes(), bytesIn));

    // Skipping to the last valid offset succeeds, but reading past it hits the end-of-stream marker.
    assertTrue(reader.skipTo(15));
    try {
        read = reader.read(bytesIn, 0, 1);
        fail("Should have thrown");
    } catch (EndOfStreamException ex) {
    }

    // Skipping backwards is still allowed; skipping beyond the marker is not.
    assertTrue(reader.skipTo(0));
    try {
        reader.skipTo(16);
        fail("Should have thrown");
    } catch (EndOfStreamException ex) {
    }
}
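Once the writer has called markEndOfStream, any read or skip past the marker surfaces as EndOfStreamException, as the test shows. A hedged sketch of a consumer that treats this exception as the normal termination signal follows; the helper is hypothetical, and the behavior of read() on a not-yet-sealed, fully consumed stream is deliberately simplified.

import java.io.IOException;
import org.apache.distributedlog.AppendOnlyStreamReader;
import org.apache.distributedlog.api.DistributedLogManager;
import org.apache.distributedlog.exceptions.EndOfStreamException;

final class DrainExample {
    // Hypothetical helper: count every byte written before the end-of-stream marker.
    static long drain(DistributedLogManager dlm) throws IOException {
        AppendOnlyStreamReader reader = dlm.getAppendOnlyStreamReader();
        byte[] buf = new byte[1024];
        long total = 0;
        try {
            while (true) {
                int n = reader.read(buf, 0, buf.length);
                if (n <= 0) {
                    // Simplification: stop if no data is currently available.
                    break;
                }
                total += n;
            }
        } catch (EndOfStreamException eos) {
            // The writer sealed the stream with markEndOfStream(); this is the expected exit.
        } finally {
            reader.close();
        }
        return total;
    }
}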
Use of org.apache.distributedlog.api.DistributedLogManager in project bookkeeper by apache.
Class TestAsyncBulkWrite, method testAsyncBulkWriteSpanningPackets.
/**
 * Test Case: A large write batch will span multiple packets.
 * @throws Exception
 */
@Test(timeout = 60000)
public void testAsyncBulkWriteSpanningPackets() throws Exception {
    String name = "distrlog-testAsyncBulkWriteSpanningPackets";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setOutputBufferSize(1024);
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());

    // First entry.
    // The aggregate batch payload is close to numTransmissions full record sets,
    // so the writer has to split it across multiple transmissions (entries).
    int numTransmissions = 4;
    int recSize = 10 * 1024;
    int batchSize = (numTransmissions * MAX_LOGRECORDSET_SIZE + 1) / recSize;
    long ledgerIndex = 1;
    long entryIndex = 0;
    long slotIndex = 0;
    long txIndex = 1;
    DLSN dlsn = checkAllSucceeded(writer, batchSize, recSize, ledgerIndex, entryIndex, slotIndex, txIndex);
    assertEquals(4, dlsn.getEntryId());
    assertEquals(1, dlsn.getLogSegmentSequenceNo());

    writer.closeAndComplete();
    dlm.close();
}
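checkAllSucceeded is a helper of the test class; at the API level the same flow is writeBulk followed by waiting on the per-record futures. Below is a minimal sketch under that assumption, using an AsyncLogWriter opened as in the test and illustrative zero-filled payloads; the helper name is hypothetical.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import org.apache.distributedlog.DLSN;
import org.apache.distributedlog.LogRecord;
import org.apache.distributedlog.api.AsyncLogWriter;

final class BulkWriteExample {
    // Hypothetical helper: write a batch of fixed-size records and return the DLSN of the last one.
    static DLSN writeBatch(AsyncLogWriter writer, int batchSize, int recSize) throws Exception {
        List<LogRecord> batch = new ArrayList<>();
        for (long txid = 1; txid <= batchSize; txid++) {
            byte[] payload = new byte[recSize];              // illustrative zero-filled payload
            batch.add(new LogRecord(txid, payload));
        }
        // writeBulk returns one outer future for the submission and one inner future per record.
        List<CompletableFuture<DLSN>> perRecord = writer.writeBulk(batch).get();
        DLSN last = null;
        for (CompletableFuture<DLSN> f : perRecord) {
            last = f.get();                                  // blocks until that record is durable
        }
        return last;
    }
}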
Use of org.apache.distributedlog.api.DistributedLogManager in project bookkeeper by apache.
Class TestAsyncBulkWrite, method testAsyncBulkWritePartialFailureBufferFailure.
/**
 * Test Case for partial failure in a bulk write.
 * Write a batch: 10 good records + 1 too-large record + 10 good records.
 * Expected: the first 10 records succeed, the too-large record is rejected,
 * and the last 10 records are cancelled because an earlier write in the batch failed.
 */
@Test(timeout = 60000)
public void testAsyncBulkWritePartialFailureBufferFailure() throws Exception {
    String name = "distrlog-testAsyncBulkWritePartialFailure";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setOutputBufferSize(1024);
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());

    final int goodRecs = 10;

    // Generate records: 10 good records, 1 too-large record, 10 good records.
    final List<LogRecord> records = DLMTestUtil.getLargeLogRecordInstanceList(1, goodRecs);
    records.add(DLMTestUtil.getLogRecordInstance(goodRecs, MAX_LOGRECORD_SIZE + 1));
    records.addAll(DLMTestUtil.getLargeLogRecordInstanceList(1, goodRecs));

    CompletableFuture<List<CompletableFuture<DLSN>>> futureResults = writer.writeBulk(records);
    List<CompletableFuture<DLSN>> results = validateFutureSucceededAndGetResult(futureResults);

    // One future is returned for each record in the batch.
    assertEquals(2 * goodRecs + 1, results.size());

    // The first goodRecs writes succeed.
    for (int i = 0; i < goodRecs; i++) {
        DLSN dlsn = validateFutureSucceededAndGetResult(results.get(i));
    }

    // The first failure is the record that exceeds the maximum record size.
    validateFutureFailed(results.get(goodRecs), LogRecordTooLongException.class);

    // The remaining writes fail with WriteCancelledException.
    for (int i = goodRecs + 1; i < 2 * goodRecs + 1; i++) {
        validateFutureFailed(results.get(i), WriteCancelledException.class);
    }

    writer.closeAndComplete();
    dlm.close();
}
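The failure mode in this test also suggests how application code might react to a partially failed batch: the oversized record can never be written as-is, while the cancelled records were only collateral damage of the earlier rejection and can be retried. A hypothetical sketch of that classification follows; the helper name and retry policy are illustrative, not part of the test.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import org.apache.distributedlog.DLSN;
import org.apache.distributedlog.LogRecord;
import org.apache.distributedlog.exceptions.LogRecordTooLongException;
import org.apache.distributedlog.exceptions.WriteCancelledException;

final class BulkWriteRetryExample {
    // Hypothetical helper: collect the records that were cancelled (not rejected) so they can be retried.
    static List<LogRecord> cancelledRecords(List<LogRecord> records,
                                            List<CompletableFuture<DLSN>> results) throws Exception {
        List<LogRecord> toRetry = new ArrayList<>();
        for (int i = 0; i < results.size(); i++) {
            try {
                results.get(i).get();
            } catch (ExecutionException ee) {
                Throwable cause = ee.getCause();
                if (cause instanceof LogRecordTooLongException) {
                    // This record exceeds the maximum record size; it must be split or dropped.
                } else if (cause instanceof WriteCancelledException) {
                    // Cancelled only because an earlier record in the batch failed; safe to retry.
                    toRetry.add(records.get(i));
                } else {
                    throw ee;
                }
            }
        }
        return toRetry;
    }
}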