Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega: class BookKeeperLogTests, method testAutoCloseOnBookieFailure.
/**
 * Tests the ability to auto-close upon a permanent write failure caused by BookKeeper.
 *
 * @throws Exception If one got thrown.
 */
@Test
public void testAutoCloseOnBookieFailure() throws Exception {
    try (DurableDataLog log = createDurableDataLog()) {
        log.initialize(TIMEOUT);
        try {
            // Suspend a bookie (this will trigger write errors).
            stopFirstBookie();

            // The first write should fail with either a DataLogNotAvailableException (insufficient bookies)
            // or a WriteFailureException (generic inability to write).
            AssertExtensions.assertThrows("First write did not fail with the appropriate exception.",
                    () -> log.append(new ByteArraySegment(getWriteData()), TIMEOUT),
                    ex -> ex instanceof RetriesExhaustedException
                            && (ex.getCause() instanceof DataLogNotAvailableException || isLedgerClosedException(ex.getCause()))
                            || ex instanceof ObjectClosedException
                            || ex instanceof CancellationException);

            // Subsequent writes should be rejected since the BookKeeperLog is now closed.
            AssertExtensions.assertThrows("Second write did not fail with the appropriate exception.",
                    () -> log.append(new ByteArraySegment(getWriteData()), TIMEOUT),
                    ex -> ex instanceof ObjectClosedException || ex instanceof CancellationException);
        } finally {
            // Don't forget to resume the bookie.
            restartFirstBookie();
        }
    }
}
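All of the snippets on this page funnel their payloads through ByteArraySegment. The following standalone sketch shows only the pattern those snippets already rely on (the array-wrapping constructors, getLength() and getReader()); the payload bytes are made up for illustration and stand in for getWriteData() above.

import io.pravega.common.util.ByteArraySegment;
import java.io.InputStream;

public class ByteArraySegmentSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical payload standing in for getWriteData() above.
        byte[] data = new byte[]{10, 20, 30, 40, 50};

        // Wrap the whole array; the segment is a view over it, not a copy.
        ByteArraySegment full = new ByteArraySegment(data);
        System.out.println("Full length: " + full.getLength());      // 5

        // Wrap only a sub-range (offset 1, length 3) of the same array.
        ByteArraySegment middle = new ByteArraySegment(data, 1, 3);
        System.out.println("Middle length: " + middle.getLength());  // 3

        // getReader() exposes the segment as an InputStream, the same view
        // RollingStorage.serializeHandle() below hands to baseStorage.write().
        try (InputStream reader = middle.getReader()) {
            System.out.println("First byte of sub-range: " + reader.read());  // 20
        }
    }
}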
Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega: class WriteQueueTests, method testRemoveFinishedWrites.
/**
 * Tests the removeFinishedWrites() method.
 */
@Test
public void testRemoveFinishedWrites() {
    // Just over 1ms, in nanoseconds.
    final int timeIncrement = 1234 * 1000;
    AtomicLong time = new AtomicLong();
    val q = new WriteQueue(time::get);
    val writes = new ArrayDeque<Write>();
    for (int i = 0; i < ITEM_COUNT; i++) {
        time.addAndGet(timeIncrement);
        val w = new Write(new ByteArraySegment(new byte[i]), new TestWriteLedger(i), new CompletableFuture<>());
        if (i % 2 == 0) {
            // Complete every other write.
            w.setEntryId(i);
            w.complete();
        }
        q.add(w);
        writes.addLast(w);
    }

    while (!writes.isEmpty()) {
        val write = writes.pollFirst();
        if (!write.isDone()) {
            val result1 = q.removeFinishedWrites();
            AssertExtensions.assertContainsSameElements("Unexpected value from removeFinishedWrites when there were writes left in the queue.",
                    EnumSet.of(WriteQueue.CleanupStatus.QueueNotEmpty), result1);
            val stats1 = q.getStatistics();
            Assert.assertEquals("Unexpected size after removeFinishedWrites with no effect.", writes.size() + 1, stats1.getSize());

            // Complete this write.
            write.setEntryId(time.get());
            write.complete();
        }

        // Estimate the expected elapsed time based on the removals.
        long expectedElapsed = write.getQueueAddedTimestamp();
        int removed = 1;
        while (!writes.isEmpty() && writes.peekFirst().isDone()) {
            expectedElapsed += writes.pollFirst().getQueueAddedTimestamp();
            removed++;
        }
        expectedElapsed = (time.get() * removed - expectedElapsed) / AbstractTimer.NANOS_TO_MILLIS / removed;

        val result2 = q.removeFinishedWrites();
        val expectedResult = EnumSet.of(writes.isEmpty() ? WriteQueue.CleanupStatus.QueueEmpty : WriteQueue.CleanupStatus.QueueNotEmpty);
        AssertExtensions.assertContainsSameElements("Unexpected result from removeFinishedWrites.", expectedResult, result2);
        val stats2 = q.getStatistics();
        Assert.assertEquals("Unexpected size after removeFinishedWrites.", writes.size(), stats2.getSize());
        Assert.assertEquals("Unexpected getExpectedProcessingTimeMillis after removeFinishedWrites.", expectedElapsed, stats2.getExpectedProcessingTimeMillis());
    }

    // Verify that it does report failed writes when encountered.
    val w3 = new Write(new ByteArraySegment(new byte[1]), new TestWriteLedger(0), new CompletableFuture<>());
    q.add(w3);
    w3.fail(new IntentionalException(), true);
    val result3 = q.removeFinishedWrites();
    AssertExtensions.assertContainsSameElements("Unexpected value from removeFinishedWrites when there were failed writes.",
            EnumSet.of(WriteQueue.CleanupStatus.QueueEmpty, WriteQueue.CleanupStatus.WriteFailed), result3);
}
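The expectedElapsed computation above uses the identity sum(now - ts_i) = now * removed - sum(ts_i) to average the queue latency of the writes removed in each iteration. Below is a small self-contained sketch of the same arithmetic, with hypothetical timestamps and assuming AbstractTimer.NANOS_TO_MILLIS is 1,000,000 (nanoseconds per millisecond).

public class ElapsedTimeSketch {
    public static void main(String[] args) {
        final long nanosToMillis = 1_000_000;              // assumed value of AbstractTimer.NANOS_TO_MILLIS
        long now = 5_000_000;                              // current time in nanoseconds (hypothetical)
        long[] queueAddedTimestamps = {1_000_000, 2_000_000, 3_000_000};

        long timestampSum = 0;
        for (long ts : queueAddedTimestamps) {
            timestampSum += ts;
        }
        int removed = queueAddedTimestamps.length;

        // sum(now - ts) == now * removed - sum(ts), so the average latency in millis is:
        long expectedElapsedMillis = (now * removed - timestampSum) / nanosToMillis / removed;
        System.out.println(expectedElapsedMillis);         // (4 + 3 + 2) / 3 = 3 ms
    }
}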
Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega: class RollingStorage, method serializeHandle.
private void serializeHandle(RollingSegmentHandle handle) throws StreamSegmentException {
    ByteArraySegment handleData = HandleSerializer.serialize(handle);
    try {
        this.baseStorage.write(handle.getHeaderHandle(), 0, handleData.getReader(), handleData.getLength());
        handle.setHeaderLength(handleData.getLength());
        log.debug("Header for '{}' fully serialized to '{}'.", handle.getSegmentName(), handle.getHeaderHandle().getSegmentName());
    } catch (BadOffsetException ex) {
        // A BadOffsetException while writing the Handle means it was modified externally.
        throw new StorageNotPrimaryException(handle.getSegmentName(), ex);
    }
}
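serializeHandle() hands the serialized header to baseStorage.write() as an (InputStream, length) pair. The sketch below uses only getReader() and getLength(), as seen above, and a made-up payload (the real bytes come from HandleSerializer.serialize()); it shows how a consumer on the other side of that contract can drain exactly getLength() bytes from the stream.

import io.pravega.common.util.ByteArraySegment;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public class HeaderDrainSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical serialized header bytes, for illustration only.
        ByteArraySegment handleData = new ByteArraySegment("chunk-0:length=128".getBytes(StandardCharsets.UTF_8));

        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        try (InputStream reader = handleData.getReader()) {
            byte[] buffer = new byte[8];
            int read;
            while ((read = reader.read(buffer)) > 0) {
                sink.write(buffer, 0, read);
            }
        }

        // Everything the segment declares via getLength() is available through getReader().
        System.out.println(sink.size() == handleData.getLength());   // true
    }
}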
Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega: class DataFrameBuilderTests, method testAppendWithSerializationFailure.
/**
 * Tests the case when appends fail because of serialization failures.
 * Serialization errors should only affect the append that caused them; they should not cause any data to be dropped
 * or put the DataFrameBuilder in a stuck state.
 * This is verified with both small and large LogItems; large items span multiple frames.
 */
@Test
public void testAppendWithSerializationFailure() throws Exception {
    // Fail every X records.
    int failEvery = 7;
    ArrayList<TestLogItem> records = DataFrameTestHelpers.generateLogItems(RECORD_COUNT / 2, SMALL_RECORD_MIN_SIZE, SMALL_RECORD_MAX_SIZE, 0);
    records.addAll(DataFrameTestHelpers.generateLogItems(RECORD_COUNT / 2, LARGE_RECORD_MIN_SIZE, LARGE_RECORD_MAX_SIZE, records.size()));

    // Have every 'failEvery'-th record fail after writing 90% of itself.
    for (int i = 0; i < records.size(); i += failEvery) {
        records.get(i).failSerializationAfterComplete(0.9, new IOException("intentional " + i));
    }

    HashSet<Integer> failedIndices = new HashSet<>();
    val order = new HashMap<DataFrameBuilder.CommitArgs, Integer>();
    try (TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, FRAME_SIZE, executorService())) {
        dataLog.initialize(TIMEOUT);
        List<DataFrameBuilder.CommitArgs> commitFrames = Collections.synchronizedList(new ArrayList<>());
        BiConsumer<Throwable, DataFrameBuilder.CommitArgs> errorCallback = (ex, a) -> Assert.fail(String.format("Unexpected error occurred upon commit. %s", ex));
        val args = new DataFrameBuilder.Args(DataFrameTestHelpers.appendOrder(order), commitFrames::add, errorCallback, executorService());
        try (DataFrameBuilder<TestLogItem> b = new DataFrameBuilder<>(dataLog, SERIALIZER, args)) {
            for (int i = 0; i < records.size(); i++) {
                try {
                    b.append(records.get(i));
                } catch (IOException ex) {
                    failedIndices.add(i);
                }
            }
        }

        // Read all entries in the Log and interpret them as DataFrames, then verify the records can be reconstructed.
        await(() -> commitFrames.size() >= order.size(), 20);
        List<DataFrame.DataFrameEntryIterator> frames = dataLog.getAllEntries(readItem -> DataFrame.read(readItem.getPayload(), readItem.getLength(), readItem.getAddress()));
        Assert.assertEquals("Unexpected number of frames generated.", commitFrames.size(), frames.size());

        // Check the correctness of the commit callback.
        AssertExtensions.assertGreaterThan("Not enough Data Frames were generated.", 1, commitFrames.size());
        AssertExtensions.assertGreaterThan("Not enough LogItems were failed.", records.size() / failEvery, failedIndices.size());
        DataFrameTestHelpers.checkReadRecords(frames, records, failedIndices, r -> new ByteArraySegment(r.getFullSerialization()));
    }
}
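failSerializationAfterComplete(0.9, ...) above makes a record throw after most of its payload has been written, which is what forces the DataFrameBuilder to discard the partial entry while keeping its neighbours. A purely illustrative sketch of that failure mode follows; PartialFailureSerializer is a made-up class, not part of Pravega or its test helpers.

import java.io.IOException;
import java.io.OutputStream;

// Hypothetical helper: writes a configurable fraction of the payload, then fails,
// simulating what TestLogItem.failSerializationAfterComplete(0.9, ...) sets up above.
class PartialFailureSerializer {
    private final double failAfterRatio;

    PartialFailureSerializer(double failAfterRatio) {
        this.failAfterRatio = failAfterRatio;
    }

    void serialize(byte[] payload, OutputStream target) throws IOException {
        int bytesBeforeFailure = (int) (payload.length * this.failAfterRatio);
        target.write(payload, 0, bytesBeforeFailure);
        // The partially written entry must be rolled back by the caller (here, the DataFrameBuilder)
        // without dropping records that serialized cleanly before or after it.
        throw new IOException("intentional failure after " + bytesBeforeFailure + " bytes");
    }
}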
Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega: class DataFrameBuilderTests, method testFlush.
/**
 * Tests the flush() method.
 */
@Test
public void testFlush() throws Exception {
    // Append two records, verify they are not flushed, invoke flush(), then verify they were flushed.
    try (TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, FRAME_SIZE, executorService())) {
        dataLog.initialize(TIMEOUT);
        ArrayList<TestLogItem> records = DataFrameTestHelpers.generateLogItems(2, SMALL_RECORD_MIN_SIZE, SMALL_RECORD_MAX_SIZE, 0);
        List<DataFrameBuilder.CommitArgs> commitFrames = Collections.synchronizedList(new ArrayList<>());
        BiConsumer<Throwable, DataFrameBuilder.CommitArgs> errorCallback = (ex, a) -> Assert.fail(String.format("Unexpected error occurred upon commit. %s", ex));
        val args = new DataFrameBuilder.Args(Callbacks::doNothing, commitFrames::add, errorCallback, executorService());
        @Cleanup
        DataFrameBuilder<TestLogItem> b = new DataFrameBuilder<>(dataLog, SERIALIZER, args);
        for (TestLogItem item : records) {
            b.append(item);
        }

        // Nothing should have been committed before flush().
        Assert.assertEquals("A Data Frame was generated but none was expected yet.", 0, commitFrames.size());

        // Invoke flush() and wait for the frames' commit callbacks to be invoked.
        b.flush();
        await(() -> commitFrames.size() >= 1, 20);

        // Check the correctness of the commit callback once the flush completes.
        Assert.assertEquals("Exactly one Data Frame was expected so far.", 1, commitFrames.size());

        // Read all entries in the Log and interpret them as DataFrames, then verify the records can be reconstructed.
        val frames = dataLog.getAllEntries(readItem -> DataFrame.read(readItem.getPayload(), readItem.getLength(), readItem.getAddress()));
        Assert.assertEquals("Unexpected number of frames generated.", commitFrames.size(), frames.size());
        DataFrameTestHelpers.checkReadRecords(frames, records, r -> new ByteArraySegment(r.getFullSerialization()));
    }
}
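Both DataFrameBuilder tests above poll with await(() -> ..., 20) until the commit callbacks have fired. The sketch below is a minimal busy-wait helper whose signature and behaviour are assumed from those call sites; it is not the actual implementation in the test base class, and the retry budget is arbitrary.

import java.util.function.Supplier;

final class AwaitSketch {
    // Re-evaluate the condition every pollIntervalMillis until it holds or the retry budget runs out.
    static void await(Supplier<Boolean> condition, int pollIntervalMillis) throws InterruptedException {
        final int maxAttempts = 250;   // arbitrary budget: 250 * 20ms = 5s
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            if (condition.get()) {
                return;
            }
            Thread.sleep(pollIntervalMillis);
        }
        throw new AssertionError("Condition did not become true within the allotted time.");
    }
}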