use of io.pravega.common.util.ArrayView in project pravega by pravega.
the class DurableLogTests method testRecoveryFailures.
/**
 * Tests the DurableLog recovery process in scenarios where failures occur during recovery
* (these may or may not be DataCorruptionExceptions).
*/
@Test
public void testRecoveryFailures() throws Exception {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;
    // Fail DataLog reads after X reads.
    int failReadAfter = 2;

    // Set up a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    @Cleanup
    TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup
    Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    HashSet<Long> streamSegmentIds;
    List<OperationWithCompletion> completionFutures;

    // First DurableLog. We use this for generating data.
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup
    InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup
    CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Generate some test data. We need to do this after starting the DurableLog because the recovery
        // process wipes away all existing metadata.
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);

        // Process all generated operations and wait for them to complete.
        completionFutures = processOperations(operations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();

        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }

    // Recovery failure due to DataLog failures.
    metadata = new MetadataBuilder(CONTAINER_ID).build();
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // Inject an artificial error into the DataLog reads after a few reads.
        ErrorInjector<Exception> readNextInjector = new ErrorInjector<>(count -> count > failReadAfter, () -> new DataLogNotAvailableException("intentional"));
        dataLog.get().setReadErrorInjectors(null, readNextInjector);

        // Verify the exception thrown from startAsync() is of the right kind. This exception will be wrapped in
        // multiple layers, so we need to dig deep into it.
        AssertExtensions.assertThrows("Recovery did not fail properly when expecting DurableDataLogException.",
                () -> durableLog.startAsync().awaitRunning(),
                ex -> {
                    if (ex instanceof IllegalStateException) {
                        ex = ex.getCause();
                    }
                    if (ex == null) {
                        try {
                            // We need the service to enter a FAILED state in order to get its failure cause.
                            durableLog.awaitTerminated();
                        } catch (Exception ex2) {
                            ex = durableLog.failureCause();
                        }
                    }
                    ex = Exceptions.unwrap(ex);
                    return ex instanceof DataLogNotAvailableException && ex.getMessage().equals("intentional");
                });
    }

    // Recovery failure due to DataCorruptionException.
    metadata = new MetadataBuilder(CONTAINER_ID).build();
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // Reset the error injectors.
        dataLog.get().setReadErrorInjectors(null, null);
        AtomicInteger readCounter = new AtomicInteger();
        dataLog.get().setReadInterceptor(readItem -> {
            if (readCounter.incrementAndGet() > failReadAfter && readItem.getLength() > DataFrame.MIN_ENTRY_LENGTH_NEEDED) {
                // Mangle the payload: overwrite its contents with a DataFrame having a bogus
                // previous sequence number.
                DataFrame df = DataFrame.ofSize(readItem.getLength());
                df.seal();
                ArrayView serialization = df.getData();
                return new InjectedReadItem(serialization.getReader(), serialization.getLength(), readItem.getAddress());
            }
            return readItem;
        });

        // Verify the exception thrown from startAsync() is of the right kind. This exception will be wrapped in
        // multiple layers, so we need to dig deep into it.
        AssertExtensions.assertThrows("Recovery did not fail properly when expecting DataCorruptionException.",
                () -> durableLog.startAsync().awaitRunning(),
                ex -> {
                    if (ex instanceof IllegalStateException) {
                        ex = ex.getCause();
                    }
                    return Exceptions.unwrap(ex) instanceof DataCorruptionException;
                });

        // Verify that the underlying DurableDataLog has been disabled.
        val disabledDataLog = dataLogFactory.createDurableDataLog(CONTAINER_ID);
        AssertExtensions.assertThrows("DurableDataLog has not been disabled following a recovery failure with DataCorruptionException.",
                () -> disabledDataLog.initialize(TIMEOUT),
                ex -> ex instanceof DataLogDisabledException);
    }
}
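The ErrorInjector used above fails every read whose invocation count exceeds failReadAfter. A minimal sketch of how such a count-triggered injector could work (the class below is hypothetical and only illustrates the contract; it is not Pravega's actual ErrorInjector implementation):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
import java.util.function.Supplier;

// Hypothetical sketch: throws the supplied exception once the invocation-count
// predicate matches, mirroring new ErrorInjector<>(count -> count > N, supplier).
class CountTriggeredInjector<T extends Exception> {
    private final Predicate<Integer> trigger;
    private final Supplier<T> errorSupplier;
    private final AtomicInteger count = new AtomicInteger();

    CountTriggeredInjector(Predicate<Integer> trigger, Supplier<T> errorSupplier) {
        this.trigger = trigger;
        this.errorSupplier = errorSupplier;
    }

    void throwIfNecessary() throws T {
        // Count this invocation; throw only once the predicate starts matching.
        if (this.trigger.test(this.count.incrementAndGet())) {
            throw this.errorSupplier.get();
        }
    }
}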
use of io.pravega.common.util.ArrayView in project pravega by pravega.
the class SegmentStateStore method put.
@Override
public CompletableFuture<Void> put(String segmentName, SegmentState state, Duration timeout) {
    String stateSegment = StreamSegmentNameUtils.getStateSegmentName(segmentName);
    TimeoutTimer timer = new TimeoutTimer(timeout);
    ArrayView toWrite = serialize(state);

    // Delete the existing state segment (if any), then create a new one and write the contents to it.
    return this.storage
            .openWrite(stateSegment)
            .thenComposeAsync(handle -> this.storage.delete(handle, timer.getRemaining()), this.executor)
            .exceptionally(this::handleSegmentNotExistsException)
            .thenComposeAsync(v -> this.storage.create(stateSegment, SegmentRollingPolicy.NO_ROLLING, timer.getRemaining()), this.executor)
            .thenComposeAsync(v -> this.storage.openWrite(stateSegment), this.executor)
            .thenComposeAsync(handle -> this.storage.write(handle, 0, toWrite.getReader(), toWrite.getLength(), timer.getRemaining()), this.executor);
}
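serialize(state) is not shown here; it produces an ArrayView over the encoded state. A plausible sketch, assuming the state is first encoded to a byte[] and then wrapped in a ByteArraySegment (a standard ArrayView implementation); encodeToBytes is a hypothetical helper, not part of the class above:

// Hypothetical sketch: wrap the encoded bytes in a ByteArraySegment so callers
// can stream them via getReader() and size the write via getLength().
private ArrayView serialize(SegmentState state) {
    byte[] encoded = encodeToBytes(state); // hypothetical encoding helper
    return new ByteArraySegment(encoded);
}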
use of io.pravega.common.util.ArrayView in project pravega by pravega.
the class ClientAdapterBase method append.
@Override
public CompletableFuture<Void> append(String streamName, Event event, Duration timeout) {
    ensureRunning();
    ArrayView s = event.getSerialization();
    // Copy the serialization only if the view does not start at the beginning of its backing array.
    // Note that copyOfRange's end index is exclusive, so it must be arrayOffset() + getLength().
    byte[] payload = s.arrayOffset() == 0 ? s.array() : Arrays.copyOfRange(s.array(), s.arrayOffset(), s.arrayOffset() + s.getLength());
    String routingKey = Integer.toString(event.getRoutingKey());
    String parentName = StreamSegmentNameUtils.getParentStreamSegmentName(streamName);
    if (isTransaction(streamName, parentName)) {
        // Dealing with a Transaction.
        return CompletableFuture.runAsync(() -> {
            try {
                UUID txnId = getTransactionId(streamName);
                getWriter(parentName, event.getRoutingKey()).getTxn(txnId).writeEvent(routingKey, payload);
            } catch (Exception ex) {
                this.transactionIds.remove(streamName);
                throw new CompletionException(ex);
            }
        }, this.testExecutor);
    } else {
        try {
            return getWriter(streamName, event.getRoutingKey()).writeEvent(routingKey, payload);
        } catch (Exception ex) {
            return Futures.failedFuture(ex);
        }
    }
}
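Note the offset handling above: the view's contents are copied only when the view does not start at the beginning of its backing array, and the copy must span exactly [offset, offset + length). A small helper capturing the same copy-on-demand logic (the method name is ours, not part of Pravega's API):

// Reuse the backing array when the view covers it exactly; otherwise copy out
// exactly the view's [arrayOffset, arrayOffset + length) range.
private static byte[] toByteArray(ArrayView view) {
    if (view.arrayOffset() == 0 && view.array().length == view.getLength()) {
        return view.array();
    }
    return Arrays.copyOfRange(view.array(), view.arrayOffset(), view.arrayOffset() + view.getLength());
}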
use of io.pravega.common.util.ArrayView in project pravega by pravega.
the class BookKeeperAdapter method append.
@Override
public CompletableFuture<Void> append(String logName, Event event, Duration timeout) {
    ensureRunning();
    DurableDataLog log = this.logs.getOrDefault(logName, null);
    if (log == null) {
        return Futures.failedFuture(new StreamSegmentNotExistsException(logName));
    }

    ArrayView s = event.getSerialization();
    return Futures.toVoid(log.append(s, timeout));
}
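Unlike the client adapter above, no byte[] copy is needed here: DurableDataLog.append consumes the ArrayView directly, as the snippet shows. A hypothetical invocation (the adapter variable, log name, and timeout are illustrative, not from the source):

// Hypothetical usage: append one serialized event and block until it is durable.
adapter.append("testLog", event, Duration.ofSeconds(30)).join();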
use of io.pravega.common.util.ArrayView in project pravega by pravega.
the class DataFrameOutputStreamTests method testBufferReuse.
/**
* Tests the ability to reuse existing physical buffers, and discard them if needed.
*/
@Test
public void testBufferReuse() throws Exception {
    final int count = 500;
    final int resetEvery = 50;
    final byte[] writeData = new byte[1000];
    final int maxFrameSize = 10 * 1024;

    // Callback for when a frame is written.
    AtomicReference<DataFrame> writtenFrame = new AtomicReference<>();
    int expectedStartIndex = 0;
    @Cleanup
    DataFrameOutputStream s = new DataFrameOutputStream(maxFrameSize, writtenFrame::set);
    for (int i = 0; i < count; i++) {
        if (i % resetEvery == 0) {
            s.releaseBuffer();
            expectedStartIndex = 0;
        }

        // Generate a frame of fixed size.
        s.startNewRecord();
        s.write(writeData);
        s.endRecord();
        s.flush();

        // Then inspect the ArrayView's buffer characteristics, especially the array offset. If it increases as
        // we expect it to (and then resets when it exceeds a certain size), then we know the same physical
        // buffer is being reused.
        ArrayView av = writtenFrame.getAndSet(null).getData();
        Assert.assertEquals("Unexpected buffer index after flush #" + (i + 1), expectedStartIndex, av.arrayOffset());
        expectedStartIndex += av.getLength();
        if (maxFrameSize - expectedStartIndex < av.getLength()) {
            expectedStartIndex = 0;
        }
    }
}
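To make the expected offset progression concrete: if each flushed frame serialized to exactly 1024 bytes inside the 10240-byte buffer (an illustrative figure; the real frame size includes header overhead), arrayOffset() would read 0, 1024, 2048, ..., 9216, and the next flush would wrap back to 0, since the 10240 - (9216 + 1024) = 0 bytes remaining cannot hold another 1024-byte frame.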