Example 11 with BadOffsetException

Use of io.pravega.segmentstore.contracts.BadOffsetException in project pravega by pravega.

From the class SegmentAggregatorTests, method testReconcileAppends.

/**
 * Tests the ability of the SegmentAggregator to reconcile AppendOperations (Cached/NonCached).
 */
@Test
public void testReconcileAppends() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = 1000;
    final int failEvery = 3;
    @Cleanup TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    // The writes always succeed, but every failEvery-th one reports an injected error, indicating that it didn't.
    AtomicInteger writeCount = new AtomicInteger();
    AtomicReference<Exception> setException = new AtomicReference<>();
    context.storage.setWriteInterceptor((segmentName, offset, data, length, storage) -> {
        if (writeCount.incrementAndGet() % failEvery == 0) {
            // Time to wreak some havoc.
            return storage.write(writeHandle(segmentName), offset, data, length, TIMEOUT).thenAccept(v -> {
                IntentionalException ex = new IntentionalException(String.format("S=%s,O=%d,L=%d", segmentName, offset, length));
                setException.set(ex);
                throw ex;
            });
        } else {
            setException.set(null);
            return null;
        }
    });
    @Cleanup ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
    }
    // Force a flush by incrementing the time by a lot.
    context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
    while (context.segmentAggregator.mustFlush()) {
        // Call flush() and inspect the result.
        FlushResult flushResult = null;
        try {
            flushResult = context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(), Exceptions.unwrap(ex));
            } else {
                // Only expecting a BadOffsetException after our own injected exception.
                Throwable realEx = Exceptions.unwrap(ex);
                Assert.assertTrue("Unexpected exception thrown: " + realEx, realEx instanceof BadOffsetException);
            }
        }
        // Check flush result.
        if (flushResult != null) {
            AssertExtensions.assertGreaterThan("Not enough bytes were flushed (time threshold).", 0, flushResult.getFlushedBytes());
            Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
        }
        // Force a flush by incrementing the time by a lot.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
    }
    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
Also used: AtomicReference (java.util.concurrent.atomic.AtomicReference), ByteArrayOutputStream (java.io.ByteArrayOutputStream), FixedByteArrayOutputStream (io.pravega.common.io.FixedByteArrayOutputStream), Cleanup (lombok.Cleanup), StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException), BadOffsetException (io.pravega.segmentstore.contracts.BadOffsetException), IntentionalException (io.pravega.test.common.IntentionalException), IOException (java.io.IOException), DataCorruptionException (io.pravega.segmentstore.server.DataCorruptionException), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation), Test (org.junit.Test)
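
The trick that drives this test is the write interceptor: every failEvery-th write is actually persisted, but the caller is still told it failed, so the aggregator's view falls behind Storage and it must reconcile. Below is a minimal, self-contained sketch of that fault-injection pattern; WriteInterceptor and persist() are hypothetical stand-ins for the TestContext plumbing, not Pravega classes.

// Minimal sketch of the fault-injection pattern used above: the write really
// happens, but the caller is still told it failed. WriteInterceptor and
// persist() are hypothetical stand-ins for the TestContext plumbing.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;

public class FaultInjectionSketch {
    interface WriteInterceptor {
        CompletableFuture<Void> intercept(String segment, long offset, byte[] data);
    }

    public static void main(String[] args) {
        final int failEvery = 3;
        AtomicInteger writeCount = new AtomicInteger();
        WriteInterceptor interceptor = (segment, offset, data) -> CompletableFuture.runAsync(() -> {
            persist(segment, offset, data); // the data is actually persisted...
            if (writeCount.incrementAndGet() % failEvery == 0) {
                // ...but every failEvery-th write still reports a failure, leaving
                // Storage ahead of what the caller believes was written.
                throw new RuntimeException("injected failure at offset " + offset);
            }
        });

        for (int i = 1; i <= 4; i++) {
            try {
                interceptor.intercept("segment", i * 3L, new byte[]{1, 2, 3}).join();
                System.out.println("write " + i + ": ok");
            } catch (Exception ex) {
                System.out.println("write " + i + ": reported failure: " + ex.getCause());
            }
        }
    }

    private static void persist(String segment, long offset, byte[] data) {
        // No-op placeholder; the real test calls context.storage.write(...) here.
    }
}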

Example 12 with BadOffsetException

Use of io.pravega.segmentstore.contracts.BadOffsetException in project pravega by pravega.

From the class SegmentAggregatorTests, method testProgressiveReconcile.

/**
 * Tests the ability of the SegmentAggregator to reconcile operations as they are added to it (it has detected a
 * possible data corruption, but does not yet have all the operations it needs to reconcile; it must stay in
 * reconciliation mode until all disagreements have been resolved).
 */
@Test
public void testProgressiveReconcile() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = 1000;
    final int failEvery = 3;
    final int maxFlushLoopCount = 5;
    @Cleanup TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    @Cleanup ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    ArrayList<StorageOperation> appendOperations = new ArrayList<>();
    ArrayList<InputStream> appendData = new ArrayList<>();
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        appendOperations.add(appendOp);
        byte[] ad = new byte[(int) appendOp.getLength()];
        getAppendData(appendOp, new FixedByteArrayOutputStream(ad, 0, ad.length), context);
        appendData.add(new ByteArrayInputStream(ad));
        writtenData.write(ad);
    }
    // Add each operation one at a time, and every failEvery appends, write the next failEvery-1 appends directly
    // to Storage ahead of the aggregator. This forces a good mix of reconciles and normal appends.
    int errorCount = 0;
    int flushCount = 0;
    for (int i = 0; i < appendOperations.size(); i++) {
        StorageOperation op = appendOperations.get(i);
        context.segmentAggregator.add(op);
        if (i % failEvery == 0) {
            // Corrupt the storage by adding the next failEvery-1 ops to Storage.
            for (int j = i; j < i + failEvery - 1 && j < appendOperations.size(); j++) {
                long offset = context.storage.getStreamSegmentInfo(SEGMENT_NAME, TIMEOUT).join().getLength();
                context.storage.write(writeHandle(SEGMENT_NAME), offset, appendData.get(j), appendData.get(j).available(), TIMEOUT).join();
            }
        }
        // Force a flush by incrementing the time by a lot.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
        int flushLoopCount = 0;
        while (context.segmentAggregator.mustFlush()) {
            try {
                flushCount++;
                context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            } catch (Exception ex) {
                errorCount++;
                Assert.assertTrue("", Exceptions.unwrap(ex) instanceof BadOffsetException);
            }
            flushLoopCount++;
            AssertExtensions.assertLessThan("Too many flush-loops for a single attempt.", maxFlushLoopCount, flushLoopCount);
        }
    }
    AssertExtensions.assertGreaterThan("At least one flush was expected.", 0, flushCount);
    AssertExtensions.assertGreaterThan("At least one BadOffsetException was expected.", 0, errorCount);
    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
Also used: FixedByteArrayOutputStream (io.pravega.common.io.FixedByteArrayOutputStream), ByteArrayInputStream (java.io.ByteArrayInputStream), InputStream (java.io.InputStream), ArrayList (java.util.ArrayList), ByteArrayOutputStream (java.io.ByteArrayOutputStream), Cleanup (lombok.Cleanup), StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException), BadOffsetException (io.pravega.segmentstore.contracts.BadOffsetException), IntentionalException (io.pravega.test.common.IntentionalException), IOException (java.io.IOException), DataCorruptionException (io.pravega.segmentstore.server.DataCorruptionException), StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation), Test (org.junit.Test)
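
The inner while loop is the part worth generalizing: each iteration either flushes cleanly or surfaces a BadOffsetException and retries, and flushLoopCount bounds the retries so a reconciliation that never converges fails fast instead of hanging the test. A sketch of that bounded-retry shape, with FlushTarget as a hypothetical stand-in for the SegmentAggregator:

// Minimal sketch of the bounded flush-retry loop used above.
// FlushTarget is a hypothetical stand-in for the SegmentAggregator.
public class FlushLoopSketch {
    interface FlushTarget {
        boolean mustFlush();
        void flush() throws Exception;
    }

    static int flushUntilDone(FlushTarget target, int maxFlushLoopCount) {
        int flushLoopCount = 0;
        while (target.mustFlush()) {
            try {
                target.flush();
            } catch (Exception ex) {
                // In the real test this is a BadOffsetException: the aggregator has
                // detected a disagreement with Storage and needs another pass to reconcile.
            }
            if (++flushLoopCount >= maxFlushLoopCount) {
                throw new IllegalStateException("Too many flush-loops for a single attempt.");
            }
        }
        return flushLoopCount;
    }
}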

Example 13 with BadOffsetException

Use of io.pravega.segmentstore.contracts.BadOffsetException in project pravega by pravega.

From the class ExtendedS3Storage, method doWrite.

private Void doWrite(SegmentHandle handle, long offset, InputStream data, int length) throws StreamSegmentException {
    Preconditions.checkArgument(!handle.isReadOnly(), "handle must not be read-only.");
    long traceId = LoggerHelpers.traceEnter(log, "write", handle.getSegmentName(), offset, length);
    SegmentProperties si = doGetStreamSegmentInfo(handle.getSegmentName());
    if (si.isSealed()) {
        throw new StreamSegmentSealedException(handle.getSegmentName());
    }
    if (si.getLength() != offset) {
        throw new BadOffsetException(handle.getSegmentName(), si.getLength(), offset);
    }
    client.putObject(this.config.getBucket(), this.config.getRoot() + handle.getSegmentName(), Range.fromOffsetLength(offset, length), data);
    LoggerHelpers.traceLeave(log, "write", traceId);
    return null;
}
Also used: StreamSegmentSealedException (io.pravega.segmentstore.contracts.StreamSegmentSealedException), BadOffsetException (io.pravega.segmentstore.contracts.BadOffsetException), SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties)
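
doWrite illustrates the append-only contract shared by these Storage adapters: look up the segment's current length, reject the write with BadOffsetException (which carries both the expected and the supplied offset) if it does not match, and only then write. A minimal in-memory sketch of the same check, using only the JDK; InMemorySegment and the plain IOExceptions are illustrative stand-ins for the Pravega types:

import java.io.ByteArrayOutputStream;
import java.io.IOException;

// Minimal in-memory illustration of the offset check in doWrite above.
// InMemorySegment is a hypothetical stand-in, not a Pravega class.
class InMemorySegment {
    private final ByteArrayOutputStream contents = new ByteArrayOutputStream();
    private boolean sealed;

    synchronized void write(long offset, byte[] data) throws IOException {
        if (this.sealed) {
            throw new IOException("segment is sealed"); // StreamSegmentSealedException in Pravega
        }
        if (this.contents.size() != offset) {
            // BadOffsetException in Pravega carries both the expected and the given offset.
            throw new IOException(String.format(
                    "bad offset: expected %d, given %d", this.contents.size(), offset));
        }
        this.contents.write(data); // append-only: writes land exactly at the current length
    }
}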

Example 14 with BadOffsetException

Use of io.pravega.segmentstore.contracts.BadOffsetException in project pravega by pravega.

From the class AppendProcessorTest, method testConditionalAppendFailure.

@Test
public void testConditionalAppendFailure() {
    String streamSegmentName = "testConditionalAppendFailure";
    UUID clientId = UUID.randomUUID();
    byte[] data = new byte[] { 1, 2, 3, 4, 6, 7, 8, 9 };
    StreamSegmentStore store = mock(StreamSegmentStore.class);
    ServerConnection connection = mock(ServerConnection.class);
    AppendProcessor processor = new AppendProcessor(store, connection, new FailingRequestProcessor(), null);
    setupGetStreamSegmentInfo(streamSegmentName, clientId, store);
    CompletableFuture<Void> result = CompletableFuture.completedFuture(null);
    when(store.append(streamSegmentName, data, updateEventNumber(clientId, 1), AppendProcessor.TIMEOUT)).thenReturn(result);
    processor.setupAppend(new SetupAppend(1, clientId, streamSegmentName, ""));
    processor.append(new Append(streamSegmentName, clientId, 1, Unpooled.wrappedBuffer(data), null));
    result = Futures.failedFuture(new BadOffsetException(streamSegmentName, data.length, 0));
    when(store.append(streamSegmentName, 0, data, updateEventNumber(clientId, 2, 1, 1), AppendProcessor.TIMEOUT)).thenReturn(result);
    processor.append(new Append(streamSegmentName, clientId, 2, Unpooled.wrappedBuffer(data), 0L));
    verify(store).getStreamSegmentInfo(anyString(), eq(true), eq(AppendProcessor.TIMEOUT));
    verify(store).append(streamSegmentName, data, updateEventNumber(clientId, 1), AppendProcessor.TIMEOUT);
    verify(store).append(streamSegmentName, 0L, data, updateEventNumber(clientId, 2, 1, 1), AppendProcessor.TIMEOUT);
    verify(connection).send(new AppendSetup(1, streamSegmentName, clientId, 0));
    verify(connection, atLeast(0)).resumeReading();
    verify(connection).send(new DataAppended(clientId, 1, 0));
    verify(connection).send(new ConditionalCheckFailed(clientId, 2));
    verifyNoMoreInteractions(connection);
    verifyNoMoreInteractions(store);
}
Also used: FailingRequestProcessor (io.pravega.shared.protocol.netty.FailingRequestProcessor), ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString), AppendSetup (io.pravega.shared.protocol.netty.WireCommands.AppendSetup), ConditionalCheckFailed (io.pravega.shared.protocol.netty.WireCommands.ConditionalCheckFailed), StreamSegmentStore (io.pravega.segmentstore.contracts.StreamSegmentStore), Append (io.pravega.shared.protocol.netty.Append), SetupAppend (io.pravega.shared.protocol.netty.WireCommands.SetupAppend), DataAppended (io.pravega.shared.protocol.netty.WireCommands.DataAppended), BadOffsetException (io.pravega.segmentstore.contracts.BadOffsetException), UUID (java.util.UUID), Test (org.junit.Test)
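
The failure path here hinges on handing the mocked store a future that is already completed exceptionally with a BadOffsetException; Futures.failedFuture is Pravega's helper for building one. The same thing can be expressed with the plain JDK, as in this sketch (IllegalStateException stands in for BadOffsetException to keep it dependency-free):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class FailedFutureSketch {
    public static void main(String[] args) {
        // JDK equivalent of Futures.failedFuture(new BadOffsetException(...)).
        // On Java 9+ this is simply CompletableFuture.failedFuture(ex).
        CompletableFuture<Void> result = new CompletableFuture<>();
        result.completeExceptionally(new IllegalStateException("bad offset: expected 8, given 0"));

        try {
            result.join();
        } catch (CompletionException ex) {
            // Callers see the original cause after unwrapping, which is what
            // Exceptions.unwrap(ex) does in the tests above.
            System.out.println("cause: " + ex.getCause().getMessage());
        }
    }
}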

Example 15 with BadOffsetException

Use of io.pravega.segmentstore.contracts.BadOffsetException in project pravega by pravega.

From the class WriteOperation, method run.

@Override
public void run() throws BadOffsetException, IOException, StorageNotPrimaryException {
    HDFSSegmentHandle handle = getTarget();
    long traceId = LoggerHelpers.traceEnter(log, "write", handle, this.offset, this.length);
    FileDescriptor lastFile = handle.getLastFile();
    Timer timer = new Timer();
    try (FSDataOutputStream stream = this.context.fileSystem.append(lastFile.getPath())) {
        if (this.offset != lastFile.getLastOffset()) {
            // Validate the offset only after opening the file, so that a missing file surfaces
            // as FileNotFoundException before we throw BadOffsetException.
            throw new BadOffsetException(handle.getSegmentName(), lastFile.getLastOffset(), this.offset);
        } else if (stream.getPos() != lastFile.getLength()) {
            // Looks like the filesystem changed from underneath us. This could be our bug, but it could be something else.
            // Update our knowledge of the filesystem and throw a BadOffsetException - this should cause upstream code
            // to try to reconcile; if it can't then the upstream code should shut down or take other appropriate measures.
            log.warn("File changed detected for '{}'. Expected length = {}, actual length = {}.", lastFile, lastFile.getLength(), stream.getPos());
            lastFile.setLength(stream.getPos());
            throw new BadOffsetException(handle.getSegmentName(), lastFile.getLastOffset(), this.offset);
        }
        if (this.length == 0) {
            // Note: IOUtils.copyBytes with length == 0 will enter an infinite loop, hence the need for this check.
            return;
        }
        // We need to be very careful with IOUtils.copyBytes. There are many overloads with very similar signatures.
        // There is a difference between (InputStream, OutputStream, int, boolean) and (InputStream, OutputStream, long, boolean),
        // in that the one with "int" uses the third arg as a buffer size, and the one with "long" uses it as the number
        // of bytes to copy.
        IOUtils.copyBytes(this.data, stream, (long) this.length, false);
        stream.flush();
        lastFile.increaseLength(this.length);
    } catch (FileNotFoundException | AclException ex) {
        checkForFenceOut(handle.getSegmentName(), handle.getFiles().size(), handle.getLastFile());
        // If we were not fenced out, then this is a legitimate exception - rethrow it.
        throw ex;
    }
    HDFSMetrics.WRITE_LATENCY.reportSuccessEvent(timer.getElapsed());
    HDFSMetrics.WRITE_BYTES.add(this.length);
    LoggerHelpers.traceLeave(log, "write", traceId, handle, offset, length);
}
Also used: Timer (io.pravega.common.Timer), FileNotFoundException (java.io.FileNotFoundException), BadOffsetException (io.pravega.segmentstore.contracts.BadOffsetException), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), AclException (org.apache.hadoop.hdfs.protocol.AclException)
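
The comment about IOUtils.copyBytes deserves a concrete illustration: in Hadoop's org.apache.hadoop.io.IOUtils, the overload taking an int treats the third argument as a buffer size and copies the whole stream, while the overload taking a long treats it as the exact number of bytes to copy. A small sketch of the difference, assuming the Hadoop client library is on the classpath:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.io.IOUtils;

public class CopyBytesSketch {
    public static void main(String[] args) throws Exception {
        byte[] payload = new byte[]{1, 2, 3, 4, 5};
        ByteArrayOutputStream out = new ByteArrayOutputStream();

        // long overload: the third argument is the NUMBER OF BYTES to copy.
        IOUtils.copyBytes(new ByteArrayInputStream(payload), out, 3L, false);
        System.out.println(out.size()); // 3 -- only three bytes copied

        out.reset();
        // int overload: the third argument is only a BUFFER SIZE; the whole stream is copied.
        IOUtils.copyBytes(new ByteArrayInputStream(payload), out, 3, false);
        System.out.println(out.size()); // 5 -- the entire stream
    }
}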

Aggregations

BadOffsetException (io.pravega.segmentstore.contracts.BadOffsetException): 21 usages
Test (org.junit.Test): 12 usages
StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException): 8 usages
ByteArrayInputStream (java.io.ByteArrayInputStream): 7 usages
lombok.val: 6 usages
StreamSegmentSealedException (io.pravega.segmentstore.contracts.StreamSegmentSealedException): 5 usages
StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation): 5 usages
Storage (io.pravega.segmentstore.storage.Storage): 5 usages
DataCorruptionException (io.pravega.segmentstore.server.DataCorruptionException): 4 usages
StreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation): 4 usages
StreamSegmentTruncateOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentTruncateOperation): 4 usages
Cleanup (lombok.Cleanup): 4 usages
SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties): 3 usages
MergeTransactionOperation (io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation): 3 usages
Operation (io.pravega.segmentstore.server.logs.operations.Operation): 3 usages
StreamSegmentSealOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation): 3 usages
SegmentHandle (io.pravega.segmentstore.storage.SegmentHandle): 3 usages
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 3 usages
InputStream (java.io.InputStream): 3 usages
CompletableFuture (java.util.concurrent.CompletableFuture): 3 usages