
Example 6 with ByteBufferOutputStream

Use of io.pravega.common.io.ByteBufferOutputStream in project pravega by pravega.

The class CompositeByteArraySegmentTests, method testSliceRead.

/**
 * Tests the {@link CompositeByteArraySegment#slice} method while reading indirectly by invoking
 * {@link CompositeByteArraySegment#getReader(int, int)}.
 */
@Test
public void testSliceRead() {
    testProgressiveCopies((expectedData, s, offset, length) -> {
        @Cleanup val targetStream = new ByteBufferOutputStream(s.getLength());
        s.copyTo(targetStream);
        val targetData = targetStream.getData().getCopy();
        for (int sliceOffset = 0; sliceOffset <= s.getLength() / 2; sliceOffset++) {
            val sliceLength = s.getLength() - 2 * sliceOffset;
            InputStream reader = s.getReader(sliceOffset, sliceLength);
            if (sliceLength == 0) {
                Assert.assertEquals("Unexpected data read for empty slice.", -1, reader.read());
            } else {
                val actualData = StreamHelpers.readAll(reader, sliceLength);
                AssertExtensions.assertArrayEquals("Unexpected data sliced for step " + offset, targetData, sliceOffset, actualData, 0, actualData.length);
            }
        }
    });
}
Also used : lombok.val(lombok.val) ByteBufferOutputStream(io.pravega.common.io.ByteBufferOutputStream) InputStream(java.io.InputStream) Cleanup(lombok.Cleanup) Test(org.junit.Test)
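
A minimal sketch of the round-trip this test relies on: bytes written into a ByteBufferOutputStream can be read back either as a fresh array via getData().getCopy() or streamed via getData().getReader(). The sketch uses only methods the examples on this page already exercise, and treats the int constructor argument as an initial capacity, as the test above does.

import io.pravega.common.io.ByteBufferOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;

public class RoundTripSketch {
    public static void main(String[] args) throws IOException {
        byte[] source = "hello".getBytes();
        // Write into the ByteBuffer-backed stream; the argument is an initial capacity hint.
        ByteBufferOutputStream out = new ByteBufferOutputStream(source.length);
        out.write(source);
        // getCopy() materializes the written bytes into a new array...
        byte[] copy = out.getData().getCopy();
        // ...while getReader() streams the same bytes without copying them first.
        InputStream reader = out.getData().getReader();
        System.out.println(Arrays.equals(source, copy) + ", first byte: " + reader.read());
    }
}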

Example 7 with ByteBufferOutputStream

Use of io.pravega.common.io.ByteBufferOutputStream in project pravega by pravega.

The class SegmentAggregatorTests, method testProgressiveReconcile.

/**
 * Tests the ability of the SegmentAggregator to reconcile operations as they are added to it: when it detects
 * possible data corruption but does not yet have all the operations it needs to reconcile, it must stay in
 * reconciliation mode until all disagreements have been resolved.
 */
@Test
public void testProgressiveReconcile() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = 1000;
    final int failEvery = 3;
    final int maxFlushLoopCount = 5;
    @Cleanup TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    @Cleanup ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    ArrayList<StorageOperation> appendOperations = new ArrayList<>();
    ArrayList<InputStream> appendData = new ArrayList<>();
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        appendOperations.add(appendOp);
        val adStream = new ByteBufferOutputStream((int) appendOp.getLength());
        getAppendData(appendOp, adStream, context);
        appendData.add(adStream.getData().getReader());
        writtenData.write(adStream.getData().getCopy());
    }
    // Add each operation one at a time, and every X appends, write ahead to storage (X-1 appends). This will force a
    // good mix of reconciles and normal appends.
    int errorCount = 0;
    int flushCount = 0;
    for (int i = 0; i < appendOperations.size(); i++) {
        StorageOperation op = appendOperations.get(i);
        context.segmentAggregator.add(op);
        if (i % failEvery == 0) {
            // Corrupt the storage by adding the next failEvery-1 ops to Storage.
            for (int j = i; j < i + failEvery - 1 && j < appendOperations.size(); j++) {
                long offset = context.storage.getStreamSegmentInfo(SEGMENT_NAME, TIMEOUT).join().getLength();
                context.storage.write(writeHandle(SEGMENT_NAME), offset, appendData.get(j), appendData.get(j).available(), TIMEOUT).join();
            }
        }
        // Force a flush by incrementing the time by a lot.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
        int flushLoopCount = 0;
        while (context.segmentAggregator.mustFlush()) {
            try {
                flushCount++;
                context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            } catch (Exception ex) {
                errorCount++;
                Assert.assertTrue("Expected BadOffsetException.", Exceptions.unwrap(ex) instanceof BadOffsetException);
            }
            flushLoopCount++;
            AssertExtensions.assertLessThan("Too many flush-loops for a single attempt.", maxFlushLoopCount, flushLoopCount);
        }
    }
    AssertExtensions.assertGreaterThan("At least one flush was expected.", 0, flushCount);
    AssertExtensions.assertGreaterThan("At least one BadOffsetException was expected.", 0, errorCount);
    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
Also used : lombok.val(lombok.val) ByteArrayInputStream(java.io.ByteArrayInputStream) InputStream(java.io.InputStream) ArrayList(java.util.ArrayList) ByteArrayOutputStream(java.io.ByteArrayOutputStream) Cleanup(lombok.Cleanup) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) BadOffsetException(io.pravega.segmentstore.contracts.BadOffsetException) IntentionalException(io.pravega.test.common.IntentionalException) IOException(java.io.IOException) DataCorruptionException(io.pravega.segmentstore.server.DataCorruptionException) ByteBufferOutputStream(io.pravega.common.io.ByteBufferOutputStream) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) Test(org.junit.Test)
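
Worth noting is the data-capture pattern in the first loop above: each append is staged in its own ByteBufferOutputStream, retained as a replayable InputStream via getData().getReader(), and simultaneously accumulated into a single ByteArrayOutputStream for the end-of-test verification. A standalone sketch of just that pattern (the payloads are made-up stand-ins for getAppendData's output):

import io.pravega.common.io.ByteBufferOutputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;

public class AppendCaptureSketch {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream writtenData = new ByteArrayOutputStream(); // cumulative expected data
        List<InputStream> appendData = new ArrayList<>();                // per-append replayable readers
        for (int i = 0; i < 3; i++) {
            byte[] payload = ("append-" + i).getBytes();
            ByteBufferOutputStream adStream = new ByteBufferOutputStream(payload.length);
            adStream.write(payload);
            // Keep a reader over the staged bytes (to hand to Storage later)...
            appendData.add(adStream.getData().getReader());
            // ...and append a copy to the cumulative expected-data buffer.
            writtenData.write(adStream.getData().getCopy());
        }
        System.out.println("captured " + writtenData.size() + " bytes across " + appendData.size() + " appends");
    }
}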

Example 8 with ByteBufferOutputStream

Use of io.pravega.common.io.ByteBufferOutputStream in project pravega by pravega.

The class TagRecord, method compressArrayOption.

private static ByteArraySegment compressArrayOption(final Set<String> tags) throws IOException {
    ByteBufferOutputStream baos = new ByteBufferOutputStream();
    DataOutputStream dout = new DataOutputStream(new DeflaterOutputStream(baos));
    for (String t : tags) {
        dout.writeUTF(t);
    }
    dout.flush();
    dout.close();
    return baos.getData();
}
Also used : ByteBufferOutputStream(io.pravega.common.io.ByteBufferOutputStream) DataOutputStream(java.io.DataOutputStream) DeflaterOutputStream(java.util.zip.DeflaterOutputStream)
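
For context, the read path would be the mirror image: inflate the segment and read UTF entries until the data runs out. The sketch below is illustrative only, not the actual TagRecord decoder; it assumes ByteArraySegment is io.pravega.common.util.ByteArraySegment and exposes getReader(), as other examples on this page use.

import io.pravega.common.util.ByteArraySegment;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import java.util.zip.InflaterInputStream;

// Hypothetical inverse of compressArrayOption (not part of TagRecord).
private static Set<String> decompressArrayOption(final ByteArraySegment data) throws IOException {
    Set<String> tags = new HashSet<>();
    try (DataInputStream din = new DataInputStream(new InflaterInputStream(data.getReader()))) {
        while (true) {
            // readUTF() throws EOFException once the inflated stream is exhausted.
            tags.add(din.readUTF());
        }
    } catch (EOFException expected) {
        // All entries have been read.
    }
    return tags;
}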

Example 9 with ByteBufferOutputStream

Use of io.pravega.common.io.ByteBufferOutputStream in project pravega by pravega.

The class WireCommandsTest, method testTableIterators.

private <T extends WireCommand> void testTableIterators(Function<WireCommands.TableIteratorArgs, T> createWireCommand) throws IOException {
    // Continuation Token.
    ByteBuf buf2 = buf.copy().setInt(0, Integer.MAX_VALUE);
    WireCommands.TableIteratorArgs args = new WireCommands.TableIteratorArgs(buf, Unpooled.EMPTY_BUFFER, Unpooled.EMPTY_BUFFER, Unpooled.EMPTY_BUFFER);
    T cmd = createWireCommand.apply(args);
    testCommand(cmd);
    // From/To.
    args = new WireCommands.TableIteratorArgs(Unpooled.EMPTY_BUFFER, Unpooled.EMPTY_BUFFER, buf, buf2);
    cmd = createWireCommand.apply(args);
    testCommand(cmd);
    // Test that we are able to read fields from an older version.
    ByteBuf buf3 = buf.copy().setInt(0, Integer.MAX_VALUE - 1);
    args = new WireCommands.TableIteratorArgs(buf, Unpooled.EMPTY_BUFFER, buf2, buf3);
    cmd = createWireCommand.apply(args);
    ByteBufferOutputStream bout = new ByteBufferOutputStream();
    cmd.writeFields(new DataOutputStream(bout));
    T cmd2 = createWireCommand.apply(new WireCommands.TableIteratorArgs(buf, Unpooled.EMPTY_BUFFER, Unpooled.EMPTY_BUFFER, Unpooled.EMPTY_BUFFER));
    testCommandFromByteArray(bout.getData().slice(0, bout.size() - 2 * Integer.BYTES - buf2.readableBytes() - buf3.readableBytes()).getCopy(), cmd2);
}
Also used : ByteBufferOutputStream(io.pravega.common.io.ByteBufferOutputStream) DataOutputStream(java.io.DataOutputStream) ByteBuf(io.netty.buffer.ByteBuf)
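
The final two statements are the backward-compatibility check: the newer command is serialized in full, then the trailing bytes holding the two newest fields (2 * Integer.BYTES presumably covering their length prefixes, plus the fields themselves) are sliced off, and the truncated payload must still deserialize into a command whose newest fields are empty. Restated with comments (this is the same code as above, not new API):

// Serialize the newer command into a reusable buffer.
ByteBufferOutputStream bout = new ByteBufferOutputStream();
cmd.writeFields(new DataOutputStream(bout));
// Drop the trailing fields so the bytes resemble an older writer's output.
int tail = 2 * Integer.BYTES + buf2.readableBytes() + buf3.readableBytes();
byte[] olderVersionBytes = bout.getData().slice(0, bout.size() - tail).getCopy();
// The truncated payload must parse as cmd2, whose newest fields are empty.
testCommandFromByteArray(olderVersionBytes, cmd2);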

Example 10 with ByteBufferOutputStream

Use of io.pravega.common.io.ByteBufferOutputStream in project pravega by pravega.

The class HDFSStorageTest, method testZombieFencing.

// region Fencing tests
/**
 * A special test case of fencing to verify the behavior of HDFSStorage in the presence of an instance that has
 * been fenced out. This case verifies that any ongoing writes properly fail upon fencing. Specifically, we have a
 * fenced-out instance that keeps writing and we verify that the write fails once the ownership changes.
 * The HDFS behavior in this case is that ongoing writes which execute before the rename complete
 * successfully.
 */
@Test(timeout = 60000)
public void testZombieFencing() throws Exception {
    final long epochCount = 30;
    final int writeSize = 1000;
    final String segmentName = "Segment";
    @Cleanup val writtenData = new ByteBufferOutputStream();
    final Random rnd = new Random(0);
    int currentEpoch = 1;
    // Create initial adapter.
    val currentStorage = new AtomicReference<Storage>();
    currentStorage.set(createStorage());
    currentStorage.get().initialize(currentEpoch);
    // Create the Segment and open it for the first time.
    val currentHandle = new AtomicReference<SegmentHandle>(currentStorage.get().create(segmentName, TIMEOUT).thenCompose(v -> currentStorage.get().openWrite(segmentName)).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));
    // Run a number of epochs.
    while (currentEpoch <= epochCount) {
        val oldStorage = currentStorage.get();
        val handle = currentHandle.get();
        val writeBuffer = new byte[writeSize];
        val appends = Futures.loop(() -> true, () -> {
            rnd.nextBytes(writeBuffer);
            return oldStorage.write(handle, writtenData.size(), new ByteArrayInputStream(writeBuffer), writeBuffer.length, TIMEOUT).thenRun(() -> writtenData.write(writeBuffer));
        }, executorService());
        // Create a new Storage adapter with a new epoch and open-write the Segment, remembering its handle.
        val newStorage = createStorage();
        try {
            newStorage.initialize(++currentEpoch);
            currentHandle.set(newStorage.openWrite(segmentName).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));
        } catch (Exception ex) {
            newStorage.close();
            throw ex;
        }
        currentStorage.set(newStorage);
        try {
            appends.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            Assert.fail("Continuous appends on older epoch Adapter did not fail.");
        } catch (Exception ex) {
            val cause = Exceptions.unwrap(ex);
            if (!(cause instanceof StorageNotPrimaryException || cause instanceof StreamSegmentSealedException || cause instanceof StreamSegmentNotExistsException)) {
                // We only expect the appends to fail because they were fenced out or the Segment was sealed.
                Assert.fail("Unexpected exception " + cause);
            }
        } finally {
            oldStorage.close();
        }
    }
    byte[] expectedData = writtenData.getData().getCopy();
    byte[] readData = new byte[expectedData.length];
    @Cleanup val readStorage = createStorage();
    readStorage.initialize(++currentEpoch);
    int bytesRead = readStorage.openRead(segmentName).thenCompose(handle -> readStorage.read(handle, 0, readData, 0, readData.length, TIMEOUT)).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertEquals("Unexpected number of bytes read.", readData.length, bytesRead);
    Assert.assertArrayEquals("Unexpected data read back.", expectedData, readData);
}
Also used : lombok.val(lombok.val) Storage(io.pravega.segmentstore.storage.Storage) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Progressable(org.apache.hadoop.util.Progressable) ByteArrayOutputStream(java.io.ByteArrayOutputStream) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) SneakyThrows(lombok.SneakyThrows) AssertExtensions(io.pravega.test.common.AssertExtensions) FileSystem(org.apache.hadoop.fs.FileSystem) AclException(org.apache.hadoop.hdfs.protocol.AclException) Exceptions(io.pravega.common.Exceptions) StorageNotPrimaryException(io.pravega.segmentstore.storage.StorageNotPrimaryException) AsyncStorageWrapper(io.pravega.segmentstore.storage.AsyncStorageWrapper) Cleanup(lombok.Cleanup) Random(java.util.Random) AtomicReference(java.util.concurrent.atomic.AtomicReference) FsAction(org.apache.hadoop.fs.permission.FsAction) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) SegmentHandle(io.pravega.segmentstore.storage.SegmentHandle) ByteArrayInputStream(java.io.ByteArrayInputStream) StorageTestBase(io.pravega.segmentstore.storage.StorageTestBase) Configuration(org.apache.hadoop.conf.Configuration) After(org.junit.After) Timeout(org.junit.rules.Timeout) Path(org.apache.hadoop.fs.Path) RollingStorageTestBase(io.pravega.segmentstore.storage.rolling.RollingStorageTestBase) Before(org.junit.Before) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Files(java.nio.file.Files) IOException(java.io.IOException) Test(org.junit.Test) FileHelpers(io.pravega.common.io.FileHelpers) File(java.io.File) ByteBufferOutputStream(io.pravega.common.io.ByteBufferOutputStream) TimeUnit(java.util.concurrent.TimeUnit) Rule(org.junit.Rule) Assert(org.junit.Assert) Futures(io.pravega.common.concurrent.Futures) TemporaryFolder(org.junit.rules.TemporaryFolder)
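
One detail worth calling out: writtenData.size() does double duty as the next write offset, so the expected-data accumulator and the offset counter can never drift apart. A minimal sketch of that idiom, with a hypothetical Sink standing in for Storage#write:

import io.pravega.common.io.ByteBufferOutputStream;
import java.io.IOException;

public class OffsetTrackingSketch {
    // Hypothetical sink standing in for Storage#write(handle, offset, data, length, timeout).
    interface Sink {
        void write(long offset, byte[] data);
    }

    public static void main(String[] args) throws IOException {
        ByteBufferOutputStream writtenData = new ByteBufferOutputStream();
        Sink sink = (offset, data) -> System.out.println("write " + data.length + " bytes at offset " + offset);
        for (int i = 0; i < 3; i++) {
            byte[] chunk = new byte[100];
            // size() is the number of bytes accumulated so far, i.e. the next segment offset.
            sink.write(writtenData.size(), chunk);
            writtenData.write(chunk);
        }
    }
}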

Aggregations

ByteBufferOutputStream (io.pravega.common.io.ByteBufferOutputStream): 19
lombok.val (lombok.val): 15
Cleanup (lombok.Cleanup): 13
Test (org.junit.Test): 9
ByteArrayInputStream (java.io.ByteArrayInputStream): 3
DataOutputStream (java.io.DataOutputStream): 3
IOException (java.io.IOException): 3
InputStream (java.io.InputStream): 3
ArrayList (java.util.ArrayList): 3
SneakyThrows (lombok.SneakyThrows): 3
StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException): 2
AssertExtensions (io.pravega.test.common.AssertExtensions): 2
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 2
DataInputStream (java.io.DataInputStream): 2
Random (java.util.Random): 2
Assert (org.junit.Assert): 2
Charsets (com.google.common.base.Charsets): 1
ByteBuf (io.netty.buffer.ByteBuf): 1
Exceptions (io.pravega.common.Exceptions): 1
Futures (io.pravega.common.concurrent.Futures): 1