Use of io.pravega.common.io.ByteBufferOutputStream in project pravega by pravega.
The class CompositeByteArraySegmentTests, method testSliceRead.
/**
 * Tests the {@link CompositeByteArraySegment#slice} method while reading indirectly by invoking
 * {@link CompositeByteArraySegment#getReader(int, int)}.
 */
@Test
public void testSliceRead() {
    testProgressiveCopies((expectedData, s, offset, length) -> {
        @Cleanup val targetStream = new ByteBufferOutputStream(s.getLength());
        s.copyTo(targetStream);
        val targetData = targetStream.getData().getCopy();
        for (int sliceOffset = 0; sliceOffset <= s.getLength() / 2; sliceOffset++) {
            val sliceLength = s.getLength() - 2 * sliceOffset;
            InputStream reader = s.getReader(sliceOffset, sliceLength);
            if (sliceLength == 0) {
                Assert.assertEquals("Unexpected data read for empty slice.", -1, reader.read());
            } else {
                val actualData = StreamHelpers.readAll(reader, sliceLength);
                AssertExtensions.assertArrayEquals("Unexpected data sliced for step " + offset, targetData, sliceOffset, actualData, 0, actualData.length);
            }
        }
    });
}
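Distilled from the calls in the test above, here is a minimal sketch of the basic ByteBufferOutputStream round trip (imports elided, as in the other snippets on this page); the helper name roundTrip and the final assertion are illustrative, not taken from the Pravega sources:

private static byte[] roundTrip(byte[] input) throws IOException {
    // Pre-sizing matches the usage above; the no-arg constructor (seen in other snippets on this page) also works.
    ByteBufferOutputStream out = new ByteBufferOutputStream(input.length);
    out.write(input);
    // getData() exposes everything written so far; getCopy() materializes it into a new byte[].
    byte[] copy = out.getData().getCopy();
    // getReader() streams the same bytes back without another copy.
    InputStream reader = out.getData().getReader();
    byte[] fromReader = StreamHelpers.readAll(reader, out.size());
    assert Arrays.equals(copy, fromReader);
    return copy;
}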
Use of io.pravega.common.io.ByteBufferOutputStream in project pravega by pravega.
The class SegmentAggregatorTests, method testProgressiveReconcile.
/**
 * Tests the ability of the SegmentAggregator to reconcile operations as they are added to it: when it detects possible
 * data corruption but does not yet have all the operations it needs to reconcile, it must stay in reconciliation mode
 * until all disagreements have been resolved.
 */
@Test
public void testProgressiveReconcile() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = 1000;
    final int failEvery = 3;
    final int maxFlushLoopCount = 5;
    @Cleanup TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    @Cleanup ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    ArrayList<StorageOperation> appendOperations = new ArrayList<>();
    ArrayList<InputStream> appendData = new ArrayList<>();
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        appendOperations.add(appendOp);
        val adStream = new ByteBufferOutputStream((int) appendOp.getLength());
        getAppendData(appendOp, adStream, context);
        appendData.add(adStream.getData().getReader());
        writtenData.write(adStream.getData().getCopy());
    }
    // Add each operation one at a time, and every X appends, write ahead to storage (X-1 appends). This will force a
    // good mix of reconciles and normal appends.
    int errorCount = 0;
    int flushCount = 0;
    for (int i = 0; i < appendOperations.size(); i++) {
        StorageOperation op = appendOperations.get(i);
        context.segmentAggregator.add(op);
        if (i % failEvery == 0) {
            // Corrupt the storage by adding the next failEvery-1 ops to Storage.
            for (int j = i; j < i + failEvery - 1 && j < appendOperations.size(); j++) {
                long offset = context.storage.getStreamSegmentInfo(SEGMENT_NAME, TIMEOUT).join().getLength();
                context.storage.write(writeHandle(SEGMENT_NAME), offset, appendData.get(j), appendData.get(j).available(), TIMEOUT).join();
            }
        }
        // Force a flush by incrementing the time by a lot.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
        int flushLoopCount = 0;
        while (context.segmentAggregator.mustFlush()) {
            try {
                flushCount++;
                context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            } catch (Exception ex) {
                errorCount++;
                Assert.assertTrue("Expected a BadOffsetException.", Exceptions.unwrap(ex) instanceof BadOffsetException);
            }
            flushLoopCount++;
            AssertExtensions.assertLessThan("Too many flush-loops for a single attempt.", maxFlushLoopCount, flushLoopCount);
        }
    }
    AssertExtensions.assertGreaterThan("At least one flush was expected.", 0, flushCount);
    AssertExtensions.assertGreaterThan("At least one BadOffsetException was expected.", 0, errorCount);
    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
Use of io.pravega.common.io.ByteBufferOutputStream in project pravega by pravega.
The class TagRecord, method compressArrayOption.
private static ByteArraySegment compressArrayOption(final Set<String> tags) throws IOException {
    ByteBufferOutputStream baos = new ByteBufferOutputStream();
    DataOutputStream dout = new DataOutputStream(new DeflaterOutputStream(baos));
    for (String t : tags) {
        dout.writeUTF(t);
    }
    dout.flush();
    // Closing the DataOutputStream also finishes the DeflaterOutputStream, flushing any remaining compressed bytes.
    dout.close();
    return baos.getData();
}
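The page does not show the inverse of compressArrayOption. As a hedged sketch (assuming ByteArraySegment#getReader() as used in the snippets above plus standard java.io/java.util.zip classes, with an illustrative method name), the tags could be read back like this:

private static Set<String> decompressArrayOption(final ByteArraySegment compressed) throws IOException {
    Set<String> tags = new HashSet<>();
    try (DataInputStream din = new DataInputStream(new InflaterInputStream(compressed.getReader()))) {
        while (true) {
            // readUTF() mirrors the writeUTF() calls in compressArrayOption.
            tags.add(din.readUTF());
        }
    } catch (EOFException endOfStream) {
        // Expected: the inflated stream is exhausted and all tags have been read.
    }
    return tags;
}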
Use of io.pravega.common.io.ByteBufferOutputStream in project pravega by pravega.
The class WireCommandsTest, method testTableIterators.
private <T extends WireCommand> void testTableIterators(Function<WireCommands.TableIteratorArgs, T> createWireCommand) throws IOException {
    // Continuation Token.
    ByteBuf buf2 = buf.copy().setInt(0, Integer.MAX_VALUE);
    WireCommands.TableIteratorArgs args = new WireCommands.TableIteratorArgs(buf, Unpooled.EMPTY_BUFFER, Unpooled.EMPTY_BUFFER, Unpooled.EMPTY_BUFFER);
    T cmd = createWireCommand.apply(args);
    testCommand(cmd);
    // From/To.
    args = new WireCommands.TableIteratorArgs(Unpooled.EMPTY_BUFFER, Unpooled.EMPTY_BUFFER, buf, buf2);
    cmd = createWireCommand.apply(args);
    testCommand(cmd);
    // Test that we are able to read fields from an older version.
    ByteBuf buf3 = buf.copy().setInt(0, Integer.MAX_VALUE - 1);
    args = new WireCommands.TableIteratorArgs(buf, Unpooled.EMPTY_BUFFER, buf2, buf3);
    cmd = createWireCommand.apply(args);
    ByteBufferOutputStream bout = new ByteBufferOutputStream();
    cmd.writeFields(new DataOutputStream(bout));
    T cmd2 = createWireCommand.apply(new WireCommands.TableIteratorArgs(buf, Unpooled.EMPTY_BUFFER, Unpooled.EMPTY_BUFFER, Unpooled.EMPTY_BUFFER));
    // Serialize the full command, then drop the trailing From/To buffers (and their two Integer length prefixes) so the
    // remaining bytes match the older wire format, and verify they still deserialize to cmd2 (which has empty From/To).
    testCommandFromByteArray(bout.getData().slice(0, bout.size() - 2 * Integer.BYTES - buf2.readableBytes() - buf3.readableBytes()).getCopy(), cmd2);
}
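For reference, the serialize half of that round trip reduces to a few lines. This is a sketch that assumes only the calls already visible above (writeFields, getData(), getCopy()) and uses an illustrative helper name:

private static byte[] serialize(WireCommand cmd) throws IOException {
    ByteBufferOutputStream bout = new ByteBufferOutputStream();
    // writeFields() serializes the command body; getCopy() returns the bytes written so far.
    cmd.writeFields(new DataOutputStream(bout));
    return bout.getData().getCopy();
}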
Use of io.pravega.common.io.ByteBufferOutputStream in project pravega by pravega.
The class HDFSStorageTest, method testZombieFencing.
// region Fencing tests
/**
 * A special test case of fencing to verify the behavior of HDFSStorage in the presence of an instance that has
 * been fenced out. This case verifies that any ongoing writes properly fail upon fencing. Specifically, we have a
 * fenced-out instance that keeps writing, and we verify that its writes fail once the ownership changes.
 * Note that, per HDFS behavior, ongoing writes that execute before the rename still complete successfully.
 */
@Test(timeout = 60000)
public void testZombieFencing() throws Exception {
    final long epochCount = 30;
    final int writeSize = 1000;
    final String segmentName = "Segment";
    @Cleanup val writtenData = new ByteBufferOutputStream();
    final Random rnd = new Random(0);
    int currentEpoch = 1;
    // Create initial adapter.
    val currentStorage = new AtomicReference<Storage>();
    currentStorage.set(createStorage());
    currentStorage.get().initialize(currentEpoch);
    // Create the Segment and open it for the first time.
    val currentHandle = new AtomicReference<SegmentHandle>(currentStorage.get().create(segmentName, TIMEOUT).thenCompose(v -> currentStorage.get().openWrite(segmentName)).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));
    // Run a number of epochs.
    while (currentEpoch <= epochCount) {
        val oldStorage = currentStorage.get();
        val handle = currentHandle.get();
        val writeBuffer = new byte[writeSize];
        val appends = Futures.loop(() -> true, () -> {
            rnd.nextBytes(writeBuffer);
            return oldStorage.write(handle, writtenData.size(), new ByteArrayInputStream(writeBuffer), writeBuffer.length, TIMEOUT).thenRun(() -> writtenData.write(writeBuffer));
        }, executorService());
        // Create a new Storage adapter with a new epoch and open-write the Segment, remembering its handle.
        val newStorage = createStorage();
        try {
            newStorage.initialize(++currentEpoch);
            currentHandle.set(newStorage.openWrite(segmentName).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));
        } catch (Exception ex) {
            newStorage.close();
            throw ex;
        }
        currentStorage.set(newStorage);
        try {
            appends.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            Assert.fail("Continuous appends on older epoch Adapter did not fail.");
        } catch (Exception ex) {
            val cause = Exceptions.unwrap(ex);
            if (!(cause instanceof StorageNotPrimaryException || cause instanceof StreamSegmentSealedException || cause instanceof StreamSegmentNotExistsException)) {
                // We only expect the appends to fail because they were fenced out or the Segment was sealed.
                Assert.fail("Unexpected exception " + cause);
            }
        } finally {
            oldStorage.close();
        }
    }
    byte[] expectedData = writtenData.getData().getCopy();
    byte[] readData = new byte[expectedData.length];
    @Cleanup val readStorage = createStorage();
    readStorage.initialize(++currentEpoch);
    int bytesRead = readStorage.openRead(segmentName).thenCompose(handle -> readStorage.read(handle, 0, readData, 0, readData.length, TIMEOUT)).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertEquals("Unexpected number of bytes read.", readData.length, bytesRead);
    Assert.assertArrayEquals("Unexpected data read back.", expectedData, readData);
}