Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class EventSerializerTest, method testToBuffer.
@Test
public void testToBuffer() throws IOException {
    for (AbstractEvent evt : events) {
        Buffer buffer = EventSerializer.toBuffer(evt, false);
        assertFalse(buffer.isBuffer());
        assertTrue(buffer.readableBytes() > 0);
        assertFalse(buffer.isRecycled());
        if (evt instanceof CheckpointBarrier) {
            assertTrue(buffer.getDataType().isBlockingUpstream());
        } else {
            assertEquals(Buffer.DataType.EVENT_BUFFER, buffer.getDataType());
        }
    }
}
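EventSerializer also deserializes events back out of such buffers. A minimal round-trip sketch, assuming EndOfPartitionEvent.INSTANCE and the context class loader (neither appears in the test above):

// Sketch only, not part of EventSerializerTest.
Buffer buffer = EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE, false);
AbstractEvent restored =
        EventSerializer.fromBuffer(buffer, Thread.currentThread().getContextClassLoader());
// Event buffers are never data buffers, matching the assertion in the test.
assertFalse(buffer.isBuffer());
assertEquals(EndOfPartitionEvent.INSTANCE, restored);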
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class SpanningWrapperTest, method toByteArray.
private byte[] toByteArray(CloseableIterator<Buffer> unconsumed) {
    final List<Buffer> buffers = new ArrayList<>();
    try {
        unconsumed.forEachRemaining(buffers::add);
        byte[] result = new byte[buffers.stream().mapToInt(Buffer::readableBytes).sum()];
        int offset = 0;
        for (Buffer buffer : buffers) {
            int len = buffer.readableBytes();
            buffer.getNioBuffer(0, len).get(result, offset, len);
            offset += len;
        }
        return result;
    } finally {
        buffers.forEach(Buffer::recycleBuffer);
    }
}
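The same read-then-recycle pattern applies to a single Buffer. A self-contained sketch, assuming an unpooled segment and FreeingBufferRecycler (assumptions; the test obtains its buffers from SpanningWrapper instead):

// Sketch: back a NetworkBuffer with an unpooled segment, write three bytes,
// copy them out through the NIO view, then recycle exactly once.
MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(32);
NetworkBuffer buffer = new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE);
buffer.writeBytes(new byte[] {1, 2, 3});
byte[] out = new byte[buffer.readableBytes()];
buffer.getNioBuffer(0, out.length).get(out, 0, out.length);
buffer.recycleBuffer(); // frees the segment through the recycler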
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class SpanningWrapperTest, method testLargeUnconsumedSegment.
@Test
public void testLargeUnconsumedSegment() throws Exception {
    int recordLen = 100;
    int firstChunk = (int) (recordLen * .9);
    int spillingThreshold = (int) (firstChunk * .9);
    byte[] record1 = recordBytes(recordLen);
    byte[] record2 = recordBytes(recordLen * 2);
    File nonExecutableDir = folder.newFolder();
    nonExecutableDir.setExecutable(false);
    // Always pick 'nonExecutableDir' first as the spilling channel tmp dir,
    // thus triggering an IOException.
    SpanningWrapper spanningWrapper =
            new SpanningWrapper(
                    new String[] {
                        folder.newFolder().getAbsolutePath(),
                        nonExecutableDir.getAbsolutePath() + File.separator + "pathDoesNotExist"
                    },
                    spillingThreshold,
                    recordLen);
    spanningWrapper.transferFrom(wrapNonSpanning(record1, firstChunk), recordLen);
    spanningWrapper.addNextChunkFromMemorySegment(
            wrap(record1), firstChunk, recordLen - firstChunk + LENGTH_BYTES);
    spanningWrapper.addNextChunkFromMemorySegment(wrap(record2), 0, record2.length);
    CloseableIterator<Buffer> unconsumedSegment = spanningWrapper.getUnconsumedSegment();
    // read out from file
    spanningWrapper.getInputView().readFully(new byte[recordLen], 0, recordLen);
    // clear any leftover
    spanningWrapper.transferLeftOverTo(new NonSpanningWrapper());
    // overwrite with new data
    spanningWrapper.transferFrom(wrapNonSpanning(recordBytes(recordLen), recordLen), recordLen);
    nonExecutableDir.setExecutable(true);
    assertArrayEquals(concat(record1, record2), toByteArray(unconsumedSegment));
}
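The helpers recordBytes and concat are not shown in this snippet. Plausible shapes, assuming recordBytes prefixes a random payload with a LENGTH_BYTES-sized length header (an assumption inferred from usage, not the test's actual code):

// Hypothetical helper shapes; the real SpanningWrapperTest implementations may differ.
private byte[] recordBytes(int recordLen) {
    byte[] bytes = new byte[LENGTH_BYTES + recordLen];
    ThreadLocalRandom.current().nextBytes(bytes);
    ByteBuffer.wrap(bytes).putInt(recordLen); // length header first (assumption)
    return bytes;
}

private static byte[] concat(byte[] first, byte[] second) {
    byte[] result = Arrays.copyOf(first, first.length + second.length);
    System.arraycopy(second, 0, result, first.length, second.length);
    return result;
}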
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class ChannelPersistenceITCase, method collectBytes.
private <T> byte[] collectBytes(
        SupplierWithException<Optional<T>, Exception> entrySupplier,
        Function<T, Buffer> bufferExtractor) throws Exception {
    ArrayList<Buffer> buffers = new ArrayList<>();
    for (Optional<T> entry = entrySupplier.get(); entry.isPresent(); entry = entrySupplier.get()) {
        entry.map(bufferExtractor)
                .filter(buffer -> buffer.getDataType().isBuffer())
                .ifPresent(buffers::add);
    }
    ByteBuffer result =
            ByteBuffer.wrap(new byte[buffers.stream().mapToInt(Buffer::getSize).sum()]);
    buffers.forEach(buffer -> {
        result.put(buffer.getNioBufferReadable());
        buffer.recycleBuffer();
    });
    return result.array();
}
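One way to exercise collectBytes is with a queue-backed supplier that eventually empties out; the queue here is an illustrative fixture, not part of the test:

// Illustrative usage only: drain an in-memory queue of buffers until the
// supplier yields Optional.empty(), then reassemble their bytes.
Queue<Buffer> queue = new ArrayDeque<>(someBuffers); // 'someBuffers' is hypothetical
SupplierWithException<Optional<Buffer>, Exception> supplier =
        () -> Optional.ofNullable(queue.poll());
byte[] allBytes = collectBytes(supplier, Function.identity());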
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
The class DemultiplexingRecordDeserializerTest, method testWatermarks.
/**
 * Tests that a watermark is only forwarded once watermarks have been received
 * from all virtual channels.
 */
@Test
public void testWatermarks() throws IOException {
    DemultiplexingRecordDeserializer<Long> deserializer =
            DemultiplexingRecordDeserializer.create(
                    new InputChannelInfo(0, 0),
                    rescalingDescriptor(to(0, 1), array(mappings(to(0, 1), to(4, 5))), emptySet()),
                    unused ->
                            new SpillingAdaptiveSpanningRecordDeserializer<>(
                                    ioManager.getSpillingDirectoriesPaths()),
                    unused -> RecordFilter.all());
    assertEquals(4, deserializer.getVirtualChannelSelectors().size());
    for (Iterator<SubtaskConnectionDescriptor> iterator =
                    deserializer.getVirtualChannelSelectors().iterator();
            iterator.hasNext(); ) {
        SubtaskConnectionDescriptor selector = iterator.next();
        MemorySegment memorySegment = allocateUnpooledSegment(128);
        try (BufferBuilder bufferBuilder = createBufferBuilder(memorySegment)) {
            final long ts =
                    42L + selector.getInputSubtaskIndex() + selector.getOutputSubtaskIndex();
            Buffer buffer = write(bufferBuilder, new Watermark(ts));
            deserializer.select(selector);
            deserializer.setNextBuffer(buffer);
        }
        if (iterator.hasNext()) {
            assertEquals(Collections.emptyList(), read(deserializer));
        } else {
            // last channel: the forwarded watermark is the minimum, 42 + 0 + 0
            assertEquals(Arrays.asList(new Watermark(42)), read(deserializer));
        }
        assertTrue(memorySegment.isFreed());
    }
}
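Why the expected value is 42: each virtual channel writes 42 + inputSubtaskIndex + outputSubtaskIndex, and the deserializer forwards the minimum across its four channels. Illustrative arithmetic only:

// Timestamps written for (input, output) index pairs (0,0), (0,1), (1,0), (1,1);
// the forwarded watermark is their minimum.
long[] written = {42 + 0 + 0, 42 + 0 + 1, 42 + 1 + 0, 42 + 1 + 1};
long forwarded = Arrays.stream(written).min().getAsLong(); // 42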