Use of org.apache.flink.runtime.io.network.buffer.BufferBuilder in project flink by splunk.
From class PipelinedSubpartitionWithReadViewTest, method testAddEmptyNonFinishedBuffer.
@Test
public void testAddEmptyNonFinishedBuffer() throws IOException {
    assertEquals(0, availablityListener.getNumNotifications());

    BufferBuilder bufferBuilder = createBufferBuilder();
    subpartition.add(bufferBuilder.createBufferConsumer());

    // An empty, non-finished buffer makes no data available: no notification, nothing to read.
    assertEquals(0, availablityListener.getNumNotifications());
    assertNull(readView.getNextBuffer());

    bufferBuilder.finish();
    bufferBuilder = createBufferBuilder();
    subpartition.add(bufferBuilder.createBufferConsumer());

    // Finishing the first buffer and adding a second one notifies the listener and bumps the
    // backlog, but the finished buffer is still empty, so polling yields nothing and the empty
    // buffer is dropped from the backlog.
    assertEquals(1, subpartition.getBuffersInBacklogUnsafe());
    assertEquals(1, availablityListener.getNumNotifications());
    assertNull(readView.getNextBuffer());
    assertEquals(0, subpartition.getBuffersInBacklogUnsafe());
}
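For context, the handshake those assertions rely on looks roughly as follows. This is a sketch, not part of the test class: it assumes the same statically imported test helper (BufferBuilderTestUtils.createBufferBuilder) plus java.nio.ByteBuffer and the usual JUnit asserts.

// Sketch only: a BufferConsumer exposes data only after bytes are committed to its BufferBuilder.
BufferBuilder bufferBuilder = createBufferBuilder();
BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();

// Nothing appended and not finished yet, so the consumer has no data to hand out.
assertFalse(bufferConsumer.isDataAvailable());

// Commit a few bytes and finish the builder; now the consumer can build a readable Buffer.
bufferBuilder.appendAndCommit(ByteBuffer.wrap(new byte[] {1, 2, 3, 4}));
bufferBuilder.finish();
assertTrue(bufferConsumer.isDataAvailable());

Buffer buffer = bufferConsumer.build();
assertEquals(4, buffer.readableBytes());
buffer.recycleBuffer();
bufferConsumer.close();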
Use of org.apache.flink.runtime.io.network.buffer.BufferBuilder in project flink by splunk.
From class ResultSubpartitionRecoveredStateHandler, method recover.
@Override
public void recover(
        ResultSubpartitionInfo subpartitionInfo,
        int oldSubtaskIndex,
        BufferWithContext<BufferBuilder> bufferWithContext)
        throws IOException {
    try (BufferBuilder bufferBuilder = bufferWithContext.context) {
        try (BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumerFromBeginning()) {
            bufferBuilder.finish();
            if (bufferConsumer.isDataAvailable()) {
                final List<CheckpointedResultSubpartition> channels =
                        getMappedChannels(subpartitionInfo);
                for (final CheckpointedResultSubpartition channel : channels) {
                    // channel selector is created from the downstream's point of view: the
                    // subtask of downstream = subpartition index of recovered buffer
                    final SubtaskConnectionDescriptor channelSelector =
                            new SubtaskConnectionDescriptor(
                                    subpartitionInfo.getSubPartitionIdx(), oldSubtaskIndex);
                    channel.addRecovered(EventSerializer.toBufferConsumer(channelSelector, false));
                    channel.addRecovered(bufferConsumer.copy());
                }
            }
        }
    }
}
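As a small illustration of the orientation described in the comment above, the selector simply pairs the downstream (input) subtask index with the upstream (output) subtask index. The indices below are made up, not taken from the recovery path:

// Hypothetical indices: first argument = input (downstream) subtask, second = output (upstream) subtask.
SubtaskConnectionDescriptor channelSelector = new SubtaskConnectionDescriptor(3, 1);
assertEquals(3, channelSelector.getInputSubtaskIndex());
assertEquals(1, channelSelector.getOutputSubtaskIndex());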
Use of org.apache.flink.runtime.io.network.buffer.BufferBuilder in project flink by splunk.
From class ResultSubpartitionRecoveredStateHandler, method getBuffer.
@Override
public BufferWithContext<BufferBuilder> getBuffer(ResultSubpartitionInfo subpartitionInfo)
        throws IOException, InterruptedException {
    // request the buffer from any mapped subpartition as they all will receive the same buffer
    final List<CheckpointedResultSubpartition> channels = getMappedChannels(subpartitionInfo);
    BufferBuilder bufferBuilder = channels.get(0).requestBufferBuilderBlocking();
    return new BufferWithContext<>(wrap(bufferBuilder), bufferBuilder);
}
Use of org.apache.flink.runtime.io.network.buffer.BufferBuilder in project flink by splunk.
From class DemultiplexingRecordDeserializerTest, method testUpscale.
/**
* Tests {@link SubtaskConnectionDescriptor} by mixing buffers from 4 different virtual
* channels.
*/
@Test
public void testUpscale() throws IOException {
    DemultiplexingRecordDeserializer<Long> deserializer =
            DemultiplexingRecordDeserializer.create(
                    new InputChannelInfo(2, 0),
                    rescalingDescriptor(
                            to(0, 1), array(mappings(), mappings(), mappings(to(2, 3), to(4, 5))), emptySet()),
                    unused -> new SpillingAdaptiveSpanningRecordDeserializer<>(
                            ioManager.getSpillingDirectoriesPaths()),
                    unused -> RecordFilter.all());
    assertEquals(
            Sets.newSet(
                    new SubtaskConnectionDescriptor(0, 2),
                    new SubtaskConnectionDescriptor(0, 3),
                    new SubtaskConnectionDescriptor(1, 2),
                    new SubtaskConnectionDescriptor(1, 3)),
            deserializer.getVirtualChannelSelectors());

    for (int i = 0; i < 100; i++) {
        SubtaskConnectionDescriptor selector =
                Iterables.get(deserializer.getVirtualChannelSelectors(), random.nextInt(4));
        long start = selector.getInputSubtaskIndex() << 4 | selector.getOutputSubtaskIndex();
        MemorySegment memorySegment = allocateUnpooledSegment(128);
        try (BufferBuilder bufferBuilder = createBufferBuilder(memorySegment)) {
            Buffer buffer = writeLongs(bufferBuilder, start + 1L, start + 2L, start + 3L);
            deserializer.select(selector);
            deserializer.setNextBuffer(buffer);
        }
        assertEquals(Arrays.asList(start + 1L, start + 2L, start + 3L), readLongs(deserializer));
        assertTrue(memorySegment.isFreed());
    }
}
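The start value above packs both subtask indices of the selected virtual channel into a single long, so every channel writes a distinct, recognizable sequence. A worked example with illustrative indices:

// For inputSubtaskIndex = 1 and outputSubtaskIndex = 3 (illustrative values):
long start = 1L << 4 | 3L; // 16 | 3 = 19
// that virtual channel therefore writes the longs 20, 21 and 22, which readLongs() must return.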
Use of org.apache.flink.runtime.io.network.buffer.BufferBuilder in project flink by splunk.
From class DemultiplexingRecordDeserializerTest, method testWatermarks.
/**
 * Tests that watermarks are only forwarded once a watermark has been received on every virtual
 * channel.
 */
@Test
public void testWatermarks() throws IOException {
    DemultiplexingRecordDeserializer<Long> deserializer =
            DemultiplexingRecordDeserializer.create(
                    new InputChannelInfo(0, 0),
                    rescalingDescriptor(to(0, 1), array(mappings(to(0, 1), to(4, 5))), emptySet()),
                    unused -> new SpillingAdaptiveSpanningRecordDeserializer<>(
                            ioManager.getSpillingDirectoriesPaths()),
                    unused -> RecordFilter.all());
    assertEquals(4, deserializer.getVirtualChannelSelectors().size());

    for (Iterator<SubtaskConnectionDescriptor> iterator =
                    deserializer.getVirtualChannelSelectors().iterator();
            iterator.hasNext(); ) {
        SubtaskConnectionDescriptor selector = iterator.next();
        MemorySegment memorySegment = allocateUnpooledSegment(128);
        try (BufferBuilder bufferBuilder = createBufferBuilder(memorySegment)) {
            final long ts = 42L + selector.getInputSubtaskIndex() + selector.getOutputSubtaskIndex();
            Buffer buffer = write(bufferBuilder, new Watermark(ts));
            deserializer.select(selector);
            deserializer.setNextBuffer(buffer);
        }
        if (iterator.hasNext()) {
            // not all virtual channels have delivered a watermark yet, so nothing is forwarded
            assertEquals(Collections.emptyList(), read(deserializer));
        } else {
            // last channel, min should be 42 + 0 + 0
            assertEquals(Arrays.asList(new Watermark(42)), read(deserializer));
        }
        assertTrue(memorySegment.isFreed());
    }
}
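A minimal sketch of the rule the last assertion relies on; the array below is illustrative, not the deserializer's internal state. A watermark is forwarded only once every virtual channel has delivered one, and the forwarded value is the minimum across channels:

// Timestamps produced by the loop above (42 + inputSubtaskIndex + outputSubtaskIndex per channel):
long[] receivedWatermarks = {42L, 43L, 43L, 44L};
// Only after all four channels have reported does a watermark go downstream, with the minimum value.
long forwarded = Arrays.stream(receivedWatermarks).min().getAsLong(); // 42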