Use of org.apache.flink.runtime.io.network.partition.consumer.InputChannel.BufferAndAvailability in project flink by apache.
The class StreamTestSingleInputGate, method setupInputChannels:
@SuppressWarnings("unchecked")
private void setupInputChannels() throws IOException, InterruptedException {
    for (int i = 0; i < numInputChannels; i++) {
        final int channelIndex = i;
        final RecordSerializer<SerializationDelegate<Object>> recordSerializer =
                new SpanningRecordSerializer<SerializationDelegate<Object>>();
        final SerializationDelegate<Object> delegate =
                (SerializationDelegate<Object>) (SerializationDelegate<?>)
                        new SerializationDelegate<StreamElement>(new StreamElementSerializer<T>(serializer));

        inputQueues[channelIndex] = new ConcurrentLinkedQueue<InputValue<Object>>();
        inputChannels[channelIndex] = new TestInputChannel(inputGate, i);

        final Answer<BufferAndAvailability> answer = new Answer<BufferAndAvailability>() {
            @Override
            public BufferAndAvailability answer(InvocationOnMock invocationOnMock) throws Throwable {
                InputValue<Object> input = inputQueues[channelIndex].poll();

                if (input != null && input.isStreamEnd()) {
                    when(inputChannels[channelIndex].getInputChannel().isReleased()).thenReturn(true);
                    return new BufferAndAvailability(EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE), false);
                } else if (input != null && input.isStreamRecord()) {
                    Object inputElement = input.getStreamRecord();

                    final Buffer buffer = new Buffer(
                            MemorySegmentFactory.allocateUnpooledSegment(bufferSize),
                            mock(BufferRecycler.class));

                    recordSerializer.setNextBuffer(buffer);
                    delegate.setInstance(inputElement);
                    recordSerializer.addRecord(delegate);

                    // Call getCurrentBuffer to ensure size is set
                    return new BufferAndAvailability(recordSerializer.getCurrentBuffer(), false);
                } else if (input != null && input.isEvent()) {
                    AbstractEvent event = input.getEvent();
                    return new BufferAndAvailability(EventSerializer.toBuffer(event), false);
                } else {
                    synchronized (inputQueues[channelIndex]) {
                        inputQueues[channelIndex].wait();
                        return answer(invocationOnMock);
                    }
                }
            }
        };

        when(inputChannels[channelIndex].getInputChannel().getNextBuffer()).thenAnswer(answer);
        inputGate.setInputChannel(new IntermediateResultPartitionID(), inputChannels[channelIndex].getInputChannel());
    }
}
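At its core, the mocked answer above turns one in-memory element into a network Buffer and hands it out as a BufferAndAvailability. The standalone sketch below restates that serialize-one-element pattern against the same (older) RecordSerializer and Buffer APIs the snippet uses; the class and method names here are illustrative only, and FreeingBufferRecycler merely stands in for the test's mocked recycler.

import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.core.memory.MemorySegmentFactory;
import org.apache.flink.runtime.io.network.api.serialization.RecordSerializer;
import org.apache.flink.runtime.io.network.api.serialization.SpanningRecordSerializer;
import org.apache.flink.runtime.io.network.buffer.Buffer;
import org.apache.flink.runtime.io.network.buffer.FreeingBufferRecycler;
import org.apache.flink.runtime.io.network.partition.consumer.InputChannel.BufferAndAvailability;
import org.apache.flink.runtime.plugable.SerializationDelegate;

import java.io.IOException;

// Hedged sketch; the class and method names are illustrative, not Flink API.
final class SingleElementBuffers {

    // Serializes one element into a fresh unpooled Buffer and wraps it exactly like the
    // mocked Answer above: 'false' because nothing else is queued behind this buffer.
    static <T> BufferAndAvailability serializeOne(
            T element, TypeSerializer<T> serializer, int bufferSize) throws IOException {

        RecordSerializer<SerializationDelegate<T>> recordSerializer =
                new SpanningRecordSerializer<SerializationDelegate<T>>();
        SerializationDelegate<T> delegate = new SerializationDelegate<T>(serializer);

        // FreeingBufferRecycler stands in for the test's mock(BufferRecycler.class)
        Buffer buffer = new Buffer(
                MemorySegmentFactory.allocateUnpooledSegment(bufferSize),
                FreeingBufferRecycler.INSTANCE);

        recordSerializer.setNextBuffer(buffer);   // target buffer for the serialized bytes
        delegate.setInstance(element);
        recordSerializer.addRecord(delegate);     // assumes the element fits into one buffer

        // getCurrentBuffer() finalizes the buffer's size before handing it out
        return new BufferAndAvailability(recordSerializer.getCurrentBuffer(), false);
    }
}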
Use of org.apache.flink.runtime.io.network.partition.consumer.InputChannel.BufferAndAvailability in project flink by apache.
The class SequenceNumberingViewReader, method getNextBuffer:
public BufferAndAvailability getNextBuffer() throws IOException, InterruptedException {
    Buffer next = subpartitionView.getNextBuffer();
    if (next != null) {
        long remaining = numBuffersAvailable.decrementAndGet();
        sequenceNumber++;

        if (remaining >= 0) {
            return new BufferAndAvailability(next, remaining > 0);
        } else {
            throw new IllegalStateException("no buffer available");
        }
    } else {
        return null;
    }
}
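The contract here has three outcomes for the caller: a null return means the view has nothing buffered right now, moreAvailable() == true means further buffers are already queued behind the returned one, and moreAvailable() == false means the caller should stop polling and wait for the next data notification. A minimal sketch of a loop honoring that flag follows; the drain helper and its Consumer callback are illustrative, not Flink API, and a real version would have to live next to SequenceNumberingViewReader in the netty package.

// Hedged sketch: drain one reader according to the availability flag returned above.
static void drain(SequenceNumberingViewReader reader, java.util.function.Consumer<Buffer> process)
        throws IOException, InterruptedException {
    BufferAndAvailability next;
    while ((next = reader.getNextBuffer()) != null) {
        process.accept(next.buffer()); // hand the buffer to the caller
        if (!next.moreAvailable()) {
            // nothing else is queued right now; wait for the next
            // data notification instead of polling the reader again
            break;
        }
    }
}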
Use of org.apache.flink.runtime.io.network.partition.consumer.InputChannel.BufferAndAvailability in project flink by apache.
The class SingleInputGate, method getNextBufferOrEvent:
// ------------------------------------------------------------------------
// Consume
// ------------------------------------------------------------------------

@Override
public BufferOrEvent getNextBufferOrEvent() throws IOException, InterruptedException {
    if (hasReceivedAllEndOfPartitionEvents) {
        return null;
    }

    if (isReleased) {
        throw new IllegalStateException("Released");
    }

    requestPartitions();

    InputChannel currentChannel;
    boolean moreAvailable;
    synchronized (inputChannelsWithData) {
        while (inputChannelsWithData.size() == 0) {
            if (isReleased) {
                throw new IllegalStateException("Released");
            }
            inputChannelsWithData.wait();
        }

        currentChannel = inputChannelsWithData.remove();
        moreAvailable = inputChannelsWithData.size() > 0;
    }

    final BufferAndAvailability result = currentChannel.getNextBuffer();

    // Sanity check that notifications only happen when data is available
    if (result == null) {
        throw new IllegalStateException("Bug in input gate/channel logic: input gate got "
                + "notified by channel about available data, but none was available.");
    }

    // The channel was removed from the queue above; re-add it if it has more data,
    // because otherwise no further "non-empty" notification will come for that channel
    if (result.moreAvailable()) {
        queueChannel(currentChannel);
    }

    final Buffer buffer = result.buffer();
    if (buffer.isBuffer()) {
        return new BufferOrEvent(buffer, currentChannel.getChannelIndex(), moreAvailable);
    } else {
        final AbstractEvent event = EventSerializer.fromBuffer(buffer, getClass().getClassLoader());

        if (event.getClass() == EndOfPartitionEvent.class) {
            channelsWithEndOfPartitionEvents.set(currentChannel.getChannelIndex());

            if (channelsWithEndOfPartitionEvents.cardinality() == numberOfInputChannels) {
                hasReceivedAllEndOfPartitionEvents = true;
            }

            currentChannel.notifySubpartitionConsumed();
            currentChannel.releaseAllResources();
        }

        return new BufferOrEvent(event, currentChannel.getChannelIndex(), moreAvailable);
    }
}
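Seen from the consuming task, getNextBufferOrEvent() is called in a loop until it returns null, which only happens once every channel has delivered its EndOfPartitionEvent. A hedged sketch of such a loop, using BufferOrEvent's isBuffer()/getBuffer()/getEvent() accessors; the wrapper class and method here are illustrative, not Flink API.

import org.apache.flink.runtime.event.AbstractEvent;
import org.apache.flink.runtime.io.network.partition.consumer.BufferOrEvent;
import org.apache.flink.runtime.io.network.partition.consumer.SingleInputGate;

import java.io.IOException;

// Hedged sketch; the wrapper class and method names are illustrative, not Flink API.
final class GateConsumer {

    static void consume(SingleInputGate gate) throws IOException, InterruptedException {
        BufferOrEvent boe;
        // getNextBufferOrEvent() returns null once every channel has delivered EndOfPartitionEvent
        while ((boe = gate.getNextBufferOrEvent()) != null) {
            if (boe.isBuffer()) {
                try {
                    // deserialize records from boe.getBuffer() here
                } finally {
                    boe.getBuffer().recycle(); // network buffers must always be handed back
                }
            } else {
                // EndOfPartitionEvent bookkeeping already happened inside the gate (see above);
                // other events, e.g. checkpoint barriers, are left to the caller
                AbstractEvent event = boe.getEvent();
            }
        }
    }
}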
Use of org.apache.flink.runtime.io.network.partition.consumer.InputChannel.BufferAndAvailability in project flink by apache.
The class PartitionRequestQueue, method writeAndFlushNextMessageIfPossible:
private void writeAndFlushNextMessageIfPossible(final Channel channel) throws IOException {
    if (fatalError) {
        return;
    }

    // The logic here is very similar to the combined input gate and local
    // input channel logic. You can think of this class acting as the input
    // gate and the consumed views as the local input channels.

    BufferAndAvailability next = null;
    try {
        if (channel.isWritable()) {
            while (true) {
                SequenceNumberingViewReader reader = nonEmptyReader.poll();

                // No reader with available data; this is allowed here because
                // of the write callbacks that are executed after each write.
                if (reader == null) {
                    return;
                }

                next = reader.getNextBuffer();
                if (next == null) {
                    if (reader.isReleased()) {
                        markAsReleased(reader.getReceiverId());
                        Throwable cause = reader.getFailureCause();

                        if (cause != null) {
                            ErrorResponse msg = new ErrorResponse(
                                    new ProducerFailedException(cause),
                                    reader.getReceiverId());
                            ctx.writeAndFlush(msg);
                        }
                    } else {
                        IllegalStateException err = new IllegalStateException(
                                "Bug in Netty consumer logic: reader queue got notified by partition "
                                        + "about available data, but none was available.");
                        handleException(ctx.channel(), err);
                        return;
                    }
                } else {
                    // The reader was polled above; re-add it if it has more data queued, because
                    // otherwise no "non-empty" notification will come for that reader from the queue.
                    if (next.moreAvailable()) {
                        nonEmptyReader.add(reader);
                    }

                    BufferResponse msg = new BufferResponse(
                            next.buffer(),
                            reader.getSequenceNumber(),
                            reader.getReceiverId());

                    if (isEndOfPartitionEvent(next.buffer())) {
                        reader.notifySubpartitionConsumed();
                        reader.releaseAllResources();
                        markAsReleased(reader.getReceiverId());
                    }

                    // Write and flush and wait until this is done before
                    // trying to continue with the next buffer.
                    channel.writeAndFlush(msg).addListener(writeListener);

                    return;
                }
            }
        }
    } catch (Throwable t) {
        if (next != null) {
            next.buffer().recycle();
        }
        throw new IOException(t.getMessage(), t);
    }
}
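The isEndOfPartitionEvent(...) helper used above is not shown in this snippet. One plausible way to implement it, sketched here as a private member of PartitionRequestQueue and reusing only the EventSerializer API already used by SingleInputGate above; the actual Flink helper may take a more direct shortcut.

// Hedged sketch of the helper used above; the real implementation may differ.
private boolean isEndOfPartitionEvent(Buffer buffer) throws IOException {
    if (buffer.isBuffer()) {
        return false; // a plain data buffer can never be an event
    }
    // Assumption: deserializing reads the backing memory without consuming the buffer itself
    AbstractEvent event = EventSerializer.fromBuffer(buffer, getClass().getClassLoader());
    return event.getClass() == EndOfPartitionEvent.class;
}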