Use of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.
The class StreamRecordWriterTest, method getMockWriter:
private static ResultPartitionWriter getMockWriter(int numPartitions) throws Exception {
    BufferProvider mockProvider = mock(BufferProvider.class);
    when(mockProvider.requestBufferBlocking()).thenAnswer(new Answer<Buffer>() {

        @Override
        public Buffer answer(InvocationOnMock invocation) {
            return new Buffer(MemorySegmentFactory.allocateUnpooledSegment(4096), FreeingBufferRecycler.INSTANCE);
        }
    });
    ResultPartitionWriter mockWriter = mock(ResultPartitionWriter.class);
    when(mockWriter.getBufferProvider()).thenReturn(mockProvider);
    when(mockWriter.getNumberOfOutputChannels()).thenReturn(numPartitions);
    return mockWriter;
}
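For context, a minimal usage sketch pairing this mock with the StreamRecordWriter constructor that appears in OperatorChain.createStreamOutput below. The broadcast partitioner and the 100 ms flush timeout are arbitrary illustrative choices, not values from the original test:

// Hypothetical sketch: drive a StreamRecordWriter with the mocked ResultPartitionWriter.
// The partitioner and the flush timeout are illustrative only.
ResultPartitionWriter partitionWriter = getMockWriter(4);

StreamRecordWriter<SerializationDelegate<StreamRecord<String>>> recordWriter =
        new StreamRecordWriter<>(partitionWriter, new BroadcastPartitioner<String>(), 100L);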
Use of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.
The class StreamTask, method performCheckpoint:
private boolean performCheckpoint(CheckpointMetaData checkpointMetaData, CheckpointOptions checkpointOptions, CheckpointMetrics checkpointMetrics) throws Exception {
    LOG.debug("Starting checkpoint ({}) {} on task {}", checkpointMetaData.getCheckpointId(), checkpointOptions.getCheckpointType(), getName());
    synchronized (lock) {
        if (isRunning) {
            // we can do a checkpoint

            // Since both state checkpointing and downstream barrier emission occurs in this
            // lock scope, they are an atomic operation regardless of the order in which they occur.
            // Given this, we immediately emit the checkpoint barriers, so the downstream operators
            // can start their checkpoint work as soon as possible
            operatorChain.broadcastCheckpointBarrier(checkpointMetaData.getCheckpointId(), checkpointMetaData.getTimestamp(), checkpointOptions);
            checkpointState(checkpointMetaData, checkpointOptions, checkpointMetrics);
            return true;
        } else {
            // we cannot perform our checkpoint - let the downstream operators know that they
            // should not wait for any input from this operator

            // we cannot broadcast the cancellation markers on the 'operator chain', because it may not
            // yet be created
            final CancelCheckpointMarker message = new CancelCheckpointMarker(checkpointMetaData.getCheckpointId());
            Exception exception = null;
            for (ResultPartitionWriter output : getEnvironment().getAllWriters()) {
                try {
                    output.writeBufferToAllChannels(EventSerializer.toBuffer(message));
                } catch (Exception e) {
                    exception = ExceptionUtils.firstOrSuppressed(new Exception("Could not send cancel checkpoint marker to downstream tasks.", e), exception);
                }
            }
            if (exception != null) {
                throw exception;
            }
            return false;
        }
    }
}
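A hedged sketch of the caller side (not the actual StreamTask code): when performCheckpoint returns false, the task can decline the checkpoint through its Environment so the coordinator does not keep waiting for a barrier that will never come:

// Hypothetical caller sketch, assuming the surrounding StreamTask context.
boolean success = performCheckpoint(checkpointMetaData, checkpointOptions, checkpointMetrics);
if (!success) {
    // tell the checkpoint coordinator that this task cannot take part in the checkpoint
    getEnvironment().declineCheckpoint(
            checkpointMetaData.getCheckpointId(),
            new Exception("Task " + getName() + " was not running"));
}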
Use of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.
The class OperatorChain, method createStreamOutput:
private <T> RecordWriterOutput<T> createStreamOutput(StreamEdge edge, StreamConfig upStreamConfig, int outputIndex, Environment taskEnvironment, String taskName) {
    // the OutputTag is null unless this edge carries a side output
    OutputTag sideOutputTag = edge.getOutputTag();
    TypeSerializer outSerializer = null;
    if (edge.getOutputTag() != null) {
        // side output
        outSerializer = upStreamConfig.getTypeSerializerSideOut(edge.getOutputTag(), taskEnvironment.getUserClassLoader());
    } else {
        // main output
        outSerializer = upStreamConfig.getTypeSerializerOut(taskEnvironment.getUserClassLoader());
    }
    @SuppressWarnings("unchecked")
    StreamPartitioner<T> outputPartitioner = (StreamPartitioner<T>) edge.getPartitioner();
    LOG.debug("Using partitioner {} for output {} of task {}", outputPartitioner, outputIndex, taskName);
    ResultPartitionWriter bufferWriter = taskEnvironment.getWriter(outputIndex);
    // we initialize the partitioner here with the number of key groups (aka max. parallelism)
    if (outputPartitioner instanceof ConfigurableStreamPartitioner) {
        int numKeyGroups = bufferWriter.getNumTargetKeyGroups();
        if (0 < numKeyGroups) {
            ((ConfigurableStreamPartitioner) outputPartitioner).configure(numKeyGroups);
        }
    }
    StreamRecordWriter<SerializationDelegate<StreamRecord<T>>> output = new StreamRecordWriter<>(bufferWriter, outputPartitioner, upStreamConfig.getBufferTimeout());
    output.setMetricGroup(taskEnvironment.getMetricGroup().getIOMetricGroup());
    return new RecordWriterOutput<>(output, outSerializer, sideOutputTag, this);
}
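To illustrate why the partitioner needs the number of key groups, here is a hedged sketch of the key-group routing that a ConfigurableStreamPartitioner such as KeyGroupStreamPartitioner performs. KeyGroupRangeAssignment is a real Flink utility; the record variable and the keyOf(...) helper are hypothetical placeholders:

// Hypothetical routing sketch: map a record's key to a key group, then to an output channel.
// maxParallelism corresponds to the numKeyGroups value passed into configure(...) above.
int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup(keyOf(record), maxParallelism);
int channel = KeyGroupRangeAssignment.computeOperatorIndexForKeyGroup(maxParallelism, numberOfChannels, keyGroup);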
Use of org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter in project flink by apache.
The class StreamMockEnvironment, method addOutput:
public <T> void addOutput(final Queue<Object> outputList, final TypeSerializer<T> serializer) {
    try {
        // The record-oriented writers wrap the buffer writer. We mock it
        // to collect the returned buffers and deserialize the content to
        // the output list
        BufferProvider mockBufferProvider = mock(BufferProvider.class);
        when(mockBufferProvider.requestBufferBlocking()).thenAnswer(new Answer<Buffer>() {

            @Override
            public Buffer answer(InvocationOnMock invocationOnMock) throws Throwable {
                return new Buffer(MemorySegmentFactory.allocateUnpooledSegment(bufferSize), mock(BufferRecycler.class));
            }
        });
        ResultPartitionWriter mockWriter = mock(ResultPartitionWriter.class);
        when(mockWriter.getNumberOfOutputChannels()).thenReturn(1);
        when(mockWriter.getBufferProvider()).thenReturn(mockBufferProvider);
        final RecordDeserializer<DeserializationDelegate<T>> recordDeserializer = new AdaptiveSpanningRecordDeserializer<DeserializationDelegate<T>>();
        final NonReusingDeserializationDelegate<T> delegate = new NonReusingDeserializationDelegate<T>(serializer);
        // Add records and events from the buffer to the output list
        doAnswer(new Answer<Void>() {

            @Override
            public Void answer(InvocationOnMock invocationOnMock) throws Throwable {
                Buffer buffer = (Buffer) invocationOnMock.getArguments()[0];
                addBufferToOutputList(recordDeserializer, delegate, buffer, outputList);
                return null;
            }
        }).when(mockWriter).writeBuffer(any(Buffer.class), anyInt());
        doAnswer(new Answer<Void>() {

            @Override
            public Void answer(InvocationOnMock invocationOnMock) throws Throwable {
                Buffer buffer = (Buffer) invocationOnMock.getArguments()[0];
                addBufferToOutputList(recordDeserializer, delegate, buffer, outputList);
                return null;
            }
        }).when(mockWriter).writeBufferToAllChannels(any(Buffer.class));
        outputs.add(mockWriter);
    } catch (Throwable t) {
        t.printStackTrace();
        fail(t.getMessage());
    }
}
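The addBufferToOutputList helper referenced above is not shown here; the following is a simplified sketch of the record path it would follow, using the RecordDeserializer API (the real helper additionally has to recognize event buffers such as checkpoint barriers):

// Simplified, hypothetical sketch of turning one buffer back into deserialized records.
recordDeserializer.setNextBuffer(buffer);
while (true) {
    RecordDeserializer.DeserializationResult result = recordDeserializer.getNextRecord(delegate);
    if (result.isFullRecord()) {
        // a complete record was reassembled; hand the instance to the output list
        outputList.add(delegate.getInstance());
    }
    if (result.isBufferConsumed()) {
        // nothing more to read from this buffer
        break;
    }
}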