Use of org.apache.flink.runtime.plugable.SerializationDelegate in project flink by apache.
The class StreamTestSingleInputGate, method setupInputChannels:
@SuppressWarnings("unchecked")
private void setupInputChannels() throws IOException, InterruptedException {
    for (int i = 0; i < numInputChannels; i++) {
        final int channelIndex = i;
        final RecordSerializer<SerializationDelegate<Object>> recordSerializer = new SpanningRecordSerializer<SerializationDelegate<Object>>();
        final SerializationDelegate<Object> delegate = (SerializationDelegate<Object>) (SerializationDelegate<?>) new SerializationDelegate<StreamElement>(new StreamElementSerializer<T>(serializer));
        inputQueues[channelIndex] = new ConcurrentLinkedQueue<InputValue<Object>>();
        inputChannels[channelIndex] = new TestInputChannel(inputGate, i);
        // Answer that serves the next queued input value as a serialized Buffer whenever the mocked channel is polled.
        final Answer<BufferAndAvailability> answer = new Answer<BufferAndAvailability>() {

            @Override
            public BufferAndAvailability answer(InvocationOnMock invocationOnMock) throws Throwable {
                InputValue<Object> input = inputQueues[channelIndex].poll();
                if (input != null && input.isStreamEnd()) {
                    when(inputChannels[channelIndex].getInputChannel().isReleased()).thenReturn(true);
                    return new BufferAndAvailability(EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE), false);
                } else if (input != null && input.isStreamRecord()) {
                    Object inputElement = input.getStreamRecord();
                    final Buffer buffer = new Buffer(MemorySegmentFactory.allocateUnpooledSegment(bufferSize), mock(BufferRecycler.class));
                    recordSerializer.setNextBuffer(buffer);
                    delegate.setInstance(inputElement);
                    recordSerializer.addRecord(delegate);
                    // Call getCurrentBuffer to ensure the buffer's size is set.
                    return new BufferAndAvailability(recordSerializer.getCurrentBuffer(), false);
                } else if (input != null && input.isEvent()) {
                    AbstractEvent event = input.getEvent();
                    return new BufferAndAvailability(EventSerializer.toBuffer(event), false);
                } else {
                    // Nothing queued yet: block until the test enqueues more input, then retry.
                    synchronized (inputQueues[channelIndex]) {
                        inputQueues[channelIndex].wait();
                        return answer(invocationOnMock);
                    }
                }
            }
        };
        when(inputChannels[channelIndex].getInputChannel().getNextBuffer()).thenAnswer(answer);
        inputGate.setInputChannel(new IntermediateResultPartitionID(), inputChannels[channelIndex].getInputChannel());
    }
}
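Stripped of the Mockito plumbing, the serialization pattern the mocked channel performs above boils down to the following minimal sketch. It assumes the same (older) Flink classes used in the snippet above; the String element type, the 32 KB buffer size, and the mocked BufferRecycler are placeholder choices, not part of the original test.

// Minimal sketch (assumed setup): serialize one stream element into a network Buffer.
private static Buffer serializeOneElement() throws IOException {
    RecordSerializer<SerializationDelegate<StreamElement>> recordSerializer =
            new SpanningRecordSerializer<SerializationDelegate<StreamElement>>();
    SerializationDelegate<StreamElement> delegate =
            new SerializationDelegate<StreamElement>(new StreamElementSerializer<String>(StringSerializer.INSTANCE));
    // Placeholder buffer size and recycler; the test above uses `bufferSize` and a Mockito mock as well.
    Buffer buffer = new Buffer(MemorySegmentFactory.allocateUnpooledSegment(32 * 1024), mock(BufferRecycler.class));
    recordSerializer.setNextBuffer(buffer);
    delegate.setInstance(new StreamRecord<String>("hello"));
    recordSerializer.addRecord(delegate);
    // getCurrentBuffer() finalizes the write position so the Buffer's size is set correctly.
    return recordSerializer.getCurrentBuffer();
}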
Use of org.apache.flink.runtime.plugable.SerializationDelegate in project flink by apache.
The class BatchTask, method getOutputCollector:
// --------------------------------------------------------------------------------------------
// Result Shipping and Chained Tasks
// --------------------------------------------------------------------------------------------
/**
* Creates the {@link Collector} for the given task, as described by the given configuration. The
* output collector contains the writers that forward the data to the different tasks that the given task
* is connected to. Each writer applies the partitioning as described in the configuration.
*
* @param task The task that the output collector is created for.
* @param config The configuration describing the output shipping strategies.
* @param cl The classloader used to load user-defined types.
* @param eventualOutputs The list to which the created output writers are added, one for each output.
* @param outputOffset The offset at which to start getting the writers for the outputs.
* @param numOutputs The number of outputs described in the configuration.
*
* @return The OutputCollector that data produced in this task is submitted to.
*/
public static <T> Collector<T> getOutputCollector(AbstractInvokable task, TaskConfig config, ClassLoader cl, List<RecordWriter<?>> eventualOutputs, int outputOffset, int numOutputs) throws Exception {
    if (numOutputs == 0) {
        return null;
    }
    // get the factory for the serializer
    final TypeSerializerFactory<T> serializerFactory = config.getOutputSerializer(cl);
    final List<RecordWriter<SerializationDelegate<T>>> writers = new ArrayList<>(numOutputs);
    // create a writer for each output
    for (int i = 0; i < numOutputs; i++) {
        // create the OutputEmitter from output ship strategy
        final ShipStrategyType strategy = config.getOutputShipStrategy(i);
        final int indexInSubtaskGroup = task.getIndexInSubtaskGroup();
        final TypeComparatorFactory<T> compFactory = config.getOutputComparator(i, cl);
        final ChannelSelector<SerializationDelegate<T>> oe;
        if (compFactory == null) {
            oe = new OutputEmitter<T>(strategy, indexInSubtaskGroup);
        } else {
            final DataDistribution dataDist = config.getOutputDataDistribution(i, cl);
            final Partitioner<?> partitioner = config.getOutputPartitioner(i, cl);
            final TypeComparator<T> comparator = compFactory.createComparator();
            oe = new OutputEmitter<T>(strategy, indexInSubtaskGroup, comparator, partitioner, dataDist);
        }
        final RecordWriter<SerializationDelegate<T>> recordWriter = new RecordWriter<SerializationDelegate<T>>(task.getEnvironment().getWriter(outputOffset + i), oe);
        recordWriter.setMetricGroup(task.getEnvironment().getMetricGroup().getIOMetricGroup());
        writers.add(recordWriter);
    }
    if (eventualOutputs != null) {
        eventualOutputs.addAll(writers);
    }
    return new OutputCollector<T>(writers, serializerFactory.getSerializer());
}
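For orientation, the call pattern on the returned collector looks roughly like the sketch below. This is not Flink's actual driver code; the helper name emitAll and its parameters are made up, and only the calls visible above (getOutputCollector, collect, close) are used.

// Hypothetical helper showing only the call pattern; not part of BatchTask.
static <T> void emitAll(AbstractInvokable task, TaskConfig config, ClassLoader cl, int numOutputs, Iterable<T> records) throws Exception {
    List<RecordWriter<?>> eventualOutputs = new ArrayList<>();
    Collector<T> output = BatchTask.getOutputCollector(task, config, cl, eventualOutputs, 0, numOutputs);
    if (output == null) {
        // numOutputs == 0: nothing to ship
        return;
    }
    for (T record : records) {
        // each record is wrapped in a SerializationDelegate and handed to every RecordWriter
        output.collect(record);
    }
    output.close();
}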
Use of org.apache.flink.runtime.plugable.SerializationDelegate in project flink by apache.
The class OutputEmitterTest, method testForcedRebalance:
@Test
public void testForcedRebalance() {
    // Test for IntValue
    int numChannels = 100;
    int toTaskIndex = numChannels * 6 / 7;
    int fromTaskIndex = toTaskIndex + numChannels;
    int extraRecords = numChannels / 3;
    int numRecords = 50000 + extraRecords;
    final ChannelSelector<SerializationDelegate<Record>> oe1 = new OutputEmitter<Record>(ShipStrategyType.PARTITION_FORCED_REBALANCE, fromTaskIndex);
    final SerializationDelegate<Record> delegate = new SerializationDelegate<Record>(new RecordSerializerFactory().getSerializer());
    int[] hit = new int[numChannels];
    for (int i = 0; i < numRecords; i++) {
        IntValue k = new IntValue(i);
        Record rec = new Record(k);
        delegate.setInstance(rec);
        int[] chans = oe1.selectChannels(delegate, hit.length);
        for (int chan : chans) {
            hit[chan]++;
        }
    }
    int cnt = 0;
    for (int i = 0; i < hit.length; i++) {
        if (toTaskIndex <= i || i < toTaskIndex + extraRecords - numChannels) {
            assertTrue(hit[i] == (numRecords / numChannels) + 1);
        } else {
            assertTrue(hit[i] == numRecords / numChannels);
        }
        cnt += hit[i];
    }
    assertTrue(cnt == numRecords);
    // Test for StringValue
    numChannels = 100;
    toTaskIndex = numChannels / 5;
    fromTaskIndex = toTaskIndex + 2 * numChannels;
    extraRecords = numChannels * 2 / 9;
    numRecords = 10000 + extraRecords;
    final ChannelSelector<SerializationDelegate<Record>> oe2 = new OutputEmitter<Record>(ShipStrategyType.PARTITION_FORCED_REBALANCE, fromTaskIndex);
    hit = new int[numChannels];
    for (int i = 0; i < numRecords; i++) {
        StringValue k = new StringValue(i + "");
        Record rec = new Record(k);
        delegate.setInstance(rec);
        int[] chans = oe2.selectChannels(delegate, hit.length);
        for (int chan : chans) {
            hit[chan]++;
        }
    }
    cnt = 0;
    for (int i = 0; i < hit.length; i++) {
        if (toTaskIndex <= i && i < toTaskIndex + extraRecords) {
            assertTrue(hit[i] == (numRecords / numChannels) + 1);
        } else {
            assertTrue(hit[i] == numRecords / numChannels);
        }
        cnt += hit[i];
    }
    assertTrue(cnt == numRecords);
}
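The expected distribution in the IntValue half works out as follows; this just restates the test's own arithmetic.

// Worked numbers for the IntValue case above.
int numChannels = 100;
int toTaskIndex = numChannels * 6 / 7;   // 85
int extraRecords = numChannels / 3;      // 33
int numRecords = 50000 + extraRecords;   // 50033
int base = numRecords / numChannels;     // 500 records per channel
// The 33 channels starting at index 85 (wrapping around to indices 0..17) receive base + 1 = 501 records,
// the other 67 channels receive exactly 500, and 33 * 501 + 67 * 500 = 50033 = numRecords.
// That wrap-around is what `toTaskIndex <= i || i < toTaskIndex + extraRecords - numChannels` checks.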
Use of org.apache.flink.runtime.plugable.SerializationDelegate in project flink by apache.
The class OutputEmitterTest, method testMissingKey:
@Test
public void testMissingKey() {
    // Test for IntValue
    @SuppressWarnings({ "unchecked", "rawtypes" })
    final TypeComparator<Record> intComp = new RecordComparatorFactory(new int[] { 1 }, new Class[] { IntValue.class }).createComparator();
    final ChannelSelector<SerializationDelegate<Record>> oe1 = new OutputEmitter<Record>(ShipStrategyType.PARTITION_HASH, intComp);
    final SerializationDelegate<Record> delegate = new SerializationDelegate<Record>(new RecordSerializerFactory().getSerializer());
    Record rec = new Record(0);
    rec.setField(0, new IntValue(1));
    delegate.setInstance(rec);
    try {
        oe1.selectChannels(delegate, 100);
    } catch (KeyFieldOutOfBoundsException re) {
        Assert.assertEquals(1, re.getFieldNumber());
        return;
    }
    Assert.fail("Expected a KeyFieldOutOfBoundsException.");
}
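As a counterpart, the same setup succeeds once field 1 actually exists on the record. The sketch below is a hypothetical variation of the test above, not an existing Flink test; the channel count of 100 and the field values are placeholders.

// Hypothetical counterpart: PARTITION_HASH with the key field present selects exactly one channel.
@SuppressWarnings({ "unchecked", "rawtypes" })
final TypeComparator<Record> intComp = new RecordComparatorFactory(new int[] { 1 }, new Class[] { IntValue.class }).createComparator();
final ChannelSelector<SerializationDelegate<Record>> oe = new OutputEmitter<Record>(ShipStrategyType.PARTITION_HASH, intComp);
final SerializationDelegate<Record> delegate = new SerializationDelegate<Record>(new RecordSerializerFactory().getSerializer());
Record rec = new Record(0);
rec.setField(0, new IntValue(7));
rec.setField(1, new IntValue(42));   // key field 1 is now present
delegate.setInstance(rec);
int[] chans = oe.selectChannels(delegate, 100);
// hash partitioning routes each record to a single channel in [0, 100)
Assert.assertEquals(1, chans.length);
Assert.assertTrue(chans[0] >= 0 && chans[0] < 100);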
Use of org.apache.flink.runtime.plugable.SerializationDelegate in project flink by apache.
The class OutputEmitterTest, method testForward:
@Test
public void testForward() {
    // Test for IntValue
    @SuppressWarnings({ "unchecked", "rawtypes" })
    final TypeComparator<Record> intComp = new RecordComparatorFactory(new int[] { 0 }, new Class[] { IntValue.class }).createComparator();
    final ChannelSelector<SerializationDelegate<Record>> oe1 = new OutputEmitter<Record>(ShipStrategyType.FORWARD, intComp);
    final SerializationDelegate<Record> delegate = new SerializationDelegate<Record>(new RecordSerializerFactory().getSerializer());
    int numChannels = 100;
    int numRecords = 50000 + numChannels / 2;
    int[] hit = new int[numChannels];
    for (int i = 0; i < numRecords; i++) {
        IntValue k = new IntValue(i);
        Record rec = new Record(k);
        delegate.setInstance(rec);
        int[] chans = oe1.selectChannels(delegate, hit.length);
        for (int chan : chans) {
            hit[chan]++;
        }
    }
    assertTrue(hit[0] == numRecords);
    for (int i = 1; i < hit.length; i++) {
        assertTrue(hit[i] == 0);
    }
    // Test for StringValue
    @SuppressWarnings({ "unchecked", "rawtypes" })
    final TypeComparator<Record> stringComp = new RecordComparatorFactory(new int[] { 0 }, new Class[] { StringValue.class }).createComparator();
    final ChannelSelector<SerializationDelegate<Record>> oe2 = new OutputEmitter<Record>(ShipStrategyType.FORWARD, stringComp);
    numChannels = 100;
    numRecords = 10000 + numChannels / 2;
    hit = new int[numChannels];
    for (int i = 0; i < numRecords; i++) {
        StringValue k = new StringValue(i + "");
        Record rec = new Record(k);
        delegate.setInstance(rec);
        int[] chans = oe2.selectChannels(delegate, hit.length);
        for (int chan : chans) {
            hit[chan]++;
        }
    }
    assertTrue(hit[0] == numRecords);
    for (int i = 1; i < hit.length; i++) {
        assertTrue(hit[i] == 0);
    }
}
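Both halves of this test (and testForcedRebalance above) repeat the same select-and-count loop. A small helper like the one below, distilled from that loop, captures the pattern; the name countHits is made up and the IntValue key is a placeholder.

// Hypothetical helper distilled from the loops above: push numRecords records through a selector
// and count how often each channel gets chosen.
private static int[] countHits(ChannelSelector<SerializationDelegate<Record>> selector,
        SerializationDelegate<Record> delegate, int numChannels, int numRecords) {
    int[] hit = new int[numChannels];
    for (int i = 0; i < numRecords; i++) {
        delegate.setInstance(new Record(new IntValue(i)));
        for (int chan : selector.selectChannels(delegate, numChannels)) {
            hit[chan]++;
        }
    }
    return hit;
}

With such a helper, the FORWARD assertions reduce to checking that hit[0] == numRecords while every other entry stays 0.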