Use of org.apache.flink.streaming.api.operators.StreamingRuntimeContext in project flink by apache.
The class PrintSinkFunction, method open().
@Override
public void open(Configuration parameters) throws Exception {
    super.open(parameters);
    StreamingRuntimeContext context = (StreamingRuntimeContext) getRuntimeContext();
    writer.open(context.getIndexOfThisSubtask(), context.getNumberOfParallelSubtasks());
}
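The same pattern works in a user-defined sink. Below is a minimal sketch, assuming the single-argument invoke() of the RichSinkFunction API from this Flink generation; SubtaskTaggingSink and its prefix format are hypothetical and not part of Flink.

import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;

// Hypothetical sink: tags every record with an "index/parallelism> " prefix.
public class SubtaskTaggingSink<T> extends RichSinkFunction<T> {

    private transient String prefix;

    @Override
    public void open(Configuration parameters) throws Exception {
        super.open(parameters);
        // Same cast as in PrintSinkFunction: in a streaming operator the
        // runtime context is a StreamingRuntimeContext.
        StreamingRuntimeContext context = (StreamingRuntimeContext) getRuntimeContext();
        prefix = context.getIndexOfThisSubtask() + "/"
                + context.getNumberOfParallelSubtasks() + "> ";
    }

    @Override
    public void invoke(T value) throws Exception {
        System.out.println(prefix + value);
    }
}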
Use of org.apache.flink.streaming.api.operators.StreamingRuntimeContext in project flink by apache.
The class KafkaConsumer08Test, method testAllBoostrapServerHostsAreInvalid().
@Test
public void testAllBoostrapServerHostsAreInvalid() {
    try {
        String unknownHost = "foobar:11111";
        URL unknownHostURL = NetUtils.getCorrectHostnamePort(unknownHost);
        PowerMockito.mockStatic(InetAddress.class);
        when(InetAddress.getByName(Matchers.eq(unknownHostURL.getHost()))).thenThrow(new UnknownHostException("Test exception"));
        String zookeeperConnect = "localhost:56794";
        String groupId = "non-existent-group";
        Properties props = createKafkaProps(zookeeperConnect, unknownHost, groupId);
        FlinkKafkaConsumer08<String> consumer = new FlinkKafkaConsumer08<>(Collections.singletonList("no op topic"), new SimpleStringSchema(), props);
        StreamingRuntimeContext mockRuntimeContext = mock(StreamingRuntimeContext.class);
        Mockito.when(mockRuntimeContext.isCheckpointingEnabled()).thenReturn(true);
        consumer.setRuntimeContext(mockRuntimeContext);
        consumer.open(new Configuration());
        fail();
    } catch (Exception e) {
        assertTrue("Exception should be thrown containing 'all bootstrap servers invalid' message!", e.getMessage().contains("All the servers provided in: '" + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG + "' config are invalid"));
    }
}
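Stripped of the Kafka specifics, the mocking pattern above reduces to the sketch below; richFunctionUnderTest is a placeholder for any AbstractRichFunction, and the mock/when calls are the same Mockito and Flink APIs already used in the test.

// Open a rich function in a plain unit test, without a real streaming task.
StreamingRuntimeContext mockContext = mock(StreamingRuntimeContext.class);
when(mockContext.isCheckpointingEnabled()).thenReturn(true);
when(mockContext.getIndexOfThisSubtask()).thenReturn(0);
when(mockContext.getNumberOfParallelSubtasks()).thenReturn(1);

// richFunctionUnderTest is a placeholder, not a name from the Flink test suite.
richFunctionUnderTest.setRuntimeContext(mockContext);
richFunctionUnderTest.open(new Configuration());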
Use of org.apache.flink.streaming.api.operators.StreamingRuntimeContext in project flink by apache.
The class BoltWrapperTest, method testWrapper().
@SuppressWarnings({ "rawtypes", "unchecked" })
private void testWrapper(final int numberOfAttributes) throws Exception {
    assert ((-1 <= numberOfAttributes) && (numberOfAttributes <= 25));
    Tuple flinkTuple = null;
    String rawTuple = null;
    if (numberOfAttributes == -1) {
        rawTuple = "test";
    } else {
        flinkTuple = Tuple.getTupleClass(numberOfAttributes).newInstance();
    }
    final String[] schema;
    if (numberOfAttributes == -1) {
        schema = new String[1];
    } else {
        schema = new String[numberOfAttributes];
    }
    for (int i = 0; i < schema.length; ++i) {
        schema[i] = "a" + i;
    }
    final StreamRecord record = mock(StreamRecord.class);
    if (numberOfAttributes == -1) {
        when(record.getValue()).thenReturn(rawTuple);
    } else {
        when(record.getValue()).thenReturn(flinkTuple);
    }
    final StreamingRuntimeContext taskContext = mock(StreamingRuntimeContext.class);
    when(taskContext.getExecutionConfig()).thenReturn(mock(ExecutionConfig.class));
    when(taskContext.getTaskName()).thenReturn("name");
    when(taskContext.getMetricGroup()).thenReturn(new UnregisteredMetricsGroup());
    final IRichBolt bolt = mock(IRichBolt.class);
    final SetupOutputFieldsDeclarer declarer = new SetupOutputFieldsDeclarer();
    declarer.declare(new Fields(schema));
    PowerMockito.whenNew(SetupOutputFieldsDeclarer.class).withNoArguments().thenReturn(declarer);
    final BoltWrapper wrapper = new BoltWrapper(bolt, (Fields) null);
    wrapper.setup(createMockStreamTask(), new StreamConfig(new Configuration()), mock(Output.class));
    wrapper.open();
    wrapper.processElement(record);
    if (numberOfAttributes == -1) {
        verify(bolt).execute(eq(new StormTuple<String>(rawTuple, null, -1, null, null, MessageId.makeUnanchored())));
    } else {
        verify(bolt).execute(eq(new StormTuple<Tuple>(flinkTuple, null, -1, null, null, MessageId.makeUnanchored())));
    }
}
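The three stubbings on the mocked task context recur across the Storm wrapper tests in this section. A small helper could factor them out; MockTaskContexts is a hypothetical class, not part of the Flink test suite, and assumes the same Flink and Mockito versions as the test above.

import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.metrics.groups.UnregisteredMetricsGroup;
import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

// Hypothetical helper bundling the execution config, task name and metric
// group stubbings used by the wrapper tests.
final class MockTaskContexts {

    private MockTaskContexts() {
    }

    static StreamingRuntimeContext mockedTaskContext(String taskName) {
        StreamingRuntimeContext taskContext = mock(StreamingRuntimeContext.class);
        when(taskContext.getExecutionConfig()).thenReturn(mock(ExecutionConfig.class));
        when(taskContext.getTaskName()).thenReturn(taskName);
        when(taskContext.getMetricGroup()).thenReturn(new UnregisteredMetricsGroup());
        return taskContext;
    }
}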
Use of org.apache.flink.streaming.api.operators.StreamingRuntimeContext in project flink by apache.
The class SpoutWrapperTest, method runAndExecuteFiniteSpout().
@SuppressWarnings("unchecked")
@Test
public void runAndExecuteFiniteSpout() throws Exception {
    final FiniteSpout stormSpout = mock(FiniteSpout.class);
    when(stormSpout.reachedEnd()).thenReturn(false, false, false, true, false, false, true);
    final StreamingRuntimeContext taskContext = mock(StreamingRuntimeContext.class);
    when(taskContext.getExecutionConfig()).thenReturn(mock(ExecutionConfig.class));
    when(taskContext.getTaskName()).thenReturn("name");
    final SpoutWrapper<?> wrapper = new SpoutWrapper<Object>(stormSpout);
    wrapper.setRuntimeContext(taskContext);
    wrapper.run(mock(SourceContext.class));
    verify(stormSpout, times(3)).nextTuple();
}
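The stubbed reachedEnd() sequence explains the expected call count: the wrapper polls the spout until reachedEnd() first returns true, so the three leading false values yield exactly three nextTuple() calls. The sketch below illustrates only that polling behaviour; it is not the actual SpoutWrapper implementation, and it assumes the FiniteSpout interface from the flink-storm module.

import org.apache.flink.storm.util.FiniteSpout;

// Hypothetical, simplified driver: emit tuples until the finite spout reports
// that it has reached its end.
final class FiniteSpoutDriver {

    static void drain(FiniteSpout spout) {
        while (!spout.reachedEnd()) {
            spout.nextTuple();
        }
    }
}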
Use of org.apache.flink.streaming.api.operators.StreamingRuntimeContext in project flink by apache.
The class WrapperSetupInLocalClusterTest, method testCreateTopologyContext().
@Test
public void testCreateTopologyContext() {
    HashMap<String, Integer> dops = new HashMap<String, Integer>();
    dops.put("spout1", 1);
    dops.put("spout2", 3);
    dops.put("bolt1", 1);
    dops.put("bolt2", 2);
    dops.put("sink", 1);
    HashMap<String, Integer> taskCounter = new HashMap<String, Integer>();
    taskCounter.put("spout1", 0);
    taskCounter.put("spout2", 0);
    taskCounter.put("bolt1", 0);
    taskCounter.put("bolt2", 0);
    taskCounter.put("sink", 0);
    HashMap<String, IComponent> operators = new HashMap<String, IComponent>();
    operators.put("spout1", new TestDummySpout());
    operators.put("spout2", new TestDummySpout());
    operators.put("bolt1", new TestDummyBolt());
    operators.put("bolt2", new TestDummyBolt());
    operators.put("sink", new TestSink());
    // Build the reference topology and run it in a local Storm cluster so that
    // TestSink collects the TopologyContext instances Storm itself creates.
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout1", (IRichSpout) operators.get("spout1"), dops.get("spout1"));
    builder.setSpout("spout2", (IRichSpout) operators.get("spout2"), dops.get("spout2"));
    builder.setBolt("bolt1", (IRichBolt) operators.get("bolt1"), dops.get("bolt1")).shuffleGrouping("spout1");
    builder.setBolt("bolt2", (IRichBolt) operators.get("bolt2"), dops.get("bolt2")).allGrouping("spout2");
    builder.setBolt("sink", (IRichBolt) operators.get("sink"), dops.get("sink")).shuffleGrouping("bolt1", TestDummyBolt.groupingStreamId).shuffleGrouping("bolt1", TestDummyBolt.shuffleStreamId).shuffleGrouping("bolt2", TestDummyBolt.groupingStreamId).shuffleGrouping("bolt2", TestDummyBolt.shuffleStreamId);
    LocalCluster cluster = new LocalCluster();
    Config c = new Config();
    c.setNumAckers(0);
    cluster.submitTopology("test", c, builder.createTopology());
    while (TestSink.result.size() != 8) {
        Utils.sleep(100);
    }
    cluster.shutdown();
    final FlinkTopology flinkBuilder = FlinkTopology.createTopology(builder);
    StormTopology stormTopology = flinkBuilder.getStormTopology();
    Set<Integer> taskIds = new HashSet<Integer>();
    // For every context collected in the local cluster, create a context via
    // WrapperSetupHelper from a mocked StreamingRuntimeContext and compare them.
    for (TopologyContext expectedContext : TestSink.result) {
        final String thisComponentId = expectedContext.getThisComponentId();
        int index = taskCounter.get(thisComponentId);
        StreamingRuntimeContext context = mock(StreamingRuntimeContext.class);
        when(context.getTaskName()).thenReturn(thisComponentId);
        when(context.getNumberOfParallelSubtasks()).thenReturn(dops.get(thisComponentId));
        when(context.getIndexOfThisSubtask()).thenReturn(index);
        taskCounter.put(thisComponentId, ++index);
        Config stormConfig = new Config();
        stormConfig.put(WrapperSetupHelper.TOPOLOGY_NAME, "test");
        TopologyContext topologyContext = WrapperSetupHelper.createTopologyContext(context, operators.get(thisComponentId), thisComponentId, stormTopology, stormConfig);
        ComponentCommon expectedCommon = expectedContext.getComponentCommon(thisComponentId);
        ComponentCommon common = topologyContext.getComponentCommon(thisComponentId);
        Assert.assertNull(topologyContext.getCodeDir());
        Assert.assertNull(common.get_json_conf());
        Assert.assertNull(topologyContext.getExecutorData(null));
        Assert.assertNull(topologyContext.getPIDDir());
        Assert.assertNull(topologyContext.getResource(null));
        Assert.assertNull(topologyContext.getSharedExecutor());
        Assert.assertNull(expectedContext.getTaskData(null));
        Assert.assertNull(topologyContext.getThisWorkerPort());
        Assert.assertTrue(expectedContext.getStormId().startsWith(topologyContext.getStormId()));
        Assert.assertEquals(expectedCommon.get_inputs(), common.get_inputs());
        Assert.assertEquals(expectedCommon.get_parallelism_hint(), common.get_parallelism_hint());
        Assert.assertEquals(expectedCommon.get_streams(), common.get_streams());
        Assert.assertEquals(expectedContext.getComponentIds(), topologyContext.getComponentIds());
        Assert.assertEquals(expectedContext.getComponentStreams(thisComponentId), topologyContext.getComponentStreams(thisComponentId));
        Assert.assertEquals(thisComponentId, topologyContext.getThisComponentId());
        Assert.assertEquals(expectedContext.getThisSources(), topologyContext.getThisSources());
        Assert.assertEquals(expectedContext.getThisStreams(), topologyContext.getThisStreams());
        Assert.assertEquals(expectedContext.getThisTargets(), topologyContext.getThisTargets());
        Assert.assertEquals(0, topologyContext.getThisWorkerTasks().size());
        for (int taskId : topologyContext.getComponentTasks(thisComponentId)) {
            Assert.assertEquals(thisComponentId, topologyContext.getComponentId(taskId));
        }
        for (String componentId : expectedContext.getComponentIds()) {
            Assert.assertEquals(expectedContext.getSources(componentId), topologyContext.getSources(componentId));
            Assert.assertEquals(expectedContext.getTargets(componentId), topologyContext.getTargets(componentId));
            for (String streamId : expectedContext.getComponentStreams(componentId)) {
                Assert.assertEquals(expectedContext.getComponentOutputFields(componentId, streamId).toList(), topologyContext.getComponentOutputFields(componentId, streamId).toList());
            }
        }
        for (String streamId : expectedContext.getThisStreams()) {
            Assert.assertEquals(expectedContext.getThisOutputFields(streamId).toList(), topologyContext.getThisOutputFields(streamId).toList());
        }
        HashMap<Integer, String> taskToComponents = new HashMap<Integer, String>();
        Set<Integer> allTaskIds = new HashSet<Integer>();
        for (String componentId : expectedContext.getComponentIds()) {
            List<Integer> possibleTasks = expectedContext.getComponentTasks(componentId);
            List<Integer> tasks = topologyContext.getComponentTasks(componentId);
            Iterator<Integer> p_it = possibleTasks.iterator();
            Iterator<Integer> t_it = tasks.iterator();
            while (p_it.hasNext()) {
                Assert.assertTrue(t_it.hasNext());
                Assert.assertNull(taskToComponents.put(p_it.next(), componentId));
                Assert.assertTrue(allTaskIds.add(t_it.next()));
            }
            Assert.assertFalse(t_it.hasNext());
        }
        Assert.assertEquals(taskToComponents, expectedContext.getTaskToComponent());
        Assert.assertTrue(taskIds.add(topologyContext.getThisTaskId()));
        try {
            topologyContext.getHooks();
            Assert.fail();
        } catch (UnsupportedOperationException e) {
            /* expected */
        }
        try {
            topologyContext.getRegisteredMetricByName(null);
            Assert.fail();
        } catch (UnsupportedOperationException e) {
            /* expected */
        }
    }
}
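Reduced to its essentials, each iteration of the comparison loop performs the step sketched below; the component id "bolt1", the parallelism, and the subtask index are illustrative values, while the calls themselves mirror the test above.

// A StreamingRuntimeContext mocked with task name, parallelism and subtask
// index is enough for WrapperSetupHelper to rebuild a Storm TopologyContext.
StreamingRuntimeContext context = mock(StreamingRuntimeContext.class);
when(context.getTaskName()).thenReturn("bolt1");
when(context.getNumberOfParallelSubtasks()).thenReturn(1);
when(context.getIndexOfThisSubtask()).thenReturn(0);

Config stormConfig = new Config();
stormConfig.put(WrapperSetupHelper.TOPOLOGY_NAME, "test");
TopologyContext topologyContext = WrapperSetupHelper.createTopologyContext(
        context, new TestDummyBolt(), "bolt1", stormTopology, stormConfig);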