Example usage of org.apache.storm.task.WorkerTopologyContext in the Apache Storm project, from the class StormCommon, method makeWorkerContext.
/**
 * Builds a {@link WorkerTopologyContext} from the shared per-worker data map.
 *
 * <p>Each entry is looked up under a {@code Constants} key and cast to the type the
 * context constructor expects; the map is assumed to have been populated by the
 * worker bootstrap code with values of exactly these types.
 *
 * @param workerData the worker's shared state map, keyed by {@code Constants.*}
 * @return a fully populated {@code WorkerTopologyContext} for this worker
 * @throws RuntimeException wrapping any {@link IOException} raised while resolving
 *         the topology's code/pid directories on the local filesystem
 */
@SuppressWarnings("unchecked")
public static WorkerTopologyContext makeWorkerContext(Map<String, Object> workerData) {
    try {
        StormTopology stormTopology = (StormTopology) workerData.get(Constants.SYSTEM_TOPOLOGY);
        // Use parameterized casts instead of raw Map to keep the unchecked
        // conversions visible and confined to this method.
        Map<String, Object> stormConf = (Map<String, Object>) workerData.get(Constants.STORM_CONF);
        Map<Integer, String> taskToComponent = (Map<Integer, String>) workerData.get(Constants.TASK_TO_COMPONENT);
        Map<String, List<Integer>> componentToSortedTasks =
            (Map<String, List<Integer>>) workerData.get(Constants.COMPONENT_TO_SORTED_TASKS);
        Map<String, Map<String, Fields>> componentToStreamToFields =
            (Map<String, Map<String, Fields>>) workerData.get(Constants.COMPONENT_TO_STREAM_TO_FIELDS);
        String stormId = (String) workerData.get(Constants.STORM_ID);
        Map<String, Object> conf = (Map<String, Object>) workerData.get(Constants.CONF);
        Integer port = (Integer) workerData.get(Constants.PORT);
        // Local paths derived from the supervisor's layout for this topology.
        String codeDir = ConfigUtils.supervisorStormResourcesPath(ConfigUtils.supervisorStormDistRoot(conf, stormId));
        String pidDir = ConfigUtils.workerPidsRoot(conf, stormId);
        List<Integer> workerTasks = (List<Integer>) workerData.get(Constants.TASK_IDS);
        Map<String, Object> defaultResources = (Map<String, Object>) workerData.get(Constants.DEFAULT_SHARED_RESOURCES);
        Map<String, Object> userResources = (Map<String, Object>) workerData.get(Constants.USER_SHARED_RESOURCES);
        return new WorkerTopologyContext(stormTopology, stormConf, taskToComponent, componentToSortedTasks,
            componentToStreamToFields, stormId, codeDir, pidDir, port, workerTasks, defaultResources, userResources);
    } catch (IOException e) {
        // Preserve the original cause; callers treat directory-resolution
        // failures as fatal worker-startup errors.
        throw Utils.wrapInRuntime(e);
    }
}
Example usage of org.apache.storm.task.WorkerTopologyContext in the Apache Storm project, from the class WorkerState, method workerOutboundTasks.
/**
 * Computes the set of task ids that receive messages emitted by this worker's tasks.
 *
 * @return task ids downstream of any task hosted in this worker
 */
private Set<Integer> workerOutboundTasks() {
    WorkerTopologyContext context = getWorkerTopologyContext();
    // First, gather every component id that any local task sends tuples to.
    Set<String> downstreamComponents = new HashSet<>();
    for (Integer localTask : taskIds) {
        String componentId = context.getComponentId(localTask);
        for (Map<String, Grouping> streamTargets : context.getTargets(componentId).values()) {
            downstreamComponents.addAll(streamTargets.keySet());
        }
    }
    // Then translate those component ids back into concrete task ids via the
    // inverted task -> component mapping.
    Map<String, List<Integer>> componentToTasks = Utils.reverseMap(taskToComponent);
    Set<Integer> outbound = new HashSet<>();
    for (Map.Entry<String, List<Integer>> componentTasks : componentToTasks.entrySet()) {
        if (downstreamComponents.contains(componentTasks.getKey())) {
            outbound.addAll(componentTasks.getValue());
        }
    }
    return outbound;
}
Example usage of org.apache.storm.task.WorkerTopologyContext in the Apache Storm project, from the class ReportErrorTest, method testReport.
/**
 * Verifies that ReportError throttles error reports: at most
 * TOPOLOGY_MAX_ERROR_REPORT_PER_INTERVAL (4) reports are forwarded to cluster
 * state per TOPOLOGY_ERROR_THROTTLE_INTERVAL_SECS (10s) window, and reporting
 * resumes once the window elapses.
 */
@Test
public void testReport() {
    final String topo = "topology";
    final String comp = "component";
    // Use the literal/valueOf path instead of the deprecated Long constructor.
    final Long port = 8080L;
    final AtomicLong errorCount = new AtomicLong(0L);
    WorkerTopologyContext context = mock(WorkerTopologyContext.class);
    when(context.getThisWorkerPort()).thenReturn(port.intValue());
    IStormClusterState state = mock(IStormClusterState.class);
    // Count every error that actually reaches cluster state.
    doAnswer((invocation) -> errorCount.incrementAndGet())
        .when(state).reportError(eq(topo), eq(comp), any(String.class), eq(port), any(Throwable.class));
    Map<String, Object> conf = new HashMap<>();
    conf.put(Config.TOPOLOGY_ERROR_THROTTLE_INTERVAL_SECS, 10);
    conf.put(Config.TOPOLOGY_MAX_ERROR_REPORT_PER_INTERVAL, 4);
    try (SimulatedTime t = new SimulatedTime()) {
        ReportError report = new ReportError(conf, state, topo, comp, context);
        // The first 4 reports within the interval all go through.
        report.report(new RuntimeException("ERROR-1"));
        assertEquals(1, errorCount.get());
        report.report(new RuntimeException("ERROR-2"));
        assertEquals(2, errorCount.get());
        report.report(new RuntimeException("ERROR-3"));
        assertEquals(3, errorCount.get());
        report.report(new RuntimeException("ERROR-4"));
        assertEquals(4, errorCount.get());
        //Too fast not reported
        report.report(new RuntimeException("ERROR-5"));
        assertEquals(4, errorCount.get());
        // Still inside the 10s throttle window — report is dropped.
        Time.advanceTime(9000);
        report.report(new RuntimeException("ERROR-6"));
        assertEquals(4, errorCount.get());
        // Window has elapsed (11s total) — reporting resumes.
        Time.advanceTime(2000);
        report.report(new RuntimeException("ERROR-7"));
        assertEquals(5, errorCount.get());
    }
}
Example usage of org.apache.storm.task.WorkerTopologyContext in the Apache Storm project, from the class PartialKeyGroupingTest, method testChooseTasksFields.
/**
 * Verifies that PartialKeyGrouping with explicit key fields alternates between
 * two candidate tasks for the same key: consecutive choices differ, and the
 * third choice cycles back to the first.
 */
@Test
public void testChooseTasksFields() {
    PartialKeyGrouping grouping = new PartialKeyGrouping(new Fields("test"));
    WorkerTopologyContext context = mock(WorkerTopologyContext.class);
    when(context.getComponentOutputFields(any(GlobalStreamId.class))).thenReturn(new Fields("test"));
    grouping.prepare(context, null, Lists.newArrayList(0, 1, 2, 3, 4, 5));
    Values tuple = new Values("key1");
    // Emit the same key three times and record each chosen task list.
    List<Integer> first = grouping.chooseTasks(0, tuple);
    List<Integer> second = grouping.chooseTasks(0, tuple);
    List<Integer> third = grouping.chooseTasks(0, tuple);
    // Exactly one task per emit; the grouping ping-pongs between two tasks.
    assertThat(first.size(), is(1));
    assertThat(second, is(not(first)));
    assertThat(third, is(not(second)));
    assertThat(third, is(first));
}
Example usage of org.apache.storm.task.WorkerTopologyContext in the Apache Storm project, from the class ShuffleGroupingTest, method testShuffleGrouping.
/**
 * Tests that ShuffleGrouping distributes tuples round-robin: over a number of
 * emits that is an exact multiple of the task count, every task is chosen the
 * same number of times.
 */
@Test
public void testShuffleGrouping() {
    final int numTasks = 6;
    // Derive the expected per-task count from the iteration count so the two
    // values cannot drift apart if either is changed later.
    final int iterations = 30000;
    final int expectedPerTask = iterations / numTasks;
    final ShuffleGrouping grouper = new ShuffleGrouping();
    // Task Id not used, so just pick a static value
    final int inputTaskId = 100;
    // Define our taskIds
    final List<Integer> availableTaskIds = Lists.newArrayList();
    for (int i = 0; i < numTasks; i++) {
        availableTaskIds.add(i);
    }
    WorkerTopologyContext context = mock(WorkerTopologyContext.class);
    grouper.prepare(context, null, availableTaskIds);
    // Keep track of how many times we see each taskId
    int[] taskCounts = new int[numTasks];
    for (int i = 1; i <= iterations; i++) {
        List<Integer> taskIds = grouper.chooseTasks(inputTaskId, Lists.newArrayList());
        // Validate a single task id return
        assertNotNull("Not null taskId list returned", taskIds);
        assertEquals("Single task Id returned", 1, taskIds.size());
        int taskId = taskIds.get(0);
        assertTrue("TaskId should exist", taskId >= 0 && taskId < numTasks);
        taskCounts[taskId]++;
    }
    for (int i = 0; i < numTasks; i++) {
        assertEquals("Distribution should be even for all nodes", expectedPerTask, taskCounts[i]);
    }
}
Aggregations