Usage of com.twitter.ambrose.model.WorkflowId in the twitter/ambrose project: class HRavenStatsReadService, method getDagNodeNameMap.
/**
 * Looks up the DAG nodes for the given workflowId.
 *
 * @param workflowId the id of the workflow
 * @return a map of nodeIds to DAGNodes, or {@code null} if the workflow does not exist
 * @throws IOException if the stored job-graph JSON cannot be deserialized
 */
@SuppressWarnings("rawtypes")
@Override
public Map<String, DAGNode> getDagNodeNameMap(String workflowId) throws IOException {
  WorkflowId parsed = WorkflowId.parseString(workflowId);
  Flow flow = flowQueueService.getFlowFromQueue(
      parsed.getCluster(), parsed.getTimestamp(), parsed.getFlowId());
  if (flow == null) {
    // No such workflow in the queue.
    return null;
  }
  // TODO This may not work nicely with multiple type of jobs
  // See: https://github.com/twitter/ambrose/pull/131
  return JSONUtil.toObject(
      flow.getJobGraphJSON(), new TypeReference<Map<String, DAGNode>>() {});
}
Usage of com.twitter.ambrose.model.WorkflowId in the twitter/ambrose project: class HRavenStatsReadService, method getEventsSinceId.
/**
 * Fetches up to {@code maxEvents} workflow events for the given workflow that occurred
 * after the given event id. Events whose JSON payload fails to deserialize are logged
 * and skipped; they do not count toward the limit.
 *
 * @param workflowId the id of the workflow
 * @param eventId only events after this id are returned
 * @param maxEvents upper bound on the number of events returned; must be positive
 * @return the deserialized events, at most {@code maxEvents} of them
 * @throws IOException if the underlying event service fails
 */
@SuppressWarnings("rawtypes")
@Override
public List<Event> getEventsSinceId(String workflowId, int eventId, int maxEvents) throws IOException {
  Preconditions.checkArgument(maxEvents > 0);
  WorkflowId id = WorkflowId.parseString(workflowId);
  FlowEventKey sinceKey = new FlowEventKey(toFlowKey(id), eventId);
  List<FlowEvent> flowEvents = flowEventService.getFlowEventsSince(sinceKey);
  // TODO push this limit into the FlowEventService
  List<Event> result = Lists.newArrayListWithCapacity(maxEvents);
  for (FlowEvent flowEvent : flowEvents) {
    if (result.size() >= maxEvents) {
      break;
    }
    String json = flowEvent.getEventDataJSON();
    try {
      result.add(Event.fromJson(json));
    } catch (JsonMappingException e) {
      // Skip malformed payloads rather than failing the whole fetch.
      LOG.error("Could not deserialize json: " + json, e);
    }
  }
  return result;
}
Aggregations