Use of com.linkedin.drelephant.analysis.JobType in project dr-elephant by linkedin.
The class Web, method restSearchOptions.
/**
 * Returns the REST search options used to fill the forms on the search page.
 *
 * @return The JSON object that should be used to populate the search form, for example:
 * <pre>
 * {
 *   "search-options": {
 *     "jobcategory": [
 *       {
 *         "name": "SPARK",
 *         "jobtypes": [
 *           { "name": "Spark" }
 *         ],
 *         "heuristics": [
 *           { "name": "Spark Configuration Best Practice" },
 *           { "name": "Spark Memory Limit" },
 *           { "name": "Spark Stage Runtime" },
 *           { "name": "Spark Job Runtime" },
 *           { "name": "Spark Executor Load Balance" },
 *           { "name": "Spark Event Log Limit" }
 *         ]
 *       },
 *       {
 *         "name": "MAPREDUCE",
 *         "jobtypes": [
 *           { "name": "Pig" },
 *           { "name": "Hive" },
 *           { "name": "Cascading" },
 *           { "name": "Voldemort" },
 *           { "name": "Kafka" },
 *           { "name": "HadoopJava" }
 *         ],
 *         "heuristics": [
 *           { "name": "Mapper Data Skew" },
 *           { "name": "Mapper GC" },
 *           { "name": "Mapper Time" },
 *           { "name": "Mapper Speed" },
 *           { "name": "Mapper Spill" },
 *           { "name": "Mapper Memory" },
 *           { "name": "Reducer Data Skew" },
 *           { "name": "Reducer GC" },
 *           { "name": "Reducer Time" },
 *           { "name": "Reducer Memory" },
 *           { "name": "Shuffle & Sort" },
 *           { "name": "Exception" }
 *         ]
 *       }
 *     ],
 *     "severities": [
 *       { "name": "Critical", "value": 4 },
 *       { "name": "Severe", "value": 3 },
 *       { "name": "Moderate", "value": 2 },
 *       { "name": "Low", "value": 1 },
 *       { "name": "None", "value": 0 }
 *     ],
 *     "id": "search"
 *   }
 * }
 * </pre>
 */
public static Result restSearchOptions() {
  JsonObject searchOptions = new JsonObject();
  JsonArray jobCategory = new JsonArray();
  JsonArray severities = new JsonArray();

  // One entry per supported application type, carrying its job types and heuristics.
  Map<ApplicationType, List<JobType>> applicationTypeListMap = ElephantContext.instance().getAppTypeToJobTypes();
  for (ApplicationType key : applicationTypeListMap.keySet()) {
    JsonObject applicationType = new JsonObject();
    JsonArray jobTypes = new JsonArray();
    JsonArray heuristics = new JsonArray();
    for (JobType jobtype : applicationTypeListMap.get(key)) {
      JsonObject jobTypeNode = new JsonObject();
      jobTypeNode.addProperty(JsonKeys.NAME, jobtype.getName());
      jobTypes.add(jobTypeNode);
    }
    for (Heuristic heuristic : ElephantContext.instance().getHeuristicsForApplicationType(key)) {
      JsonObject heuristicNode = new JsonObject();
      heuristicNode.addProperty(JsonKeys.NAME, heuristic.getHeuristicConfData().getHeuristicName());
      heuristics.add(heuristicNode);
    }
    applicationType.addProperty(JsonKeys.NAME, key.getName());
    applicationType.add(JsonKeys.JOB_TYPES, jobTypes);
    applicationType.add(JsonKeys.HEURISTICS, heuristics);
    jobCategory.add(applicationType);
  }

  // All severity levels, in the order defined by the Severity enum.
  for (Severity severity : Severity.values()) {
    JsonObject severityObject = new JsonObject();
    severityObject.addProperty(JsonKeys.NAME, severity.getText());
    severityObject.addProperty(JsonKeys.VALUE, severity.getValue());
    severities.add(severityObject);
  }

  searchOptions.add(JsonKeys.JOB_CATEGORY, jobCategory);
  searchOptions.add(JsonKeys.SEVERITIES, severities);
  searchOptions.addProperty(JsonKeys.ID, "search");

  JsonObject parent = new JsonObject();
  parent.add(JsonKeys.SEARCH_OPTS, searchOptions);
  return ok(new Gson().toJson(parent));
}
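For reference, a minimal standalone sketch of the JSON shape this method serializes, built with plain Gson and hard-coded stand-in names (the real method pulls application types, job types and heuristics from ElephantContext; the class name SearchOptionsSketch is made up for illustration):

import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;

public class SearchOptionsSketch {
  public static void main(String[] args) {
    // Stand-in for one application type entry; the real method also fills
    // "jobtypes" and "heuristics" arrays for each application type.
    JsonObject spark = new JsonObject();
    spark.addProperty("name", "SPARK");

    JsonArray jobCategory = new JsonArray();
    jobCategory.add(spark);

    JsonObject searchOptions = new JsonObject();
    searchOptions.add("jobcategory", jobCategory);
    searchOptions.addProperty("id", "search");

    JsonObject parent = new JsonObject();
    parent.add("search-options", searchOptions);

    // Prints: {"search-options":{"jobcategory":[{"name":"SPARK"}],"id":"search"}}
    System.out.println(new Gson().toJson(parent));
  }
}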
Use of com.linkedin.drelephant.analysis.JobType in project dr-elephant by linkedin.
The class ElephantContext, method configureSupportedApplicationTypes.
/**
 * Decides which application types can be supported.
 *
 * An application type is supported only if all of the following hold:
 * 1. A Fetcher is defined in FetcherConf.xml for the application type.
 * 2. At least one Heuristic is configured in HeuristicConf.xml for the application type.
 * 3. At least one job type is configured in JobTypeConf.xml for the application type.
 * 4. A metrics aggregator is configured for the application type.
 */
private void configureSupportedApplicationTypes() {
  Set<ApplicationType> supportedTypes = Sets.intersection(_typeToFetcher.keySet(), _typeToHeuristics.keySet());
  supportedTypes = Sets.intersection(supportedTypes, _appTypeToJobTypes.keySet());
  supportedTypes = Sets.intersection(supportedTypes, _typeToAggregator.keySet());

  _typeToAggregator.keySet().retainAll(supportedTypes);
  _typeToFetcher.keySet().retainAll(supportedTypes);
  _typeToHeuristics.keySet().retainAll(supportedTypes);
  _appTypeToJobTypes.keySet().retainAll(supportedTypes);

  logger.info("Configuring ElephantContext...");
  for (ApplicationType type : supportedTypes) {
    _nameToType.put(type.getName(), type);

    List<String> classes = new ArrayList<String>();
    List<Heuristic> heuristics = _typeToHeuristics.get(type);
    for (Heuristic heuristic : heuristics) {
      classes.add(heuristic.getClass().getName());
    }

    List<JobType> jobTypes = _appTypeToJobTypes.get(type);
    logger.info("Supports " + type.getName() + " application type, using " + _typeToFetcher.get(type).toString()
        + " fetcher class with Heuristics [" + StringUtils.join(classes, ", ") + "] and following JobTypes ["
        + StringUtils.join(jobTypes, ", ") + "].");
  }
}
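A minimal, self-contained sketch of the pruning step above, using plain String keys in place of ApplicationType and made-up values in place of real fetcher and heuristic objects; it shows how intersecting the key sets and calling retainAll removes every application type that is missing from any one of the maps:

import com.google.common.collect.Sets;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class SupportedTypesSketch {
  public static void main(String[] args) {
    Map<String, String> typeToFetcher = new HashMap<String, String>();
    typeToFetcher.put("MAPREDUCE", "someFetcher");
    typeToFetcher.put("SPARK", "someOtherFetcher");

    Map<String, String> typeToHeuristics = new HashMap<String, String>();
    typeToHeuristics.put("MAPREDUCE", "someHeuristic");
    // SPARK has no heuristic configured, so it must be dropped everywhere.

    // Guava's Sets.intersection returns a live view of the common keys.
    Set<String> supported = Sets.intersection(typeToFetcher.keySet(), typeToHeuristics.keySet());

    // retainAll on a map's keySet() also removes the corresponding map entries.
    typeToFetcher.keySet().retainAll(supported);
    typeToHeuristics.keySet().retainAll(supported);

    System.out.println(typeToFetcher);    // {MAPREDUCE=someFetcher}
    System.out.println(typeToHeuristics); // {MAPREDUCE=someHeuristic}
  }
}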
Use of com.linkedin.drelephant.analysis.JobType in project dr-elephant by linkedin.
The class ElephantContext, method matchJobType.
/**
 * Gets the matched job type for the given application data.
 *
 * @param data The HadoopApplicationData to check
 * @return The matched job type, or null if none matches
 */
public JobType matchJobType(HadoopApplicationData data) {
  if (data != null) {
    List<JobType> jobTypeList = _appTypeToJobTypes.get(data.getApplicationType());
    Properties jobProp = data.getConf();
    for (JobType type : jobTypeList) {
      if (type.matchType(jobProp)) {
        return type;
      }
    }
  }
  return null;
}
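A hedged usage sketch of the match itself: JobType is constructed from a name, a configuration key and a regex (as in parseJobTypeConfiguration below), and matchType checks a job's Properties against that regex. The property name and value used here are illustrative stand-ins, not values taken from the project's JobTypeConf.xml:

import java.util.Properties;
import com.linkedin.drelephant.analysis.JobType;

public class JobTypeMatchSketch {
  public static void main(String[] args) {
    // "pig.script" is an assumed configuration key, used only for illustration.
    JobType pig = new JobType("Pig", "pig.script", ".*");

    Properties jobConf = new Properties();
    jobConf.setProperty("pig.script", "daily_report.pig");

    // Prints true here: the property is present and its value matches the ".*" pattern.
    System.out.println(pig.matchType(jobConf));
  }
}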
Use of com.linkedin.drelephant.analysis.JobType in project dr-elephant by linkedin.
The class JobTypeConfiguration, method parseJobTypeConfiguration.
private void parseJobTypeConfiguration(Element configuration) {
  Map<ApplicationType, JobType> defaultMap = new HashMap<ApplicationType, JobType>();
  NodeList nodes = configuration.getChildNodes();
  int n = 0;
  for (int i = 0; i < nodes.getLength(); i++) {
    Node node = nodes.item(i);
    if (node.getNodeType() == Node.ELEMENT_NODE) {
      n++;
      Element jobTypeNode = (Element) node;

      String jobTypeName;
      Node jobTypeNameNode = jobTypeNode.getElementsByTagName("name").item(0);
      if (jobTypeNameNode == null) {
        throw new RuntimeException("No tag 'jobtype' in jobtype " + n);
      }
      jobTypeName = jobTypeNameNode.getTextContent();
      if (jobTypeName.equals("")) {
        throw new RuntimeException("Empty tag 'jobtype' in jobtype " + n);
      }
      // Truncate the job type name to fit the db column constraint
      if (jobTypeName.length() > TYPE_LEN_LIMIT) {
        logger.info("Truncate type " + jobTypeName.length());
        jobTypeName = jobTypeName.substring(0, TYPE_LEN_LIMIT);
      }

      String jobConfName;
      Node jobConfNameNode = jobTypeNode.getElementsByTagName("conf").item(0);
      if (jobConfNameNode == null) {
        throw new RuntimeException("No tag 'conf' in jobtype " + jobTypeName);
      }
      jobConfName = jobConfNameNode.getTextContent();
      if (jobConfName.equals("")) {
        throw new RuntimeException("Empty tag 'conf' in jobtype " + jobTypeName);
      }

      String jobConfValue;
      Node jobConfValueNode = jobTypeNode.getElementsByTagName("value").item(0);
      if (jobConfValueNode == null) {
        // Default regex: match any character one or more times
        jobConfValue = ".*";
      } else {
        jobConfValue = jobConfValueNode.getTextContent();
        if (jobConfValue.equals("")) {
          jobConfValue = ".*";
        }
      }

      String appTypeName;
      Node appTypeNameNode = jobTypeNode.getElementsByTagName("applicationtype").item(0);
      if (appTypeNameNode == null) {
        throw new RuntimeException("No tag 'applicationtype' in jobtype " + jobTypeName);
      }
      appTypeName = appTypeNameNode.getTextContent();
      ApplicationType appType = new ApplicationType(appTypeName);

      boolean isDefault = jobTypeNode.getElementsByTagName("isDefault").item(0) != null;

      JobType newJobType = null;
      try {
        newJobType = new JobType(jobTypeName, jobConfName, jobConfValue);
      } catch (PatternSyntaxException e) {
        throw new RuntimeException("Error processing this pattern. Pattern:" + jobConfValue + " jobtype:" + jobTypeName);
      }

      String newJobTypeStr = String.format("jobType:%s, for application type:%s, isDefault:%s, confName:%s, confValue:%s.",
          jobTypeName, appTypeName, isDefault, jobConfName, jobConfValue);
      logger.info("Loaded " + newJobTypeStr);

      if (isDefault) {
        if (defaultMap.containsKey(appType)) {
          throw new RuntimeException("Each application type should have one and only one default job type. Duplicate default job type: "
              + newJobTypeStr + " for application type: " + appType.getName());
        } else {
          defaultMap.put(appType, newJobType);
        }
      } else {
        List<JobType> jobTypes = getJobTypeList(appType);
        jobTypes.add(newJobType);
      }
    }
  }

  // Append the default job type to the end of each application type's job type list
  for (Map.Entry<ApplicationType, JobType> entry : defaultMap.entrySet()) {
    ApplicationType appType = entry.getKey();
    JobType jobType = entry.getValue();
    List<JobType> jobTypes = getJobTypeList(appType);
    jobTypes.add(jobType);
  }

  // Sanity check: every application type must have exactly one default job type
  for (ApplicationType appType : _appTypeToJobTypeList.keySet()) {
    if (!defaultMap.containsKey(appType)) {
      throw new RuntimeException("Each application type should have one and only one default job type, there is none for application type: "
          + appType.getName() + ". Use <isDefault/> to tag one.");
    }
  }

  Integer jobTypesSize = 0;
  for (List<JobType> jobTypes : _appTypeToJobTypeList.values()) {
    jobTypesSize += jobTypes.size();
  }
  logger.info("Loaded total " + jobTypesSize + " job types for " + _appTypeToJobTypeList.size() + " app types");
}
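For orientation, a hedged sketch of the kind of XML fragment this parser expects, embedded as a Java string and read through the same DOM API used above. The root element name and the property values are assumptions for illustration; the tags the parser actually looks up per entry are name, conf, value (optional, defaulting to ".*"), applicationtype and the optional isDefault marker:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;

public class JobTypeConfSketch {
  private static final String SAMPLE =
        "<configurations>"
      + "  <jobType>"
      + "    <name>Pig</name>"
      + "    <conf>pig.script</conf>" // <value> omitted: the parser falls back to ".*"
      + "    <applicationtype>mapreduce</applicationtype>"
      + "  </jobType>"
      + "  <jobType>"
      + "    <name>HadoopJava</name>"
      + "    <conf>mapred.child.java.opts</conf>"
      + "    <applicationtype>mapreduce</applicationtype>"
      + "    <isDefault/>" // exactly one default job type per application type
      + "  </jobType>"
      + "</configurations>";

  public static void main(String[] args) throws Exception {
    Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder()
        .parse(new ByteArrayInputStream(SAMPLE.getBytes(StandardCharsets.UTF_8)));
    Element root = doc.getDocumentElement();
    // parseJobTypeConfiguration walks root.getChildNodes() and reads the tags above per entry.
    System.out.println("job type entries: " + root.getElementsByTagName("jobType").getLength());
  }
}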