Use of com.linkedin.drelephant.analysis.HeuristicResult in project dr-elephant by LinkedIn.
The class MapperSpeedHeuristic, method apply.
public HeuristicResult apply(TezApplicationData data) {
  if (!data.getSucceeded()) {
    return null;
  }
  TezTaskData[] tasks = data.getMapTaskData();
  List<Long> inputSizes = new ArrayList<Long>();
  List<Long> speeds = new ArrayList<Long>();
  List<Long> runtimesMs = new ArrayList<Long>();
  for (TezTaskData task : tasks) {
    if (task.isSampled()) {
      long inputBytes = 0;
      for (TezCounterData.CounterName counterName : _counterNames) {
        inputBytes += task.getCounters().get(counterName);
      }
      long runtimeMs = task.getTotalRunTimeMs();
      inputSizes.add(inputBytes);
      runtimesMs.add(runtimeMs);
      // Speed is bytes per second (inputBytes is summed from byte counters,
      // not record counters). Guard against a zero runtime to avoid
      // division by zero.
      speeds.add(runtimeMs == 0 ? 0 : (1000 * inputBytes) / runtimeMs);
    }
  }
  long medianSpeed;
  long medianSize;
  long medianRuntimeMs;
  // Check the collected samples rather than tasks.length: a non-empty task
  // array can still yield empty lists if no task was sampled, and the median
  // of an empty list is undefined.
  if (!speeds.isEmpty()) {
    medianSpeed = Statistics.median(speeds);
    medianSize = Statistics.median(inputSizes);
    medianRuntimeMs = Statistics.median(runtimesMs);
  } else {
    medianSpeed = 0;
    medianSize = 0;
    medianRuntimeMs = 0;
  }
  Severity severity = getDiskSpeedSeverity(medianSpeed);
  // This reduces severity if task runtime is insignificant
  severity = Severity.min(severity, getRuntimeSeverity(medianRuntimeMs));
  HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
      _heuristicConfData.getHeuristicName(), severity, Utils.getHeuristicScore(severity, tasks.length));
  result.addResultDetail("Number of tasks", Integer.toString(tasks.length));
  result.addResultDetail("Median task input", FileUtils.byteCountToDisplaySize(medianSize));
  result.addResultDetail("Median task runtime", Statistics.readableTimespan(medianRuntimeMs));
  result.addResultDetail("Median task speed", FileUtils.byteCountToDisplaySize(medianSpeed) + "/s");
  return result;
}
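The helpers getDiskSpeedSeverity and getRuntimeSeverity are not shown above. As a rough illustration of the descending-threshold pattern such helpers typically follow, here is a minimal self-contained sketch; the Severity enum stands in for com.linkedin.drelephant.analysis.Severity, and the bytes-per-second limits are illustrative assumptions, not dr-elephant's configured defaults.

public class DiskSpeedSeveritySketch {

  // Hypothetical stand-in for com.linkedin.drelephant.analysis.Severity.
  enum Severity { NONE, LOW, MODERATE, SEVERE, CRITICAL }

  private static final long MB = 1024L * 1024L;

  // Descending thresholds: the slower the median task, the higher the
  // severity. These limits are assumptions for illustration only.
  static Severity getDiskSpeedSeverity(long bytesPerSecond) {
    if (bytesPerSecond <= 1 * MB) return Severity.CRITICAL;
    if (bytesPerSecond <= 2 * MB) return Severity.SEVERE;
    if (bytesPerSecond <= 4 * MB) return Severity.MODERATE;
    if (bytesPerSecond <= 8 * MB) return Severity.LOW;
    return Severity.NONE;
  }

  public static void main(String[] args) {
    // A median speed of 3 MB/s lands in MODERATE with these assumed limits.
    System.out.println(getDiskSpeedSeverity(3 * MB));
  }
}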
Use of com.linkedin.drelephant.analysis.HeuristicResult in project dr-elephant by LinkedIn.
The class MapperSpillHeuristic, method apply.
@Override
public HeuristicResult apply(TezApplicationData data) {
  if (!data.getSucceeded()) {
    return null;
  }
  TezTaskData[] tasks = data.getMapTaskData();
  long totalSpills = 0;
  long totalOutputRecords = 0;
  double ratioSpills = 0.0;
  for (TezTaskData task : tasks) {
    if (task.isSampled()) {
      totalSpills += task.getCounters().get(TezCounterData.CounterName.SPILLED_RECORDS);
      totalOutputRecords += task.getCounters().get(TezCounterData.CounterName.OUTPUT_RECORDS);
    }
  }
  // If no records were spilled, the ratio is zero; this also covers the case
  // where both counters are zero and avoids a 0/0 division.
  if (totalSpills == 0) {
    ratioSpills = 0;
  } else {
    ratioSpills = (double) totalSpills / (double) totalOutputRecords;
  }
  Severity severity = getSpillSeverity(ratioSpills);
  // Severity is reduced if the number of tasks is small
  Severity taskSeverity = getNumTasksSeverity(tasks.length);
  severity = Severity.min(severity, taskSeverity);
  HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
      _heuristicConfData.getHeuristicName(), severity, Utils.getHeuristicScore(severity, tasks.length));
  result.addResultDetail("Number of tasks", Integer.toString(tasks.length));
  result.addResultDetail("Avg spilled records per task", tasks.length == 0 ? "0" : Long.toString(totalSpills / tasks.length));
  result.addResultDetail("Avg output records per task", tasks.length == 0 ? "0" : Long.toString(totalOutputRecords / tasks.length));
  result.addResultDetail("Ratio of spilled records to output records", Double.toString(ratioSpills));
  return result;
}
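The spill ratio drives the severity here: a ratio near 1.0 means each output record was spilled roughly once, while values well above 1.0 mean records hit disk multiple times. The sketch below reproduces the ratio arithmetic with sample counter totals and an assumed ascending threshold mapping; getSpillSeverity's real cut-offs are configurable and not shown in the snippet.

public class SpillRatioSketch {

  // Hypothetical stand-in for com.linkedin.drelephant.analysis.Severity.
  enum Severity { NONE, LOW, MODERATE, SEVERE, CRITICAL }

  // Ascending thresholds: the higher the spill ratio, the higher the
  // severity. These cut-offs are assumptions for illustration only.
  static Severity getSpillSeverity(double ratioSpills) {
    if (ratioSpills >= 3.0) return Severity.CRITICAL;
    if (ratioSpills >= 2.5) return Severity.SEVERE;
    if (ratioSpills >= 2.2) return Severity.MODERATE;
    if (ratioSpills >= 2.0) return Severity.LOW;
    return Severity.NONE;
  }

  public static void main(String[] args) {
    long totalSpills = 450_000_000L;
    long totalOutputRecords = 200_000_000L;
    // Same guard as the heuristic: zero spills means a zero ratio.
    double ratio = totalSpills == 0 ? 0.0 : (double) totalSpills / totalOutputRecords;
    System.out.println(ratio + " -> " + getSpillSeverity(ratio)); // 2.25 -> MODERATE
  }
}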
Use of com.linkedin.drelephant.analysis.HeuristicResult in project dr-elephant by LinkedIn.
The class MapperTimeHeuristic, method apply.
public HeuristicResult apply(TezApplicationData data) {
  if (!data.getSucceeded()) {
    return null;
  }
  TezTaskData[] tasks = data.getMapTaskData();
  List<Long> inputSizes = new ArrayList<Long>();
  List<Long> runtimesMs = new ArrayList<Long>();
  long taskMinMs = Long.MAX_VALUE;
  long taskMaxMs = 0;
  for (TezTaskData task : tasks) {
    if (task.isSampled()) {
      long inputBytes = 0;
      for (TezCounterData.CounterName counterName : _counterNames) {
        inputBytes += task.getCounters().get(counterName);
      }
      inputSizes.add(inputBytes);
      long taskTime = task.getTotalRunTimeMs();
      runtimesMs.add(taskTime);
      taskMinMs = Math.min(taskMinMs, taskTime);
      taskMaxMs = Math.max(taskMaxMs, taskTime);
    }
  }
  if (taskMinMs == Long.MAX_VALUE) {
    taskMinMs = 0;
  }
  long averageSize = Statistics.average(inputSizes);
  long averageTimeMs = Statistics.average(runtimesMs);
  Severity shortTaskSeverity = shortTaskSeverity(tasks.length, averageTimeMs);
  Severity longTaskSeverity = longTaskSeverity(tasks.length, averageTimeMs);
  Severity severity = Severity.max(shortTaskSeverity, longTaskSeverity);
  HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
      _heuristicConfData.getHeuristicName(), severity, Utils.getHeuristicScore(severity, tasks.length));
  result.addResultDetail("Number of tasks", Integer.toString(tasks.length));
  result.addResultDetail("Average task input size", FileUtils.byteCountToDisplaySize(averageSize));
  result.addResultDetail("Average task runtime", Statistics.readableTimespan(averageTimeMs));
  result.addResultDetail("Max task runtime", Statistics.readableTimespan(taskMaxMs));
  result.addResultDetail("Min task runtime", Statistics.readableTimespan(taskMinMs));
  return result;
}
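MapperTimeHeuristic flags both extremes: many short-lived mappers, where scheduling overhead dominates, and long-running mappers, where input splits are too large. shortTaskSeverity and longTaskSeverity are not shown in the snippet; the sketch below shows one plausible way the two checks combine, with thresholds that are assumptions rather than the project's defaults.

public class MapperTimeSketch {

  // Hypothetical stand-in for com.linkedin.drelephant.analysis.Severity.
  enum Severity {
    NONE, LOW, MODERATE, SEVERE, CRITICAL;

    static Severity max(Severity a, Severity b) { return a.ordinal() >= b.ordinal() ? a : b; }
    static Severity min(Severity a, Severity b) { return a.ordinal() <= b.ordinal() ? a : b; }
  }

  // Short tasks are only a problem when there are many of them, so the
  // runtime severity is capped by a task-count severity. Thresholds are
  // assumptions for illustration only.
  static Severity shortTaskSeverity(int numTasks, long avgTimeMs) {
    Severity timeSeverity = avgTimeMs <= 1 * 60 * 1000L ? Severity.CRITICAL
        : avgTimeMs <= 4 * 60 * 1000L ? Severity.MODERATE : Severity.NONE;
    Severity countSeverity = numTasks >= 1000 ? Severity.CRITICAL
        : numTasks >= 100 ? Severity.MODERATE : Severity.NONE;
    return Severity.min(timeSeverity, countSeverity);
  }

  // Long tasks are a problem regardless of count: the splits are too big.
  static Severity longTaskSeverity(int numTasks, long avgTimeMs) {
    return avgTimeMs >= 2 * 60 * 60 * 1000L ? Severity.CRITICAL
        : avgTimeMs >= 1 * 60 * 60 * 1000L ? Severity.MODERATE : Severity.NONE;
  }

  public static void main(String[] args) {
    // 2000 mappers averaging 45 seconds: the short-task check dominates.
    Severity severity = Severity.max(shortTaskSeverity(2000, 45_000L), longTaskSeverity(2000, 45_000L));
    System.out.println(severity); // CRITICAL
  }
}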
Use of com.linkedin.drelephant.analysis.HeuristicResult in project dr-elephant by LinkedIn.
The class ReducerTimeHeuristicTest, method analyzeJob.
private Severity analyzeJob(long runtimeMs, int numTasks) throws IOException {
  MapReduceCounterData dummyCounter = new MapReduceCounterData();
  // One extra slot for the non-sampled task appended after the loop.
  MapReduceTaskData[] reducers = new MapReduceTaskData[numTasks + 1];
  int i = 0;
  for (; i < numTasks; i++) {
    reducers[i] = new MapReduceTaskData("task-id-" + i, "task-attempt-id-" + i);
    reducers[i].setTimeAndCounter(new long[] { runtimeMs, 0, 0, 0, 0 }, dummyCounter);
  }
  // Non-sampled task, which does not contain time and counter data
  reducers[i] = new MapReduceTaskData("task-id-" + i, "task-attempt-id-" + i);
  MapReduceApplicationData data = new MapReduceApplicationData().setCounters(dummyCounter).setReducerData(reducers);
  HeuristicResult result = _heuristic.apply(data);
  return result.getSeverity();
}
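The helper folds runtime and task count into a single severity, so tests can sweep both dimensions. A sketch of how it might be driven follows; the asserted severities depend on ReducerTimeHeuristic's configured thresholds and are assumptions for illustration, not the project's actual expected values.

@Test
public void testSeverityVariesWithRuntime() throws IOException {
  // Many reducers finishing in about a minute: assumed to be flagged as too
  // short (the real outcome depends on the configured thresholds).
  assertEquals(Severity.CRITICAL, analyzeJob(60 * 1000L, 1000));
  // A half-hour average runtime over a modest task count: assumed healthy.
  assertEquals(Severity.NONE, analyzeJob(30 * 60 * 1000L, 50));
}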
Use of com.linkedin.drelephant.analysis.HeuristicResult in project dr-elephant by LinkedIn.
The class DistributedCacheLimitHeuristicTest, method testHeuristicResultCacheFileLimitViolated.
/**
 * File size limit exceeded for a file in the cache.
 */
@Test
public void testHeuristicResultCacheFileLimitViolated() {
  jobConf.setProperty("mapreduce.job.cache.files.filesizes", "100,200,600000000");
  jobConf.setProperty("mapreduce.job.cache.archives.filesizes", "400,500,600");
  MapReduceApplicationData data = new MapReduceApplicationData().setJobConf(jobConf);
  HeuristicResult result = _heuristic.apply(data);
  assertTrue("Failed to match on expected severity", result.getSeverity() == Severity.CRITICAL);
}
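A complementary passing case makes the boundary clearer: the same setup with every cached file comfortably under the limit should not be flagged. The method below is hypothetical; it reuses the fields from the test above and assumes the heuristic's file-size limit is well above a few hundred bytes.

@Test
public void testHeuristicResultCacheFileWithinLimit() {
  jobConf.setProperty("mapreduce.job.cache.files.filesizes", "100,200,300");
  jobConf.setProperty("mapreduce.job.cache.archives.filesizes", "400,500,600");
  MapReduceApplicationData data = new MapReduceApplicationData().setJobConf(jobConf);
  HeuristicResult result = _heuristic.apply(data);
  assertTrue("Expected no violation for small cache files", result.getSeverity() == Severity.NONE);
}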