Example 26 with MapReduceTaskData

Use of com.linkedin.drelephant.mapreduce.data.MapReduceTaskData in project dr-elephant by linkedin.

From the class GenericSkewHeuristic, method apply.

@Override
public HeuristicResult apply(MapReduceApplicationData data) {
    if (!data.getSucceeded()) {
        return null;
    }
    MapReduceTaskData[] tasks = getTasks(data);
    // Gathering data for checking time skew
    List<Long> timeTaken = new ArrayList<Long>();
    for (int i = 0; i < tasks.length; i++) {
        if (tasks[i].isTimeDataPresent()) {
            timeTaken.add(tasks[i].getTotalRunTimeMs());
        }
    }
    long[][] groupsTime = Statistics.findTwoGroups(Longs.toArray(timeTaken));
    long timeAvg1 = Statistics.average(groupsTime[0]);
    long timeAvg2 = Statistics.average(groupsTime[1]);
    // Seconds are used for calculating deviation, as they give a better picture than milliseconds.
    long timeAvgSec1 = TimeUnit.MILLISECONDS.toSeconds(timeAvg1);
    long timeAvgSec2 = TimeUnit.MILLISECONDS.toSeconds(timeAvg2);
    long minTime = Math.min(timeAvgSec1, timeAvgSec2);
    long diffTime = Math.abs(timeAvgSec1 - timeAvgSec2);
    // Using the same deviation limits for time skew as for data skew; this can be changed in the future.
    Severity severityTime = getDeviationSeverity(minTime, diffTime);
    // This reduces severity if number of tasks is insignificant
    severityTime = Severity.min(severityTime, Severity.getSeverityAscending(groupsTime[0].length, numTasksLimits[0], numTasksLimits[1], numTasksLimits[2], numTasksLimits[3]));
    // Gather data
    List<Long> inputBytes = new ArrayList<Long>();
    for (int i = 0; i < tasks.length; i++) {
        if (tasks[i].isCounterDataPresent()) {
            long inputByte = 0;
            for (MapReduceCounterData.CounterName counterName : _counterNames) {
                inputByte += tasks[i].getCounters().get(counterName);
            }
            inputBytes.add(inputByte);
        }
    }
    // Ratio of total tasks / sampled tasks
    double scale = ((double) tasks.length) / inputBytes.size();
    // Analyze data. TODO: This is a temporary fix; findTwoGroups should support a List as input.
    long[][] groups = Statistics.findTwoGroups(Longs.toArray(inputBytes));
    long avg1 = Statistics.average(groups[0]);
    long avg2 = Statistics.average(groups[1]);
    long min = Math.min(avg1, avg2);
    long diff = Math.abs(avg2 - avg1);
    Severity severityData = getDeviationSeverity(min, diff);
    // This reduces severity if the largest file sizes are insignificant
    severityData = Severity.min(severityData, getFilesSeverity(avg2));
    // This reduces severity if number of tasks is insignificant
    severityData = Severity.min(severityData, Severity.getSeverityAscending(groups[0].length, numTasksLimits[0], numTasksLimits[1], numTasksLimits[2], numTasksLimits[3]));
    Severity severity = Severity.max(severityData, severityTime);
    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(), _heuristicConfData.getHeuristicName(), severity, Utils.getHeuristicScore(severityData, tasks.length));
    result.addResultDetail("Data skew (Number of tasks)", Integer.toString(tasks.length));
    result.addResultDetail("Data skew (Group A)", groups[0].length + " tasks @ " + FileUtils.byteCountToDisplaySize(avg1) + " avg");
    result.addResultDetail("Data skew (Group B)", groups[1].length + " tasks @ " + FileUtils.byteCountToDisplaySize(avg2) + " avg");
    result.addResultDetail("Time skew (Number of tasks)", Integer.toString(tasks.length));
    result.addResultDetail("Time skew (Group A)", groupsTime[0].length + " tasks @ " + convertTimeMs(timeAvg1) + " avg");
    result.addResultDetail("Time skew (Group B)", groupsTime[1].length + " tasks @ " + convertTimeMs(timeAvg2) + " avg");
    return result;
}
Also used: MapReduceCounterData(com.linkedin.drelephant.mapreduce.data.MapReduceCounterData) ArrayList(java.util.ArrayList) Severity(com.linkedin.drelephant.analysis.Severity) MapReduceTaskData(com.linkedin.drelephant.mapreduce.data.MapReduceTaskData) HeuristicResult(com.linkedin.drelephant.analysis.HeuristicResult)
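
A note on mechanics: Statistics.findTwoGroups drives both the data-skew and time-skew checks above, but its implementation is not shown on this page. The standalone sketch below mimics the idea with a simple sort-and-split; the project's actual grouping logic may differ, and the class name, helper, and sample runtimes here are illustrative only.

import java.util.Arrays;

public class SkewSketch {

    // Hypothetical stand-in for Statistics.findTwoGroups: sort the values and
    // split at the midpoint into a "small" group and a "large" group.
    static long[][] splitTwoGroups(long[] values) {
        long[] sorted = values.clone();
        Arrays.sort(sorted);
        int mid = sorted.length / 2;
        return new long[][] {
                Arrays.copyOfRange(sorted, 0, mid),
                Arrays.copyOfRange(sorted, mid, sorted.length)
        };
    }

    static long average(long[] values) {
        if (values.length == 0) {
            return 0;
        }
        long sum = 0;
        for (long v : values) {
            sum += v;
        }
        return sum / values.length;
    }

    public static void main(String[] args) {
        // Runtimes of five map tasks: three fast, two roughly 4x slower.
        long[] runtimesMs = {60_000, 65_000, 61_000, 240_000, 250_000};
        long[][] groups = splitTwoGroups(runtimesMs);
        long avgFast = average(groups[0]);
        long avgSlow = average(groups[1]);
        // A large gap between the group averages is the skew signal the
        // heuristic scores via getDeviationSeverity.
        System.out.println("fast avg: " + avgFast + " ms, slow avg: " + avgSlow + " ms");
    }
}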

Example 27 with MapReduceTaskData

Use of com.linkedin.drelephant.mapreduce.data.MapReduceTaskData in project dr-elephant by linkedin.

From the class MapperSpeedHeuristic, method apply.

@Override
public HeuristicResult apply(MapReduceApplicationData data) {
    if (!data.getSucceeded()) {
        return null;
    }
    long totalInputByteSize = 0;
    MapReduceTaskData[] tasks = data.getMapperData();
    List<Long> inputByteSizes = new ArrayList<Long>();
    List<Long> speeds = new ArrayList<Long>();
    List<Long> runtimesMs = new ArrayList<Long>();
    for (MapReduceTaskData task : tasks) {
        if (task.isTimeAndCounterDataPresent()) {
            long inputBytes = 0;
            for (MapReduceCounterData.CounterName counterName : _counterNames) {
                inputBytes += task.getCounters().get(counterName);
            }
            long runtimeMs = task.getTotalRunTimeMs();
            inputByteSizes.add(inputBytes);
            totalInputByteSize += inputBytes;
            runtimesMs.add(runtimeMs);
            // Speed is bytes per second
            speeds.add((1000 * inputBytes) / (runtimeMs));
        }
    }
    long medianSpeed;
    long medianSize;
    long medianRuntimeMs;
    // Guard on the sampled list rather than tasks.length: tasks may exist
    // without time/counter data, which would leave these lists empty.
    if (!speeds.isEmpty()) {
        medianSpeed = Statistics.median(speeds);
        medianSize = Statistics.median(inputByteSizes);
        medianRuntimeMs = Statistics.median(runtimesMs);
    } else {
        medianSpeed = 0;
        medianSize = 0;
        medianRuntimeMs = 0;
    }
    Severity severity = getDiskSpeedSeverity(medianSpeed);
    // This reduces severity if task runtime is insignificant
    severity = Severity.min(severity, getRuntimeSeverity(medianRuntimeMs));
    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(), _heuristicConfData.getHeuristicName(), severity, Utils.getHeuristicScore(severity, tasks.length));
    result.addResultDetail("Number of tasks", Integer.toString(tasks.length));
    result.addResultDetail("Median task input size", FileUtils.byteCountToDisplaySize(medianSize));
    result.addResultDetail("Median task runtime", Statistics.readableTimespan(medianRuntimeMs));
    result.addResultDetail("Median task speed", FileUtils.byteCountToDisplaySize(medianSpeed) + "/s");
    result.addResultDetail(CommonConstantsHeuristic.TOTAL_INPUT_SIZE_IN_MB, Double.toString(totalInputByteSize * 1.0 / FileUtils.ONE_MB));
    return result;
}
Also used: MapReduceCounterData(com.linkedin.drelephant.mapreduce.data.MapReduceCounterData) MapReduceTaskData(com.linkedin.drelephant.mapreduce.data.MapReduceTaskData) ArrayList(java.util.ArrayList) Severity(com.linkedin.drelephant.analysis.Severity) HeuristicResult(com.linkedin.drelephant.analysis.HeuristicResult)
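
The core arithmetic in MapperSpeedHeuristic is the bytes-per-second conversion and the median. A minimal self-contained sketch of both follows; the class name and sample sizes are illustrative, and this median is the upper median for even-sized lists (the project's Statistics.median may average the two middle elements instead).

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SpeedSketch {

    // Upper median: sort a copy and take the middle element.
    static long median(List<Long> values) {
        List<Long> sorted = new ArrayList<Long>(values);
        Collections.sort(sorted);
        return sorted.get(sorted.size() / 2);
    }

    public static void main(String[] args) {
        // {inputBytes, runtimeMs} per sampled task.
        long[][] tasks = {
                {512L * 1024 * 1024, 120_000},
                {256L * 1024 * 1024, 30_000},
                {1024L * 1024 * 1024, 600_000},
        };
        List<Long> speeds = new ArrayList<Long>();
        for (long[] t : tasks) {
            // Same formula as the heuristic: bytes * 1000 / milliseconds = bytes/second.
            speeds.add((1000 * t[0]) / t[1]);
        }
        System.out.println("median speed: " + median(speeds) + " B/s");
    }
}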

Example 28 with MapReduceTaskData

Use of com.linkedin.drelephant.mapreduce.data.MapReduceTaskData in project dr-elephant by linkedin.

From the class MapperSpillHeuristic, method apply.

@Override
public HeuristicResult apply(MapReduceApplicationData data) {
    if (!data.getSucceeded()) {
        return null;
    }
    MapReduceTaskData[] tasks = data.getMapperData();
    long totalSpills = 0;
    long totalOutputRecords = 0;
    double ratioSpills = 0.0;
    for (MapReduceTaskData task : tasks) {
        if (task.isCounterDataPresent()) {
            totalSpills += task.getCounters().get(MapReduceCounterData.CounterName.SPILLED_RECORDS);
            totalOutputRecords += task.getCounters().get(MapReduceCounterData.CounterName.MAP_OUTPUT_RECORDS);
        }
    }
    // If no records were spilled, the ratio is zero (this also covers jobs with zero output records and zero spills).
    if (totalSpills == 0) {
        ratioSpills = 0;
    } else {
        ratioSpills = (double) totalSpills / (double) totalOutputRecords;
    }
    Severity severity = getSpillSeverity(ratioSpills);
    // Severity is reduced if number of tasks is small
    Severity taskSeverity = getNumTasksSeverity(tasks.length);
    severity = Severity.min(severity, taskSeverity);
    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(), _heuristicConfData.getHeuristicName(), severity, Utils.getHeuristicScore(severity, tasks.length));
    result.addResultDetail("Number of tasks", Integer.toString(tasks.length));
    result.addResultDetail("Avg spilled records per task", tasks.length == 0 ? "0" : Long.toString(totalSpills / tasks.length));
    result.addResultDetail("Avg output records per task", tasks.length == 0 ? "0" : Long.toString(totalOutputRecords / tasks.length));
    result.addResultDetail("Ratio of spilled records to output records", Double.toString(ratioSpills));
    return result;
}
Also used: MapReduceTaskData(com.linkedin.drelephant.mapreduce.data.MapReduceTaskData) Severity(com.linkedin.drelephant.analysis.Severity) HeuristicResult(com.linkedin.drelephant.analysis.HeuristicResult)
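
The spill ratio above is a plain quotient with a zero guard. A minimal sketch with assumed totals, including the usual interpretation of the result (the class name and the numbers are hypothetical):

public class SpillRatioSketch {

    public static void main(String[] args) {
        // Assumed totals for illustration.
        long totalSpills = 30_000_000L;
        long totalOutputRecords = 10_000_000L;

        // Mirror the heuristic's guard: no spills means a ratio of zero.
        double ratio = (totalSpills == 0)
                ? 0.0
                : (double) totalSpills / (double) totalOutputRecords;

        // A ratio of 3.0 means each output record was spilled to disk about
        // three times, which usually points at an undersized mapper sort
        // buffer (mapreduce.task.io.sort.mb in Hadoop 2.x).
        System.out.println("spill ratio: " + ratio);
    }
}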

Example 29 with MapReduceTaskData

Use of com.linkedin.drelephant.mapreduce.data.MapReduceTaskData in project dr-elephant by linkedin.

From the class MapperTimeHeuristic, method apply.

@Override
public HeuristicResult apply(MapReduceApplicationData data) {
    if (!data.getSucceeded()) {
        return null;
    }
    MapReduceTaskData[] tasks = data.getMapperData();
    List<Long> inputBytes = new ArrayList<Long>();
    List<Long> runtimesMs = new ArrayList<Long>();
    long taskMinMs = Long.MAX_VALUE;
    long taskMaxMs = 0;
    for (MapReduceTaskData task : tasks) {
        if (task.isTimeAndCounterDataPresent()) {
            long inputByte = 0;
            for (MapReduceCounterData.CounterName counterName : _counterNames) {
                inputByte += task.getCounters().get(counterName);
            }
            inputBytes.add(inputByte);
            long taskTime = task.getTotalRunTimeMs();
            runtimesMs.add(taskTime);
            taskMinMs = Math.min(taskMinMs, taskTime);
            taskMaxMs = Math.max(taskMaxMs, taskTime);
        }
    }
    if (taskMinMs == Long.MAX_VALUE) {
        taskMinMs = 0;
    }
    long averageSize = Statistics.average(inputBytes);
    long averageTimeMs = Statistics.average(runtimesMs);
    Severity shortTaskSeverity = shortTaskSeverity(tasks.length, averageTimeMs);
    Severity longTaskSeverity = longTaskSeverity(tasks.length, averageTimeMs);
    Severity severity = Severity.max(shortTaskSeverity, longTaskSeverity);
    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(), _heuristicConfData.getHeuristicName(), severity, Utils.getHeuristicScore(severity, tasks.length));
    result.addResultDetail("Number of tasks", Integer.toString(tasks.length));
    result.addResultDetail("Average task input size", FileUtils.byteCountToDisplaySize(averageSize));
    result.addResultDetail("Average task runtime", Statistics.readableTimespan(averageTimeMs));
    result.addResultDetail("Max task runtime", Statistics.readableTimespan(taskMaxMs));
    result.addResultDetail("Min task runtime", Statistics.readableTimespan(taskMinMs));
    return result;
}
Also used: MapReduceCounterData(com.linkedin.drelephant.mapreduce.data.MapReduceCounterData) MapReduceTaskData(com.linkedin.drelephant.mapreduce.data.MapReduceTaskData) ArrayList(java.util.ArrayList) Severity(com.linkedin.drelephant.analysis.Severity) HeuristicResult(com.linkedin.drelephant.analysis.HeuristicResult)
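
shortTaskSeverity and longTaskSeverity are not shown on this page, so the sketch below only illustrates the underlying idea: many very short tasks suggest the input is split too finely, while very long averages suggest splits that are too coarse. The class name and the 1-minute / 1-hour thresholds are assumptions, not the project's limits.

public class TaskTimeSketch {

    public static void main(String[] args) {
        long[] runtimesMs = {45_000, 50_000, 40_000, 55_000};
        long sum = 0;
        long min = Long.MAX_VALUE;
        long max = 0;
        for (long t : runtimesMs) {
            sum += t;
            min = Math.min(min, t);
            max = Math.max(max, t);
        }
        long avgMs = sum / runtimesMs.length;

        // Hypothetical thresholds: sub-minute averages flag "short" tasks,
        // hour-plus averages flag "long" tasks.
        boolean tooShort = avgMs < 60_000;
        boolean tooLong = avgMs > 3_600_000;
        System.out.printf("avg=%dms min=%dms max=%dms short=%b long=%b%n",
                avgMs, min, max, tooShort, tooLong);
    }
}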

Example 30 with MapReduceTaskData

Use of com.linkedin.drelephant.mapreduce.data.MapReduceTaskData in project dr-elephant by linkedin.

From the class TaskLevelAggregatedMetrics, method compute.

/**
 * Computes the aggregated metrics: peak memory, delay, total task duration,
 * wasted resources and memory usage.
 * Aggregated metrics are expected to be approximations when sampling is enabled.
 * @param taskDatas the per-task data to aggregate
 * @param containerSize the container memory size, in MB
 * @param idealStartTime the earliest time at which the tasks could have started, in milliseconds
 */
private void compute(MapReduceTaskData[] taskDatas, long containerSize, long idealStartTime) {
    long peakMemoryNeed = 0;
    long taskFinishTimeMax = 0;
    long taskDurationMax = 0;
    // if there are zero tasks, then nothing to compute.
    if (taskDatas == null || taskDatas.length == 0) {
        return;
    }
    for (MapReduceTaskData taskData : taskDatas) {
        if (!taskData.isTimeAndCounterDataPresent()) {
            continue;
        }
        // MB
        long taskMemory = taskData.getCounters().get(MapReduceCounterData.CounterName.PHYSICAL_MEMORY_BYTES) / FileUtils.ONE_MB;
        // MB
        long taskVM = taskData.getCounters().get(MapReduceCounterData.CounterName.VIRTUAL_MEMORY_BYTES) / FileUtils.ONE_MB;
        // Milliseconds
        long taskDuration = taskData.getFinishTimeMs() - taskData.getStartTimeMs();
        // MB Seconds
        long taskCost = (containerSize) * (taskDuration / Statistics.SECOND_IN_MS);
        durations.add(taskDuration);
        finishTimes.add(taskData.getFinishTimeMs());
        // peak Memory usage
        long memoryRequiredForVM = (long) (taskVM / CLUSTER_MEMORY_FACTOR);
        long biggerMemoryRequirement = memoryRequiredForVM > taskMemory ? memoryRequiredForVM : taskMemory;
        peakMemoryNeed = biggerMemoryRequirement > peakMemoryNeed ? biggerMemoryRequirement : peakMemoryNeed;
        if (taskFinishTimeMax < taskData.getFinishTimeMs()) {
            taskFinishTimeMax = taskData.getFinishTimeMs();
        }
        if (taskDurationMax < taskDuration) {
            taskDurationMax = taskDuration;
        }
        _resourceUsed += taskCost;
    }
    // Compute the delay in starting the task.
    _delay = taskFinishTimeMax - (idealStartTime + taskDurationMax);
    // invalid delay
    if (_delay < 0) {
        _delay = 0;
    }
    // wastedResources
    // give a 50% buffer
    long wastedMemory = containerSize - (long) (peakMemoryNeed * MEMORY_BUFFER);
    if (wastedMemory > 0) {
        for (long duration : durations) {
            // MB Seconds
            _resourceWasted += (wastedMemory) * (duration / Statistics.SECOND_IN_MS);
        }
    }
}
Also used: MapReduceTaskData(com.linkedin.drelephant.mapreduce.data.MapReduceTaskData)
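
The wasted-resource arithmetic above is compact enough to miss, so here is a standalone sketch of just that calculation. CLUSTER_MEMORY_FACTOR and MEMORY_BUFFER are constants in the real class whose values are not shown on this page; the 2.1 used below is a placeholder, 1.5 is inferred from the "50% buffer" comment, and all inputs are assumed.

public class WastedResourceSketch {

    public static void main(String[] args) {
        long containerSizeMb = 2048;       // requested container memory
        long peakPhysicalMb = 900;         // peak PHYSICAL_MEMORY_BYTES, in MB
        long peakVirtualMb = 2500;         // peak VIRTUAL_MEMORY_BYTES, in MB
        double clusterMemoryFactor = 2.1;  // placeholder for CLUSTER_MEMORY_FACTOR
        double memoryBuffer = 1.5;         // the "50% buffer" comment suggests 1.5

        // Peak need is the larger of physical usage and scaled-down virtual usage.
        long memoryForVm = (long) (peakVirtualMb / clusterMemoryFactor);
        long peakNeedMb = Math.max(memoryForVm, peakPhysicalMb);

        // Wasted memory per container, charged over the task's duration (MB-seconds).
        long wastedMb = containerSizeMb - (long) (peakNeedMb * memoryBuffer);
        long taskDurationSec = 300;
        long wastedMbSeconds = wastedMb > 0 ? wastedMb * taskDurationSec : 0;
        System.out.println("wasted: " + wastedMbSeconds + " MB-seconds");
    }
}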

Aggregations

MapReduceTaskData (com.linkedin.drelephant.mapreduce.data.MapReduceTaskData): 30
HeuristicResult (com.linkedin.drelephant.analysis.HeuristicResult): 23
MapReduceCounterData (com.linkedin.drelephant.mapreduce.data.MapReduceCounterData): 20
MapReduceApplicationData (com.linkedin.drelephant.mapreduce.data.MapReduceApplicationData): 17
ArrayList (java.util.ArrayList): 11
Severity (com.linkedin.drelephant.analysis.Severity): 9
Properties (java.util.Properties): 7
IOException (java.io.IOException): 4
JobHistoryParser (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser): 3
Test (org.junit.Test): 3
FetcherConfiguration (com.linkedin.drelephant.configurations.fetcher.FetcherConfiguration): 1
MapperSkewHeuristic (com.linkedin.drelephant.mapreduce.heuristics.MapperSkewHeuristic): 1
MalformedURLException (java.net.MalformedURLException): 1
URL (java.net.URL): 1
List (java.util.List): 1
Map (java.util.Map): 1
Expectations (mockit.Expectations): 1
AppResult (models.AppResult): 1
Configuration (org.apache.hadoop.conf.Configuration): 1
Path (org.apache.hadoop.fs.Path): 1