
Example 16 with OperatorProfile

Use of org.apache.drill.exec.proto.UserBitShared.OperatorProfile in project drill by axbaretto.

In class TestDrillFileSystem, method testIOStats.

@Test
public void testIOStats() throws Exception {
    DrillFileSystem dfs = null;
    InputStream is = null;
    Configuration conf = new Configuration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, FileSystem.DEFAULT_FS);
    OpProfileDef profileDef = new OpProfileDef(0 /*operatorId*/, 0 /*operatorType*/, 0);
    OperatorStats stats = new OperatorStats(profileDef, null);
    // The wait-time tracking in OperatorStats expects the stats to be in the "processing" state
    stats.startProcessing();
    try {
        dfs = new DrillFileSystem(conf, stats);
        is = dfs.open(new Path(tempFilePath));
        byte[] buf = new byte[8000];
        // Drain the file; only the wait time that DrillFileSystem records into stats matters here
        while (is.read(buf, 0, buf.length) != -1) {
        }
    } finally {
        stats.stopProcessing();
        if (is != null) {
            is.close();
        }
        if (dfs != null) {
            dfs.close();
        }
    }
    OperatorProfile operatorProfile = stats.getProfile();
    assertTrue("Expected wait time is non-zero, but got zero wait time", operatorProfile.getWaitNanos() > 0);
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) OpProfileDef(org.apache.drill.exec.ops.OpProfileDef) InputStream(java.io.InputStream) OperatorProfile(org.apache.drill.exec.proto.UserBitShared.OperatorProfile) OperatorStats(org.apache.drill.exec.ops.OperatorStats) Test(org.junit.Test)
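
The assertion on getWaitNanos() passes because DrillFileSystem, when handed an OperatorStats instance, charges the time spent blocked in the wrapped file-system calls to that operator as wait time. A minimal sketch of the pattern, assuming only that OperatorStats exposes startWait()/stopWait(); the wrapper class below is illustrative and not the actual DrillFileSystem internals:

class WaitTrackingInputStream extends java.io.FilterInputStream {
    private final OperatorStats stats;

    WaitTrackingInputStream(java.io.InputStream in, OperatorStats stats) {
        super(in);
        this.stats = stats;
    }

    @Override
    public int read(byte[] b, int off, int len) throws java.io.IOException {
        // The operator is blocked on I/O between these two calls; the elapsed
        // time is what the test later reads back via getWaitNanos().
        stats.startWait();
        try {
            return super.read(b, off, len);
        } finally {
            stats.stopWait();
        }
    }
}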

Example 17 with OperatorProfile

Use of org.apache.drill.exec.proto.UserBitShared.OperatorProfile in project drill by apache.

In class TestDrillFileSystem, method testIOStats.

@Test
public void testIOStats() throws Exception {
    DrillFileSystem dfs = null;
    InputStream is = null;
    Configuration conf = new Configuration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, FileSystem.DEFAULT_FS);
    OpProfileDef profileDef = new OpProfileDef(0 /*operatorId*/, "" /*operatorType*/, 0);
    OperatorStats stats = new OperatorStats(profileDef, null);
    // The wait-time tracking in OperatorStats expects the stats to be in the "processing" state
    stats.startProcessing();
    try {
        dfs = new DrillFileSystem(conf, stats);
        is = dfs.open(new Path(tempFilePath));
        byte[] buf = new byte[8000];
        while (is.read(buf, 0, buf.length) != -1) {
        }
    } finally {
        stats.stopProcessing();
        if (is != null) {
            is.close();
        }
        if (dfs != null) {
            dfs.close();
        }
    }
    OperatorProfile operatorProfile = stats.getProfile();
    assertTrue("Expected wait time is non-zero, but got zero wait time", operatorProfile.getWaitNanos() > 0);
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) OpProfileDef(org.apache.drill.exec.ops.OpProfileDef) InputStream(java.io.InputStream) OperatorProfile(org.apache.drill.exec.proto.UserBitShared.OperatorProfile) OperatorStats(org.apache.drill.exec.ops.OperatorStats) Test(org.junit.Test) BaseTest(org.apache.drill.test.BaseTest)

Example 18 with OperatorProfile

Use of org.apache.drill.exec.proto.UserBitShared.OperatorProfile in project drill by apache.

In class OperatorWrapper, method getContent.

public String getContent() {
    TableBuilder builder = new TableBuilder(OPERATOR_COLUMNS, OPERATOR_COLUMNS_TOOLTIP, true);
    // Reusing for different fragments
    Map<String, String> attributeMap = new HashMap<>();
    for (ImmutablePair<ImmutablePair<OperatorProfile, Integer>, String> ip : opsAndHosts) {
        int minor = ip.getLeft().getRight();
        OperatorProfile op = ip.getLeft().getLeft();
        // Overwrite values from previous fragments
        attributeMap.put(HtmlAttribute.DATA_ORDER, String.valueOf(minor));
        String path = new OperatorPathBuilder().setMajor(major).setMinor(minor).setOperator(op).build();
        builder.appendCell(path, attributeMap);
        builder.appendCell(ip.getRight());
        builder.appendNanos(op.getSetupNanos());
        builder.appendNanos(op.getProcessNanos());
        builder.appendNanos(op.getWaitNanos());
        long maxBatches = Long.MIN_VALUE;
        long maxRecords = Long.MIN_VALUE;
        for (StreamProfile sp : op.getInputProfileList()) {
            maxBatches = Math.max(sp.getBatches(), maxBatches);
            maxRecords = Math.max(sp.getRecords(), maxRecords);
        }
        builder.appendFormattedInteger(maxBatches);
        builder.appendFormattedInteger(maxRecords);
        builder.appendBytes(op.getPeakLocalMemoryAllocated());
    }
    return builder.build();
}
Also used : ImmutablePair(org.apache.commons.lang3.tuple.ImmutablePair) HashMap(java.util.HashMap) OperatorProfile(org.apache.drill.exec.proto.UserBitShared.OperatorProfile) StreamProfile(org.apache.drill.exec.proto.UserBitShared.StreamProfile)
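
getContent only reads plain OperatorProfile messages, so the rendering can be exercised with profiles assembled through the protobuf Builder listed under Aggregations below. A minimal sketch, assuming the standard UserBitShared field names; every numeric value is a placeholder:

import org.apache.drill.exec.proto.UserBitShared.OperatorProfile;
import org.apache.drill.exec.proto.UserBitShared.StreamProfile;

// Builds a synthetic profile: one input stream with 3 batches / 1000 records,
// plus setup, processing and wait times and a peak-memory figure.
static OperatorProfile syntheticProfile() {
    return OperatorProfile.newBuilder()
        .setOperatorId(2)
        .setSetupNanos(1_000_000L)           // 1 ms setup
        .setProcessNanos(50_000_000L)        // 50 ms processing
        .setWaitNanos(5_000_000L)            // 5 ms waiting
        .setPeakLocalMemoryAllocated(4L * 1024 * 1024)
        .addInputProfile(StreamProfile.newBuilder()
            .setBatches(3)
            .setRecords(1000)
            .build())
        .build();
}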

Example 19 with OperatorProfile

Use of org.apache.drill.exec.proto.UserBitShared.OperatorProfile in project drill by apache.

In class OperatorWrapper, method addSummary.

public void addSummary(TableBuilder tb, Map<String, Long> majorFragmentBusyTally, long majorFragmentBusyTallyTotal) {
    // Select background color from palette
    String opTblBgColor = OPERATOR_OVERVIEW_BGCOLOR_PALETTE[major % OPERATOR_OVERVIEW_BGCOLOR_PALETTE.length];
    String path = new OperatorPathBuilder().setMajor(major).setOperator(firstProfile).build();
    tb.appendCell(path, opTblBgColor, null);
    tb.appendCell(operatorName);
    // Check if spill information is available
    int spillCycleMetricIndex = getSpillCycleMetricIndex(operatorType);
    boolean isSpillableOp = (spillCycleMetricIndex != NO_SPILL_METRIC_INDEX);
    boolean hasSpilledToDisk = false;
    boolean isScanOp = operatorName.endsWith("SCAN");
    // Get MajorFragment Busy+Wait Time Tally
    long majorBusyNanos = majorFragmentBusyTally.get(new OperatorPathBuilder().setMajor(major).build());
    double setupSum = 0.0;
    double processSum = 0.0;
    double waitSum = 0.0;
    double memSum = 0.0;
    double spillCycleSum = 0.0;
    long spillCycleMax = 0L;
    long recordSum = 0L;
    // Construct list for sorting purposes (using legacy Comparators)
    final List<ImmutablePair<OperatorProfile, Integer>> opList = new ArrayList<>();
    for (ImmutablePair<ImmutablePair<OperatorProfile, Integer>, String> ip : opsAndHosts) {
        OperatorProfile profile = ip.getLeft().getLeft();
        setupSum += profile.getSetupNanos();
        processSum += profile.getProcessNanos();
        waitSum += profile.getWaitNanos();
        memSum += profile.getPeakLocalMemoryAllocated();
        for (final StreamProfile sp : profile.getInputProfileList()) {
            recordSum += sp.getRecords();
        }
        opList.add(ip.getLeft());
        // Scan the metric list for this operator's spill-cycle metric (older profiles may not include it)
        if (isSpillableOp) {
            // Forced to iterate list
            for (MetricValue metricVal : profile.getMetricList()) {
                if (metricVal.getMetricId() == spillCycleMetricIndex) {
                    long spillCycles = metricVal.getLongValue();
                    spillCycleMax = Math.max(spillCycles, spillCycleMax);
                    spillCycleSum += spillCycles;
                    hasSpilledToDisk = (spillCycleSum > 0.0);
                }
            }
        }
    }
    final ImmutablePair<OperatorProfile, Integer> longSetup = Collections.max(opList, Comparators.setupTime);
    tb.appendNanos(Math.round(setupSum / size));
    tb.appendNanos(longSetup.getLeft().getSetupNanos());
    Map<String, String> timeSkewMap = null;
    final ImmutablePair<OperatorProfile, Integer> longProcess = Collections.max(opList, Comparators.processTime);
    // Calculating average processing time
    long avgProcTime = Math.round(processSum / size);
    tb.appendNanos(avgProcTime);
    long maxProcTime = longProcess.getLeft().getProcessNanos();
    // Calculating skew of longest processing fragment w.r.t. average
    double maxSkew = (avgProcTime > 0) ? maxProcTime / (double) avgProcTime : 0.0d;
    // Marking skew if both thresholds are crossed
    if (avgProcTime > TimeUnit.SECONDS.toNanos(timeSkewMin) && maxSkew > timeSkewRatio) {
        timeSkewMap = new HashMap<>();
        timeSkewMap.put(HtmlAttribute.CLASS, HtmlAttribute.CLASS_VALUE_TIME_SKEW_TAG);
        timeSkewMap.put(HtmlAttribute.TITLE, "One fragment took " + DECIMAL_FORMATTER.format(maxSkew) + " longer than average");
        timeSkewMap.put(HtmlAttribute.STYLE, HtmlAttribute.STYLE_VALUE_CURSOR_HELP);
    }
    tb.appendNanos(maxProcTime, timeSkewMap);
    final ImmutablePair<OperatorProfile, Integer> shortWait = Collections.min(opList, Comparators.waitTime);
    final ImmutablePair<OperatorProfile, Integer> longWait = Collections.max(opList, Comparators.waitTime);
    tb.appendNanos(shortWait.getLeft().getWaitNanos());
    // Calculating average wait time for fragment
    long avgWaitTime = Math.round(waitSum / size);
    // Slow Scan Warning
    Map<String, String> slowScanMap = null;
    // Marking slow scan if threshold is crossed and wait was longer than processing
    if (isScanOp && (avgWaitTime > TimeUnit.SECONDS.toNanos(scanWaitMin)) && (avgWaitTime > avgProcTime)) {
        slowScanMap = new HashMap<>();
        slowScanMap.put(HtmlAttribute.CLASS, HtmlAttribute.CLASS_VALUE_SCAN_WAIT_TAG);
        slowScanMap.put(HtmlAttribute.TITLE, "Avg Wait Time &gt; Avg Processing Time");
        slowScanMap.put(HtmlAttribute.STYLE, HtmlAttribute.STYLE_VALUE_CURSOR_HELP);
    }
    tb.appendNanos(avgWaitTime, slowScanMap);
    long maxWaitTime = longWait.getLeft().getWaitNanos();
    // Skewed Wait Warning
    // Resetting
    timeSkewMap = null;
    // Calculating skew of longest waiting fragment w.r.t. average
    maxSkew = (avgWaitTime > 0) ? maxWaitTime / (double) avgWaitTime : 0.0d;
    // Marking skew if both thresholds are crossed
    if (avgWaitTime > TimeUnit.SECONDS.toNanos(timeSkewMin) && maxSkew > waitSkewRatio) {
        timeSkewMap = new HashMap<>();
        timeSkewMap.put(HtmlAttribute.CLASS, HtmlAttribute.CLASS_VALUE_TIME_SKEW_TAG);
        timeSkewMap.put(HtmlAttribute.TITLE, "One fragment waited " + DECIMAL_FORMATTER.format(maxSkew) + " longer than average");
        timeSkewMap.put(HtmlAttribute.STYLE, HtmlAttribute.STYLE_VALUE_CURSOR_HELP);
    }
    tb.appendNanos(maxWaitTime, timeSkewMap);
    tb.appendPercent(processSum / majorBusyNanos);
    tb.appendPercent(processSum / majorFragmentBusyTallyTotal);
    Map<String, String> estRowcountMap = new HashMap<>();
    estRowcountMap.put(HtmlAttribute.CLASS, HtmlAttribute.CLASS_VALUE_EST_ROWS_ANCHOR);
    estRowcountMap.put(HtmlAttribute.KEY, path.replaceAll("-xx-", "-"));
    tb.appendFormattedInteger(recordSum, estRowcountMap);
    final ImmutablePair<OperatorProfile, Integer> peakMem = Collections.max(opList, Comparators.operatorPeakMemory);
    // Inject spill-to-disk attributes
    Map<String, String> avgSpillMap = null;
    Map<String, String> maxSpillMap = null;
    if (hasSpilledToDisk) {
        avgSpillMap = new HashMap<>();
        // Average SpillCycle
        double avgSpillCycle = spillCycleSum / size;
        avgSpillMap.put(HtmlAttribute.TITLE, DECIMAL_FORMATTER.format(avgSpillCycle) + " spills on average");
        avgSpillMap.put(HtmlAttribute.STYLE, HtmlAttribute.STYLE_VALUE_CURSOR_HELP);
        // JScript will inject Icon
        avgSpillMap.put(HtmlAttribute.CLASS, HtmlAttribute.CLASS_VALUE_SPILL_TAG);
        // JScript will inject Count
        avgSpillMap.put(HtmlAttribute.SPILLS, DECIMAL_FORMATTER.format(avgSpillCycle));
        maxSpillMap = new HashMap<>();
        maxSpillMap.put(HtmlAttribute.TITLE, "Most # spills: " + spillCycleMax);
        maxSpillMap.put(HtmlAttribute.STYLE, HtmlAttribute.STYLE_VALUE_CURSOR_HELP);
        // JScript will inject Icon
        maxSpillMap.put(HtmlAttribute.CLASS, HtmlAttribute.CLASS_VALUE_SPILL_TAG);
        // JScript will inject Count
        maxSpillMap.put(HtmlAttribute.SPILLS, String.valueOf(spillCycleMax));
    }
    tb.appendBytes(Math.round(memSum / size), avgSpillMap);
    tb.appendBytes(peakMem.getLeft().getPeakLocalMemoryAllocated(), maxSpillMap);
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) StreamProfile(org.apache.drill.exec.proto.UserBitShared.StreamProfile) MetricValue(org.apache.drill.exec.proto.UserBitShared.MetricValue) ImmutablePair(org.apache.commons.lang3.tuple.ImmutablePair) OperatorProfile(org.apache.drill.exec.proto.UserBitShared.OperatorProfile)
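
The two skew warnings above reduce to the same two-threshold test: the slowest fragment is flagged only when the average time is already above timeSkewMin seconds and the max-to-average ratio exceeds the configured ratio. A standalone sketch of that check; the threshold values in the usage comment are assumptions for illustration:

import java.util.concurrent.TimeUnit;

// Flags an operator as skewed when the slowest fragment exceeds the average by more
// than the given ratio, but only once the average itself is above the minimum, so
// short-running operators are never flagged.
static boolean isTimeSkewed(long avgNanos, long maxNanos, long skewMinSeconds, double skewRatio) {
    double maxSkew = (avgNanos > 0) ? maxNanos / (double) avgNanos : 0.0;
    return avgNanos > TimeUnit.SECONDS.toNanos(skewMinSeconds) && maxSkew > skewRatio;
}

// Example: average 4 s, slowest 10 s, thresholds 2 s and 2.0 -> skew is flagged.
// isTimeSkewed(4_000_000_000L, 10_000_000_000L, 2, 2.0) returns true.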

Aggregations

OperatorProfile (org.apache.drill.exec.proto.UserBitShared.OperatorProfile): 19
ArrayList (java.util.ArrayList): 8
MinorFragmentProfile (org.apache.drill.exec.proto.UserBitShared.MinorFragmentProfile): 8
HashMap (java.util.HashMap): 7
StreamProfile (org.apache.drill.exec.proto.UserBitShared.StreamProfile): 7
ImmutablePair (org.apache.commons.lang3.tuple.ImmutablePair): 6
MetricValue (org.apache.drill.exec.proto.UserBitShared.MetricValue): 3
InputStream (java.io.InputStream): 2
TreeSet (java.util.TreeSet): 2
OpProfileDef (org.apache.drill.exec.ops.OpProfileDef): 2
OperatorStats (org.apache.drill.exec.ops.OperatorStats): 2
MajorFragmentProfile (org.apache.drill.exec.proto.UserBitShared.MajorFragmentProfile): 2
Builder (org.apache.drill.exec.proto.UserBitShared.OperatorProfile.Builder): 2
Configuration (org.apache.hadoop.conf.Configuration): 2
Path (org.apache.hadoop.fs.Path): 2
Test (org.junit.Test): 2
CoreOperatorType (org.apache.drill.exec.server.rest.profile.CoreOperatorType): 1
BaseTest (org.apache.drill.test.BaseTest): 1