Use of org.apache.drill.exec.proto.UserBitShared.MetricValue in project drill by axbaretto.
From the class OperatorWrapper, method getMetricsTable:
public String getMetricsTable() {
  if (operatorType == null) {
    return "";
  }
  final String[] metricNames = OperatorMetricRegistry.getMetricNames(operatorType.getNumber());
  if (metricNames == null) {
    return "";
  }
  final String[] metricsTableColumnNames = new String[metricNames.length + 1];
  metricsTableColumnNames[0] = "Minor Fragment";
  int i = 1;
  for (final String metricName : metricNames) {
    metricsTableColumnNames[i++] = metricName;
  }
  final TableBuilder builder = new TableBuilder(metricsTableColumnNames, null);
  for (final ImmutablePair<ImmutablePair<OperatorProfile, Integer>, String> ip : opsAndHosts) {
    final OperatorProfile op = ip.getLeft().getLeft();
    builder.appendCell(new OperatorPathBuilder().setMajor(major).setMinor(ip.getLeft().getRight()).setOperator(op).build());
    final Number[] values = new Number[metricNames.length];
    // Track new/Unknown Metrics
    final Set<Integer> unknownMetrics = new TreeSet<Integer>();
    for (final MetricValue metric : op.getMetricList()) {
      if (metric.getMetricId() < metricNames.length) {
        if (metric.hasLongValue()) {
          values[metric.getMetricId()] = metric.getLongValue();
        } else if (metric.hasDoubleValue()) {
          values[metric.getMetricId()] = metric.getDoubleValue();
        }
      } else {
        // Tracking unknown metric IDs
        unknownMetrics.add(metric.getMetricId());
      }
    }
    for (final Number value : values) {
      if (value != null) {
        builder.appendFormattedNumber(value);
      } else {
        builder.appendCell("");
      }
    }
  }
  return builder.build();
}
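The long/double branch above is the standard way to decode a MetricValue: each message carries a metric_id plus either a long_value or a double_value. As a minimal sketch (the helper name and null fallback are my own, not part of Drill), that check could be factored out like this:
// Hypothetical helper; uses only the generated accessors already shown above.
static Number metricAsNumber(final MetricValue metric) {
  if (metric.hasLongValue()) {
    return metric.getLongValue();
  }
  if (metric.hasDoubleValue()) {
    return metric.getDoubleValue();
  }
  // Neither field is set; getMetricsTable() renders such entries as empty cells.
  return null;
}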
Use of org.apache.drill.exec.proto.UserBitShared.MetricValue in project drill by apache.
From the class TestPartitionSender, method testThreadsHelper:
/**
 * Core of the testing
 * @param hashToRandomExchange
 * @param drillbitContext
 * @param options
 * @param incoming
 * @param registry
 * @param planReader
 * @param planningSet
 * @param rootFragment
 * @param expectedThreadsCount
 * @throws Exception
 */
private void testThreadsHelper(HashToRandomExchange hashToRandomExchange, DrillbitContext drillbitContext, OptionList options, RecordBatch incoming, FunctionImplementationRegistry registry, PhysicalPlanReader planReader, PlanningSet planningSet, Fragment rootFragment, int expectedThreadsCount) throws Exception {
  final QueryContextInformation queryContextInfo = Utilities.createQueryContextInfo("dummySchemaName", "938ea2d9-7cb9-4baf-9414-a5a0b7777e8e");
  final QueryWorkUnit qwu = PARALLELIZER.generateWorkUnit(options, drillbitContext.getEndpoint(), QueryId.getDefaultInstance(), drillbitContext.getBits(), rootFragment, USER_SESSION, queryContextInfo);
  qwu.applyPlan(planReader);
  final List<MinorFragmentEndpoint> mfEndPoints = PhysicalOperatorUtil.getIndexOrderedEndpoints(Lists.newArrayList(drillbitContext.getBits()));
  for (PlanFragment planFragment : qwu.getFragments()) {
    if (!planFragment.getFragmentJson().contains("hash-partition-sender")) {
      continue;
    }
    MockPartitionSenderRootExec partionSenderRootExec = null;
    FragmentContextImpl context = null;
    try {
      context = new FragmentContextImpl(drillbitContext, planFragment, null, registry);
      context.setExecutorState(new MockExecutorState());
      final int majorFragmentId = planFragment.getHandle().getMajorFragmentId();
      final HashPartitionSender partSender = new HashPartitionSender(majorFragmentId, hashToRandomExchange, hashToRandomExchange.getExpression(), mfEndPoints);
      partionSenderRootExec = new MockPartitionSenderRootExec(context, incoming, partSender);
      assertEquals("Number of threads calculated", expectedThreadsCount, partionSenderRootExec.getNumberPartitions());
      partionSenderRootExec.createPartitioner();
      final PartitionerDecorator partDecor = partionSenderRootExec.getPartitioner();
      assertNotNull(partDecor);
      List<Partitioner> partitioners = partDecor.getPartitioners();
      assertNotNull(partitioners);
      final int actualThreads = DRILLBITS_COUNT > expectedThreadsCount ? expectedThreadsCount : DRILLBITS_COUNT;
      assertEquals("Number of partitioners", actualThreads, partitioners.size());
      for (int i = 0; i < mfEndPoints.size(); i++) {
        assertNotNull("PartitionOutgoingBatch", partDecor.getOutgoingBatches(i));
      }
      // check distribution of PartitionOutgoingBatch - should be even distribution
      boolean isFirst = true;
      int prevBatchCountSize = 0;
      int batchCountSize = 0;
      for (Partitioner part : partitioners) {
        @SuppressWarnings("unchecked") final List<PartitionOutgoingBatch> outBatch = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
        batchCountSize = outBatch.size();
        if (!isFirst) {
          assertTrue(Math.abs(batchCountSize - prevBatchCountSize) <= 1);
        } else {
          isFirst = false;
        }
        prevBatchCountSize = batchCountSize;
      }
      partionSenderRootExec.getStats().startProcessing();
      try {
        partDecor.partitionBatch(incoming);
      } finally {
        partionSenderRootExec.getStats().stopProcessing();
      }
      if (actualThreads == 1) {
        assertEquals("With single thread parent and child waitNanos should match", partitioners.get(0).getStats().getWaitNanos(), partionSenderRootExec.getStats().getWaitNanos());
      }
      // testing values distribution
      partitioners = partDecor.getPartitioners();
      isFirst = true;
      // since we use a fake NullVector, the distribution is skewed
      for (Partitioner part : partitioners) {
        @SuppressWarnings("unchecked") final List<PartitionOutgoingBatch> outBatches = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
        for (PartitionOutgoingBatch partOutBatch : outBatches) {
          final int recordCount = ((VectorAccessible) partOutBatch).getRecordCount();
          if (isFirst) {
            assertEquals("RecordCount", 100, recordCount);
            isFirst = false;
          } else {
            assertEquals("RecordCount", 0, recordCount);
          }
        }
      }
      // test exceptions within threads
      // test stats merging
      partionSenderRootExec.getStats().startProcessing();
      try {
        partDecor.executeMethodLogic(new InjectExceptionTest());
        fail("executeMethodLogic should throw an exception.");
      } catch (ExecutionException e) {
        final OperatorProfile.Builder oPBuilder = OperatorProfile.newBuilder();
        partionSenderRootExec.getStats().addAllMetrics(oPBuilder);
        final List<MetricValue> metrics = oPBuilder.getMetricList();
        for (MetricValue metric : metrics) {
          if (Metric.BYTES_SENT.metricId() == metric.getMetricId()) {
            assertEquals("Should add metricValue irrespective of exception", 5 * actualThreads, metric.getLongValue());
          }
          if (Metric.SENDING_THREADS_COUNT.metricId() == metric.getMetricId()) {
            assertEquals(actualThreads, metric.getLongValue());
          }
        }
        assertTrue(e.getCause() instanceof IOException);
        assertEquals(actualThreads - 1, e.getCause().getSuppressed().length);
      } finally {
        partionSenderRootExec.getStats().stopProcessing();
      }
    } finally {
      // cleanup
      partionSenderRootExec.close();
      context.close();
    }
  }
}
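On the producer side, MetricValue is an ordinary protobuf message, so profiles like the one the test inspects can be assembled with the generated builders. The sketch below is illustrative only; the metric IDs and values are placeholders, not what the partition sender actually emits:
// Assemble an OperatorProfile carrying two MetricValue entries (IDs and values are made up).
OperatorProfile profile = OperatorProfile.newBuilder()
    .addMetric(MetricValue.newBuilder().setMetricId(0).setLongValue(1024L))   // e.g. a byte count
    .addMetric(MetricValue.newBuilder().setMetricId(1).setDoubleValue(0.75))  // e.g. a ratio
    .build();
// Reading it back uses the same accessors the test above relies on.
for (MetricValue metric : profile.getMetricList()) {
  if (metric.hasLongValue()) {
    System.out.println(metric.getMetricId() + " -> " + metric.getLongValue());
  } else if (metric.hasDoubleValue()) {
    System.out.println(metric.getMetricId() + " -> " + metric.getDoubleValue());
  }
}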
Use of org.apache.drill.exec.proto.UserBitShared.MetricValue in project drill by apache.
From the class OperatorWrapper, method getMetricsTable:
public String getMetricsTable() {
  if (operatorType == null) {
    return "";
  }
  final String[] metricNames = OperatorMetricRegistry.getMetricNames(operatorType);
  if (metricNames == null) {
    return "";
  }
  final String[] metricsTableColumnNames = new String[metricNames.length + 1];
  metricsTableColumnNames[0] = "Minor Fragment";
  int i = 1;
  for (final String metricName : metricNames) {
    metricsTableColumnNames[i++] = metricName;
  }
  final TableBuilder builder = new TableBuilder(metricsTableColumnNames, null);
  for (final ImmutablePair<ImmutablePair<OperatorProfile, Integer>, String> ip : opsAndHosts) {
    final OperatorProfile op = ip.getLeft().getLeft();
    builder.appendCell(new OperatorPathBuilder().setMajor(major).setMinor(ip.getLeft().getRight()).setOperator(op).build());
    final Number[] values = new Number[metricNames.length];
    // Track new/Unknown Metrics
    final Set<Integer> unknownMetrics = new TreeSet<>();
    for (final MetricValue metric : op.getMetricList()) {
      if (metric.getMetricId() < metricNames.length) {
        if (metric.hasLongValue()) {
          values[metric.getMetricId()] = metric.getLongValue();
        } else if (metric.hasDoubleValue()) {
          values[metric.getMetricId()] = metric.getDoubleValue();
        }
      } else {
        // Tracking unknown metric IDs
        unknownMetrics.add(metric.getMetricId());
      }
    }
    for (final Number value : values) {
      if (value != null) {
        builder.appendFormattedNumber(value);
      } else {
        builder.appendCell("");
      }
    }
  }
  return builder.build();
}
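Both versions of getMetricsTable() collect unknownMetrics but never render the set; it only shields the values[] array from MetricValue entries whose metric_id is newer than the registered names. Purely as an illustration of how those IDs could be surfaced (the logging call and the logger field are assumptions, not part of the original method):
// Hypothetical follow-up to the loop above; unknownMetrics and metricNames are the
// variables already declared in getMetricsTable(), and an slf4j-style logger field is assumed.
if (!unknownMetrics.isEmpty()) {
  logger.debug("Operator {} reported unregistered metric ids {} (only {} names registered)",
      operatorType, unknownMetrics, metricNames.length);
}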
Use of org.apache.drill.exec.proto.UserBitShared.MetricValue in project drill by axbaretto.
From the class TestPartitionSender, method testThreadsHelper:
/**
 * Core of the testing
 * @param hashToRandomExchange
 * @param drillbitContext
 * @param options
 * @param incoming
 * @param registry
 * @param planReader
 * @param planningSet
 * @param rootFragment
 * @param expectedThreadsCount
 * @throws Exception
 */
private void testThreadsHelper(HashToRandomExchange hashToRandomExchange, DrillbitContext drillbitContext, OptionList options, RecordBatch incoming, FunctionImplementationRegistry registry, PhysicalPlanReader planReader, PlanningSet planningSet, Fragment rootFragment, int expectedThreadsCount) throws Exception {
  final QueryContextInformation queryContextInfo = Utilities.createQueryContextInfo("dummySchemaName", "938ea2d9-7cb9-4baf-9414-a5a0b7777e8e");
  final QueryWorkUnit qwu = PARALLELIZER.getFragments(options, drillbitContext.getEndpoint(), QueryId.getDefaultInstance(), drillbitContext.getBits(), rootFragment, USER_SESSION, queryContextInfo);
  qwu.applyPlan(planReader);
  final List<MinorFragmentEndpoint> mfEndPoints = PhysicalOperatorUtil.getIndexOrderedEndpoints(Lists.newArrayList(drillbitContext.getBits()));
  for (PlanFragment planFragment : qwu.getFragments()) {
    if (!planFragment.getFragmentJson().contains("hash-partition-sender")) {
      continue;
    }
    MockPartitionSenderRootExec partionSenderRootExec = null;
    FragmentContextImpl context = null;
    try {
      context = new FragmentContextImpl(drillbitContext, planFragment, null, registry);
      final int majorFragmentId = planFragment.getHandle().getMajorFragmentId();
      final HashPartitionSender partSender = new HashPartitionSender(majorFragmentId, hashToRandomExchange, hashToRandomExchange.getExpression(), mfEndPoints);
      partionSenderRootExec = new MockPartitionSenderRootExec(context, incoming, partSender);
      assertEquals("Number of threads calculated", expectedThreadsCount, partionSenderRootExec.getNumberPartitions());
      partionSenderRootExec.createPartitioner();
      final PartitionerDecorator partDecor = partionSenderRootExec.getPartitioner();
      assertNotNull(partDecor);
      List<Partitioner> partitioners = partDecor.getPartitioners();
      assertNotNull(partitioners);
      final int actualThreads = DRILLBITS_COUNT > expectedThreadsCount ? expectedThreadsCount : DRILLBITS_COUNT;
      assertEquals("Number of partitioners", actualThreads, partitioners.size());
      for (int i = 0; i < mfEndPoints.size(); i++) {
        assertNotNull("PartitionOutgoingBatch", partDecor.getOutgoingBatches(i));
      }
      // check distribution of PartitionOutgoingBatch - should be even distribution
      boolean isFirst = true;
      int prevBatchCountSize = 0;
      int batchCountSize = 0;
      for (Partitioner part : partitioners) {
        final List<PartitionOutgoingBatch> outBatch = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
        batchCountSize = outBatch.size();
        if (!isFirst) {
          assertTrue(Math.abs(batchCountSize - prevBatchCountSize) <= 1);
        } else {
          isFirst = false;
        }
        prevBatchCountSize = batchCountSize;
      }
      partionSenderRootExec.getStats().startProcessing();
      try {
        partDecor.partitionBatch(incoming);
      } finally {
        partionSenderRootExec.getStats().stopProcessing();
      }
      if (actualThreads == 1) {
        assertEquals("With single thread parent and child waitNanos should match", partitioners.get(0).getStats().getWaitNanos(), partionSenderRootExec.getStats().getWaitNanos());
      }
      // testing values distribution
      partitioners = partDecor.getPartitioners();
      isFirst = true;
      // since we use a fake NullVector, the distribution is skewed
      for (Partitioner part : partitioners) {
        final List<PartitionOutgoingBatch> outBatches = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
        for (PartitionOutgoingBatch partOutBatch : outBatches) {
          final int recordCount = ((VectorAccessible) partOutBatch).getRecordCount();
          if (isFirst) {
            assertEquals("RecordCount", 100, recordCount);
            isFirst = false;
          } else {
            assertEquals("RecordCount", 0, recordCount);
          }
        }
      }
      // test exceptions within threads
      // test stats merging
      partionSenderRootExec.getStats().startProcessing();
      try {
        partDecor.executeMethodLogic(new InjectExceptionTest());
        fail("Should throw IOException here");
      } catch (IOException ioe) {
        final OperatorProfile.Builder oPBuilder = OperatorProfile.newBuilder();
        partionSenderRootExec.getStats().addAllMetrics(oPBuilder);
        final List<MetricValue> metrics = oPBuilder.getMetricList();
        for (MetricValue metric : metrics) {
          if (Metric.BYTES_SENT.metricId() == metric.getMetricId()) {
            assertEquals("Should add metricValue irrespective of exception", 5 * actualThreads, metric.getLongValue());
          }
          if (Metric.SENDING_THREADS_COUNT.metricId() == metric.getMetricId()) {
            assertEquals(actualThreads, metric.getLongValue());
          }
        }
        assertEquals(actualThreads - 1, ioe.getSuppressed().length);
      } finally {
        partionSenderRootExec.getStats().stopProcessing();
      }
    } finally {
      // cleanup
      partionSenderRootExec.close();
      context.close();
    }
  }
}
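Both variants of the test locate a metric by scanning getMetricList() for a matching ID. That lookup could be factored into a small helper; the method below is a sketch of the pattern, not code from the Drill test:
// Hypothetical helper mirroring the lookup loops in the catch blocks above.
static MetricValue findMetric(final List<MetricValue> metrics, final int metricId) {
  for (final MetricValue metric : metrics) {
    if (metric.getMetricId() == metricId) {
      return metric;
    }
  }
  // The operator never reported this metric.
  return null;
}
// Usage matching the assertions above (commented out because it depends on the test's locals):
// MetricValue bytesSent = findMetric(oPBuilder.getMetricList(), Metric.BYTES_SENT.metricId());
// assertEquals(5 * actualThreads, bytesSent.getLongValue());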
Use of org.apache.drill.exec.proto.UserBitShared.MetricValue in project drill by apache.
From the class OperatorWrapper, method addSummary:
public void addSummary(TableBuilder tb, Map<String, Long> majorFragmentBusyTally, long majorFragmentBusyTallyTotal) {
  // Select background color from palette
  String opTblBgColor = OPERATOR_OVERVIEW_BGCOLOR_PALETTE[major % OPERATOR_OVERVIEW_BGCOLOR_PALETTE.length];
  String path = new OperatorPathBuilder().setMajor(major).setOperator(firstProfile).build();
  tb.appendCell(path, opTblBgColor, null);
  tb.appendCell(operatorName);
  // Check if spill information is available
  int spillCycleMetricIndex = getSpillCycleMetricIndex(operatorType);
  boolean isSpillableOp = (spillCycleMetricIndex != NO_SPILL_METRIC_INDEX);
  boolean hasSpilledToDisk = false;
  boolean isScanOp = operatorName.endsWith("SCAN");
  // Get MajorFragment Busy+Wait Time Tally
  long majorBusyNanos = majorFragmentBusyTally.get(new OperatorPathBuilder().setMajor(major).build());
  double setupSum = 0.0;
  double processSum = 0.0;
  double waitSum = 0.0;
  double memSum = 0.0;
  double spillCycleSum = 0.0;
  long spillCycleMax = 0L;
  long recordSum = 0L;
  // Construct list for sorting purposes (using legacy Comparators)
  final List<ImmutablePair<OperatorProfile, Integer>> opList = new ArrayList<>();
  for (ImmutablePair<ImmutablePair<OperatorProfile, Integer>, String> ip : opsAndHosts) {
    OperatorProfile profile = ip.getLeft().getLeft();
    setupSum += profile.getSetupNanos();
    processSum += profile.getProcessNanos();
    waitSum += profile.getWaitNanos();
    memSum += profile.getPeakLocalMemoryAllocated();
    for (final StreamProfile sp : profile.getInputProfileList()) {
      recordSum += sp.getRecords();
    }
    opList.add(ip.getLeft());
    // Check to ensure index < #metrics (old profiles have fewer metrics); else reset isSpillableOp
    if (isSpillableOp) {
      // Forced to iterate list
      for (MetricValue metricVal : profile.getMetricList()) {
        if (metricVal.getMetricId() == spillCycleMetricIndex) {
          long spillCycles = metricVal.getLongValue();
          spillCycleMax = Math.max(spillCycles, spillCycleMax);
          spillCycleSum += spillCycles;
          hasSpilledToDisk = (spillCycleSum > 0.0);
        }
      }
    }
  }
  final ImmutablePair<OperatorProfile, Integer> longSetup = Collections.max(opList, Comparators.setupTime);
  tb.appendNanos(Math.round(setupSum / size));
  tb.appendNanos(longSetup.getLeft().getSetupNanos());
  Map<String, String> timeSkewMap = null;
  final ImmutablePair<OperatorProfile, Integer> longProcess = Collections.max(opList, Comparators.processTime);
  // Calculating average processing time
  long avgProcTime = Math.round(processSum / size);
  tb.appendNanos(avgProcTime);
  long maxProcTime = longProcess.getLeft().getProcessNanos();
  // Calculating skew of longest processing fragment w.r.t. average
  double maxSkew = (avgProcTime > 0) ? maxProcTime / (double) avgProcTime : 0.0d;
  // Marking skew if both thresholds are crossed
  if (avgProcTime > TimeUnit.SECONDS.toNanos(timeSkewMin) && maxSkew > timeSkewRatio) {
    timeSkewMap = new HashMap<>();
    timeSkewMap.put(HtmlAttribute.CLASS, HtmlAttribute.CLASS_VALUE_TIME_SKEW_TAG);
    timeSkewMap.put(HtmlAttribute.TITLE, "One fragment took " + DECIMAL_FORMATTER.format(maxSkew) + " longer than average");
    timeSkewMap.put(HtmlAttribute.STYLE, HtmlAttribute.STYLE_VALUE_CURSOR_HELP);
  }
  tb.appendNanos(maxProcTime, timeSkewMap);
  final ImmutablePair<OperatorProfile, Integer> shortWait = Collections.min(opList, Comparators.waitTime);
  final ImmutablePair<OperatorProfile, Integer> longWait = Collections.max(opList, Comparators.waitTime);
  tb.appendNanos(shortWait.getLeft().getWaitNanos());
  // Calculating average wait time for fragment
  long avgWaitTime = Math.round(waitSum / size);
  // Slow Scan Warning
  Map<String, String> slowScanMap = null;
  // Marking slow scan if threshold is crossed and wait was longer than processing
  if (isScanOp && (avgWaitTime > TimeUnit.SECONDS.toNanos(scanWaitMin)) && (avgWaitTime > avgProcTime)) {
    slowScanMap = new HashMap<>();
    slowScanMap.put(HtmlAttribute.CLASS, HtmlAttribute.CLASS_VALUE_SCAN_WAIT_TAG);
    slowScanMap.put(HtmlAttribute.TITLE, "Avg Wait Time > Avg Processing Time");
    slowScanMap.put(HtmlAttribute.STYLE, HtmlAttribute.STYLE_VALUE_CURSOR_HELP);
  }
  tb.appendNanos(avgWaitTime, slowScanMap);
  long maxWaitTime = longWait.getLeft().getWaitNanos();
  // Skewed Wait Warning
  // Resetting
  timeSkewMap = null;
  // Calculating skew of longest waiting fragment w.r.t. average
  maxSkew = (avgWaitTime > 0) ? maxWaitTime / (double) avgWaitTime : 0.0d;
  // Marking skew if both thresholds are crossed
  if (avgWaitTime > TimeUnit.SECONDS.toNanos(timeSkewMin) && maxSkew > waitSkewRatio) {
    timeSkewMap = new HashMap<>();
    timeSkewMap.put(HtmlAttribute.CLASS, HtmlAttribute.CLASS_VALUE_TIME_SKEW_TAG);
    timeSkewMap.put(HtmlAttribute.TITLE, "One fragment waited " + DECIMAL_FORMATTER.format(maxSkew) + " longer than average");
    timeSkewMap.put(HtmlAttribute.STYLE, HtmlAttribute.STYLE_VALUE_CURSOR_HELP);
  }
  tb.appendNanos(maxWaitTime, timeSkewMap);
  tb.appendPercent(processSum / majorBusyNanos);
  tb.appendPercent(processSum / majorFragmentBusyTallyTotal);
  Map<String, String> estRowcountMap = new HashMap<>();
  estRowcountMap.put(HtmlAttribute.CLASS, HtmlAttribute.CLASS_VALUE_EST_ROWS_ANCHOR);
  estRowcountMap.put(HtmlAttribute.KEY, path.replaceAll("-xx-", "-"));
  tb.appendFormattedInteger(recordSum, estRowcountMap);
  final ImmutablePair<OperatorProfile, Integer> peakMem = Collections.max(opList, Comparators.operatorPeakMemory);
  // Inject spill-to-disk attributes
  Map<String, String> avgSpillMap = null;
  Map<String, String> maxSpillMap = null;
  if (hasSpilledToDisk) {
    avgSpillMap = new HashMap<>();
    // Average SpillCycle
    double avgSpillCycle = spillCycleSum / size;
    avgSpillMap.put(HtmlAttribute.TITLE, DECIMAL_FORMATTER.format(avgSpillCycle) + " spills on average");
    avgSpillMap.put(HtmlAttribute.STYLE, HtmlAttribute.STYLE_VALUE_CURSOR_HELP);
    // JScript will inject Icon
    avgSpillMap.put(HtmlAttribute.CLASS, HtmlAttribute.CLASS_VALUE_SPILL_TAG);
    // JScript will inject Count
    avgSpillMap.put(HtmlAttribute.SPILLS, DECIMAL_FORMATTER.format(avgSpillCycle));
    maxSpillMap = new HashMap<>();
    maxSpillMap.put(HtmlAttribute.TITLE, "Most # spills: " + spillCycleMax);
    maxSpillMap.put(HtmlAttribute.STYLE, HtmlAttribute.STYLE_VALUE_CURSOR_HELP);
    // JScript will inject Icon
    maxSpillMap.put(HtmlAttribute.CLASS, HtmlAttribute.CLASS_VALUE_SPILL_TAG);
    // JScript will inject Count
    maxSpillMap.put(HtmlAttribute.SPILLS, String.valueOf(spillCycleMax));
  }
  tb.appendBytes(Math.round(memSum / size), avgSpillMap);
  tb.appendBytes(peakMem.getLeft().getPeakLocalMemoryAllocated(), maxSpillMap);
}
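getSpillCycleMetricIndex(operatorType) tells addSummary() which metric_id carries the spill-cycle count for the current operator. Since OperatorMetricRegistry.getMetricNames() returns names positioned by metric ID (which is how getMetricsTable() indexes them), such a lookup can be approximated by a name search. This is only a sketch of the idea, not Drill's actual implementation, and the searched-for name would depend on what the operator registered:
// Illustrative lookup: resolve a metric's ID from its registered display name.
// NO_SPILL_METRIC_INDEX is the sentinel addSummary() already checks against.
static int findMetricIndexByName(final String[] metricNames, final String wanted) {
  if (metricNames != null) {
    for (int id = 0; id < metricNames.length; id++) {
      if (wanted.equals(metricNames[id])) {
        // Metric names are positioned by metric_id in the registry array.
        return id;
      }
    }
  }
  return NO_SPILL_METRIC_INDEX;
}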