use of org.apache.drill.exec.ops.OperatorStats in project drill by apache.
the class TestAllocators, method testAllocators.
@Test
public void testAllocators() throws Exception {
  // Setup a drillbit (initializes a root allocator)
  final DrillConfig config = DrillConfig.create(TEST_CONFIGURATIONS);
  try (final RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
       final Drillbit bit = new Drillbit(config, serviceSet)) {
    bit.run();
    final DrillbitContext bitContext = bit.getContext();
    FunctionImplementationRegistry functionRegistry = bitContext.getFunctionImplementationRegistry();
    StoragePluginRegistry storageRegistry = new StoragePluginRegistryImpl(bitContext);
    // Create a few Fragment Contexts
    BitControl.PlanFragment.Builder pfBuilder1 = BitControl.PlanFragment.newBuilder();
    pfBuilder1.setMemInitial(1500000);
    BitControl.PlanFragment pf1 = pfBuilder1.build();
    BitControl.PlanFragment.Builder pfBuilder2 = BitControl.PlanFragment.newBuilder();
    pfBuilder2.setMemInitial(500000);
    BitControl.PlanFragment pf2 = pfBuilder2.build();
    FragmentContext fragmentContext1 = new FragmentContext(bitContext, pf1, null, functionRegistry);
    FragmentContext fragmentContext2 = new FragmentContext(bitContext, pf2, null, functionRegistry);
    // Get a few physical operators. Easiest way is to read a physical plan.
    PhysicalPlanReader planReader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(bitContext, storageRegistry);
    PhysicalPlan plan = planReader.readPhysicalPlan(Files.toString(FileUtils.getResourceAsFile(planFile), Charsets.UTF_8));
    List<PhysicalOperator> physicalOperators = plan.getSortedOperators();
    Iterator<PhysicalOperator> physicalOperatorIterator = physicalOperators.iterator();
    PhysicalOperator physicalOperator1 = physicalOperatorIterator.next();
    PhysicalOperator physicalOperator2 = physicalOperatorIterator.next();
    PhysicalOperator physicalOperator3 = physicalOperatorIterator.next();
    PhysicalOperator physicalOperator4 = physicalOperatorIterator.next();
    PhysicalOperator physicalOperator5 = physicalOperatorIterator.next();
    PhysicalOperator physicalOperator6 = physicalOperatorIterator.next();
    // Create some bogus Operator profile defs and stats to create operator contexts
    OpProfileDef def;
    OperatorStats stats;
    // Use some bogus operator type to create a new operator context.
    def = new OpProfileDef(physicalOperator1.getOperatorId(), UserBitShared.CoreOperatorType.MOCK_SUB_SCAN_VALUE, OperatorUtilities.getChildCount(physicalOperator1));
    stats = fragmentContext1.getStats().newOperatorStats(def, fragmentContext1.getAllocator());
    // Add a couple of Operator Contexts
    // Initial allocation = 1000000 bytes for all operators
    OperatorContext oContext11 = fragmentContext1.newOperatorContext(physicalOperator1);
    DrillBuf b11 = oContext11.getAllocator().buffer(1000000);
    OperatorContext oContext12 = fragmentContext1.newOperatorContext(physicalOperator2, stats);
    DrillBuf b12 = oContext12.getAllocator().buffer(500000);
    OperatorContext oContext21 = fragmentContext1.newOperatorContext(physicalOperator3);
    def = new OpProfileDef(physicalOperator4.getOperatorId(), UserBitShared.CoreOperatorType.TEXT_WRITER_VALUE, OperatorUtilities.getChildCount(physicalOperator4));
    stats = fragmentContext2.getStats().newOperatorStats(def, fragmentContext2.getAllocator());
    OperatorContext oContext22 = fragmentContext2.newOperatorContext(physicalOperator4, stats);
    DrillBuf b22 = oContext22.getAllocator().buffer(2000000);
    // New Fragment begins
    BitControl.PlanFragment.Builder pfBuilder3 = BitControl.PlanFragment.newBuilder();
    pfBuilder3.setMemInitial(1000000);
    BitControl.PlanFragment pf3 = pfBuilder3.build();
    FragmentContext fragmentContext3 = new FragmentContext(bitContext, pf3, null, functionRegistry);
    // New fragment starts an operator that allocates an amount within the limit
    def = new OpProfileDef(physicalOperator5.getOperatorId(), UserBitShared.CoreOperatorType.UNION_VALUE, OperatorUtilities.getChildCount(physicalOperator5));
    stats = fragmentContext3.getStats().newOperatorStats(def, fragmentContext3.getAllocator());
    OperatorContext oContext31 = fragmentContext3.newOperatorContext(physicalOperator5, stats);
    DrillBuf b31a = oContext31.getAllocator().buffer(200000);
    // Previously running operator completes
    b22.release();
    ((AutoCloseable) oContext22).close();
    // Fragment 3 asks for more and fails
    boolean outOfMem = false;
    try {
      oContext31.getAllocator().buffer(44000000);
      fail("Fragment 3 should fail to allocate buffer");
    } catch (OutOfMemoryException e) {
      // Expected.
      outOfMem = true;
    }
    assertTrue(outOfMem);
    // Operator is Exempt from Fragment limits. Fragment 3 asks for more and succeeds
    OperatorContext oContext32 = fragmentContext3.newOperatorContext(physicalOperator6);
    try {
      DrillBuf b32 = oContext32.getAllocator().buffer(4400000);
      b32.release();
    } catch (OutOfMemoryException e) {
      fail("Fragment 3 failed to allocate buffer");
    } finally {
      closeOp(oContext32);
    }
    b11.release();
    closeOp(oContext11);
    b12.release();
    closeOp(oContext12);
    closeOp(oContext21);
    b31a.release();
    closeOp(oContext31);
    fragmentContext1.close();
    fragmentContext2.close();
    fragmentContext3.close();
  }
}
use of org.apache.drill.exec.ops.OperatorStats in project drill by apache.
the class TestRecordIterator, method testSimpleIterator.
@Test
public void testSimpleIterator(@Injectable final DrillbitContext bitContext, @Injectable UserClientConnection connection) throws Throwable {
  mockDrillbitContext(bitContext);
  final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c);
  final String planStr = Files.toString(FileUtils.getResourceAsFile("/record/test_recorditerator.json"), Charsets.UTF_8);
  final PhysicalPlan plan = reader.readPhysicalPlan(planStr);
  final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
  final FragmentContext context = new FragmentContext(bitContext, BitControl.PlanFragment.getDefaultInstance(), connection, registry);
  final List<PhysicalOperator> operatorList = plan.getSortedOperators(false);
  SimpleRootExec exec = new SimpleRootExec(ImplCreator.getExec(context, (FragmentRoot) operatorList.iterator().next()));
  RecordBatch singleBatch = exec.getIncoming();
  PhysicalOperator dummyPop = operatorList.iterator().next();
  OpProfileDef def = new OpProfileDef(dummyPop.getOperatorId(), UserBitShared.CoreOperatorType.MOCK_SUB_SCAN_VALUE, OperatorUtilities.getChildCount(dummyPop));
  OperatorStats stats = exec.getContext().getStats().newOperatorStats(def, exec.getContext().getAllocator());
  RecordIterator iter = new RecordIterator(singleBatch, null, exec.getContext().newOperatorContext(dummyPop, stats), 0, false);
  int totalRecords = 0;
  List<ValueVector> vectors = null;
  while (true) {
    iter.next();
    if (iter.finished()) {
      break;
    } else {
      // First time save vectors.
      if (vectors == null) {
        vectors = Lists.newArrayList();
        for (VectorWrapper vw : iter) {
          vectors.add(vw.getValueVector());
        }
      }
      final int position = iter.getCurrentPosition();
      assertTrue(checkValues(vectors, position));
      totalRecords++;
    }
    assertEquals(0, iter.cachedBatches().size());
  }
  assertEquals(11112, totalRecords);
  try {
    iter.mark();
    fail("mark() should throw UnsupportedOperationException");
  } catch (UnsupportedOperationException e) {
    // expected
  }
  try {
    iter.reset();
    fail("reset() should throw UnsupportedOperationException");
  } catch (UnsupportedOperationException e) {
    // expected
  }
}
use of org.apache.drill.exec.ops.OperatorStats in project drill by apache.
the class PartitionerDecorator, method executeMethodLogic.
/**
 * Helper that wraps the different {@link GeneralExecuteIface} operations in the same execution logic:
 * a single partitioner runs on the calling thread, while multiple partitioners each run in their own
 * task, with per-partitioner operator stats merged back into the main operator stats.
 * @param iface the operation to execute for each partitioner
 * @throws IOException if execution of any partitioner fails
 */
protected void executeMethodLogic(final GeneralExecuteIface iface) throws IOException {
  if (partitioners.size() == 1) {
    // no need for threads
    final OperatorStats localStatsSingle = partitioners.get(0).getStats();
    localStatsSingle.clear();
    localStatsSingle.startProcessing();
    try {
      iface.execute(partitioners.get(0));
    } finally {
      localStatsSingle.stopProcessing();
      stats.mergeMetrics(localStatsSingle);
      // since main stats did not have any wait time - adjust based on partitioner stats wait time
      // main stats processing time started recording in BaseRootExec
      stats.adjustWaitNanos(localStatsSingle.getWaitNanos());
    }
    return;
  }
  long maxProcessTime = 0L;
  // main stats start waiting here; at the end the wait time is adjusted by the max partitioner processing time
  stats.startWait();
  final CountDownLatch latch = new CountDownLatch(partitioners.size());
  final List<CustomRunnable> runnables = Lists.newArrayList();
  final List<Future<?>> taskFutures = Lists.newArrayList();
  CountDownLatchInjection testCountDownLatch = null;
  try {
    // To simulate interruption of the main fragment thread and interruption of the partitioner threads, create a
    // CountDownLatchInjection. Partitioner threads await on the latch and the main fragment thread counts down or
    // interrupts the waiting threads. This makes sure that we are actually interrupting the blocked partitioner threads.
    testCountDownLatch = injector.getLatch(context.getExecutionControls(), "partitioner-sender-latch");
    testCountDownLatch.initialize(1);
    for (final Partitioner part : partitioners) {
      final CustomRunnable runnable = new CustomRunnable(childThreadPrefix, latch, iface, part, testCountDownLatch);
      runnables.add(runnable);
      taskFutures.add(executor.submit(runnable));
    }
    while (true) {
      try {
        // Wait for main fragment interruption.
        injector.injectInterruptiblePause(context.getExecutionControls(), "wait-for-fragment-interrupt", logger);
        // If there is no pause inserted at site "wait-for-fragment-interrupt", release the latch.
        injector.getLatch(context.getExecutionControls(), "partitioner-sender-latch").countDown();
        latch.await();
        break;
      } catch (final InterruptedException e) {
        // If the fragment state says we shouldn't continue, cancel or interrupt partitioner threads
        if (!context.shouldContinue()) {
          logger.debug("Interrupting partitioner threads. Fragment thread {}", tName);
          for (Future<?> f : taskFutures) {
            f.cancel(true);
          }
          break;
        }
      }
    }
    IOException excep = null;
    for (final CustomRunnable runnable : runnables) {
      IOException myException = runnable.getException();
      if (myException != null) {
        if (excep == null) {
          excep = myException;
        } else {
          excep.addSuppressed(myException);
        }
      }
      final OperatorStats localStats = runnable.getPart().getStats();
      long currentProcessingNanos = localStats.getProcessingNanos();
      // find out max Partitioner processing time
      maxProcessTime = (currentProcessingNanos > maxProcessTime) ? currentProcessingNanos : maxProcessTime;
      stats.mergeMetrics(localStats);
    }
    if (excep != null) {
      throw excep;
    }
  } finally {
    stats.stopWait();
    // scale down main stats wait time based on calculated processing time
    // since we did not wait for whole duration of above execution
    stats.adjustWaitNanos(-maxProcessTime);
    // Done with the latch, close it.
    if (testCountDownLatch != null) {
      testCountDownLatch.close();
    }
  }
}
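The multi-partitioner path above charges the main operator's wait clock while the partitioner tasks run, then subtracts the longest per-partitioner processing time so that work done in child threads is not double-counted as wait time. A minimal sketch of that bookkeeping, using only the OperatorStats calls shown above (the class, method, and variable names here are placeholders, not Drill source):

import java.util.List;
import org.apache.drill.exec.ops.OperatorStats;

/** Sketch of the wait-time accounting done at the end of executeMethodLogic. */
public class PartitionerStatsSketch {
  static void foldPartitionerStats(OperatorStats mainStats, List<OperatorStats> partitionerStats) {
    long maxProcessTime = 0L;
    for (OperatorStats partStats : partitionerStats) {
      // track the longest per-partitioner processing time
      maxProcessTime = Math.max(maxProcessTime, partStats.getProcessingNanos());
      // fold each partitioner's metrics into the main operator stats
      mainStats.mergeMetrics(partStats);
    }
    // the main stats were "waiting" while partitioner threads were actually processing,
    // so remove the overlapping portion from the recorded wait time
    mainStats.adjustWaitNanos(-maxProcessTime);
  }
}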
use of org.apache.drill.exec.ops.OperatorStats in project drill by apache.
the class PartitionSenderRootExec, method createPartitioner.
@VisibleForTesting
protected void createPartitioner() throws SchemaChangeException {
  final int divisor = Math.max(1, outGoingBatchCount / actualPartitions);
  final int longTail = outGoingBatchCount % actualPartitions;
  final List<Partitioner> subPartitioners = createClassInstances(actualPartitions);
  int startIndex = 0;
  int endIndex = 0;
  boolean success = false;
  try {
    for (int i = 0; i < actualPartitions; i++) {
      startIndex = endIndex;
      endIndex = (i < actualPartitions - 1) ? startIndex + divisor : outGoingBatchCount;
      if (i < longTail) {
        endIndex++;
      }
      final OperatorStats partitionStats = new OperatorStats(stats, true);
      subPartitioners.get(i).setup(context, incoming, popConfig, partitionStats, oContext, startIndex, endIndex);
    }
    synchronized (this) {
      partitioner = new PartitionerDecorator(subPartitioners, stats, context);
      for (int index = 0; index < terminations.size(); index++) {
        partitioner.getOutgoingBatches(terminations.buffer[index]).terminate();
      }
      terminations.clear();
    }
    success = true;
  } finally {
    if (!success) {
      for (Partitioner p : subPartitioners) {
        p.clear();
      }
    }
  }
}
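The divisor/longTail arithmetic above spreads outGoingBatchCount outgoing batches as evenly as possible across actualPartitions sub-partitioners: the first longTail partitioners get one extra batch, and the last partitioner always ends at outGoingBatchCount. A standalone sketch with hypothetical counts (10 batches over 3 partitioners) that prints the resulting ranges:

/** Sketch (hypothetical counts, not Drill source) of the batch-range split in createPartitioner. */
public class BatchRangeSketch {
  public static void main(String[] args) {
    final int outGoingBatchCount = 10;  // hypothetical number of outgoing batches
    final int actualPartitions = 3;     // hypothetical number of sub-partitioners
    final int divisor = Math.max(1, outGoingBatchCount / actualPartitions);
    final int longTail = outGoingBatchCount % actualPartitions;
    int startIndex = 0;
    int endIndex = 0;
    for (int i = 0; i < actualPartitions; i++) {
      startIndex = endIndex;
      endIndex = (i < actualPartitions - 1) ? startIndex + divisor : outGoingBatchCount;
      if (i < longTail) {
        endIndex++;
      }
      // prints: partitioner 0 -> [0, 4), partitioner 1 -> [4, 7), partitioner 2 -> [7, 10)
      System.out.println("partitioner " + i + " -> [" + startIndex + ", " + endIndex + ")");
    }
  }
}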
use of org.apache.drill.exec.ops.OperatorStats in project drill by apache.
the class HBaseRecordReader, method next.
@Override
public int next() {
  Stopwatch watch = Stopwatch.createStarted();
  if (rowKeyVector != null) {
    rowKeyVector.clear();
    rowKeyVector.allocateNew();
  }
  for (ValueVector v : familyVectorMap.values()) {
    v.clear();
    v.allocateNew();
  }
  int rowCount = 0;
  // The do-while always adds at least one row: even if the memory allocated for the first row
  // is larger than the allowed batch maximum, that row is added anyway.
  do {
    Result result = null;
    final OperatorStats operatorStats = operatorContext == null ? null : operatorContext.getStats();
    try {
      if (operatorStats != null) {
        operatorStats.startWait();
      }
      try {
        result = resultScanner.next();
      } finally {
        if (operatorStats != null) {
          operatorStats.stopWait();
        }
      }
    } catch (IOException e) {
      throw new DrillRuntimeException(e);
    }
    if (result == null) {
      break;
    }
    // parse the result and populate the value vectors
    Cell[] cells = result.rawCells();
    if (rowKeyVector != null) {
      rowKeyVector.getMutator().setSafe(rowCount, cells[0].getRowArray(), cells[0].getRowOffset(), cells[0].getRowLength());
    }
    if (!rowKeyOnly) {
      for (final Cell cell : cells) {
        final int familyOffset = cell.getFamilyOffset();
        final int familyLength = cell.getFamilyLength();
        final byte[] familyArray = cell.getFamilyArray();
        final MapVector mv = getOrCreateFamilyVector(new String(familyArray, familyOffset, familyLength), true);
        final int qualifierOffset = cell.getQualifierOffset();
        final int qualifierLength = cell.getQualifierLength();
        final byte[] qualifierArray = cell.getQualifierArray();
        final NullableVarBinaryVector v = getOrCreateColumnVector(mv, new String(qualifierArray, qualifierOffset, qualifierLength));
        final int valueOffset = cell.getValueOffset();
        final int valueLength = cell.getValueLength();
        final byte[] valueArray = cell.getValueArray();
        v.getMutator().setSafe(rowCount, valueArray, valueOffset, valueLength);
      }
    }
    rowCount++;
  } while (canAddNewRow(rowCount));
  setOutputRowCount(rowCount);
  logger.debug("Took {} ms to get {} records", watch.elapsed(TimeUnit.MILLISECONDS), rowCount);
  return rowCount;
}
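The reader brackets the blocking resultScanner.next() call with startWait()/stopWait() so that time spent blocked on HBase is recorded as operator wait time rather than processing time, and the null check keeps the reader usable when no operator context is available. The same pattern can wrap any blocking call; a small sketch (the BlockingSource interface and method names are placeholders, not Drill or HBase API):

import org.apache.drill.exec.ops.OperatorStats;

/** Sketch of the wait-time bracketing pattern used around the scanner call in next(). */
public class WaitTrackingSketch {
  interface BlockingSource<T> {
    T fetch() throws Exception;  // stands in for a blocking call such as a scanner read
  }

  static <T> T fetchWithWaitTracking(OperatorStats stats, BlockingSource<T> source) throws Exception {
    if (stats == null) {
      return source.fetch();     // no stats available, e.g. in some unit tests
    }
    stats.startWait();           // start the operator's wait clock before blocking
    try {
      return source.fetch();
    } finally {
      stats.stopWait();          // always stop the clock, even if the call fails
    }
  }
}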