Use of org.apache.drill.exec.ops.FragmentContext in project drill by apache.
From the class TestBitBitKerberos, method setupFragmentContextAndManager:
private static void setupFragmentContextAndManager() {
  final FragmentContext fcontext = new MockUp<FragmentContext>() {
    @SuppressWarnings("unused")
    BufferAllocator getAllocator() {
      return c1.getAllocator();
    }
  }.getMockInstance();

  manager = new MockUp<FragmentManager>() {
    int v = 0;

    @Mock
    boolean handle(IncomingDataBatch batch) throws FragmentSetupException, IOException {
      try {
        v++;
        if (v % 10 == 0) {
          System.out.println("sleeping.");
          Thread.sleep(3000);
        }
      } catch (InterruptedException e) {
        // ignore; the sleep above only simulates a periodically slow consumer
      }
      RawFragmentBatch rfb = batch.newRawFragmentBatch(c1.getAllocator());
      rfb.sendOk();
      rfb.release();
      return true;
    }

    @SuppressWarnings("unused")
    public FragmentContext getFragmentContext() {
      return fcontext;
    }
  }.getMockInstance();
}
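For readers unfamiliar with the JMockit idiom above, the following is a minimal, self-contained sketch of the MockUp/getMockInstance pattern. The Clock interface and the MockUpSketch class are invented for illustration, and the sketch assumes a JMockit 1.x release (as used by these tests) in which MockUp can fake an interface:

import mockit.Mock;
import mockit.MockUp;

public class MockUpSketch {
  interface Clock {
    long now();
  }

  public static void main(String[] args) {
    // getMockInstance() returns an instance whose behavior comes from the
    // @Mock-annotated methods defined in the anonymous MockUp subclass.
    Clock clock = new MockUp<Clock>() {
      @Mock
      long now() {
        // fixed value standing in for "real" behavior, much as c1.getAllocator() does above
        return 42L;
      }
    }.getMockInstance();
    System.out.println(clock.now()); // prints 42
  }
}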
Use of org.apache.drill.exec.ops.FragmentContext in project drill by apache.
From the class TestImplicitCastFunctions, method runTest:
public void runTest(@Injectable final DrillbitContext bitContext, @Injectable UserClientConnection connection,
    Object[] expectedResults, String planPath) throws Throwable {
  mockDrillbitContext(bitContext);
  final String planString = Resources.toString(Resources.getResource(planPath), Charsets.UTF_8);
  if (reader == null) {
    reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c);
  }
  if (registry == null) {
    registry = new FunctionImplementationRegistry(c);
  }
  if (context == null) {
    context = new FragmentContext(bitContext, PlanFragment.getDefaultInstance(), connection, registry);
  }
  final PhysicalPlan plan = reader.readPhysicalPlan(planString);
  final SimpleRootExec exec =
      new SimpleRootExec(ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));
  // skip schema batch
  exec.next();
  while (exec.next()) {
    final Object[] res = getRunResult(exec);
    assertEquals("return count does not match", expectedResults.length, res.length);
    for (int i = 0; i < res.length; i++) {
      assertEquals(String.format("column %s does not match", i), expectedResults[i], res[i]);
    }
  }
  if (context.getFailureCause() != null) {
    throw context.getFailureCause();
  }
  assertTrue(!context.isFailed());
}
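A hedged sketch of how an individual test might drive runTest; the method name, plan resource path, and expected values below are placeholders rather than actual Drill test data, and JMockit is assumed to inject the two mocked parameters:

@Test
public void testImplicitCastExample(@Injectable final DrillbitContext bitContext,
    @Injectable UserClientConnection connection) throws Throwable {
  // Hypothetical expected per-column results and plan resource path.
  final Object[] expected = new Object[] { Boolean.TRUE, Boolean.TRUE };
  runTest(bitContext, connection, expected, "functions/cast/testImplicitCastExample.json");
}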
Use of org.apache.drill.exec.ops.FragmentContext in project drill by apache.
From the class ControlMessageHandler, method startNewRemoteFragment:
private void startNewRemoteFragment(final PlanFragment fragment) throws UserRpcException {
  logger.debug("Received remote fragment start instruction: {}", fragment);
  final DrillbitContext drillbitContext = bee.getContext();
  try {
    // We either need to start the fragment if it is a leaf fragment, or set up a
    // fragment manager if it is non-leaf.
    if (fragment.getLeafFragment()) {
      final FragmentContext context =
          new FragmentContext(drillbitContext, fragment, drillbitContext.getFunctionImplementationRegistry());
      final ControlTunnel tunnel = drillbitContext.getController().getTunnel(fragment.getForeman());
      final FragmentStatusReporter statusReporter = new FragmentStatusReporter(context, tunnel);
      final FragmentExecutor fr = new FragmentExecutor(context, fragment, statusReporter);
      bee.addFragmentRunner(fr);
    } else {
      // Intermediate (non-leaf) fragment: register a manager to buffer incoming data.
      final NonRootFragmentManager manager = new NonRootFragmentManager(fragment, drillbitContext);
      drillbitContext.getWorkBus().addFragmentManager(manager);
    }
  } catch (final Exception e) {
    throw new UserRpcException(drillbitContext.getEndpoint(), "Failure while trying to start remote fragment", e);
  } catch (final OutOfMemoryError t) {
    if (t.getMessage().startsWith("Direct buffer")) {
      throw new UserRpcException(drillbitContext.getEndpoint(), "Out of direct memory while trying to start remote fragment", t);
    } else {
      throw t;
    }
  }
}
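The OutOfMemoryError branch keys off the message prefix because the JDK reports direct-buffer exhaustion as an OutOfMemoryError whose message begins with "Direct buffer". Below is a minimal standalone sketch of the same pattern; it is not Drill code, and the oversized allocation is only meant to provoke the error on a typical configuration:

public class DirectOomSketch {
  public static void main(String[] args) {
    try {
      // Usually fails with java.lang.OutOfMemoryError: Direct buffer memory
      // when the request exceeds -XX:MaxDirectMemorySize.
      java.nio.ByteBuffer.allocateDirect(Integer.MAX_VALUE);
    } catch (OutOfMemoryError t) {
      if (t.getMessage() != null && t.getMessage().startsWith("Direct buffer")) {
        System.err.println("Out of direct memory: " + t.getMessage());
      } else {
        throw t; // heap exhaustion or another OOM: rethrow, as the handler above does
      }
    }
  }
}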
Use of org.apache.drill.exec.ops.FragmentContext in project drill by axbaretto.
From the class TestExternalSortInternals, method testConfigConstraints:
@Test
public void testConfigConstraints() {
  int memConstraint = 40 * ONE_MEG;
  int batchSizeConstraint = ONE_MEG / 2;
  int mergeSizeConstraint = ONE_MEG;

  OperatorFixture.Builder builder = new OperatorFixture.Builder();
  builder.configBuilder()
      .put(ExecConstants.EXTERNAL_SORT_MAX_MEMORY, memConstraint)
      .put(ExecConstants.EXTERNAL_SORT_SPILL_BATCH_SIZE, batchSizeConstraint)
      .build();
  FragmentContext fragmentContext = builder.build().getFragmentContext();
  fragmentContext.getOptions().setLocalOption(ExecConstants.OUTPUT_BATCH_SIZE, mergeSizeConstraint);
  SortConfig sortConfig = new SortConfig(fragmentContext.getConfig(), fragmentContext.getOptions());

  long memoryLimit = 50 * ONE_MEG;
  SortMemoryManager memManager = new SortMemoryManager(sortConfig, memoryLimit);
  assertEquals(batchSizeConstraint, memManager.getPreferredSpillBatchSize());
  assertEquals(mergeSizeConstraint, memManager.getPreferredMergeBatchSize());
  assertEquals(memConstraint, memManager.getMemoryLimit());

  int rowWidth = 300;
  int rowCount = 10000;
  int batchSize = rowWidth * rowCount * 2;
  memManager.updateEstimates(batchSize, rowWidth, rowCount);
  verifyCalcs(sortConfig, memConstraint, memManager, batchSize, rowWidth, rowCount);
}
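To make the sizing concrete, the arithmetic below assumes ONE_MEG is 1 MiB (1024 * 1024), which is how the test fixture commonly defines it; it simply restates the numbers used above and is not additional Drill code:

public class SortSizingArithmetic {
  public static void main(String[] args) {
    final int ONE_MEG = 1024 * 1024;        // assumption: same value as the test constant
    final int batchSize = 300 * 10000 * 2;  // 6,000,000 bytes, roughly 5.7 MiB per estimated batch
    final int memConstraint = 40 * ONE_MEG; // 41,943,040 bytes configured as the sort memory cap
    System.out.println("estimated batch size: " + batchSize);
    System.out.println("sort memory cap:      " + memConstraint);
  }
}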
Use of org.apache.drill.exec.ops.FragmentContext in project drill by axbaretto.
From the class TestExternalSortInternals, method testMergeCalcs:
@Test
public void testMergeCalcs() {
  // No artificial merge limit
  int mergeLimitConstraint = 100;
  OperatorFixture.Builder builder = new OperatorFixture.Builder();
  builder.configBuilder().put(ExecConstants.EXTERNAL_SORT_MERGE_LIMIT, mergeLimitConstraint).build();
  FragmentContext fragmentContext = builder.build().getFragmentContext();
  SortConfig sortConfig = new SortConfig(fragmentContext.getConfig(), fragmentContext.getOptions());

  // Allow four spill batches, 8 MB each, plus one output of 16.
  // Allow for internal fragmentation:
  // 96 > (4 * 8 + 16) * 2
  long memoryLimit = 96 * ONE_MEG;
  SortMemoryManager memManager = new SortMemoryManager(sortConfig, memoryLimit);

  // Prime the estimates. Batch size is data size, not buffer size.
  int rowWidth = 300;
  int rowCount = 10000;
  int batchSize = rowWidth * rowCount * 2;
  memManager.updateEstimates(batchSize, rowWidth, rowCount);
  assertFalse(memManager.isLowMemory());
  int spillBatchBufferSize = memManager.getSpillBatchSize().maxBufferSize;
  int inputBatchBufferSize = memManager.getInputBatchSize().expectedBufferSize;

  // One in-mem batch, no merging.
  long allocMemory = inputBatchBufferSize;
  MergeTask task = memManager.consolidateBatches(allocMemory, 1, 0);
  assertEquals(MergeAction.NONE, task.action);

  // Many in-mem batches, just enough to merge
  int memBatches = (int) (memManager.getMergeMemoryLimit() / inputBatchBufferSize);
  allocMemory = memBatches * inputBatchBufferSize;
  task = memManager.consolidateBatches(allocMemory, memBatches, 0);
  assertEquals(MergeAction.NONE, task.action);

  // Spills if no room to merge spilled and in-memory batches
  int spillCount = (int) Math.ceil((memManager.getMergeMemoryLimit() - allocMemory) / (1.0 * spillBatchBufferSize));
  assertTrue(spillCount >= 1);
  task = memManager.consolidateBatches(allocMemory, memBatches, spillCount);
  assertEquals(MergeAction.SPILL, task.action);

  // One more in-mem batch: now needs to spill
  memBatches++;
  allocMemory = memBatches * inputBatchBufferSize;
  task = memManager.consolidateBatches(allocMemory, memBatches, 0);
  assertEquals(MergeAction.SPILL, task.action);

  // No spill for various in-mem/spill run combinations
  long freeMem = memManager.getMergeMemoryLimit() - spillBatchBufferSize;
  memBatches = (int) (freeMem / inputBatchBufferSize);
  allocMemory = memBatches * inputBatchBufferSize;
  task = memManager.consolidateBatches(allocMemory, memBatches, 1);
  assertEquals(MergeAction.NONE, task.action);

  freeMem = memManager.getMergeMemoryLimit() - 2 * spillBatchBufferSize;
  memBatches = (int) (freeMem / inputBatchBufferSize);
  allocMemory = memBatches * inputBatchBufferSize;
  task = memManager.consolidateBatches(allocMemory, memBatches, 2);
  assertEquals(MergeAction.NONE, task.action);

  // No spill if no in-memory batches, only spilled runs, and the spill fits
  freeMem = memManager.getMergeMemoryLimit();
  int spillBatches = (int) (freeMem / spillBatchBufferSize);
  task = memManager.consolidateBatches(0, 0, spillBatches);
  assertEquals(MergeAction.NONE, task.action);

  // One more and must merge
  task = memManager.consolidateBatches(0, 0, spillBatches + 1);
  assertEquals(MergeAction.MERGE, task.action);
  assertEquals(2, task.count);

  // Two more and will merge more
  task = memManager.consolidateBatches(0, 0, spillBatches + 2);
  assertEquals(MergeAction.MERGE, task.action);
  assertEquals(3, task.count);

  // If only one spilled run, and no in-memory batches, skip merge.
  task = memManager.consolidateBatches(0, 0, 1);
  assertEquals(MergeAction.NONE, task.action);

  // Very large number of spilled runs. Limit the merge to what fits in memory.
  task = memManager.consolidateBatches(0, 0, 1000);
  assertEquals(MergeAction.MERGE, task.action);
  assertTrue(task.count <= (int) (memoryLimit / spillBatchBufferSize) - 1);
}