Use of org.apache.drill.exec.proto.BitControl.QueryContextInformation in project drill by axbaretto.
From the class ExpressionInterpreterTest, method interpreterDateTest.
@Test
public void interpreterDateTest() throws Exception {
  final String[] colNames = { "col1" };
  final TypeProtos.MajorType[] colTypes = { Types.optional(TypeProtos.MinorType.INT) };
  final String expressionStr = "now()";
  final BitControl.PlanFragment planFragment = BitControl.PlanFragment.getDefaultInstance();
  final QueryContextInformation queryContextInfo = planFragment.getContext();
  final int timeZoneIndex = queryContextInfo.getTimeZone();
  final DateTimeZone timeZone = DateTimeZone.forID(org.apache.drill.exec.expr.fn.impl.DateUtility.getTimeZone(timeZoneIndex));
  final org.joda.time.DateTime now = new org.joda.time.DateTime(queryContextInfo.getQueryStartTime(), timeZone);
  final long queryStartDate = now.getMillis();
  final TimeStampHolder out = new TimeStampHolder();
  out.value = queryStartDate;
  final ByteBuffer buffer = ByteBuffer.allocate(12);
  buffer.putLong(out.value);
  final long l = buffer.getLong(0);
  final DateTime t = new DateTime(l);
  final String[] expectedFirstTwoValues = { t.toString(), t.toString() };
  doTest(expressionStr, colNames, colTypes, expectedFirstTwoValues, planFragment);
}
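The timestamp handling above can be factored into a small helper. This is a minimal sketch (the class name QueryStartTimeResolver is illustrative, not part of Drill) that resolves the query start instant carried by a QueryContextInformation into a Joda DateTime the same way the test does: the proto stores the start time in epoch milliseconds and the session time zone as an index into Drill's Joda time-zone table.

import org.apache.drill.exec.expr.fn.impl.DateUtility;
import org.apache.drill.exec.proto.BitControl.QueryContextInformation;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public final class QueryStartTimeResolver {
  private QueryStartTimeResolver() {
  }

  // Resolve the query start instant: look up the Joda zone ID for the stored
  // time-zone index, then interpret the stored epoch millis in that zone.
  public static DateTime resolve(QueryContextInformation info) {
    final DateTimeZone zone = DateTimeZone.forID(DateUtility.getTimeZone(info.getTimeZone()));
    return new DateTime(info.getQueryStartTime(), zone);
  }
}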
Use of org.apache.drill.exec.proto.BitControl.QueryContextInformation in project drill by axbaretto.
From the class TestFragmentChecker, method print.
private void print(String fragmentFile, int bitCount, int expectedFragmentCount) throws Exception {
  System.out.println(String.format("=================Building plan fragments for [%s]. Allowing %d total Drillbits.==================", fragmentFile, bitCount));
  PhysicalPlanReader ppr = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(CONFIG);
  Fragment fragmentRoot = getRootFragment(ppr, fragmentFile);
  SimpleParallelizer par = new SimpleParallelizer(1000 * 1000, 5, 10, 1.2);
  List<DrillbitEndpoint> endpoints = Lists.newArrayList();
  DrillbitEndpoint localBit = null;
  for (int i = 0; i < bitCount; i++) {
    DrillbitEndpoint b1 = DrillbitEndpoint.newBuilder().setAddress("localhost").setControlPort(1234 + i).build();
    if (i == 0) {
      localBit = b1;
    }
    endpoints.add(b1);
  }
  final QueryContextInformation queryContextInfo = Utilities.createQueryContextInfo("dummySchemaName", "938ea2d9-7cb9-4baf-9414-a5a0b7777e8e");
  QueryWorkUnit qwu = par.getFragments(new OptionList(), localBit, QueryId.getDefaultInstance(), endpoints, fragmentRoot, UserSession.Builder.newBuilder().withCredentials(UserBitShared.UserCredentials.newBuilder().setUserName("foo").build()).build(), queryContextInfo);
  qwu.applyPlan(ppr);
  System.out.println(String.format("=========ROOT FRAGMENT [%d:%d] =========", qwu.getRootFragment().getHandle().getMajorFragmentId(), qwu.getRootFragment().getHandle().getMinorFragmentId()));
  System.out.print(qwu.getRootFragment().getFragmentJson());
  for (PlanFragment f : qwu.getFragments()) {
    System.out.println(String.format("=========Fragment [%d:%d]=====", f.getHandle().getMajorFragmentId(), f.getHandle().getMinorFragmentId()));
    System.out.print(f.getFragmentJson());
  }
  assertEquals(expectedFragmentCount, qwu.getFragments().size() + 1);
}
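For context, here is a rough sketch of the proto that Utilities.createQueryContextInfo("dummySchemaName", sessionId) hands to the parallelizer. The builder setters shown (setQueryStartTime, setTimeZone, setDefaultSchemaName, setSessionId) and DateUtility.getIndex are assumptions about the generated proto and Drill's date utility, not confirmed by this page; the first example above does confirm the matching getQueryStartTime and getTimeZone getters.

import org.apache.drill.exec.expr.fn.impl.DateUtility;
import org.apache.drill.exec.proto.BitControl.QueryContextInformation;
import org.joda.time.DateTimeZone;

public final class QueryContextInfoSketch {
  private QueryContextInfoSketch() {
  }

  // Illustrative only: populate the fields the parallelizer and the now()
  // implementation rely on, i.e. query start time, time-zone index, default
  // schema name and session id.
  public static QueryContextInformation build(String schemaName, String sessionId) {
    return QueryContextInformation.newBuilder()
        .setQueryStartTime(System.currentTimeMillis())
        .setTimeZone(DateUtility.getIndex(DateTimeZone.getDefault().getID()))
        .setDefaultSchemaName(schemaName)
        .setSessionId(sessionId)
        .build();
  }
}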
Use of org.apache.drill.exec.proto.BitControl.QueryContextInformation in project drill by apache.
From the class TestPartitionSender, method testThreadsHelper.
/**
 * Core of the test logic
 * @param hashToRandomExchange
 * @param drillbitContext
 * @param options
 * @param incoming
 * @param registry
 * @param planReader
 * @param planningSet
 * @param rootFragment
 * @param expectedThreadsCount
 * @throws Exception
 */
private void testThreadsHelper(HashToRandomExchange hashToRandomExchange, DrillbitContext drillbitContext, OptionList options, RecordBatch incoming, FunctionImplementationRegistry registry, PhysicalPlanReader planReader, PlanningSet planningSet, Fragment rootFragment, int expectedThreadsCount) throws Exception {
  final QueryContextInformation queryContextInfo = Utilities.createQueryContextInfo("dummySchemaName", "938ea2d9-7cb9-4baf-9414-a5a0b7777e8e");
  final QueryWorkUnit qwu = PARALLELIZER.generateWorkUnit(options, drillbitContext.getEndpoint(), QueryId.getDefaultInstance(), drillbitContext.getBits(), rootFragment, USER_SESSION, queryContextInfo);
  qwu.applyPlan(planReader);
  final List<MinorFragmentEndpoint> mfEndPoints = PhysicalOperatorUtil.getIndexOrderedEndpoints(Lists.newArrayList(drillbitContext.getBits()));
  for (PlanFragment planFragment : qwu.getFragments()) {
    if (!planFragment.getFragmentJson().contains("hash-partition-sender")) {
      continue;
    }
    MockPartitionSenderRootExec partionSenderRootExec = null;
    FragmentContextImpl context = null;
    try {
      context = new FragmentContextImpl(drillbitContext, planFragment, null, registry);
      context.setExecutorState(new MockExecutorState());
      final int majorFragmentId = planFragment.getHandle().getMajorFragmentId();
      final HashPartitionSender partSender = new HashPartitionSender(majorFragmentId, hashToRandomExchange, hashToRandomExchange.getExpression(), mfEndPoints);
      partionSenderRootExec = new MockPartitionSenderRootExec(context, incoming, partSender);
      assertEquals("Number of threads calculated", expectedThreadsCount, partionSenderRootExec.getNumberPartitions());
      partionSenderRootExec.createPartitioner();
      final PartitionerDecorator partDecor = partionSenderRootExec.getPartitioner();
      assertNotNull(partDecor);
      List<Partitioner> partitioners = partDecor.getPartitioners();
      assertNotNull(partitioners);
      final int actualThreads = DRILLBITS_COUNT > expectedThreadsCount ? expectedThreadsCount : DRILLBITS_COUNT;
      assertEquals("Number of partitioners", actualThreads, partitioners.size());
      for (int i = 0; i < mfEndPoints.size(); i++) {
        assertNotNull("PartitionOutgoingBatch", partDecor.getOutgoingBatches(i));
      }
      // check distribution of PartitionOutgoingBatch - should be even distribution
      boolean isFirst = true;
      int prevBatchCountSize = 0;
      int batchCountSize = 0;
      for (Partitioner part : partitioners) {
        @SuppressWarnings("unchecked") final List<PartitionOutgoingBatch> outBatch = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
        batchCountSize = outBatch.size();
        if (!isFirst) {
          assertTrue(Math.abs(batchCountSize - prevBatchCountSize) <= 1);
        } else {
          isFirst = false;
        }
        prevBatchCountSize = batchCountSize;
      }
      partionSenderRootExec.getStats().startProcessing();
      try {
        partDecor.partitionBatch(incoming);
      } finally {
        partionSenderRootExec.getStats().stopProcessing();
      }
      if (actualThreads == 1) {
        assertEquals("With single thread parent and child waitNanos should match", partitioners.get(0).getStats().getWaitNanos(), partionSenderRootExec.getStats().getWaitNanos());
      }
      // testing values distribution
      partitioners = partDecor.getPartitioners();
      isFirst = true;
      // since we have a fake NullVector, the distribution is skewed
      for (Partitioner part : partitioners) {
        @SuppressWarnings("unchecked") final List<PartitionOutgoingBatch> outBatches = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
        for (PartitionOutgoingBatch partOutBatch : outBatches) {
          final int recordCount = ((VectorAccessible) partOutBatch).getRecordCount();
          if (isFirst) {
            assertEquals("RecordCount", 100, recordCount);
            isFirst = false;
          } else {
            assertEquals("RecordCount", 0, recordCount);
          }
        }
      }
      // test exceptions within threads
      // test stats merging
      partionSenderRootExec.getStats().startProcessing();
      try {
        partDecor.executeMethodLogic(new InjectExceptionTest());
        fail("executeMethodLogic should throw an exception.");
      } catch (ExecutionException e) {
        final OperatorProfile.Builder oPBuilder = OperatorProfile.newBuilder();
        partionSenderRootExec.getStats().addAllMetrics(oPBuilder);
        final List<MetricValue> metrics = oPBuilder.getMetricList();
        for (MetricValue metric : metrics) {
          if (Metric.BYTES_SENT.metricId() == metric.getMetricId()) {
            assertEquals("Should add metricValue irrespective of exception", 5 * actualThreads, metric.getLongValue());
          }
          if (Metric.SENDING_THREADS_COUNT.metricId() == metric.getMetricId()) {
            assertEquals(actualThreads, metric.getLongValue());
          }
        }
        assertTrue(e.getCause() instanceof IOException);
        assertEquals(actualThreads - 1, e.getCause().getSuppressed().length);
      } finally {
        partionSenderRootExec.getStats().stopProcessing();
      }
    } finally {
      // cleanup
      partionSenderRootExec.close();
      context.close();
    }
  }
}
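The even-distribution check in the loop above compares each partitioner only with its predecessor. The same invariant can be asserted globally; this is a minimal sketch extracted as a helper (the class name is illustrative, and the imports assume the partitionsender package the test itself uses): the outgoing-batch counts of any two partitioners may differ by at most one.

import static org.junit.Assert.assertTrue;

import java.util.List;

import org.apache.drill.exec.physical.impl.partitionsender.PartitionOutgoingBatch;
import org.apache.drill.exec.physical.impl.partitionsender.Partitioner;

final class DistributionAssertions {
  private DistributionAssertions() {
  }

  // Stricter, global form of the pairwise check above: the smallest and the
  // largest outgoing-batch counts across all partitioners differ by at most one.
  @SuppressWarnings("unchecked")
  static void assertEvenBatchDistribution(List<Partitioner> partitioners) {
    if (partitioners.isEmpty()) {
      return;
    }
    int min = Integer.MAX_VALUE;
    int max = Integer.MIN_VALUE;
    for (Partitioner part : partitioners) {
      final int size = ((List<PartitionOutgoingBatch>) part.getOutgoingBatches()).size();
      min = Math.min(min, size);
      max = Math.max(max, size);
    }
    assertTrue("Outgoing batches should be spread evenly across partitioners", max - min <= 1);
  }
}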
Use of org.apache.drill.exec.proto.BitControl.QueryContextInformation in project drill by axbaretto.
From the class TestPartitionSender, method testThreadsHelper.
/**
 * Core of the test logic
 * @param hashToRandomExchange
 * @param drillbitContext
 * @param options
 * @param incoming
 * @param registry
 * @param planReader
 * @param planningSet
 * @param rootFragment
 * @param expectedThreadsCount
 * @throws Exception
 */
private void testThreadsHelper(HashToRandomExchange hashToRandomExchange, DrillbitContext drillbitContext, OptionList options, RecordBatch incoming, FunctionImplementationRegistry registry, PhysicalPlanReader planReader, PlanningSet planningSet, Fragment rootFragment, int expectedThreadsCount) throws Exception {
  final QueryContextInformation queryContextInfo = Utilities.createQueryContextInfo("dummySchemaName", "938ea2d9-7cb9-4baf-9414-a5a0b7777e8e");
  final QueryWorkUnit qwu = PARALLELIZER.getFragments(options, drillbitContext.getEndpoint(), QueryId.getDefaultInstance(), drillbitContext.getBits(), rootFragment, USER_SESSION, queryContextInfo);
  qwu.applyPlan(planReader);
  final List<MinorFragmentEndpoint> mfEndPoints = PhysicalOperatorUtil.getIndexOrderedEndpoints(Lists.newArrayList(drillbitContext.getBits()));
  for (PlanFragment planFragment : qwu.getFragments()) {
    if (!planFragment.getFragmentJson().contains("hash-partition-sender")) {
      continue;
    }
    MockPartitionSenderRootExec partionSenderRootExec = null;
    FragmentContextImpl context = null;
    try {
      context = new FragmentContextImpl(drillbitContext, planFragment, null, registry);
      final int majorFragmentId = planFragment.getHandle().getMajorFragmentId();
      final HashPartitionSender partSender = new HashPartitionSender(majorFragmentId, hashToRandomExchange, hashToRandomExchange.getExpression(), mfEndPoints);
      partionSenderRootExec = new MockPartitionSenderRootExec(context, incoming, partSender);
      assertEquals("Number of threads calculated", expectedThreadsCount, partionSenderRootExec.getNumberPartitions());
      partionSenderRootExec.createPartitioner();
      final PartitionerDecorator partDecor = partionSenderRootExec.getPartitioner();
      assertNotNull(partDecor);
      List<Partitioner> partitioners = partDecor.getPartitioners();
      assertNotNull(partitioners);
      final int actualThreads = DRILLBITS_COUNT > expectedThreadsCount ? expectedThreadsCount : DRILLBITS_COUNT;
      assertEquals("Number of partitioners", actualThreads, partitioners.size());
      for (int i = 0; i < mfEndPoints.size(); i++) {
        assertNotNull("PartitionOutgoingBatch", partDecor.getOutgoingBatches(i));
      }
      // check distribution of PartitionOutgoingBatch - should be even distribution
      boolean isFirst = true;
      int prevBatchCountSize = 0;
      int batchCountSize = 0;
      for (Partitioner part : partitioners) {
        final List<PartitionOutgoingBatch> outBatch = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
        batchCountSize = outBatch.size();
        if (!isFirst) {
          assertTrue(Math.abs(batchCountSize - prevBatchCountSize) <= 1);
        } else {
          isFirst = false;
        }
        prevBatchCountSize = batchCountSize;
      }
      partionSenderRootExec.getStats().startProcessing();
      try {
        partDecor.partitionBatch(incoming);
      } finally {
        partionSenderRootExec.getStats().stopProcessing();
      }
      if (actualThreads == 1) {
        assertEquals("With single thread parent and child waitNanos should match", partitioners.get(0).getStats().getWaitNanos(), partionSenderRootExec.getStats().getWaitNanos());
      }
      // testing values distribution
      partitioners = partDecor.getPartitioners();
      isFirst = true;
      // since we have a fake NullVector, the distribution is skewed
      for (Partitioner part : partitioners) {
        final List<PartitionOutgoingBatch> outBatches = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
        for (PartitionOutgoingBatch partOutBatch : outBatches) {
          final int recordCount = ((VectorAccessible) partOutBatch).getRecordCount();
          if (isFirst) {
            assertEquals("RecordCount", 100, recordCount);
            isFirst = false;
          } else {
            assertEquals("RecordCount", 0, recordCount);
          }
        }
      }
      // test exceptions within threads
      // test stats merging
      partionSenderRootExec.getStats().startProcessing();
      try {
        partDecor.executeMethodLogic(new InjectExceptionTest());
        fail("Should throw IOException here");
      } catch (IOException ioe) {
        final OperatorProfile.Builder oPBuilder = OperatorProfile.newBuilder();
        partionSenderRootExec.getStats().addAllMetrics(oPBuilder);
        final List<MetricValue> metrics = oPBuilder.getMetricList();
        for (MetricValue metric : metrics) {
          if (Metric.BYTES_SENT.metricId() == metric.getMetricId()) {
            assertEquals("Should add metricValue irrespective of exception", 5 * actualThreads, metric.getLongValue());
          }
          if (Metric.SENDING_THREADS_COUNT.metricId() == metric.getMetricId()) {
            assertEquals(actualThreads, metric.getLongValue());
          }
        }
        assertEquals(actualThreads - 1, ioe.getSuppressed().length);
      } finally {
        partionSenderRootExec.getStats().stopProcessing();
      }
    } finally {
      // cleanup
      partionSenderRootExec.close();
      context.close();
    }
  }
}
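The InjectExceptionTest helper driven through executeMethodLogic in both variants above is not shown on this page. The following is a plausible sketch only, consistent with the assertions that follow the call (each sender thread records 5 bytes sent before failing, and all but one of the IOExceptions end up suppressed). It assumes GeneralExecuteIface is the nested callback interface on PartitionerDecorator that executeMethodLogic accepts, that Metric is the operator metric enum nested in PartitionSenderRootExec, and that the stats object exposes addLongStat; the exception message is likewise illustrative.

import java.io.IOException;

import org.apache.drill.exec.physical.impl.partitionsender.PartitionSenderRootExec.Metric;
import org.apache.drill.exec.physical.impl.partitionsender.Partitioner;
import org.apache.drill.exec.physical.impl.partitionsender.PartitionerDecorator.GeneralExecuteIface;

class InjectExceptionTest implements GeneralExecuteIface {

  @Override
  public void execute(Partitioner partitioner) throws IOException {
    // record a metric before failing so that stats merging across threads
    // can still be verified after the exception is thrown
    partitioner.getStats().addLongStat(Metric.BYTES_SENT, 5);
    throw new IOException("Test exception handling");
  }
}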
Use of org.apache.drill.exec.proto.BitControl.QueryContextInformation in project drill by axbaretto.
From the class TestLocalExchange, method testHelperVerifyPartitionSenderParallelization.
// Verify the number of partition senders in a major fragment is not more than the cluster size and each endpoint
// in the cluster has at most one fragment from a given major fragment that has the partition sender.
private static void testHelperVerifyPartitionSenderParallelization(String plan, boolean isMuxOn, boolean isDeMuxOn) throws Exception {
  final DrillbitContext drillbitContext = getDrillbitContext();
  final PhysicalPlanReader planReader = drillbitContext.getPlanReader();
  final Fragment rootFragment = PopUnitTestBase.getRootFragmentFromPlanString(planReader, plan);
  final List<Integer> deMuxFragments = Lists.newLinkedList();
  final List<Integer> htrFragments = Lists.newLinkedList();
  final PlanningSet planningSet = new PlanningSet();
  // Create a planningSet to get the assignment of major fragment ids to fragments.
  PARALLELIZER.initFragmentWrappers(rootFragment, planningSet);
  findFragmentsWithPartitionSender(rootFragment, planningSet, deMuxFragments, htrFragments);
  final QueryContextInformation queryContextInfo = Utilities.createQueryContextInfo("dummySchemaName", "938ea2d9-7cb9-4baf-9414-a5a0b7777e8e");
  QueryWorkUnit qwu = PARALLELIZER.getFragments(new OptionList(), drillbitContext.getEndpoint(), QueryId.getDefaultInstance(), drillbitContext.getBits(), rootFragment, USER_SESSION, queryContextInfo);
  qwu.applyPlan(planReader);
  // Make sure the number of minor fragments with HashPartitioner within a major fragment is not more than the
  // number of Drillbits in the cluster
  ArrayListMultimap<Integer, DrillbitEndpoint> partitionSenderMap = ArrayListMultimap.create();
  for (PlanFragment planFragment : qwu.getFragments()) {
    if (planFragment.getFragmentJson().contains("hash-partition-sender")) {
      int majorFragmentId = planFragment.getHandle().getMajorFragmentId();
      DrillbitEndpoint assignedEndpoint = planFragment.getAssignment();
      partitionSenderMap.get(majorFragmentId).add(assignedEndpoint);
    }
  }
  if (isMuxOn) {
    verifyAssignment(htrFragments, partitionSenderMap);
  }
  if (isDeMuxOn) {
    verifyAssignment(deMuxFragments, partitionSenderMap);
  }
}
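The verifyAssignment helper used above is not shown here either. This is a sketch of the kind of check it performs, based on the comment at the top of this method: for every major fragment that carries a partition sender, no Drillbit endpoint may be assigned more than one of its minor fragments, which in turn bounds the sender count by the cluster size. The class and method shape are illustrative, not Drill's actual helper.

import static org.junit.Assert.assertEquals;

import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;

import com.google.common.collect.ArrayListMultimap;

final class AssignmentChecks {
  private AssignmentChecks() {
  }

  // Every endpoint should appear at most once per major fragment, so the list
  // of endpoints assigned to a given major fragment must be duplicate-free.
  static void verifyAssignment(List<Integer> majorFragmentIds,
      ArrayListMultimap<Integer, DrillbitEndpoint> partitionSenderMap) {
    for (int majorId : majorFragmentIds) {
      final List<DrillbitEndpoint> assigned = partitionSenderMap.get(majorId);
      final Set<DrillbitEndpoint> unique = new HashSet<>(assigned);
      assertEquals("Endpoint has more than one partition sender for major fragment " + majorId,
          assigned.size(), unique.size());
    }
  }
}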