Example 46 with DrillConfig

Use of org.apache.drill.common.config.DrillConfig in project drill by apache.

Class CreateTableHandler, method getPlan:

@Override
public PhysicalPlan getPlan(SqlNode sqlNode) throws ValidationException, RelConversionException, IOException, ForemanSetupException {
    SqlCreateTable sqlCreateTable = unwrap(sqlNode, SqlCreateTable.class);
    String originalTableName = sqlCreateTable.getName();
    final ConvertedRelNode convertedRelNode = validateAndConvert(sqlCreateTable.getQuery());
    final RelDataType validatedRowType = convertedRelNode.getValidatedRowType();
    final RelNode queryRelNode = convertedRelNode.getConvertedNode();
    final RelNode newTblRelNode = SqlHandlerUtil.resolveNewTableRel(false, sqlCreateTable.getFieldNames(), validatedRowType, queryRelNode);
    final DrillConfig drillConfig = context.getConfig();
    final AbstractSchema drillSchema = resolveSchema(sqlCreateTable, config.getConverter().getDefaultSchema(), drillConfig);
    checkDuplicatedObjectExistence(drillSchema, originalTableName, drillConfig, context.getSession());
    final RelNode newTblRelNodeWithPCol = SqlHandlerUtil.qualifyPartitionCol(newTblRelNode, sqlCreateTable.getPartitionColumns());
    log("Calcite", newTblRelNodeWithPCol, logger, null);
    // Convert the query to Drill Logical plan and insert a writer operator on top.
    StorageStrategy storageStrategy = sqlCreateTable.isTemporary() ? StorageStrategy.TEMPORARY : new StorageStrategy(context.getOption(ExecConstants.PERSISTENT_TABLE_UMASK).string_val, false);
    // If we are creating a temporary table, the initial table name will be replaced with a generated table name.
    // The generated table name is unique; UUID.randomUUID() is used for its generation.
    // The original table name is stored in the temporary tables cache, so it can be substituted with the generated one during querying.
    String newTableName = sqlCreateTable.isTemporary() ? context.getSession().registerTemporaryTable(drillSchema, originalTableName, drillConfig) : originalTableName;
    DrillRel drel = convertToDrel(newTblRelNodeWithPCol, drillSchema, newTableName, sqlCreateTable.getPartitionColumns(), newTblRelNode.getRowType(), storageStrategy);
    Prel prel = convertToPrel(drel, newTblRelNode.getRowType(), sqlCreateTable.getPartitionColumns());
    logAndSetTextPlan("Drill Physical", prel, logger);
    PhysicalOperator pop = convertToPop(prel);
    PhysicalPlan plan = convertToPlan(pop);
    log("Drill Plan", plan, logger);
    String message = String.format("Creating %s table [%s].", sqlCreateTable.isTemporary() ? "temporary" : "persistent", originalTableName);
    logger.info(message);
    return plan;
}
Also used : StorageStrategy(org.apache.drill.exec.store.StorageStrategy) PhysicalPlan(org.apache.drill.exec.physical.PhysicalPlan) RelNode(org.apache.calcite.rel.RelNode) DrillConfig(org.apache.drill.common.config.DrillConfig) AbstractSchema(org.apache.drill.exec.store.AbstractSchema) PhysicalOperator(org.apache.drill.exec.physical.base.PhysicalOperator) DrillRel(org.apache.drill.exec.planner.logical.DrillRel) RelDataType(org.apache.calcite.rel.type.RelDataType) SqlCreateTable(org.apache.drill.exec.planner.sql.parser.SqlCreateTable) WriterPrel(org.apache.drill.exec.planner.physical.WriterPrel) Prel(org.apache.drill.exec.planner.physical.Prel) ProjectAllowDupPrel(org.apache.drill.exec.planner.physical.ProjectAllowDupPrel) ProjectPrel(org.apache.drill.exec.planner.physical.ProjectPrel)
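
The storage-strategy choice in getPlan can be read in isolation. Below is a minimal, hypothetical sketch of that ternary: the isTemporary flag and the "002" umask are hard-coded placeholders standing in for sqlCreateTable.isTemporary() and the PERSISTENT_TABLE_UMASK session option, not values taken from a real query context.

import org.apache.drill.exec.store.StorageStrategy;

public class StorageStrategyExample {
    public static void main(String[] args) {
        boolean isTemporary = false;     // placeholder for sqlCreateTable.isTemporary()
        String persistentUmask = "002";  // placeholder for context.getOption(ExecConstants.PERSISTENT_TABLE_UMASK).string_val
        // Temporary tables use the predefined TEMPORARY strategy; persistent tables get a
        // strategy built from the configured umask, mirroring the call in getPlan above.
        StorageStrategy strategy = isTemporary
            ? StorageStrategy.TEMPORARY
            : new StorageStrategy(persistentUmask, false);
        System.out.println(strategy);
    }
}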

Example 47 with DrillConfig

Use of org.apache.drill.common.config.DrillConfig in project drill by apache.

Class TestOpSerialization, method testSerializedDeserialize:

@Test
public void testSerializedDeserialize() throws Throwable {
    DrillConfig c = DrillConfig.create();
    PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c);
    MockSubScanPOP s = new MockSubScanPOP("abc", false, null);
    s.setOperatorId(3);
    Filter f = new Filter(s, new ValueExpressions.BooleanExpression("true", ExpressionPosition.UNKNOWN), 0.1f);
    f.setOperatorId(2);
    UnionExchange e = new UnionExchange(f);
    e.setOperatorId(1);
    Screen screen = new Screen(e, CoordinationProtos.DrillbitEndpoint.getDefaultInstance());
    screen.setOperatorId(0);
    boolean reversed = false;
    while (true) {
        List<PhysicalOperator> pops = Lists.newArrayList();
        pops.add(s);
        pops.add(e);
        pops.add(f);
        pops.add(screen);
        if (reversed) {
            pops = Lists.reverse(pops);
        }
        PhysicalPlan plan1 = new PhysicalPlan(PlanProperties.builder().build(), pops);
        LogicalPlanPersistence logicalPlanPersistence = PhysicalPlanReaderTestFactory.defaultLogicalPlanPersistence(c);
        String json = plan1.unparse(logicalPlanPersistence.getMapper().writer());
        PhysicalPlan plan2 = reader.readPhysicalPlan(json);
        PhysicalOperator root = plan2.getSortedOperators(false).iterator().next();
        assertEquals(0, root.getOperatorId());
        PhysicalOperator o1 = root.iterator().next();
        assertEquals(1, o1.getOperatorId());
        PhysicalOperator o2 = o1.iterator().next();
        assertEquals(2, o2.getOperatorId());
        if (reversed) {
            break;
        }
        reversed = !reversed;
    }
}
Also used : PhysicalPlan(org.apache.drill.exec.physical.PhysicalPlan) UnionExchange(org.apache.drill.exec.physical.config.UnionExchange) PhysicalPlanReader(org.apache.drill.exec.planner.PhysicalPlanReader) Screen(org.apache.drill.exec.physical.config.Screen) ValueExpressions(org.apache.drill.common.expression.ValueExpressions) MockSubScanPOP(org.apache.drill.exec.store.mock.MockSubScanPOP) LogicalPlanPersistence(org.apache.drill.common.config.LogicalPlanPersistence) DrillConfig(org.apache.drill.common.config.DrillConfig) Filter(org.apache.drill.exec.physical.config.Filter) PhysicalOperator(org.apache.drill.exec.physical.base.PhysicalOperator) Test(org.junit.Test)
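
The DrillConfig-specific part of this test is the wiring of the JSON plan serializers. The fragment below (a sketch, not a standalone test) shows only that wiring; PhysicalPlanReaderTestFactory is the same test helper used above, and the plan argument is assumed to be built elsewhere.

// Sketch: DrillConfig -> plan serializers, as used in testSerializedDeserialize.
static PhysicalPlan roundTrip(PhysicalPlan plan) throws Exception {
    DrillConfig config = DrillConfig.create();
    // Writer side: LogicalPlanPersistence exposes the Jackson mapper used to unparse plans.
    LogicalPlanPersistence persistence = PhysicalPlanReaderTestFactory.defaultLogicalPlanPersistence(config);
    String json = plan.unparse(persistence.getMapper().writer());   // plan -> JSON
    // Reader side: turns the JSON physical plan back into a PhysicalPlan.
    PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(config);
    return reader.readPhysicalPlan(json);                           // JSON -> plan
}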

Example 48 with DrillConfig

Use of org.apache.drill.common.config.DrillConfig in project drill by apache.

Class TestWriteToDisk, method test:

@Test
@SuppressWarnings("static-method")
public void test() throws Exception {
    final List<ValueVector> vectorList = Lists.newArrayList();
    final DrillConfig config = DrillConfig.create();
    try (final RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
        final Drillbit bit = new Drillbit(config, serviceSet)) {
        bit.run();
        final DrillbitContext context = bit.getContext();
        final MaterializedField intField = MaterializedField.create("int", Types.required(TypeProtos.MinorType.INT));
        final MaterializedField binField = MaterializedField.create("binary", Types.required(TypeProtos.MinorType.VARBINARY));
        try (final IntVector intVector = (IntVector) TypeHelper.getNewVector(intField, context.getAllocator());
            final VarBinaryVector binVector = (VarBinaryVector) TypeHelper.getNewVector(binField, context.getAllocator())) {
            AllocationHelper.allocate(intVector, 4, 4);
            AllocationHelper.allocate(binVector, 4, 5);
            vectorList.add(intVector);
            vectorList.add(binVector);
            intVector.getMutator().setSafe(0, 0);
            binVector.getMutator().setSafe(0, "ZERO".getBytes());
            intVector.getMutator().setSafe(1, 1);
            binVector.getMutator().setSafe(1, "ONE".getBytes());
            intVector.getMutator().setSafe(2, 2);
            binVector.getMutator().setSafe(2, "TWO".getBytes());
            intVector.getMutator().setSafe(3, 3);
            binVector.getMutator().setSafe(3, "THREE".getBytes());
            intVector.getMutator().setValueCount(4);
            binVector.getMutator().setValueCount(4);
            VectorContainer container = new VectorContainer();
            container.addCollection(vectorList);
            container.setRecordCount(4);
            WritableBatch batch = WritableBatch.getBatchNoHVWrap(container.getRecordCount(), container, false);
            VectorAccessibleSerializable wrap = new VectorAccessibleSerializable(batch, context.getAllocator());
            Configuration conf = new Configuration();
            conf.set(FileSystem.FS_DEFAULT_NAME_KEY, FileSystem.DEFAULT_FS);
            final VectorAccessibleSerializable newWrap = new VectorAccessibleSerializable(context.getAllocator());
            try (final FileSystem fs = FileSystem.get(conf)) {
                final File tempDir = Files.createTempDir();
                tempDir.deleteOnExit();
                final Path path = new Path(tempDir.getAbsolutePath(), "drillSerializable");
                try (final FSDataOutputStream out = fs.create(path)) {
                    wrap.writeToStream(out);
                    out.close();
                }
                try (final FSDataInputStream in = fs.open(path)) {
                    newWrap.readFromStream(in);
                }
            }
            final VectorAccessible newContainer = newWrap.get();
            for (VectorWrapper<?> w : newContainer) {
                try (ValueVector vv = w.getValueVector()) {
                    int values = vv.getAccessor().getValueCount();
                    for (int i = 0; i < values; i++) {
                        final Object o = vv.getAccessor().getObject(i);
                        if (o instanceof byte[]) {
                            System.out.println(new String((byte[]) o));
                        } else {
                            System.out.println(o);
                        }
                    }
                }
            }
        }
    }
}
Also used : DrillbitContext(org.apache.drill.exec.server.DrillbitContext) Path(org.apache.hadoop.fs.Path) SchemaPath(org.apache.drill.common.expression.SchemaPath) IntVector(org.apache.drill.exec.vector.IntVector) Configuration(org.apache.hadoop.conf.Configuration) VectorAccessible(org.apache.drill.exec.record.VectorAccessible) MaterializedField(org.apache.drill.exec.record.MaterializedField) VarBinaryVector(org.apache.drill.exec.vector.VarBinaryVector) VectorContainer(org.apache.drill.exec.record.VectorContainer) ValueVector(org.apache.drill.exec.vector.ValueVector) DrillConfig(org.apache.drill.common.config.DrillConfig) Drillbit(org.apache.drill.exec.server.Drillbit) RemoteServiceSet(org.apache.drill.exec.server.RemoteServiceSet) FileSystem(org.apache.hadoop.fs.FileSystem) WritableBatch(org.apache.drill.exec.record.WritableBatch) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) File(java.io.File) ExecTest(org.apache.drill.exec.ExecTest) Test(org.junit.Test)
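
Most of test() is vector plumbing; the part that actually depends on DrillConfig is the embedded-Drillbit bootstrap that yields an allocator. A minimal sketch of just that bootstrap (assuming the drill-java-exec test dependencies are on the classpath):

import org.apache.drill.common.config.DrillConfig;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.server.Drillbit;
import org.apache.drill.exec.server.DrillbitContext;
import org.apache.drill.exec.server.RemoteServiceSet;

public class EmbeddedDrillbitExample {
    public static void main(String[] args) throws Exception {
        // Default configuration, the same entry point the test uses.
        DrillConfig config = DrillConfig.create();
        // Local in-process coordination plus a single Drillbit, both closed automatically.
        try (RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
             Drillbit bit = new Drillbit(config, serviceSet)) {
            bit.run();
            DrillbitContext context = bit.getContext();
            BufferAllocator allocator = context.getAllocator();
            System.out.println("allocated bytes: " + allocator.getAllocatedMemory());
        }
    }
}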

Example 49 with DrillConfig

Use of org.apache.drill.common.config.DrillConfig in project drill by apache.

Class TestOrderedPartitionExchange, method twoBitTwoExchangeRun:

/**
   * Starts two drillbits and runs a physical plan with a mock scan, project, OrderedPartitionExchange, UnionExchange,
   * and sort. The final sort is done first on the partition column, and the test verifies that the partitions are
   * correct: all rows in partition 0 come before any row in partition 1 in the sort order, and so on. It also verifies
   * that the standard deviation of the partition sizes is less than one tenth of their mean, because all partitions
   * are expected to be roughly equal in size.
   * @throws Exception
   */
@Test
public void twoBitTwoExchangeRun() throws Exception {
    RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
    try (Drillbit bit1 = new Drillbit(CONFIG, serviceSet);
        Drillbit bit2 = new Drillbit(CONFIG, serviceSet);
        DrillClient client = new DrillClient(CONFIG, serviceSet.getCoordinator())) {
        bit1.run();
        bit2.run();
        client.connect();
        List<QueryDataBatch> results = client.runQuery(org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL, Files.toString(FileUtils.getResourceAsFile("/sender/ordered_exchange.json"), Charsets.UTF_8));
        int count = 0;
        List<Integer> partitionRecordCounts = Lists.newArrayList();
        for (QueryDataBatch b : results) {
            if (b.getData() != null) {
                int rows = b.getHeader().getRowCount();
                count += rows;
                DrillConfig config = DrillConfig.create();
                RecordBatchLoader loader = new RecordBatchLoader(new BootStrapContext(config, ClassPathScanner.fromPrescan(config)).getAllocator());
                loader.load(b.getHeader().getDef(), b.getData());
                BigIntVector vv1 = (BigIntVector) loader.getValueAccessorById(BigIntVector.class, loader.getValueVectorId(new SchemaPath("col1", ExpressionPosition.UNKNOWN)).getFieldIds()).getValueVector();
                Float8Vector vv2 = (Float8Vector) loader.getValueAccessorById(Float8Vector.class, loader.getValueVectorId(new SchemaPath("col2", ExpressionPosition.UNKNOWN)).getFieldIds()).getValueVector();
                IntVector pVector = (IntVector) loader.getValueAccessorById(IntVector.class, loader.getValueVectorId(new SchemaPath("partition", ExpressionPosition.UNKNOWN)).getFieldIds()).getValueVector();
                long previous1 = Long.MIN_VALUE;
                double previous2 = Double.MIN_VALUE;
                int partPrevious = -1;
                long current1 = Long.MIN_VALUE;
                double current2 = Double.MIN_VALUE;
                int partCurrent = -1;
                int partitionRecordCount = 0;
                for (int i = 0; i < rows; i++) {
                    previous1 = current1;
                    previous2 = current2;
                    partPrevious = partCurrent;
                    current1 = vv1.getAccessor().get(i);
                    current2 = vv2.getAccessor().get(i);
                    partCurrent = pVector.getAccessor().get(i);
                    Assert.assertTrue(current1 >= previous1);
                    if (current1 == previous1) {
                        Assert.assertTrue(current2 <= previous2);
                    }
                    if (partCurrent == partPrevious || partPrevious == -1) {
                        partitionRecordCount++;
                    } else {
                        partitionRecordCounts.add(partitionRecordCount);
                        partitionRecordCount = 0;
                    }
                }
                partitionRecordCounts.add(partitionRecordCount);
                loader.clear();
            }
            b.release();
        }
        double[] values = new double[partitionRecordCounts.size()];
        int i = 0;
        for (Integer rc : partitionRecordCounts) {
            values[i++] = rc.doubleValue();
        }
        StandardDeviation stdDev = new StandardDeviation();
        Mean mean = new Mean();
        double std = stdDev.evaluate(values);
        double m = mean.evaluate(values);
        System.out.println("mean: " + m + " std dev: " + std);
        //Assert.assertTrue(std < 0.1 * m);
        assertEquals(31000, count);
    }
}
Also used : Mean(org.apache.commons.math.stat.descriptive.moment.Mean) BigIntVector(org.apache.drill.exec.vector.BigIntVector) IntVector(org.apache.drill.exec.vector.IntVector) RecordBatchLoader(org.apache.drill.exec.record.RecordBatchLoader) Float8Vector(org.apache.drill.exec.vector.Float8Vector) BigIntVector(org.apache.drill.exec.vector.BigIntVector) QueryDataBatch(org.apache.drill.exec.rpc.user.QueryDataBatch) DrillConfig(org.apache.drill.common.config.DrillConfig) Drillbit(org.apache.drill.exec.server.Drillbit) SchemaPath(org.apache.drill.common.expression.SchemaPath) RemoteServiceSet(org.apache.drill.exec.server.RemoteServiceSet) BootStrapContext(org.apache.drill.exec.server.BootStrapContext) StandardDeviation(org.apache.commons.math.stat.descriptive.moment.StandardDeviation) DrillClient(org.apache.drill.exec.client.DrillClient) Test(org.junit.Test)
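
Inside the result loop, all the test needs from DrillConfig is an allocator for the RecordBatchLoader, obtained through a BootStrapContext rather than a full Drillbit. A minimal sketch of that wiring, built once rather than per batch (the test above recreates the DrillConfig for every batch, which works but is unnecessary); the ClassPathScanner import path is assumed:

import org.apache.drill.common.config.DrillConfig;
import org.apache.drill.common.scanner.ClassPathScanner;
import org.apache.drill.common.scanner.persistence.ScanResult;
import org.apache.drill.exec.record.RecordBatchLoader;
import org.apache.drill.exec.server.BootStrapContext;

public class BatchLoaderAllocatorExample {
    public static void main(String[] args) {
        // One DrillConfig and one BootStrapContext are enough for any number of batches.
        DrillConfig config = DrillConfig.create();
        ScanResult scanResult = ClassPathScanner.fromPrescan(config);
        BootStrapContext bootstrap = new BootStrapContext(config, scanResult);
        // The loader materializes value vectors from the def and data of a QueryDataBatch
        // via loader.load(def, data), as in twoBitTwoExchangeRun above.
        RecordBatchLoader loader = new RecordBatchLoader(bootstrap.getAllocator());
        System.out.println("loader ready: " + loader);
    }
}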

Example 50 with DrillConfig

Use of org.apache.drill.common.config.DrillConfig in project drill by apache.

Class TestBitBitKerberos, method setupTest:

@BeforeClass
public static void setupTest() throws Exception {
    final Config config = DrillConfig.create(cloneDefaultTestConfigProperties());
    krbHelper = new KerberosHelper(TestBitBitKerberos.class.getSimpleName());
    krbHelper.setupKdc();
    newConfig = new DrillConfig(config.withValue(ExecConstants.AUTHENTICATION_MECHANISMS, ConfigValueFactory.fromIterable(Lists.newArrayList("kerberos"))).withValue(ExecConstants.BIT_AUTHENTICATION_ENABLED, ConfigValueFactory.fromAnyRef(true)).withValue(ExecConstants.BIT_AUTHENTICATION_MECHANISM, ConfigValueFactory.fromAnyRef("kerberos")).withValue(ExecConstants.USE_LOGIN_PRINCIPAL, ConfigValueFactory.fromAnyRef(true)).withValue(BootStrapContext.SERVICE_PRINCIPAL, ConfigValueFactory.fromAnyRef(krbHelper.SERVER_PRINCIPAL)).withValue(BootStrapContext.SERVICE_KEYTAB_LOCATION, ConfigValueFactory.fromAnyRef(krbHelper.serverKeytab.toString())), false);
    // Ignore the compile-time warning caused by the code below.
    // Config is statically initialized at this point, but the configuration above results in a different
    // initialization, which causes the tests to fail, so the following two changes are required.
    // (1) Refresh Kerberos config.
    sun.security.krb5.Config.refresh();
    // (2) Reset the default realm.
    final Field defaultRealm = KerberosName.class.getDeclaredField("defaultRealm");
    defaultRealm.setAccessible(true);
    defaultRealm.set(null, KerberosUtil.getDefaultRealm());
    updateTestCluster(1, newConfig);
    ScanResult result = ClassPathScanner.fromPrescan(newConfig);
    c1 = new BootStrapContext(newConfig, result);
    setupFragmentContextAndManager();
}
Also used : MaterializedField(org.apache.drill.exec.record.MaterializedField) Field(java.lang.reflect.Field) ScanResult(org.apache.drill.common.scanner.persistence.ScanResult) DrillConfig(org.apache.drill.common.config.DrillConfig) Config(com.typesafe.config.Config) DrillConfig(org.apache.drill.common.config.DrillConfig) BootStrapContext(org.apache.drill.exec.server.BootStrapContext) KerberosHelper(org.apache.drill.exec.rpc.security.KerberosHelper) BeforeClass(org.junit.BeforeClass)
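
The DrillConfig pattern worth noting here is layering test-specific overrides on top of the defaults with Typesafe Config and re-wrapping the result through the DrillConfig(Config, boolean) constructor. A minimal sketch of that pattern, using only keys and calls that appear in setupTest (the Kerberos/KDC setup is omitted):

import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;
import org.apache.drill.common.config.DrillConfig;
import org.apache.drill.exec.ExecConstants;

public class OverrideDrillConfigExample {
    public static void main(String[] args) {
        // Start from the default configuration...
        Config base = DrillConfig.create();
        // ...override individual keys, then re-wrap as a DrillConfig. The trailing boolean
        // mirrors the `false` passed in setupTest above.
        DrillConfig custom = new DrillConfig(
            base.withValue(ExecConstants.BIT_AUTHENTICATION_ENABLED, ConfigValueFactory.fromAnyRef(true))
                .withValue(ExecConstants.BIT_AUTHENTICATION_MECHANISM, ConfigValueFactory.fromAnyRef("kerberos")),
            false);
        System.out.println(custom.getBoolean(ExecConstants.BIT_AUTHENTICATION_ENABLED));
    }
}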

Aggregations

DrillConfig (org.apache.drill.common.config.DrillConfig): 57
Test (org.junit.Test): 35
Properties (java.util.Properties): 19
DrillProperties (org.apache.drill.common.config.DrillProperties): 15
Drillbit (org.apache.drill.exec.server.Drillbit): 8
RemoteServiceSet (org.apache.drill.exec.server.RemoteServiceSet): 8
BeforeClass (org.junit.BeforeClass): 8
ScanResult (org.apache.drill.common.scanner.persistence.ScanResult): 7
ExecTest (org.apache.drill.exec.ExecTest): 7
LogicalPlanPersistence (org.apache.drill.common.config.LogicalPlanPersistence): 6
RpcException (org.apache.drill.exec.rpc.RpcException): 6
DrillbitEndpoint (org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint): 5
NonTransientRpcException (org.apache.drill.exec.rpc.NonTransientRpcException): 5
DrillTest (org.apache.drill.test.DrillTest): 5
SchemaPath (org.apache.drill.common.expression.SchemaPath): 4
BufferAllocator (org.apache.drill.exec.memory.BufferAllocator): 4
DrillBuf (io.netty.buffer.DrillBuf): 3
File (java.io.File): 3
IOException (java.io.IOException): 3
Field (java.lang.reflect.Field): 3