Example 21 with HiveConf

Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

From class TestTxnCommands, method setUp.

@Before
public void setUp() throws Exception {
    tearDown();
    hiveConf = new HiveConf(this.getClass());
    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
    hiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict");
    hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
    hiveConf.setBoolVar(HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK, true);
    TxnDbUtil.setConfValues(hiveConf);
    TxnDbUtil.prepDb();
    File f = new File(TEST_WAREHOUSE_DIR);
    if (f.exists()) {
        FileUtil.fullyDelete(f);
    }
    if (!(new File(TEST_WAREHOUSE_DIR).mkdirs())) {
        throw new RuntimeException("Could not create " + TEST_WAREHOUSE_DIR);
    }
    SessionState.start(new SessionState(hiveConf));
    d = new Driver(hiveConf);
    d.setMaxRows(10000);
    dropTables();
    runStatementOnDriver("create table " + Table.ACIDTBL + "(a int, b int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='true')");
    runStatementOnDriver("create table " + Table.ACIDTBLPART + "(a int, b int) partitioned by (p string) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='true')");
    runStatementOnDriver("create table " + Table.NONACIDORCTBL + "(a int, b int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='false')");
    runStatementOnDriver("create table " + Table.NONACIDORCTBL2 + "(a int, b int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='false')");
    runStatementOnDriver("create temporary  table " + Table.ACIDTBL2 + "(a int, b int, c int) clustered by (c) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='true')");
}
Also used: SessionState (org.apache.hadoop.hive.ql.session.SessionState), HiveConf (org.apache.hadoop.hive.conf.HiveConf), File (java.io.File), Before (org.junit.Before)
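
The runStatementOnDriver helper used throughout this setUp is defined in the test class itself. A minimal sketch of what such a helper looks like, assuming the Driver field d from above (the real implementation may differ):

private List<String> runStatementOnDriver(String stmt) throws Exception {
    // execute the statement and fail fast on a non-zero response code
    CommandProcessorResponse cpr = d.run(stmt);
    if (cpr.getResponseCode() != 0) {
        throw new RuntimeException(stmt + " failed: " + cpr);
    }
    // collect whatever rows the statement produced
    List<String> rs = new ArrayList<>();
    d.getResults(rs);
    return rs;
}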

Example 22 with HiveConf

Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

From class TestUtilities, method runTestGetInputSummary.

private ContentSummary runTestGetInputSummary(JobConf jobConf, Properties properties, int numOfPartitions, int bytesPerFile, Class<? extends InputFormat> inputFormatClass) throws IOException {
    // creates scratch directories needed by the Context object
    SessionState.start(new HiveConf());
    MapWork mapWork = new MapWork();
    Context context = new Context(jobConf);
    LinkedHashMap<Path, PartitionDesc> pathToPartitionInfo = new LinkedHashMap<>();
    LinkedHashMap<Path, ArrayList<String>> pathToAliasTable = new LinkedHashMap<>();
    TableScanOperator scanOp = new TableScanOperator();
    PartitionDesc partitionDesc = new PartitionDesc(new TableDesc(inputFormatClass, null, properties), null);
    String testTableName = "testTable";
    Path testTablePath = new Path(testTableName);
    Path[] testPartitionsPaths = new Path[numOfPartitions];
    for (int i = 0; i < numOfPartitions; i++) {
        String testPartitionName = "p=" + 1;
        testPartitionsPaths[i] = new Path(testTablePath, "p=" + i);
        pathToPartitionInfo.put(testPartitionsPaths[i], partitionDesc);
        pathToAliasTable.put(testPartitionsPaths[i], Lists.newArrayList(testPartitionName));
        mapWork.getAliasToWork().put(testPartitionName, scanOp);
    }
    mapWork.setPathToAliases(pathToAliasTable);
    mapWork.setPathToPartitionInfo(pathToPartitionInfo);
    FileSystem fs = FileSystem.getLocal(jobConf);
    try {
        fs.mkdirs(testTablePath);
        byte[] data = new byte[bytesPerFile];
        for (int i = 0; i < numOfPartitions; i++) {
            fs.mkdirs(testPartitionsPaths[i]);
            FSDataOutputStream out = fs.create(new Path(testPartitionsPaths[i], "test1.txt"));
            out.write(data);
            out.close();
        }
        return Utilities.getInputSummary(context, mapWork, null);
    } finally {
        if (fs.exists(testTablePath)) {
            fs.delete(testTablePath, true);
        }
    }
}
Also used: Context (org.apache.hadoop.hive.ql.Context), Path (org.apache.hadoop.fs.Path), ArrayList (java.util.ArrayList), LinkedHashMap (java.util.LinkedHashMap), MapWork (org.apache.hadoop.hive.ql.plan.MapWork), FileSystem (org.apache.hadoop.fs.FileSystem), HiveConf (org.apache.hadoop.hive.conf.HiveConf), PartitionDesc (org.apache.hadoop.hive.ql.plan.PartitionDesc), TableDesc (org.apache.hadoop.hive.ql.plan.TableDesc), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)
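
A hypothetical caller of this helper; the test name, partition count, file size, and input format below are illustrative, not copied from the real suite:

@Test
public void testGetInputSummary() throws IOException {
    JobConf jobConf = new JobConf();
    Properties properties = new Properties();
    // 5 partitions with one 300-byte file each
    ContentSummary summary = runTestGetInputSummary(jobConf, properties, 5, 300, TextInputFormat.class);
    assertEquals(5 * 300, summary.getLength());
    assertEquals(5, summary.getFileCount());
}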

Example 23 with HiveConf

Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

From class TestTxnCommands2, method setUpWithTableProperties.

protected void setUpWithTableProperties(String tableProperties) throws Exception {
    tearDown();
    hiveConf = new HiveConf(this.getClass());
    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
    hiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict");
    hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName());
    hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
    hiveConf.setBoolVar(HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK, true);
    TxnDbUtil.setConfValues(hiveConf);
    TxnDbUtil.prepDb();
    File f = new File(TEST_WAREHOUSE_DIR);
    if (f.exists()) {
        FileUtil.fullyDelete(f);
    }
    if (!(new File(TEST_WAREHOUSE_DIR).mkdirs())) {
        throw new RuntimeException("Could not create " + TEST_WAREHOUSE_DIR);
    }
    SessionState.start(new SessionState(hiveConf));
    d = new Driver(hiveConf);
    d.setMaxRows(10000);
    dropTables();
    runStatementOnDriver("create table " + Table.ACIDTBL + "(a int, b int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES (" + tableProperties + ")");
    runStatementOnDriver("create table " + Table.ACIDTBLPART + "(a int, b int) partitioned by (p string) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES (" + tableProperties + ")");
    runStatementOnDriver("create table " + Table.NONACIDORCTBL + "(a int, b int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='false')");
    runStatementOnDriver("create table " + Table.NONACIDPART + "(a int, b int) partitioned by (p string) stored as orc TBLPROPERTIES ('transactional'='false')");
    runStatementOnDriver("create table " + Table.NONACIDPART2 + "(a2 int, b2 int) partitioned by (p2 string) stored as orc TBLPROPERTIES ('transactional'='false')");
    runStatementOnDriver("create table " + Table.ACIDNESTEDPART + "(a int, b int) partitioned by (p int, q int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES (" + tableProperties + ")");
}
Also used: HiveInputFormat (org.apache.hadoop.hive.ql.io.HiveInputFormat), SessionState (org.apache.hadoop.hive.ql.session.SessionState), HiveConf (org.apache.hadoop.hive.conf.HiveConf), File (java.io.File)
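
Concrete tests route their table properties through this method; for example, a plain ACID setup would call it like this (a sketch of the usual @Before wiring):

@Before
public void setUp() throws Exception {
    setUpWithTableProperties("'transactional'='true'");
}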

Example 24 with HiveConf

Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

From class TestFunctionRegistry, method setUp.

@Override
protected void setUp() {
    String maxVarcharTypeName = "varchar(" + HiveVarchar.MAX_VARCHAR_LENGTH + ")";
    maxVarchar = TypeInfoFactory.getPrimitiveTypeInfo(maxVarcharTypeName);
    varchar10 = TypeInfoFactory.getPrimitiveTypeInfo("varchar(10)");
    varchar5 = TypeInfoFactory.getPrimitiveTypeInfo("varchar(5)");
    char10 = TypeInfoFactory.getPrimitiveTypeInfo("char(10)");
    char5 = TypeInfoFactory.getPrimitiveTypeInfo("char(5)");
    SessionState.start(new HiveConf());
}
Also used: HiveConf (org.apache.hadoop.hive.conf.HiveConf)
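
These fields feed assertions about Hive's type coercion rules. A sketch of the kind of check they support; the expected common type below is illustrative, not quoted from the actual test:

@Test
public void testVarcharCommonClass() {
    // comparing two varchars should widen to the longer length
    TypeInfo common = FunctionRegistry.getCommonClassForComparison(varchar5, varchar10);
    assertEquals(varchar10, common);
}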

Example 25 with HiveConf

Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

From class TestOperators, method testFetchOperatorContext.

@Test
public void testFetchOperatorContext() throws Exception {
    HiveConf conf = new HiveConf();
    conf.set("hive.support.concurrency", "false");
    conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict");
    conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
    SessionState.start(conf);
    String cmd = "create table fetchOp (id int, name string) " + "partitioned by (state string) " + "row format delimited fields terminated by '|' " + "stored as " + "inputformat 'org.apache.hadoop.hive.ql.exec.TestOperators$CustomInFmt' " + "outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' " + "tblproperties ('myprop1'='val1', 'myprop2' = 'val2')";
    Driver driver = new Driver();
    driver.init();
    CommandProcessorResponse response = driver.run(cmd);
    assertEquals(0, response.getResponseCode());
    List<Object> result = new ArrayList<Object>();
    cmd = "load data local inpath '../data/files/employee.dat' " + "overwrite into table fetchOp partition (state='CA')";
    driver.init();
    response = driver.run(cmd);
    assertEquals(0, response.getResponseCode());
    cmd = "load data local inpath '../data/files/employee2.dat' " + "overwrite into table fetchOp partition (state='OR')";
    driver.init();
    response = driver.run(cmd);
    assertEquals(0, response.getResponseCode());
    cmd = "select * from fetchOp";
    driver.init();
    driver.setMaxRows(500);
    response = driver.run(cmd);
    assertEquals(0, response.getResponseCode());
    driver.getResults(result);
    assertEquals(20, result.size());
    driver.close();
}
Also used: CommandProcessorResponse (org.apache.hadoop.hive.ql.processors.CommandProcessorResponse), ArrayList (java.util.ArrayList), Driver (org.apache.hadoop.hive.ql.Driver), HiveConf (org.apache.hadoop.hive.conf.HiveConf), InspectableObject (org.apache.hadoop.hive.serde2.objectinspector.InspectableObject), Test (org.junit.Test)
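
The CustomInFmt named in the create-table DDL is an inner class of TestOperators. A sketch of what such an input format plausibly verifies; this is an assumption about its intent, not the verbatim class:

public static class CustomInFmt extends TextInputFormat {
    @Override
    public InputSplit[] getSplits(JobConf job, int splits) throws IOException {
        // table properties declared in the DDL should be visible in the job conf
        assertEquals("val1", job.get("myprop1"));
        assertEquals("val2", job.get("myprop2"));
        return super.getSplits(job, splits);
    }
}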

Aggregations

HiveConf (org.apache.hadoop.hive.conf.HiveConf): 404
BeforeClass (org.junit.BeforeClass): 73
Test (org.junit.Test): 66
Path (org.apache.hadoop.fs.Path): 54
Before (org.junit.Before): 50
Driver (org.apache.hadoop.hive.ql.Driver): 46
CliSessionState (org.apache.hadoop.hive.cli.CliSessionState): 44
IOException (java.io.IOException): 39
ArrayList (java.util.ArrayList): 37
File (java.io.File): 31
HashMap (java.util.HashMap): 26
FileSystem (org.apache.hadoop.fs.FileSystem): 26
SessionState (org.apache.hadoop.hive.ql.session.SessionState): 22
LinkedHashMap (java.util.LinkedHashMap): 17
List (java.util.List): 16
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 15
MiniHS2 (org.apache.hive.jdbc.miniHS2.MiniHS2): 14
Map (java.util.Map): 12
HiveMetaStoreClient (org.apache.hadoop.hive.metastore.HiveMetaStoreClient): 12
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 12