
Example 26 with HCatException

Use of org.apache.hive.hcatalog.common.HCatException in project hive by apache.

From the class TestHCatPartitioned, method testHCatPartitionedTable.

@Test
public void testHCatPartitionedTable() throws Exception {
    Map<String, String> partitionMap = new HashMap<String, String>();
    partitionMap.put("part1", "p1value1");
    partitionMap.put("part0", "501");
    runMRCreate(partitionMap, partitionColumns, writeRecords, 10, true);
    partitionMap.clear();
    partitionMap.put("PART1", "p1value2");
    partitionMap.put("PART0", "502");
    runMRCreate(partitionMap, partitionColumns, writeRecords, 20, true);
    //Test for duplicate publish -- this will either fail at job-creation time
    // and throw an exception, or will fail at runtime and leave the job unsuccessful.
    IOException exc = null;
    try {
        Job j = runMRCreate(partitionMap, partitionColumns, writeRecords, 20, true);
        assertEquals(!isTableImmutable(), j.isSuccessful());
    } catch (IOException e) {
        exc = e;
        assertTrue(exc instanceof HCatException);
        assertEquals(ErrorType.ERROR_DUPLICATE_PARTITION, ((HCatException) exc).getErrorType());
    }
    if (!isTableImmutable()) {
        assertNull(exc);
    }
    //Test for publish with invalid partition key name
    exc = null;
    partitionMap.clear();
    partitionMap.put("px1", "p1value2");
    partitionMap.put("px0", "502");
    try {
        Job j = runMRCreate(partitionMap, partitionColumns, writeRecords, 20, true);
        assertFalse(j.isSuccessful());
    } catch (IOException e) {
        exc = e;
        assertNotNull(exc);
        assertTrue(exc instanceof HCatException);
        assertEquals(ErrorType.ERROR_MISSING_PARTITION_KEY, ((HCatException) exc).getErrorType());
    }
    //Test for publish with missing partition key values
    exc = null;
    partitionMap.clear();
    partitionMap.put("px", "512");
    try {
        runMRCreate(partitionMap, partitionColumns, writeRecords, 20, true);
    } catch (IOException e) {
        exc = e;
    }
    assertNotNull(exc);
    assertTrue(exc instanceof HCatException);
    assertEquals(ErrorType.ERROR_INVALID_PARTITION_VALUES, ((HCatException) exc).getErrorType());
    //Test for null partition value map
    exc = null;
    try {
        runMRCreate(null, partitionColumns, writeRecords, 20, false);
    } catch (IOException e) {
        exc = e;
    }
    assertNull(exc);
    //Read should get 10 + 20 rows if immutable, 50 (10+20+20) if mutable
    if (isTableImmutable()) {
        runMRRead(30);
    } else {
        runMRRead(50);
    }
    //Read with partition filter
    runMRRead(10, "part1 = \"p1value1\"");
    runMRRead(10, "part0 = \"501\"");
    if (isTableImmutable()) {
        runMRRead(20, "part1 = \"p1value2\"");
        runMRRead(30, "part1 = \"p1value1\" or part1 = \"p1value2\"");
        runMRRead(20, "part0 = \"502\"");
        runMRRead(30, "part0 = \"501\" or part0 = \"502\"");
    } else {
        runMRRead(40, "part1 = \"p1value2\"");
        runMRRead(50, "part1 = \"p1value1\" or part1 = \"p1value2\"");
        runMRRead(40, "part0 = \"502\"");
        runMRRead(50, "part0 = \"501\" or part0 = \"502\"");
    }
    tableSchemaTest();
    columnOrderChangeTest();
    hiveReadTest();
}
Also used : HashMap(java.util.HashMap) HCatException(org.apache.hive.hcatalog.common.HCatException) IOException(java.io.IOException) Job(org.apache.hadoop.mapreduce.Job) Test(org.junit.Test)
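
A pattern worth pulling out of this example: HCatException extends IOException, which is why the test catches IOException and then narrows with instanceof before dispatching on getErrorType(). A minimal sketch of that pattern; runJob() is a hypothetical stand-in for runMRCreate(...):

try {
    runJob();
} catch (IOException e) {
    if (e instanceof HCatException) {
        ErrorType type = ((HCatException) e).getErrorType();
        if (type == ErrorType.ERROR_DUPLICATE_PARTITION) {
            // duplicate publish into an immutable table or partition
        }
    }
}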

Example 27 with HCatException

Use of org.apache.hive.hcatalog.common.HCatException in project hive by apache.

From the class TestHCatPartitioned, method columnOrderChangeTest.

//Check behavior when the order of columns is changed.
private void columnOrderChangeTest() throws Exception {
    HCatSchema tableSchema = getTableSchema();
    assertEquals(5, tableSchema.getFields().size());
    partitionColumns = new ArrayList<HCatFieldSchema>();
    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", serdeConstants.INT_TYPE_NAME, "")));
    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c3", serdeConstants.STRING_TYPE_NAME, "")));
    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", serdeConstants.STRING_TYPE_NAME, "")));
    writeRecords = new ArrayList<HCatRecord>();
    for (int i = 0; i < 10; i++) {
        List<Object> objList = new ArrayList<Object>();
        objList.add(i);
        objList.add("co strvalue" + i);
        objList.add("co str2value" + i);
        writeRecords.add(new DefaultHCatRecord(objList));
    }
    Map<String, String> partitionMap = new HashMap<String, String>();
    partitionMap.put("part1", "p1value8");
    partitionMap.put("part0", "508");
    Exception exc = null;
    try {
        runMRCreate(partitionMap, partitionColumns, writeRecords, 10, true);
    } catch (IOException e) {
        exc = e;
    }
    assertNotNull(exc);
    assertTrue(exc instanceof HCatException);
    assertEquals(ErrorType.ERROR_SCHEMA_COLUMN_MISMATCH, ((HCatException) exc).getErrorType());
    partitionColumns = new ArrayList<HCatFieldSchema>();
    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", serdeConstants.INT_TYPE_NAME, "")));
    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", serdeConstants.STRING_TYPE_NAME, "")));
    writeRecords = new ArrayList<HCatRecord>();
    for (int i = 0; i < 10; i++) {
        List<Object> objList = new ArrayList<Object>();
        objList.add(i);
        objList.add("co strvalue" + i);
        writeRecords.add(new DefaultHCatRecord(objList));
    }
    runMRCreate(partitionMap, partitionColumns, writeRecords, 10, true);
    if (isTableImmutable()) {
        //Read should get 10 + 20 + 10 + 10 + 20 rows
        runMRRead(70);
    } else {
        // +20 from the duplicate publish
        runMRRead(90);
    }
}
Also used : HashMap(java.util.HashMap) HCatFieldSchema(org.apache.hive.hcatalog.data.schema.HCatFieldSchema) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) ArrayList(java.util.ArrayList) HCatException(org.apache.hive.hcatalog.common.HCatException) IOException(java.io.IOException) HCatSchema(org.apache.hive.hcatalog.data.schema.HCatSchema) DefaultHCatRecord(org.apache.hive.hcatalog.data.DefaultHCatRecord) HCatRecord(org.apache.hive.hcatalog.data.HCatRecord)
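
A note on why the first publish above fails: HCatalog appears to match writer columns against the table's data columns by position and name, and the table stores them as (c1, c2, c3) per the surrounding assertions, so writing (c1, c3, c2) raises ERROR_SCHEMA_COLUMN_MISMATCH. A minimal sketch of a writer schema that lines up, using only calls from the test:

// Writer columns as a name-and-order prefix of the table's data columns;
// (c1, c2) succeeds above, (c1, c3, c2) does not.
List<HCatFieldSchema> okColumns = new ArrayList<HCatFieldSchema>();
okColumns.add(HCatSchemaUtils.getHCatFieldSchema(
    new FieldSchema("c1", serdeConstants.INT_TYPE_NAME, "")));
okColumns.add(HCatSchemaUtils.getHCatFieldSchema(
    new FieldSchema("c2", serdeConstants.STRING_TYPE_NAME, "")));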

Example 28 with HCatException

Use of org.apache.hive.hcatalog.common.HCatException in project hive by apache.

From the class TestCommands, method testDropTableCommand2.

@Test
public void testDropTableCommand2() throws HCatException, CommandNeedRetryException, MetaException {
    // Secondary DropTableCommand test for testing repl-drop-table's effect on partitions inside a partitioned table
    // when partitions exist inside the table that are older than the drop event.
    // Our goal: create a table t with repl.last.id=159 (evid + 2).
    // Create 2 partitions inside it, with repl.last.id=152 and 162 (evid -/+ 5).
    // Then process a drop table command with eventid=157 (evid).
    // It should result in the table and the partition with repl.last.id=162 continuing to exist,
    // but the partition with repl.last.id=152 being dropped.
    String dbName = "cmd_testdb";
    String tableName = "cmd_testtable";
    int evid = 157;
    List<HCatFieldSchema> pcols = HCatSchemaUtils.getHCatSchema("b:string").getFields();
    List<HCatFieldSchema> cols = HCatSchemaUtils.getHCatSchema("a:int").getFields();
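    // DropTableCommand(dbName, tableName, ifExists, eventId) -- parameter roles inferred from this usage.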
    Command testReplicatedDropCmd = new DropTableCommand(dbName, tableName, true, evid);
    client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
    client.createDatabase(HCatCreateDBDesc.create(dbName).ifNotExists(false).build());
    Map<String, String> tprops = new HashMap<String, String>();
    tprops.put(ReplicationUtils.REPL_STATE_ID, String.valueOf(evid + 2));
    HCatTable table = (new HCatTable(dbName, tableName)).tblProps(tprops).cols(cols).partCols(pcols);
    client.createTable(HCatCreateTableDesc.create(table).build());
    HCatTable tableCreated = client.getTable(dbName, tableName);
    assertNotNull(tableCreated);
    Map<String, String> ptnDesc1 = new HashMap<String, String>();
    ptnDesc1.put("b", "test-older");
    Map<String, String> props1 = new HashMap<String, String>();
    props1.put(ReplicationUtils.REPL_STATE_ID, String.valueOf(evid - 5));
    HCatPartition ptnToAdd1 = (new HCatPartition(tableCreated, ptnDesc1, TestHCatClient.makePartLocation(tableCreated, ptnDesc1))).parameters(props1);
    client.addPartition(HCatAddPartitionDesc.create(ptnToAdd1).build());
    Map<String, String> ptnDesc2 = new HashMap<String, String>();
    ptnDesc2.put("b", "test-newer");
    Map<String, String> props2 = new HashMap<String, String>();
    props2.put(ReplicationUtils.REPL_STATE_ID, String.valueOf(evid + 5));
    HCatPartition ptnToAdd2 = (new HCatPartition(tableCreated, ptnDesc2, TestHCatClient.makePartLocation(tableCreated, ptnDesc2))).parameters(props2);
    client.addPartition(HCatAddPartitionDesc.create(ptnToAdd2).build());
    HCatPartition p1 = client.getPartition(dbName, tableName, ptnDesc1);
    assertNotNull(p1);
    HCatPartition p2 = client.getPartition(dbName, tableName, ptnDesc2);
    assertNotNull(p2);
    LOG.info("About to run :" + testReplicatedDropCmd.get().get(0));
    driver.run(testReplicatedDropCmd.get().get(0));
    HCatTable t_stillExists = client.getTable(dbName, tableName);
    assertNotNull(t_stillExists);
    HCatPartition p2_stillExists = client.getPartition(dbName, tableName, ptnDesc2);
    assertNotNull(p2_stillExists);
    Exception onfe = null;
    try {
        HCatPartition p1_del = client.getPartition(dbName, tableName, ptnDesc1);
    } catch (Exception e) {
        onfe = e;
    }
    assertNotNull(onfe);
    assertTrue(onfe instanceof ObjectNotFoundException);
}
Also used : Command(org.apache.hive.hcatalog.api.repl.Command) HashMap(java.util.HashMap) ObjectNotFoundException(org.apache.hive.hcatalog.api.ObjectNotFoundException) HCatTable(org.apache.hive.hcatalog.api.HCatTable) HCatPartition(org.apache.hive.hcatalog.api.HCatPartition) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HCatException(org.apache.hive.hcatalog.common.HCatException) CommandNeedRetryException(org.apache.hadoop.hive.ql.CommandNeedRetryException) IOException(java.io.IOException) HCatFieldSchema(org.apache.hive.hcatalog.data.schema.HCatFieldSchema) Test(org.junit.Test)
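
The drop in this example is fenced by replication state: an object is dropped only if its repl.last.id predates the event id carried by the command. A self-contained sketch of that comparison with illustrative values (the real check is generated into the drop DDL, not written by hand like this):

// Drop event id 157; one partition replicated at 152 (older, dropped)
// and one at 162 (newer, kept).
int dropEventId = 157;
Map<String, String> ptnParams = new HashMap<String, String>();
ptnParams.put(ReplicationUtils.REPL_STATE_ID, "152");
long ptnReplId = Long.parseLong(ptnParams.get(ReplicationUtils.REPL_STATE_ID));
boolean dropped = ptnReplId < dropEventId; // true for 152, false for 162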

Example 29 with HCatException

Use of org.apache.hive.hcatalog.common.HCatException in project hive by apache.

From the class TestHCatClient, method testDropTableException.

@Test
public void testDropTableException() throws Exception {
    HCatClient client = HCatClient.create(new Configuration(hcatConf));
    String tableName = "tableToBeDropped";
    boolean isExceptionCaught = false;
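    // Ensure the table is absent; with ifExists == true the drop succeeds either way.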
    client.dropTable(null, tableName, true);
    try {
        client.dropTable(null, tableName, false);
    } catch (Exception exp) {
        isExceptionCaught = true;
        assertTrue(exp instanceof HCatException);
        LOG.info("Drop Table Exception: " + exp.getCause());
    } finally {
        client.close();
        assertTrue("The expected exception was never thrown.", isExceptionCaught);
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HCatException(org.apache.hive.hcatalog.common.HCatException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) IOException(java.io.IOException) Test(org.junit.Test)
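
The flag-and-finally arrangement above can be written more directly with the fail() idiom that testBasicDDLCommands below already uses; a sketch of the same check:

try {
    client.dropTable(null, tableName, false);
    fail("Expected HCatException for a missing table");
} catch (HCatException expected) {
    // expected; the logged cause is typically the metastore's NoSuchObjectException
} finally {
    client.close();
}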

Example 30 with HCatException

Use of org.apache.hive.hcatalog.common.HCatException in project hive by apache.

From the class TestHCatClient, method testBasicDDLCommands.

@Test
public void testBasicDDLCommands() throws Exception {
    String db = "testdb";
    String tableOne = "testTable1";
    String tableTwo = "testTable2";
    String tableThree = "testTable3";
    HCatClient client = HCatClient.create(new Configuration(hcatConf));
    client.dropDatabase(db, true, HCatClient.DropDBMode.CASCADE);
    HCatCreateDBDesc dbDesc = HCatCreateDBDesc.create(db).ifNotExists(false).build();
    client.createDatabase(dbDesc);
    List<String> dbNames = client.listDatabaseNamesByPattern("*");
    assertTrue(dbNames.contains("default"));
    assertTrue(dbNames.contains(db));
    HCatDatabase testDb = client.getDatabase(db);
    assertNull(testDb.getComment());
    assertEquals(0, testDb.getProperties().size());
    String warehouseDir = System.getProperty("test.warehouse.dir", "/user/hive/warehouse");
    if (useExternalMS) {
        assertTrue(testDb.getLocation().matches(".*" + "/" + db + ".db"));
    } else {
        String expectedDir = warehouseDir.replaceFirst("pfile:///", "pfile:/");
        assertEquals(expectedDir + "/" + db + ".db", testDb.getLocation());
    }
    ArrayList<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
    cols.add(new HCatFieldSchema("id", Type.INT, "id comment"));
    cols.add(new HCatFieldSchema("value", Type.STRING, "value comment"));
    HCatCreateTableDesc tableDesc = HCatCreateTableDesc.create(db, tableOne, cols).fileFormat("rcfile").build();
    client.createTable(tableDesc);
    HCatTable table1 = client.getTable(db, tableOne);
    assertTrue(table1.getInputFileFormat().equalsIgnoreCase(RCFileInputFormat.class.getName()));
    assertTrue(table1.getOutputFileFormat().equalsIgnoreCase(RCFileOutputFormat.class.getName()));
    assertTrue(table1.getSerdeLib().equalsIgnoreCase(LazyBinaryColumnarSerDe.class.getName()));
    assertTrue(table1.getCols().equals(cols));
    // Attempting to create the same table again will result in an exception.
    try {
        client.createTable(tableDesc);
        fail("Expected exception");
    } catch (HCatException e) {
        assertTrue(e.getMessage().contains("AlreadyExistsException while creating table."));
    }
    client.dropTable(db, tableOne, true);
    HCatCreateTableDesc tableDesc2 = HCatCreateTableDesc.create(db, tableTwo, cols).fieldsTerminatedBy('\001').escapeChar('\002').linesTerminatedBy('\003').mapKeysTerminatedBy('\004').collectionItemsTerminatedBy('\005').nullDefinedAs('\006').build();
    client.createTable(tableDesc2);
    HCatTable table2 = client.getTable(db, tableTwo);
    assertTrue("Expected TextInputFormat, but got: " + table2.getInputFileFormat(), table2.getInputFileFormat().equalsIgnoreCase(TextInputFormat.class.getName()));
    assertTrue(table2.getOutputFileFormat().equalsIgnoreCase(HiveIgnoreKeyTextOutputFormat.class.getName()));
    assertTrue("SerdeParams not found", table2.getSerdeParams() != null);
    assertEquals("checking " + serdeConstants.FIELD_DELIM, Character.toString('\001'), table2.getSerdeParams().get(serdeConstants.FIELD_DELIM));
    assertEquals("checking " + serdeConstants.ESCAPE_CHAR, Character.toString('\002'), table2.getSerdeParams().get(serdeConstants.ESCAPE_CHAR));
    assertEquals("checking " + serdeConstants.LINE_DELIM, Character.toString('\003'), table2.getSerdeParams().get(serdeConstants.LINE_DELIM));
    assertEquals("checking " + serdeConstants.MAPKEY_DELIM, Character.toString('\004'), table2.getSerdeParams().get(serdeConstants.MAPKEY_DELIM));
    assertEquals("checking " + serdeConstants.COLLECTION_DELIM, Character.toString('\005'), table2.getSerdeParams().get(serdeConstants.COLLECTION_DELIM));
    assertEquals("checking " + serdeConstants.SERIALIZATION_NULL_FORMAT, Character.toString('\006'), table2.getSerdeParams().get(serdeConstants.SERIALIZATION_NULL_FORMAT));
    assertTrue(table2.getLocation().toLowerCase().matches(".*" + ("/" + db + ".db/" + tableTwo).toLowerCase()));
    HCatCreateTableDesc tableDesc3 = HCatCreateTableDesc.create(db, tableThree, cols).fileFormat("orcfile").build();
    client.createTable(tableDesc3);
    HCatTable table3 = client.getTable(db, tableThree);
    assertTrue(table3.getInputFileFormat().equalsIgnoreCase(OrcInputFormat.class.getName()));
    assertTrue(table3.getOutputFileFormat().equalsIgnoreCase(OrcOutputFormat.class.getName()));
    assertTrue(table3.getSerdeLib().equalsIgnoreCase(OrcSerde.class.getName()));
    assertTrue(table3.getCols().equals(cols));
    client.close();
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) ArrayList(java.util.ArrayList) HCatException(org.apache.hive.hcatalog.common.HCatException) HCatFieldSchema(org.apache.hive.hcatalog.data.schema.HCatFieldSchema) Test(org.junit.Test)
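
All three tables in this example flow through the same HCatCreateTableDesc builder, with the fileFormat string selecting the input format, output format, and serde triple that the assertions then verify. Condensed to its core:

HCatCreateTableDesc desc = HCatCreateTableDesc
        .create(db, tableOne, cols)
        .fileFormat("rcfile") // "orcfile" selects the ORC triple instead
        .build();
client.createTable(desc);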

Aggregations

HCatException (org.apache.hive.hcatalog.common.HCatException): 52 usages
IOException (java.io.IOException): 23
ArrayList (java.util.ArrayList): 20
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 19
TException (org.apache.thrift.TException): 14
HCatFieldSchema (org.apache.hive.hcatalog.data.schema.HCatFieldSchema): 13
HashMap (java.util.HashMap): 11
Test (org.junit.Test): 11
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 10
Configuration (org.apache.hadoop.conf.Configuration): 9
Path (org.apache.hadoop.fs.Path): 9
Partition (org.apache.hadoop.hive.metastore.api.Partition): 8
Table (org.apache.hadoop.hive.metastore.api.Table): 8
HCatSchema (org.apache.hive.hcatalog.data.schema.HCatSchema): 7
Job (org.apache.hadoop.mapreduce.Job): 6
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 5
FileSystem (org.apache.hadoop.fs.FileSystem): 4
HiveConf (org.apache.hadoop.hive.conf.HiveConf): 4
CommandNeedRetryException (org.apache.hadoop.hive.ql.CommandNeedRetryException): 4
Map (java.util.Map): 3