Search in sources :

Example 11 with HCatFieldSchema

use of org.apache.hive.hcatalog.data.schema.HCatFieldSchema in project hive by apache.

From the class TestHCatClient, method testUpdateTableSchema:

@Test
public void testUpdateTableSchema() throws Exception {
    // NOTE(review): the original wrapped this whole body in
    // catch (Exception) { assertTrue(msg, false); }, which discards the real
    // stack trace and reports every failure as a generic assertion error.
    // The method already declares `throws Exception`, so any unexpected
    // exception should simply propagate and fail the test with full context.
    HCatClient client = HCatClient.create(new Configuration(hcatConf));
    try {
        final String dbName = "testUpdateTableSchema_DBName";
        final String tableName = "testUpdateTableSchema_TableName";
        // Start from a clean slate: drop any leftover DB from a prior run.
        client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
        client.createDatabase(HCatCreateDBDesc.create(dbName).build());
        List<HCatFieldSchema> oldSchema = Arrays.asList(new HCatFieldSchema("foo", Type.INT, ""), new HCatFieldSchema("bar", Type.STRING, ""));
        client.createTable(HCatCreateTableDesc.create(dbName, tableName, oldSchema).build());
        // The replacement schema shares no columns with the old one, proving
        // that updateTableSchema() replaces the column list wholesale.
        List<HCatFieldSchema> newSchema = Arrays.asList(new HCatFieldSchema("completely", Type.DOUBLE, ""), new HCatFieldSchema("new", Type.STRING, ""), new HCatFieldSchema("fields", Type.STRING, ""));
        client.updateTableSchema(dbName, tableName, newSchema);
        assertArrayEquals(newSchema.toArray(), client.getTable(dbName, tableName).getCols().toArray());
        client.dropDatabase(dbName, false, HCatClient.DropDBMode.CASCADE);
    } finally {
        // Original leaked the client; release metastore resources even on failure.
        client.close();
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HCatException(org.apache.hive.hcatalog.common.HCatException) IOException(java.io.IOException) HCatFieldSchema(org.apache.hive.hcatalog.data.schema.HCatFieldSchema) Test(org.junit.Test)

Example 12 with HCatFieldSchema

use of org.apache.hive.hcatalog.data.schema.HCatFieldSchema in project hive by apache.

From the class TestHCatClient, method testCreateTableLike:

@Test
public void testCreateTableLike() throws Exception {
    // Verifies that createTableLike() produces a second table whose name
    // matches the same listing pattern as the source table.
    HCatClient client = HCatClient.create(new Configuration(hcatConf));
    String tableName = "tableone";
    String cloneTable = "tabletwo";
    // Drop both tables up front so a previous failed run cannot interfere.
    client.dropTable(null, tableName, true);
    client.dropTable(null, cloneTable, true);
    // Program to the interface; ArrayList is an implementation detail here.
    List<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
    cols.add(new HCatFieldSchema("id", Type.INT, "id columns"));
    cols.add(new HCatFieldSchema("value", Type.STRING, "id columns"));
    HCatCreateTableDesc tableDesc = HCatCreateTableDesc.create(null, tableName, cols).fileFormat("rcfile").build();
    client.createTable(tableDesc);
    // Create a new table modelled on the previous one.
    client.createTableLike(null, tableName, cloneTable, true, false, null);
    List<String> tables = client.listTableNamesByPattern(null, "table*");
    // assertEquals reports the actual count on failure, unlike
    // assertTrue(tables.size() == 2) which only says "expected true".
    assertEquals(2, tables.size());
    client.close();
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) ArrayList(java.util.ArrayList) HCatFieldSchema(org.apache.hive.hcatalog.data.schema.HCatFieldSchema) Test(org.junit.Test)

Example 13 with HCatFieldSchema

use of org.apache.hive.hcatalog.data.schema.HCatFieldSchema in project hive by apache.

From the class TestHCatClient, method testObjectNotFoundException:

@Test
public void testObjectNotFoundException() throws Exception {
    // NOTE(review): the original wrapped the whole body in
    // catch (Throwable) { assertTrue(msg, false); }, which hides the real
    // stack trace behind a generic assertion failure. The inner try/catch
    // blocks below are intentional (they assert that SPECIFIC exceptions are
    // thrown) and are kept as-is; only the outer wrapper is removed, letting
    // unexpected failures propagate with full context.
    HCatClient client = HCatClient.create(new Configuration(hcatConf));
    String dbName = "testObjectNotFoundException_DBName";
    String tableName = "testObjectNotFoundException_TableName";
    client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
    try {
        // Test that fetching a non-existent db-name yields ObjectNotFound.
        client.getDatabase(dbName);
        fail("Expected ObjectNotFoundException.");
    } catch (Exception exception) {
        LOG.info("Got exception: ", exception);
        assertTrue("Expected ObjectNotFoundException. Got:" + exception.getClass(), exception instanceof ObjectNotFoundException);
    }
    client.createDatabase(HCatCreateDBDesc.create(dbName).build());
    try {
        // Test that fetching a non-existent table-name yields ObjectNotFound.
        client.getTable(dbName, tableName);
        fail("Expected ObjectNotFoundException.");
    } catch (Exception exception) {
        LOG.info("Got exception: ", exception);
        assertTrue("Expected ObjectNotFoundException. Got:" + exception.getClass(), exception instanceof ObjectNotFoundException);
    }
    String partitionColumn = "part";
    List<HCatFieldSchema> columns = Arrays.asList(new HCatFieldSchema("col", Type.STRING, ""));
    ArrayList<HCatFieldSchema> partitionColumns = new ArrayList<HCatFieldSchema>(Arrays.asList(new HCatFieldSchema(partitionColumn, Type.STRING, "")));
    HCatTable table = new HCatTable(dbName, tableName).cols(columns).partCols(partitionColumns);
    client.createTable(HCatCreateTableDesc.create(table, false).build());
    HCatTable createdTable = client.getTable(dbName, tableName);
    Map<String, String> partitionSpec = new HashMap<String, String>();
    partitionSpec.put(partitionColumn, "foobar");
    try {
        // Test that fetching a non-existent partition yields ObjectNotFound.
        client.getPartition(dbName, tableName, partitionSpec);
        fail("Expected ObjectNotFoundException.");
    } catch (Exception exception) {
        LOG.info("Got exception: ", exception);
        assertTrue("Expected ObjectNotFoundException. Got:" + exception.getClass(), exception instanceof ObjectNotFoundException);
    }
    client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(createdTable, partitionSpec, makePartLocation(createdTable, partitionSpec))).build());
    // Test that listPartitionsByFilter() returns an empty-set, if the filter selects no partitions.
    assertEquals("Expected empty set of partitions.", 0, client.listPartitionsByFilter(dbName, tableName, partitionColumn + " < 'foobar'").size());
    try {
        // Test that getPartition() throws HCatException (but NOT
        // ObjectNotFoundException) if the partition-key is incorrect.
        partitionSpec.put("NonExistentKey", "foobar");
        client.getPartition(dbName, tableName, partitionSpec);
        fail("Expected HCatException.");
    } catch (Exception exception) {
        LOG.info("Got exception: ", exception);
        assertTrue("Expected HCatException. Got:" + exception.getClass(), exception instanceof HCatException);
        assertFalse("Did not expect ObjectNotFoundException.", exception instanceof ObjectNotFoundException);
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) HCatException(org.apache.hive.hcatalog.common.HCatException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HCatException(org.apache.hive.hcatalog.common.HCatException) IOException(java.io.IOException) HCatFieldSchema(org.apache.hive.hcatalog.data.schema.HCatFieldSchema) Test(org.junit.Test)

Example 14 with HCatFieldSchema

use of org.apache.hive.hcatalog.data.schema.HCatFieldSchema in project hive by apache.

From the class TestCommands, method testDropTableCommand:

@Test
public void testDropTableCommand() throws HCatException, CommandNeedRetryException {
    // Verifies DropTableCommand in both replicated and normal modes:
    //  - replicated drop is a no-op when the command's event-id is older than
    //    the table's repl.state.id, and drops when it is newer;
    //  - normal drop always drops.
    String dbName = "cmd_testdb";
    String tableName = "cmd_testtable";
    int evid = 789;
    List<HCatFieldSchema> cols = HCatSchemaUtils.getHCatSchema("a:int,b:string").getFields();
    Command testReplicatedDropCmd = new DropTableCommand(dbName, tableName, true, evid);
    assertEquals(evid, testReplicatedDropCmd.getEventId());
    assertEquals(1, testReplicatedDropCmd.get().size());
    // assertTrue/assertFalse are clearer than assertEquals(true/false, ...).
    assertTrue(testReplicatedDropCmd.isRetriable());
    assertFalse(testReplicatedDropCmd.isUndoable());
    CommandTestUtils.testCommandSerialization(testReplicatedDropCmd);
    Command testNormalDropCmd = new DropTableCommand(dbName, tableName, false, evid);
    assertEquals(evid, testNormalDropCmd.getEventId());
    assertEquals(1, testNormalDropCmd.get().size());
    assertTrue(testNormalDropCmd.isRetriable());
    assertFalse(testNormalDropCmd.isUndoable());
    CommandTestUtils.testCommandSerialization(testNormalDropCmd);
    client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
    client.createDatabase(HCatCreateDBDesc.create(dbName).ifNotExists(false).build());
    // Table's repl.state.id (evid + 5) is NEWER than the command's event-id.
    Map<String, String> tprops = new HashMap<String, String>();
    tprops.put(ReplicationUtils.REPL_STATE_ID, String.valueOf(evid + 5));
    HCatTable tableToCreate = (new HCatTable(dbName, tableName)).tblProps(tprops).cols(cols);
    client.createTable(HCatCreateTableDesc.create(tableToCreate).build());
    assertNotNull(client.getTable(dbName, tableName));
    // Test replicated drop, should not drop, because evid < repl.state.id
    LOG.info("About to run :" + testReplicatedDropCmd.get().get(0));
    driver.run(testReplicatedDropCmd.get().get(0));
    assertNotNull(client.getTable(dbName, tableName));
    // Test normal drop, should drop unconditionally.
    LOG.info("About to run :" + testNormalDropCmd.get().get(0));
    driver.run(testNormalDropCmd.get().get(0));
    Exception onfe = null;
    try {
        // Result intentionally discarded; only the exception matters here.
        client.getTable(dbName, tableName);
    } catch (Exception e) {
        onfe = e;
    }
    assertNotNull(onfe);
    assertTrue(onfe instanceof ObjectNotFoundException);
    // Recreate with repl.state.id (evid - 5) OLDER than the command's event-id.
    Map<String, String> tprops2 = new HashMap<String, String>();
    tprops2.put(ReplicationUtils.REPL_STATE_ID, String.valueOf(evid - 5));
    HCatTable tableToCreate2 = (new HCatTable(dbName, tableName)).tblProps(tprops2).cols(cols);
    client.createTable(HCatCreateTableDesc.create(tableToCreate2).build());
    assertNotNull(client.getTable(dbName, tableName));
    // Test replicated drop, should drop this time, since repl.state.id < evid.
    LOG.info("About to run :" + testReplicatedDropCmd.get().get(0));
    driver.run(testReplicatedDropCmd.get().get(0));
    Exception onfe2 = null;
    try {
        client.getTable(dbName, tableName);
    } catch (Exception e) {
        onfe2 = e;
    }
    assertNotNull(onfe2);
    assertTrue(onfe2 instanceof ObjectNotFoundException);
}
Also used : Command(org.apache.hive.hcatalog.api.repl.Command) HashMap(java.util.HashMap) ObjectNotFoundException(org.apache.hive.hcatalog.api.ObjectNotFoundException) HCatTable(org.apache.hive.hcatalog.api.HCatTable) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HCatException(org.apache.hive.hcatalog.common.HCatException) CommandNeedRetryException(org.apache.hadoop.hive.ql.CommandNeedRetryException) ObjectNotFoundException(org.apache.hive.hcatalog.api.ObjectNotFoundException) IOException(java.io.IOException) HCatFieldSchema(org.apache.hive.hcatalog.data.schema.HCatFieldSchema) Test(org.junit.Test)

Example 15 with HCatFieldSchema

use of org.apache.hive.hcatalog.data.schema.HCatFieldSchema in project hive by apache.

From the class TestCommands, method testDropPartitionCommand:

@Test
public void testDropPartitionCommand() throws HCatException, CommandNeedRetryException, MetaException {
    // Verifies DropPartitionCommand in both replicated and normal modes:
    //  - replicated drop is a no-op when the command's event-id is older than
    //    the partition's repl.state.id, and drops when it is newer;
    //  - normal drop always drops.
    String dbName = "cmd_testdb";
    String tableName = "cmd_testtable";
    int evid = 789;
    List<HCatFieldSchema> pcols = HCatSchemaUtils.getHCatSchema("b:string").getFields();
    List<HCatFieldSchema> cols = HCatSchemaUtils.getHCatSchema("a:int").getFields();
    Map<String, String> ptnDesc = new HashMap<String, String>();
    ptnDesc.put("b", "test");
    Command testReplicatedDropPtnCmd = new DropPartitionCommand(dbName, tableName, ptnDesc, true, evid);
    assertEquals(evid, testReplicatedDropPtnCmd.getEventId());
    assertEquals(1, testReplicatedDropPtnCmd.get().size());
    // assertTrue/assertFalse are clearer than assertEquals(true/false, ...).
    assertTrue(testReplicatedDropPtnCmd.isRetriable());
    assertFalse(testReplicatedDropPtnCmd.isUndoable());
    CommandTestUtils.testCommandSerialization(testReplicatedDropPtnCmd);
    Command testNormalDropPtnCmd = new DropPartitionCommand(dbName, tableName, ptnDesc, false, evid);
    assertEquals(evid, testNormalDropPtnCmd.getEventId());
    assertEquals(1, testNormalDropPtnCmd.get().size());
    assertTrue(testNormalDropPtnCmd.isRetriable());
    assertFalse(testNormalDropPtnCmd.isUndoable());
    CommandTestUtils.testCommandSerialization(testNormalDropPtnCmd);
    client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
    client.createDatabase(HCatCreateDBDesc.create(dbName).ifNotExists(false).build());
    // Partition's repl.state.id (evid + 5) is NEWER than the command's event-id.
    Map<String, String> props = new HashMap<String, String>();
    props.put(ReplicationUtils.REPL_STATE_ID, String.valueOf(evid + 5));
    HCatTable table = (new HCatTable(dbName, tableName)).tblProps(props).cols(cols).partCols(pcols);
    client.createTable(HCatCreateTableDesc.create(table).build());
    HCatTable tableCreated = client.getTable(dbName, tableName);
    assertNotNull(tableCreated);
    HCatPartition ptnToAdd = (new HCatPartition(tableCreated, ptnDesc, TestHCatClient.makePartLocation(tableCreated, ptnDesc))).parameters(props);
    client.addPartition(HCatAddPartitionDesc.create(ptnToAdd).build());
    assertNotNull(client.getPartition(dbName, tableName, ptnDesc));
    // Test replicated drop, should not drop, because evid < repl.state.id
    LOG.info("About to run :" + testReplicatedDropPtnCmd.get().get(0));
    driver.run(testReplicatedDropPtnCmd.get().get(0));
    assertNotNull(client.getPartition(dbName, tableName, ptnDesc));
    // Test normal drop, should drop unconditionally.
    LOG.info("About to run :" + testNormalDropPtnCmd.get().get(0));
    driver.run(testNormalDropPtnCmd.get().get(0));
    Exception onfe = null;
    try {
        // Result intentionally discarded; only the exception matters here.
        client.getPartition(dbName, tableName, ptnDesc);
    } catch (Exception e) {
        onfe = e;
    }
    assertNotNull(onfe);
    assertTrue(onfe instanceof ObjectNotFoundException);
    // Re-add with repl.state.id (evid - 5) OLDER than the command's event-id.
    Map<String, String> props2 = new HashMap<String, String>();
    props2.put(ReplicationUtils.REPL_STATE_ID, String.valueOf(evid - 5));
    HCatPartition ptnToAdd2 = (new HCatPartition(tableCreated, ptnDesc, TestHCatClient.makePartLocation(tableCreated, ptnDesc))).parameters(props2);
    client.addPartition(HCatAddPartitionDesc.create(ptnToAdd2).build());
    assertNotNull(client.getPartition(dbName, tableName, ptnDesc));
    // Test replicated drop, should drop this time, since repl.state.id < evid.
    LOG.info("About to run :" + testReplicatedDropPtnCmd.get().get(0));
    driver.run(testReplicatedDropPtnCmd.get().get(0));
    Exception onfe2 = null;
    try {
        client.getPartition(dbName, tableName, ptnDesc);
    } catch (Exception e) {
        onfe2 = e;
    }
    assertNotNull(onfe2);
    assertTrue(onfe2 instanceof ObjectNotFoundException);
}
Also used : HashMap(java.util.HashMap) Command(org.apache.hive.hcatalog.api.repl.Command) ObjectNotFoundException(org.apache.hive.hcatalog.api.ObjectNotFoundException) HCatTable(org.apache.hive.hcatalog.api.HCatTable) HCatPartition(org.apache.hive.hcatalog.api.HCatPartition) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HCatException(org.apache.hive.hcatalog.common.HCatException) CommandNeedRetryException(org.apache.hadoop.hive.ql.CommandNeedRetryException) ObjectNotFoundException(org.apache.hive.hcatalog.api.ObjectNotFoundException) IOException(java.io.IOException) HCatFieldSchema(org.apache.hive.hcatalog.data.schema.HCatFieldSchema) Test(org.junit.Test)

Aggregations

HCatFieldSchema (org.apache.hive.hcatalog.data.schema.HCatFieldSchema)61 ArrayList (java.util.ArrayList)34 Test (org.junit.Test)30 HCatException (org.apache.hive.hcatalog.common.HCatException)22 IOException (java.io.IOException)21 HCatSchema (org.apache.hive.hcatalog.data.schema.HCatSchema)21 HashMap (java.util.HashMap)19 Configuration (org.apache.hadoop.conf.Configuration)18 MetaException (org.apache.hadoop.hive.metastore.api.MetaException)15 FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema)7 ResourceSchema (org.apache.pig.ResourceSchema)6 HCatTable (org.apache.hive.hcatalog.api.HCatTable)5 ResourceFieldSchema (org.apache.pig.ResourceSchema.ResourceFieldSchema)5 Map (java.util.Map)4 Properties (java.util.Properties)4 Path (org.apache.hadoop.fs.Path)4 List (java.util.List)3 StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor)3 CommandNeedRetryException (org.apache.hadoop.hive.ql.CommandNeedRetryException)3 FrontendException (org.apache.pig.impl.logicalLayer.FrontendException)3