Example 6 with Type

Use of org.apache.hadoop.hive.metastore.api.Type in project hive by apache.

From class TestPermsGrp, method getTable.

private Table getTable(String dbName, String tblName, String typeName) throws NoSuchObjectException, MetaException, TException, AlreadyExistsException, InvalidObjectException {
    msc.dropTable(dbName, tblName);
    silentDropDatabase(dbName);
    msc.dropType(typeName);
    Type typ1 = new Type();
    typ1.setName(typeName);
    typ1.setFields(new ArrayList<FieldSchema>(1));
    typ1.getFields().add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
    msc.createType(typ1);
    Table tbl = new Table();
    tbl.setDbName(dbName);
    tbl.setTableName(tblName);
    StorageDescriptor sd = new StorageDescriptor();
    sd.setSerdeInfo(new SerDeInfo());
    sd.getSerdeInfo().setName(tblName);
    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
    sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
    sd.setInputFormat(HiveInputFormat.class.getName());
    sd.setOutputFormat(HiveOutputFormat.class.getName());
    tbl.setSd(sd);
    sd.setCols(typ1.getFields());
    return tbl;
}
Also used: HiveInputFormat (org.apache.hadoop.hive.ql.io.HiveInputFormat), Type (org.apache.hadoop.hive.metastore.api.Type), Table (org.apache.hadoop.hive.metastore.api.Table), LazySimpleSerDe (org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo), StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor), HiveOutputFormat (org.apache.hadoop.hive.ql.io.HiveOutputFormat)
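
A minimal usage sketch (hypothetical database and table names; msc is the test class's HiveMetaStoreClient field): note that getTable() drops the database as part of its cleanup, so the caller has to recreate it before storing the returned Table.

// Hypothetical call site inside TestPermsGrp. getTable() above drops
// the database, so recreate it before persisting the returned Table.
Table tbl = getTable("testdb", "testtbl", "Person");
Database db = new Database();
db.setName("testdb");
msc.createDatabase(db);
msc.createTable(tbl);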

Example 7 with Type

Use of org.apache.hadoop.hive.metastore.api.Type in project hive by apache.

From class TestHiveMetaStore, method createType.

private Type createType(String typeName, Map<String, String> fields) throws Throwable {
    Type typ1 = new Type();
    typ1.setName(typeName);
    typ1.setFields(new ArrayList<FieldSchema>(fields.size()));
    for (Map.Entry<String, String> field : fields.entrySet()) {
        typ1.getFields().add(new FieldSchema(field.getKey(), field.getValue(), ""));
    }
    client.createType(typ1);
    return typ1;
}
Also used: Type (org.apache.hadoop.hive.metastore.api.Type), ResourceType (org.apache.hadoop.hive.metastore.api.ResourceType), FunctionType (org.apache.hadoop.hive.metastore.api.FunctionType), PrincipalType (org.apache.hadoop.hive.metastore.api.PrincipalType), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema)
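
A usage sketch (hypothetical call site; a LinkedHashMap is used so the declared field order is preserved, which a plain HashMap would not guarantee):

// Hypothetical caller inside TestHiveMetaStore.
Map<String, String> fields = new LinkedHashMap<String, String>();
fields.put("name", serdeConstants.STRING_TYPE_NAME);
fields.put("income", serdeConstants.INT_TYPE_NAME);
Type personType = createType("Person", fields);
assertEquals(2, personType.getFields().size());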

Example 8 with Type

Use of org.apache.hadoop.hive.metastore.api.Type in project hive by apache.

From class TestHiveMetaStore, method partitionTester.

public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf) throws Exception {
    try {
        String dbName = "compdb";
        String tblName = "comptbl";
        String typeName = "Person";
        List<String> vals = makeVals("2008-07-01 14:13:12", "14");
        List<String> vals2 = makeVals("2008-07-01 14:13:12", "15");
        List<String> vals3 = makeVals("2008-07-02 14:13:12", "15");
        List<String> vals4 = makeVals("2008-07-03 14:13:12", "151");
        client.dropTable(dbName, tblName);
        silentDropDatabase(dbName);
        Database db = new Database();
        db.setName(dbName);
        client.createDatabase(db);
        db = client.getDatabase(dbName);
        Path dbPath = new Path(db.getLocationUri());
        FileSystem fs = FileSystem.get(dbPath.toUri(), hiveConf);
        boolean inheritPerms = hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
        FsPermission dbPermission = fs.getFileStatus(dbPath).getPermission();
        if (inheritPerms) {
            // Set different perms (octal 0750 == decimal 488) for the database dir for further tests
            dbPermission = new FsPermission((short) 488);
            fs.setPermission(dbPath, dbPermission);
        }
        client.dropType(typeName);
        Type typ1 = new Type();
        typ1.setName(typeName);
        typ1.setFields(new ArrayList<FieldSchema>(2));
        typ1.getFields().add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
        typ1.getFields().add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
        client.createType(typ1);
        Table tbl = new Table();
        tbl.setDbName(dbName);
        tbl.setTableName(tblName);
        StorageDescriptor sd = new StorageDescriptor();
        tbl.setSd(sd);
        sd.setCols(typ1.getFields());
        sd.setCompressed(false);
        sd.setNumBuckets(1);
        sd.setParameters(new HashMap<String, String>());
        sd.getParameters().put("test_param_1", "Use this for comments etc");
        sd.setBucketCols(new ArrayList<String>(2));
        sd.getBucketCols().add("name");
        sd.setSerdeInfo(new SerDeInfo());
        sd.getSerdeInfo().setName(tbl.getTableName());
        sd.getSerdeInfo().setParameters(new HashMap<String, String>());
        sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
        sd.setSortCols(new ArrayList<Order>());
        sd.setStoredAsSubDirectories(false);
        sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
        sd.setInputFormat(HiveInputFormat.class.getName());
        sd.setOutputFormat(HiveOutputFormat.class.getName());
        // Skewed information: column "name" is skewed on value "1", which is mapped to its own location
        SkewedInfo skewInfor = new SkewedInfo();
        skewInfor.setSkewedColNames(Arrays.asList("name"));
        List<String> skv = Arrays.asList("1");
        skewInfor.setSkewedColValues(Arrays.asList(skv));
        Map<List<String>, String> scvlm = new HashMap<List<String>, String>();
        scvlm.put(skv, "location1");
        skewInfor.setSkewedColValueLocationMaps(scvlm);
        sd.setSkewedInfo(skewInfor);
        tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
        tbl.getPartitionKeys().add(new FieldSchema("ds", serdeConstants.STRING_TYPE_NAME, ""));
        tbl.getPartitionKeys().add(new FieldSchema("hr", serdeConstants.STRING_TYPE_NAME, ""));
        client.createTable(tbl);
        if (isThriftClient) {
            // the createTable() above does not update the location in the 'tbl'
            // object when the client is a thrift client and the code below relies
            // on the location being present in the 'tbl' object - so get the table
            // from the metastore
            tbl = client.getTable(dbName, tblName);
        }
        assertEquals(dbPermission, fs.getFileStatus(new Path(tbl.getSd().getLocation())).getPermission());
        Partition part = makePartitionObject(dbName, tblName, vals, tbl, "/part1");
        Partition part2 = makePartitionObject(dbName, tblName, vals2, tbl, "/part2");
        Partition part3 = makePartitionObject(dbName, tblName, vals3, tbl, "/part3");
        Partition part4 = makePartitionObject(dbName, tblName, vals4, tbl, "/part4");
        // check if the partition exists (it shouldn't)
        boolean exceptionThrown = false;
        try {
            client.getPartition(dbName, tblName, vals); // expected to throw
        } catch (Exception e) {
            assertEquals("partition should not have existed", NoSuchObjectException.class, e.getClass());
            exceptionThrown = true;
        }
        assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);
        Partition retp = client.add_partition(part);
        assertNotNull("Unable to create partition " + part, retp);
        assertEquals(dbPermission, fs.getFileStatus(new Path(retp.getSd().getLocation())).getPermission());
        Partition retp2 = client.add_partition(part2);
        assertNotNull("Unable to create partition " + part2, retp2);
        assertEquals(dbPermission, fs.getFileStatus(new Path(retp2.getSd().getLocation())).getPermission());
        Partition retp3 = client.add_partition(part3);
        assertNotNull("Unable to create partition " + part3, retp3);
        assertEquals(dbPermission, fs.getFileStatus(new Path(retp3.getSd().getLocation())).getPermission());
        Partition retp4 = client.add_partition(part4);
        assertNotNull("Unable to create partition " + part4, retp4);
        assertEquals(dbPermission, fs.getFileStatus(new Path(retp4.getSd().getLocation())).getPermission());
        Partition part_get = client.getPartition(dbName, tblName, part.getValues());
        if (isThriftClient) {
            // since we are using thrift, 'part' will not have the create time and
            // last DDL time set since it does not get updated in the add_partition()
            // call - likewise part2 and part3 - set it correctly so that equals check
            // doesn't fail
            adjust(client, part, dbName, tblName);
            adjust(client, part2, dbName, tblName);
            adjust(client, part3, dbName, tblName);
        }
        assertTrue("Partitions are not same", part.equals(part_get));
        // check null cols schemas for a partition
        List<String> vals6 = makeVals("2016-02-22 00:00:00", "16");
        Partition part6 = makePartitionObject(dbName, tblName, vals6, tbl, "/part5");
        part6.getSd().setCols(null);
        LOG.info("Creating partition will null field schema");
        client.add_partition(part6);
        LOG.info("Listing all partitions for table " + dbName + "." + tblName);
        final List<Partition> partitions = client.listPartitions(dbName, tblName, (short) -1);
        boolean foundPart = false;
        for (Partition p : partitions) {
            if (p.getValues().equals(vals6)) {
                assertNull(p.getSd().getCols());
                LOG.info("Found partition " + p + " having null field schema");
                foundPart = true;
            }
        }
        assertTrue(foundPart);
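        // Partition names take the key=value path form; FileUtils.escapePathName
        // percent-encodes characters that are unsafe in a path, so the colons in
        // "14:13:12" appear as "%3A" below.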
        String partName = "ds=" + FileUtils.escapePathName("2008-07-01 14:13:12") + "/hr=14";
        String part2Name = "ds=" + FileUtils.escapePathName("2008-07-01 14:13:12") + "/hr=15";
        String part3Name = "ds=" + FileUtils.escapePathName("2008-07-02 14:13:12") + "/hr=15";
        String part4Name = "ds=" + FileUtils.escapePathName("2008-07-03 14:13:12") + "/hr=151";
        part_get = client.getPartition(dbName, tblName, partName);
        assertTrue("Partitions are not the same", part.equals(part_get));
        // Test partition listing with a partial spec - ds is specified but hr is not
        List<String> partialVals = new ArrayList<String>();
        partialVals.add(vals.get(0));
        Set<Partition> parts = new HashSet<Partition>();
        parts.add(part);
        parts.add(part2);
        List<Partition> partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
        assertTrue("Should have returned 2 partitions", partial.size() == 2);
        assertTrue("Not all parts returned", partial.containsAll(parts));
        Set<String> partNames = new HashSet<String>();
        partNames.add(partName);
        partNames.add(part2Name);
        List<String> partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
        assertTrue("Should have returned 2 partition names", partialNames.size() == 2);
        assertTrue("Not all part names returned", partialNames.containsAll(partNames));
        partNames.add(part3Name);
        partNames.add(part4Name);
        // An empty value acts as a wildcard for that partition key
        partialVals.clear();
        partialVals.add("");
        partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
        assertTrue("Should have returned 5 partition names", partialNames.size() == 5);
        assertTrue("Not all part names returned", partialNames.containsAll(partNames));
        // Test partition listing with a partial spec - hr is specified but ds is not
        parts.clear();
        parts.add(part2);
        parts.add(part3);
        partialVals.clear();
        partialVals.add("");
        partialVals.add(vals2.get(1));
        partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
        assertEquals("Should have returned 2 partitions", 2, partial.size());
        assertTrue("Not all parts returned", partial.containsAll(parts));
        partNames.clear();
        partNames.add(part2Name);
        partNames.add(part3Name);
        partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
        assertEquals("Should have returned 2 partition names", 2, partialNames.size());
        assertTrue("Not all part names returned", partialNames.containsAll(partNames));
        // Verify escaped partition names don't return partitions
        exceptionThrown = false;
        try {
            String badPartName = "ds=2008-07-01 14%3A13%3A12/hrs=14";
            client.getPartition(dbName, tblName, badPartName);
        } catch (NoSuchObjectException e) {
            exceptionThrown = true;
        }
        assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);
        Path partPath = new Path(part.getSd().getLocation());
        assertTrue(fs.exists(partPath));
        client.dropPartition(dbName, tblName, part.getValues(), true);
        assertFalse(fs.exists(partPath));
        // Test append_partition_by_name
        client.appendPartition(dbName, tblName, partName);
        Partition part5 = client.getPartition(dbName, tblName, part.getValues());
        assertTrue("Append partition by name failed", part5.getValues().equals(vals));
        ;
        Path part5Path = new Path(part5.getSd().getLocation());
        assertTrue(fs.exists(part5Path));
        // Test drop_partition_by_name
        assertTrue("Drop partition by name failed", client.dropPartition(dbName, tblName, partName, true));
        assertFalse(fs.exists(part5Path));
        // add the partition again so that drop table with a partition can be
        // tested
        retp = client.add_partition(part);
        assertNotNull("Unable to create partition " + part, retp);
        assertEquals(dbPermission, fs.getFileStatus(new Path(retp.getSd().getLocation())).getPermission());
        // test add_partitions
        List<String> mvals1 = makeVals("2008-07-04 14:13:12", "14641");
        List<String> mvals2 = makeVals("2008-07-04 14:13:12", "14642");
        List<String> mvals3 = makeVals("2008-07-04 14:13:12", "14643");
        // mvals4 intentionally repeats mvals3's values to provoke a duplicate-partition error below
        List<String> mvals4 = makeVals("2008-07-04 14:13:12", "14643");
        List<String> mvals5 = makeVals("2008-07-04 14:13:12", "14645");
        Exception savedException;
        // add_partitions(empty list) : ok, normal operation
        client.add_partitions(new ArrayList<Partition>());
        // add_partitions(1,2,3) : ok, normal operation
        Partition mpart1 = makePartitionObject(dbName, tblName, mvals1, tbl, "/mpart1");
        Partition mpart2 = makePartitionObject(dbName, tblName, mvals2, tbl, "/mpart2");
        Partition mpart3 = makePartitionObject(dbName, tblName, mvals3, tbl, "/mpart3");
        client.add_partitions(Arrays.asList(mpart1, mpart2, mpart3));
        if (isThriftClient) {
            // do DDL time munging if thrift mode
            adjust(client, mpart1, dbName, tblName);
            adjust(client, mpart2, dbName, tblName);
            adjust(client, mpart3, dbName, tblName);
        }
        verifyPartitionsPublished(client, dbName, tblName, Arrays.asList(mvals1.get(0)), Arrays.asList(mpart1, mpart2, mpart3));
        Partition mpart4 = makePartitionObject(dbName, tblName, mvals4, tbl, "/mpart4");
        Partition mpart5 = makePartitionObject(dbName, tblName, mvals5, tbl, "/mpart5");
        // create dir for /mpart5
        Path mp5Path = new Path(mpart5.getSd().getLocation());
        warehouse.mkdirs(mp5Path, true);
        assertTrue(fs.exists(mp5Path));
        assertEquals(dbPermission, fs.getFileStatus(mp5Path).getPermission());
        // add_partitions(5,4) : err = duplicate keyvals on mpart4
        savedException = null;
        try {
            client.add_partitions(Arrays.asList(mpart5, mpart4));
        } catch (Exception e) {
            savedException = e;
        } finally {
            assertNotNull(savedException);
        }
        // check that /mpart4 does not exist, but /mpart5 still does.
        assertTrue(fs.exists(mp5Path));
        assertFalse(fs.exists(new Path(mpart4.getSd().getLocation())));
        // add_partitions(5) : ok
        client.add_partitions(Arrays.asList(mpart5));
        if (isThriftClient) {
            // do DDL time munging if thrift mode
            adjust(client, mpart5, dbName, tblName);
        }
        verifyPartitionsPublished(client, dbName, tblName, Arrays.asList(mvals1.get(0)), Arrays.asList(mpart1, mpart2, mpart3, mpart5));
        //// end add_partitions tests
        client.dropTable(dbName, tblName);
        client.dropType(typeName);
        // recreate table as external, drop partition and it should
        // still exist
        tbl.setParameters(new HashMap<String, String>());
        tbl.getParameters().put("EXTERNAL", "TRUE");
        client.createTable(tbl);
        retp = client.add_partition(part);
        assertTrue(fs.exists(partPath));
        client.dropPartition(dbName, tblName, part.getValues(), true);
        assertTrue(fs.exists(partPath));
        for (String tableName : client.getTables(dbName, "*")) {
            client.dropTable(dbName, tableName);
        }
        client.dropDatabase(dbName);
    } catch (Exception e) {
        System.err.println(StringUtils.stringifyException(e));
        System.err.println("testPartition() failed.");
        throw e;
    }
}
Also used: HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), LazySimpleSerDe (org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor), ArrayList (java.util.ArrayList), HiveOutputFormat (org.apache.hadoop.hive.ql.io.HiveOutputFormat), HiveInputFormat (org.apache.hadoop.hive.ql.io.HiveInputFormat), SkewedInfo (org.apache.hadoop.hive.metastore.api.SkewedInfo), FileSystem (org.apache.hadoop.fs.FileSystem), Database (org.apache.hadoop.hive.metastore.api.Database), List (java.util.List), FsPermission (org.apache.hadoop.fs.permission.FsPermission), HashSet (java.util.HashSet), Path (org.apache.hadoop.fs.Path), Order (org.apache.hadoop.hive.metastore.api.Order), Partition (org.apache.hadoop.hive.metastore.api.Partition), Table (org.apache.hadoop.hive.metastore.api.Table), SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo), MetaException (org.apache.hadoop.hive.metastore.api.MetaException), AlreadyExistsException (org.apache.hadoop.hive.metastore.api.AlreadyExistsException), InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException), ConfigValSecurityException (org.apache.hadoop.hive.metastore.api.ConfigValSecurityException), SQLException (java.sql.SQLException), UnknownDBException (org.apache.hadoop.hive.metastore.api.UnknownDBException), TException (org.apache.thrift.TException), InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException), NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException), Type (org.apache.hadoop.hive.metastore.api.Type), ResourceType (org.apache.hadoop.hive.metastore.api.ResourceType), FunctionType (org.apache.hadoop.hive.metastore.api.FunctionType), PrincipalType (org.apache.hadoop.hive.metastore.api.PrincipalType)
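
makePartitionObject() itself is not shown in this excerpt. A hypothetical reconstruction of what such a helper plausibly does, inferred only from how the test uses it (the real TestHiveMetaStore helper may differ): it attaches the partition values, clones the table's storage descriptor, and points the location at a suffix under the table directory.

// Hypothetical sketch, not the actual TestHiveMetaStore helper.
private static Partition makePartitionObjectSketch(String dbName, String tblName,
        List<String> ptnVals, Table tbl, String ptnLocationSuffix) {
    Partition part = new Partition();
    part.setDbName(dbName);
    part.setTableName(tblName);
    part.setValues(ptnVals);
    part.setParameters(new HashMap<String, String>());
    // Thrift-generated deepCopy() leaves the table's SD intact while the
    // partition gets its own location under the table directory.
    part.setSd(tbl.getSd().deepCopy());
    part.getSd().setLocation(tbl.getSd().getLocation() + ptnLocationSuffix);
    return part;
}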

Example 9 with Type

Use of org.apache.hadoop.hive.metastore.api.Type in project hive by apache.

From class TestHiveMetaStore, method testSimpleTypeApi.

public void testSimpleTypeApi() throws Exception {
    try {
        client.dropType(serdeConstants.INT_TYPE_NAME);
        Type typ1 = new Type();
        typ1.setName(serdeConstants.INT_TYPE_NAME);
        boolean ret = client.createType(typ1);
        assertTrue("Unable to create type", ret);
        Type typ1_2 = client.getType(serdeConstants.INT_TYPE_NAME);
        assertNotNull(typ1_2);
        assertEquals(typ1.getName(), typ1_2.getName());
        ret = client.dropType(serdeConstants.INT_TYPE_NAME);
        assertTrue("unable to drop type integer", ret);
        boolean exceptionThrown = false;
        try {
            client.getType(serdeConstants.INT_TYPE_NAME);
        } catch (NoSuchObjectException e) {
            exceptionThrown = true;
        }
        assertTrue("Expected NoSuchObjectException", exceptionThrown);
    } catch (Exception e) {
        System.err.println(StringUtils.stringifyException(e));
        System.err.println("testSimpleTypeApi() failed.");
        throw e;
    }
}
Also used: Type (org.apache.hadoop.hive.metastore.api.Type), ResourceType (org.apache.hadoop.hive.metastore.api.ResourceType), FunctionType (org.apache.hadoop.hive.metastore.api.FunctionType), PrincipalType (org.apache.hadoop.hive.metastore.api.PrincipalType), NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException), MetaException (org.apache.hadoop.hive.metastore.api.MetaException), AlreadyExistsException (org.apache.hadoop.hive.metastore.api.AlreadyExistsException), InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException), ConfigValSecurityException (org.apache.hadoop.hive.metastore.api.ConfigValSecurityException), SQLException (java.sql.SQLException), UnknownDBException (org.apache.hadoop.hive.metastore.api.UnknownDBException), TException (org.apache.thrift.TException), InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException)
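
The exceptionThrown flag above is the JUnit 3-era idiom; under JUnit 4 the same drop-then-get check can be written declaratively. A sketch, assuming the suite's existing client fixture (hypothetical test method name):

// Hypothetical JUnit 4 variant of the check above.
@Test(expected = NoSuchObjectException.class)
public void getDroppedTypeThrows() throws Exception {
    Type t = new Type();
    t.setName(serdeConstants.INT_TYPE_NAME);
    client.createType(t);
    client.dropType(serdeConstants.INT_TYPE_NAME);
    // Fetching a dropped type is expected to throw NoSuchObjectException.
    client.getType(serdeConstants.INT_TYPE_NAME);
}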

Example 10 with Type

Use of org.apache.hadoop.hive.metastore.api.Type in project hive by apache.

From class TestHiveMetaStore, method testTransactionalValidation.

@Test
public void testTransactionalValidation() throws Throwable {
    String dbName = "acidDb";
    silentDropDatabase(dbName);
    Database db = new Database();
    db.setName(dbName);
    client.createDatabase(db);
    String tblName = "acidTable";
    String owner = "acid";
    Map<String, String> fields = new HashMap<String, String>();
    fields.put("name", serdeConstants.STRING_TYPE_NAME);
    fields.put("income", serdeConstants.INT_TYPE_NAME);
    Type type = createType("Person", fields);
    Map<String, String> params = new HashMap<String, String>();
    params.put("transactional", "");
    Map<String, String> serdParams = new HashMap<String, String>();
    serdParams.put(serdeConstants.SERIALIZATION_FORMAT, "1");
    StorageDescriptor sd = createStorageDescriptor(tblName, type.getFields(), params, serdParams);
    sd.setNumBuckets(0);
    sd.unsetBucketCols();
    // Fail - No "transactional" property is specified
    try {
        Table t = createTable(dbName, tblName, owner, params, null, sd, 0);
        Assert.assertTrue("Expected exception", false);
    } catch (MetaException e) {
        Assert.assertEquals("'transactional' property of TBLPROPERTIES may only have value 'true'", e.getMessage());
    }
    // Fail - "transactional" property is set to an invalid value
    try {
        params.clear();
        params.put("transactional", "foobar");
        Table t = createTable(dbName, tblName, owner, params, null, sd, 0);
        Assert.assertTrue("Expected exception", false);
    } catch (MetaException e) {
        Assert.assertEquals("'transactional' property of TBLPROPERTIES may only have value 'true'", e.getMessage());
    }
    // Fail - "transactional" is set to true, but the table is not bucketed
    try {
        params.clear();
        params.put("transactional", "true");
        Table t = createTable(dbName, tblName, owner, params, null, sd, 0);
        Assert.assertTrue("Expected exception", false);
    } catch (MetaException e) {
        Assert.assertEquals("The table must be bucketed and stored using an ACID compliant format (such as ORC)", e.getMessage());
    }
    // Fail - "transactional" is set to true, and the table is bucketed, but doesn't use ORC
    try {
        params.clear();
        params.put("transactional", "true");
        List<String> bucketCols = new ArrayList<String>();
        bucketCols.add("income");
        sd.setBucketCols(bucketCols);
        Table t = createTable(dbName, tblName, owner, params, null, sd, 0);
        Assert.assertTrue("Expected exception", false);
    } catch (MetaException e) {
        Assert.assertEquals("The table must be bucketed and stored using an ACID compliant format (such as ORC)", e.getMessage());
    }
    // Succeed - "transactional" is set to true, and the table is bucketed, and uses ORC
    params.clear();
    params.put("transactional", "true");
    List<String> bucketCols = new ArrayList<String>();
    bucketCols.add("income");
    sd.setBucketCols(bucketCols);
    sd.setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat");
    sd.setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat");
    Table t = createTable(dbName, tblName, owner, params, null, sd, 0);
    Assert.assertTrue("CREATE TABLE should succeed", "true".equals(t.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL)));
    // Fail - trying to set "transactional" to "false" is not allowed
    try {
        params.clear();
        params.put("transactional", "false");
        t = new Table();
        t.setParameters(params);
        client.alter_table(dbName, tblName, t);
        Assert.assertTrue("Expected exception", false);
    } catch (MetaException e) {
        Assert.assertEquals("TBLPROPERTIES with 'transactional'='true' cannot be unset", e.getMessage());
    }
    // Fail - trying to set "transactional" to "true" but doesn't satisfy bucketing and Input/OutputFormat requirement
    try {
        tblName += "1";
        params.clear();
        sd.unsetBucketCols();
        t = createTable(dbName, tblName, owner, params, null, sd, 0);
        params.put("transactional", "true");
        t.setParameters(params);
        client.alter_table(dbName, tblName, t);
        Assert.assertTrue("Expected exception", false);
    } catch (MetaException e) {
        Assert.assertEquals("The table must be bucketed and stored using an ACID compliant format (such as ORC)", e.getMessage());
    }
    // Succeed - trying to set "transactional" to "true", and satisfies bucketing and Input/OutputFormat requirement
    tblName += "2";
    params.clear();
    sd.setNumBuckets(1);
    sd.setBucketCols(bucketCols);
    t = createTable(dbName, tblName, owner, params, null, sd, 0);
    params.put("transactional", "true");
    t.setParameters(params);
    t.setPartitionKeys(Collections.EMPTY_LIST);
    client.alter_table(dbName, tblName, t);
    Assert.assertTrue("ALTER TABLE should succeed", "true".equals(t.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL)));
}
Also used: Type (org.apache.hadoop.hive.metastore.api.Type), ResourceType (org.apache.hadoop.hive.metastore.api.ResourceType), FunctionType (org.apache.hadoop.hive.metastore.api.FunctionType), PrincipalType (org.apache.hadoop.hive.metastore.api.PrincipalType), Table (org.apache.hadoop.hive.metastore.api.Table), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), Database (org.apache.hadoop.hive.metastore.api.Database), StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor), ArrayList (java.util.ArrayList), MetaException (org.apache.hadoop.hive.metastore.api.MetaException), Test (org.junit.Test)
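
To summarize the rules the test exercises: the metastore admits 'transactional'='true' only when the table is bucketed and its storage descriptor names an ACID-capable format such as ORC. A minimal conforming setup, assuming the same createStorageDescriptor/createTable helpers as the test above (not shown in this excerpt):

// Minimal sketch of a configuration that passes the validation above.
Map<String, String> params = new HashMap<String, String>();
params.put("transactional", "true");
StorageDescriptor sd = createStorageDescriptor(tblName, type.getFields(), params, serdParams);
sd.setNumBuckets(1);
sd.setBucketCols(Arrays.asList("income"));
sd.setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat");
sd.setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat");
Table t = createTable(dbName, tblName, owner, params, null, sd, 0);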

Aggregations

Type (org.apache.hadoop.hive.metastore.api.Type): 12
FunctionType (org.apache.hadoop.hive.metastore.api.FunctionType): 10
PrincipalType (org.apache.hadoop.hive.metastore.api.PrincipalType): 10
ResourceType (org.apache.hadoop.hive.metastore.api.ResourceType): 10
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 8
StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor): 7
Table (org.apache.hadoop.hive.metastore.api.Table): 7
InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException): 6
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 6
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 6
SQLException (java.sql.SQLException): 5
AlreadyExistsException (org.apache.hadoop.hive.metastore.api.AlreadyExistsException): 5
ConfigValSecurityException (org.apache.hadoop.hive.metastore.api.ConfigValSecurityException): 5
Database (org.apache.hadoop.hive.metastore.api.Database): 5
InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException): 5
SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo): 5
UnknownDBException (org.apache.hadoop.hive.metastore.api.UnknownDBException): 5
TException (org.apache.thrift.TException): 5
ArrayList (java.util.ArrayList): 4
HashMap (java.util.HashMap): 4