Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
The class TestHiveMetaStore, method testPartitionFilter:
/**
 * Tests for list partition by filter functionality.
 * @throws Exception
 */
public void testPartitionFilter() throws Exception {
  String dbName = "filterdb";
  String tblName = "filtertbl";
  silentDropDatabase(dbName);
  Database db = new Database();
  db.setName(dbName);
  client.createDatabase(db);
  ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
  cols.add(new FieldSchema("c1", serdeConstants.STRING_TYPE_NAME, ""));
  cols.add(new FieldSchema("c2", serdeConstants.INT_TYPE_NAME, ""));
  ArrayList<FieldSchema> partCols = new ArrayList<FieldSchema>(3);
  partCols.add(new FieldSchema("p1", serdeConstants.STRING_TYPE_NAME, ""));
  partCols.add(new FieldSchema("p2", serdeConstants.STRING_TYPE_NAME, ""));
  partCols.add(new FieldSchema("p3", serdeConstants.INT_TYPE_NAME, ""));
  Table tbl = new Table();
  tbl.setDbName(dbName);
  tbl.setTableName(tblName);
  StorageDescriptor sd = new StorageDescriptor();
  tbl.setSd(sd);
  sd.setCols(cols);
  sd.setCompressed(false);
  sd.setNumBuckets(1);
  sd.setParameters(new HashMap<String, String>());
  sd.setBucketCols(new ArrayList<String>());
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setName(tbl.getTableName());
  sd.getSerdeInfo().setParameters(new HashMap<String, String>());
  sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
  sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
  sd.setInputFormat(HiveInputFormat.class.getName());
  sd.setOutputFormat(HiveOutputFormat.class.getName());
  sd.setSortCols(new ArrayList<Order>());
  tbl.setPartitionKeys(partCols);
  client.createTable(tbl);
  tbl = client.getTable(dbName, tblName);
  add_partition(client, tbl, Lists.newArrayList("p11", "p21", "31"), "part1");
  add_partition(client, tbl, Lists.newArrayList("p11", "p22", "32"), "part2");
  add_partition(client, tbl, Lists.newArrayList("p12", "p21", "31"), "part3");
  add_partition(client, tbl, Lists.newArrayList("p12", "p23", "32"), "part4");
  add_partition(client, tbl, Lists.newArrayList("p13", "p24", "31"), "part5");
  add_partition(client, tbl, Lists.newArrayList("p13", "p25", "-33"), "part6");
  // Test equals operator for strings and integers.
  checkFilter(client, dbName, tblName, "p1 = \"p11\"", 2);
  checkFilter(client, dbName, tblName, "p1 = \"p12\"", 2);
  checkFilter(client, dbName, tblName, "p2 = \"p21\"", 2);
  checkFilter(client, dbName, tblName, "p2 = \"p23\"", 1);
  checkFilter(client, dbName, tblName, "p3 = 31", 3);
  checkFilter(client, dbName, tblName, "p3 = 33", 0);
  checkFilter(client, dbName, tblName, "p3 = -33", 1);
  checkFilter(client, dbName, tblName, "p1 = \"p11\" and p2=\"p22\"", 1);
  checkFilter(client, dbName, tblName, "p1 = \"p11\" or p2=\"p23\"", 3);
  checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4);
  checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4);
  checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4);
  checkFilter(client, dbName, tblName, "p1 = \"p11\" and p3 = 31", 1);
  checkFilter(client, dbName, tblName, "p3 = -33 or p1 = \"p12\"", 3);
  // Test not-equals operator for strings and integers.
  checkFilter(client, dbName, tblName, "p1 != \"p11\"", 4);
  checkFilter(client, dbName, tblName, "p2 != \"p23\"", 5);
  checkFilter(client, dbName, tblName, "p2 != \"p33\"", 6);
  checkFilter(client, dbName, tblName, "p3 != 32", 4);
  checkFilter(client, dbName, tblName, "p3 != 8589934592", 6);
  checkFilter(client, dbName, tblName, "p1 != \"p11\" and p1 != \"p12\"", 2);
  checkFilter(client, dbName, tblName, "p1 != \"p11\" and p2 != \"p22\"", 4);
  checkFilter(client, dbName, tblName, "p1 != \"p11\" or p2 != \"p22\"", 5);
  checkFilter(client, dbName, tblName, "p1 != \"p12\" and p2 != \"p25\"", 3);
  checkFilter(client, dbName, tblName, "p1 != \"p12\" or p2 != \"p25\"", 6);
  checkFilter(client, dbName, tblName, "p3 != -33 or p1 != \"p13\"", 5);
  checkFilter(client, dbName, tblName, "p1 != \"p11\" and p3 = 31", 2);
  checkFilter(client, dbName, tblName, "p3 != 31 and p1 = \"p12\"", 1);
  // Test reverse order.
  checkFilter(client, dbName, tblName, "31 != p3 and p1 = \"p12\"", 1);
  checkFilter(client, dbName, tblName, "\"p23\" = p2", 1);
  // Test more and/or combinations.
  checkFilter(client, dbName, tblName, "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\")", 3);
  checkFilter(client, dbName, tblName, "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\") Or " + "(p1=\"p13\" aNd p2=\"p24\")", 4);
  // Test and/or precedence.
  checkFilter(client, dbName, tblName, "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1);
  checkFilter(client, dbName, tblName, "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2);
  // Test gt/lt/lte/gte/like for strings.
  checkFilter(client, dbName, tblName, "p1 > \"p12\"", 2);
  checkFilter(client, dbName, tblName, "p1 >= \"p12\"", 4);
  checkFilter(client, dbName, tblName, "p1 < \"p12\"", 2);
  checkFilter(client, dbName, tblName, "p1 <= \"p12\"", 4);
  checkFilter(client, dbName, tblName, "p1 like \"p1.*\"", 6);
  checkFilter(client, dbName, tblName, "p2 like \"p.*3\"", 1);
  // Test gt/lt/lte/gte for numbers.
  checkFilter(client, dbName, tblName, "p3 < 0", 1);
  checkFilter(client, dbName, tblName, "p3 >= -33", 6);
  checkFilter(client, dbName, tblName, "p3 > -33", 5);
  checkFilter(client, dbName, tblName, "p3 > 31 and p3 < 32", 0);
  checkFilter(client, dbName, tblName, "p3 > 31 or p3 < 31", 3);
  checkFilter(client, dbName, tblName, "p3 > 30 or p3 < 30", 6);
  checkFilter(client, dbName, tblName, "p3 >= 31 or p3 < -32", 6);
  checkFilter(client, dbName, tblName, "p3 >= 32", 2);
  checkFilter(client, dbName, tblName, "p3 > 32", 0);
  // Test between.
  checkFilter(client, dbName, tblName, "p1 between \"p11\" and \"p12\"", 4);
  checkFilter(client, dbName, tblName, "p1 not between \"p11\" and \"p12\"", 2);
  checkFilter(client, dbName, tblName, "p3 not between 0 and 2", 6);
  checkFilter(client, dbName, tblName, "p3 between 31 and 32", 5);
  checkFilter(client, dbName, tblName, "p3 between 32 and 31", 0);
  checkFilter(client, dbName, tblName, "p3 between -32 and 34 and p3 not between 31 and 32", 0);
  checkFilter(client, dbName, tblName, "p3 between 1 and 3 or p3 not between 1 and 3", 6);
  checkFilter(client, dbName, tblName, "p3 between 31 and 32 and p1 between \"p12\" and \"p14\"", 3);
  // Test for setting the maximum partition count.
  List<Partition> partitions = client.listPartitionsByFilter(dbName, tblName, "p1 >= \"p12\"", (short) 2);
  assertEquals("User specified row limit for partitions", 2, partitions.size());
  // Negative tests.
  Exception me = null;
  try {
    client.listPartitionsByFilter(dbName, tblName, "p3 >= \"p12\"", (short) -1);
  } catch (MetaException e) {
    me = e;
  }
  assertNotNull(me);
  assertTrue("Filter on int partition key", me.getMessage().contains("Filtering is supported only on partition keys of type string"));
  me = null;
  try {
    client.listPartitionsByFilter(dbName, tblName, "c1 >= \"p12\"", (short) -1);
  } catch (MetaException e) {
    me = e;
  }
  assertNotNull(me);
  assertTrue("Filter on invalid key", me.getMessage().contains("<c1> is not a partitioning key for the table"));
  me = null;
  try {
    client.listPartitionsByFilter(dbName, tblName, "c1 >= ", (short) -1);
  } catch (MetaException e) {
    me = e;
  }
  assertNotNull(me);
  assertTrue("Invalid filter string", me.getMessage().contains("Error parsing partition filter"));
  me = null;
  try {
    client.listPartitionsByFilter("invDBName", "invTableName", "p1 = \"p11\"", (short) -1);
  } catch (NoSuchObjectException e) {
    me = e;
  }
  assertNotNull(me);
  assertTrue("NoSuchObject exception", me.getMessage().contains("invDBName.invTableName table not found"));
  client.dropTable(dbName, tblName);
  client.dropDatabase(dbName);
}
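For context, checkFilter and add_partition are private helpers of TestHiveMetaStore that are not reproduced on this page. Below is a minimal sketch of what such helpers typically do, assuming JUnit asserts and the standard HiveMetaStoreClient API; the actual helpers in the Hive source may differ in detail.

// Hypothetical sketch, not the verbatim Hive test code.
private static void checkFilter(HiveMetaStoreClient client, String dbName, String tblName,
    String filter, int expectedCount) throws Exception {
  // List all partitions matching the filter ((short) -1 means no limit) and verify the count.
  List<Partition> partitions =
      client.listPartitionsByFilter(dbName, tblName, filter, (short) -1);
  assertEquals("Partition count for filter: " + filter, expectedCount, partitions.size());
}

private static void add_partition(HiveMetaStoreClient client, Table table,
    List<String> values, String location) throws TException {
  // Build a Partition for the given key values, reusing the table's storage descriptor,
  // place it under the table location, and register it with the metastore.
  Partition part = new Partition();
  part.setDbName(table.getDbName());
  part.setTableName(table.getTableName());
  part.setValues(values);
  part.setParameters(new HashMap<String, String>());
  part.setSd(table.getSd().deepCopy());
  part.getSd().setSerdeInfo(table.getSd().getSerdeInfo());
  part.getSd().setLocation(table.getSd().getLocation() + "/" + location);
  client.add_partition(part);
}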
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
The class TestHiveMetaStore, method createPartitions:
private List<Partition> createPartitions(String dbName, Table tbl, List<List<String>> values) throws Throwable {
  int i = 1;
  List<Partition> partitions = new ArrayList<Partition>();
  for (List<String> vals : values) {
    Partition part = makePartitionObject(dbName, tbl.getTableName(), vals, tbl, "/part" + i);
    i++;
    // check if the partition exists (it shouldn't)
    boolean exceptionThrown = false;
    try {
      Partition p = client.getPartition(dbName, tbl.getTableName(), vals);
    } catch (Exception e) {
      assertEquals("partition should not have existed", NoSuchObjectException.class, e.getClass());
      exceptionThrown = true;
    }
    assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);
    Partition retp = client.add_partition(part);
    assertNotNull("Unable to create partition " + part, retp);
    partitions.add(retp);
  }
  return partitions;
}
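A hypothetical call to this helper for a table partitioned on (ds, hr) might look like the following; the values and the surrounding dbName/tbl setup are illustrative only, not taken from the Hive source.

// Hypothetical usage of createPartitions(); dbName and tbl are assumed to be set up as in the tests above.
List<List<String>> partitionValues = new ArrayList<List<String>>();
partitionValues.add(Lists.newArrayList("2011-07-11", "8"));
partitionValues.add(Lists.newArrayList("2011-07-11", "9"));
partitionValues.add(Lists.newArrayList("2011-07-12", "8"));
List<Partition> created = createPartitions(dbName, tbl, partitionValues);
assertEquals("all partitions should have been created", 3, created.size());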
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
The class TestMetaStoreAuthorization, method testMetaStoreAuthorization:
public void testMetaStoreAuthorization() throws Exception {
  setup();
  MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
  HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
  FileSystem fs = null;
  String dbName = "simpdb";
  Database db1 = null;
  Path p = null;
  try {
    try {
      db1 = client.getDatabase(dbName);
      client.dropDatabase(dbName);
    } catch (NoSuchObjectException noe) {
      // The database does not exist yet; nothing to drop.
    }
    if (db1 != null) {
      p = new Path(db1.getLocationUri());
      fs = p.getFileSystem(conf);
      fs.delete(p, true);
    }
    db1 = new Database();
    db1.setName(dbName);
    client.createDatabase(db1);
    Database db = client.getDatabase(dbName);
    assertTrue("Databases do not match", db1.getName().equals(db.getName()));
    p = new Path(db.getLocationUri());
    if (fs == null) {
      fs = p.getFileSystem(conf);
    }
    fs.setPermission(p.getParent(), FsPermission.createImmutable((short) 0555));
    try {
      client.dropDatabase(dbName);
      throw new Exception("Expected dropDatabase call to fail");
    } catch (MetaException me) {
      // Expected: the parent directory is read-only, so the drop must fail.
    }
    fs.setPermission(p.getParent(), FsPermission.createImmutable((short) 0755));
    client.dropDatabase(dbName);
  } finally {
    if (p != null) {
      fs.delete(p, true);
    }
  }
}
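The setup() call at the top of this test is not shown here. Below is a minimal sketch of the kind of configuration such a setup typically performs before starting the metastore; it is an assumption, not the actual TestMetaStoreAuthorization code.

// Hypothetical sketch of setup(); "conf" and "port" are the fields used by the test above.
private void setup() throws Exception {
  port = MetaStoreUtils.findFreePort();
  conf = new HiveConf(TestMetaStoreAuthorization.class);
  // Point clients at the Thrift metastore that startMetaStore() will launch on this port.
  conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
  // Storage-based permission checks make the metastore verify filesystem access; with the
  // warehouse parent directory made read-only, dropDatabase is then expected to fail.
  conf.setBoolVar(HiveConf.ConfVars.METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS, true);
}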
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
The class TestHiveMetaStore, method testComplexTypeApi:
// TODO:pc need to enhance this with complex fields and getType_all function
public void testComplexTypeApi() throws Exception {
  try {
    client.dropType("Person");
    Type typ1 = new Type();
    typ1.setName("Person");
    typ1.setFields(new ArrayList<FieldSchema>(2));
    typ1.getFields().add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
    typ1.getFields().add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
    boolean ret = client.createType(typ1);
    assertTrue("Unable to create type", ret);
    Type typ1_2 = client.getType("Person");
    assertNotNull("type Person not found", typ1_2);
    assertEquals(typ1.getName(), typ1_2.getName());
    assertEquals(typ1.getFields().size(), typ1_2.getFields().size());
    assertEquals(typ1.getFields().get(0), typ1_2.getFields().get(0));
    assertEquals(typ1.getFields().get(1), typ1_2.getFields().get(1));
    client.dropType("Family");
    Type fam = new Type();
    fam.setName("Family");
    fam.setFields(new ArrayList<FieldSchema>(2));
    fam.getFields().add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
    fam.getFields().add(new FieldSchema("members", MetaStoreUtils.getListType(typ1.getName()), ""));
    ret = client.createType(fam);
    assertTrue("Unable to create type " + fam.getName(), ret);
    Type fam2 = client.getType("Family");
    assertNotNull("type Family not found", fam2);
    assertEquals(fam.getName(), fam2.getName());
    assertEquals(fam.getFields().size(), fam2.getFields().size());
    assertEquals(fam.getFields().get(0), fam2.getFields().get(0));
    assertEquals(fam.getFields().get(1), fam2.getFields().get(1));
    ret = client.dropType("Family");
    assertTrue("unable to drop type Family", ret);
    ret = client.dropType("Person");
    assertTrue("unable to drop type Person", ret);
    boolean exceptionThrown = false;
    try {
      client.getType("Person");
    } catch (NoSuchObjectException e) {
      exceptionThrown = true;
    }
    assertTrue("Expected NoSuchObjectException", exceptionThrown);
  } catch (Exception e) {
    System.err.println(StringUtils.stringifyException(e));
    System.err.println("testComplexTypeApi() failed.");
    throw e;
  }
}
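The NoSuchObjectException pattern used at the end of this test can be factored into a small existence check. A hypothetical helper (not part of the Hive test code) might look like:

// Hypothetical helper: returns true if the metastore knows the given type,
// false if getType() throws NoSuchObjectException.
private boolean typeExists(HiveMetaStoreClient client, String typeName) throws TException {
  try {
    client.getType(typeName);
    return true;
  } catch (NoSuchObjectException e) {
    return false;
  }
}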
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
The class TestHiveMetaStore, method testRenamePartition:
public void testRenamePartition() throws Throwable {
  try {
    String dbName = "compdb1";
    String tblName = "comptbl1";
    List<String> vals = new ArrayList<String>(2);
    vals.add("2011-07-11");
    vals.add("8");
    String part_path = "/ds=2011-07-11/hr=8";
    List<String> tmp_vals = new ArrayList<String>(2);
    tmp_vals.add("tmp_2011-07-11");
    tmp_vals.add("-8");
    String part2_path = "/ds=tmp_2011-07-11/hr=-8";
    client.dropTable(dbName, tblName);
    silentDropDatabase(dbName);
    Database db = new Database();
    db.setName(dbName);
    db.setDescription("Rename Partition Test database");
    client.createDatabase(db);
    ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
    cols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
    cols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
    Table tbl = new Table();
    tbl.setDbName(dbName);
    tbl.setTableName(tblName);
    StorageDescriptor sd = new StorageDescriptor();
    tbl.setSd(sd);
    sd.setCols(cols);
    sd.setCompressed(false);
    sd.setNumBuckets(1);
    sd.setParameters(new HashMap<String, String>());
    sd.getParameters().put("test_param_1", "Use this for comments etc");
    sd.setBucketCols(new ArrayList<String>(2));
    sd.getBucketCols().add("name");
    sd.setSerdeInfo(new SerDeInfo());
    sd.getSerdeInfo().setName(tbl.getTableName());
    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
    sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
    sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
    sd.setInputFormat(HiveInputFormat.class.getName());
    sd.setOutputFormat(HiveOutputFormat.class.getName());
    sd.setSortCols(new ArrayList<Order>());
    tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
    tbl.getPartitionKeys().add(new FieldSchema("ds", serdeConstants.STRING_TYPE_NAME, ""));
    tbl.getPartitionKeys().add(new FieldSchema("hr", serdeConstants.INT_TYPE_NAME, ""));
    client.createTable(tbl);
    if (isThriftClient) {
      // the createTable() above does not update the location in the 'tbl'
      // object when the client is a thrift client and the code below relies
      // on the location being present in the 'tbl' object - so get the table
      // from the metastore
      tbl = client.getTable(dbName, tblName);
    }
    Partition part = new Partition();
    part.setDbName(dbName);
    part.setTableName(tblName);
    part.setValues(vals);
    part.setParameters(new HashMap<String, String>());
    part.setSd(tbl.getSd().deepCopy());
    part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
    part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
    part.getParameters().put("retention", "10");
    part.getSd().setNumBuckets(12);
    part.getSd().getSerdeInfo().getParameters().put("abc", "1");
    client.add_partition(part);
    part.setValues(tmp_vals);
    client.renamePartition(dbName, tblName, vals, part);
    boolean exceptionThrown = false;
    try {
      Partition p = client.getPartition(dbName, tblName, vals);
    } catch (Exception e) {
      assertEquals("partition should not have existed", NoSuchObjectException.class, e.getClass());
      exceptionThrown = true;
    }
    assertTrue("Expected NoSuchObjectException", exceptionThrown);
    Partition part3 = client.getPartition(dbName, tblName, tmp_vals);
    assertEquals("couldn't rename partition", part3.getParameters().get("retention"), "10");
    assertEquals("couldn't rename partition", part3.getSd().getSerdeInfo().getParameters().get("abc"), "1");
    assertEquals("couldn't rename partition", part3.getSd().getNumBuckets(), 12);
    assertEquals("new partition sd matches", part3.getSd().getLocation(), tbl.getSd().getLocation() + part2_path);
    part.setValues(vals);
    client.renamePartition(dbName, tblName, tmp_vals, part);
    exceptionThrown = false;
    try {
      Partition p = client.getPartition(dbName, tblName, tmp_vals);
    } catch (Exception e) {
      assertEquals("partition should not have existed", NoSuchObjectException.class, e.getClass());
      exceptionThrown = true;
    }
    assertTrue("Expected NoSuchObjectException", exceptionThrown);
    part3 = client.getPartition(dbName, tblName, vals);
    assertEquals("couldn't rename partition", part3.getParameters().get("retention"), "10");
    assertEquals("couldn't rename partition", part3.getSd().getSerdeInfo().getParameters().get("abc"), "1");
    assertEquals("couldn't rename partition", part3.getSd().getNumBuckets(), 12);
    assertEquals("new partition sd matches", part3.getSd().getLocation(), tbl.getSd().getLocation() + part_path);
    client.dropTable(dbName, tblName);
    client.dropDatabase(dbName);
  } catch (Exception e) {
    System.err.println(StringUtils.stringifyException(e));
    System.err.println("testRenamePartition() failed.");
    throw e;
  }
}
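The two try/catch blocks above that expect NoSuchObjectException from getPartition() could be factored into a helper. A hypothetical sketch, not present in the Hive source:

// Hypothetical helper: asserts that getPartition() fails with NoSuchObjectException,
// i.e. that no partition with the given values exists anymore.
private void assertPartitionAbsent(String dbName, String tblName, List<String> values) throws TException {
  try {
    client.getPartition(dbName, tblName, values);
    fail("partition " + values + " should not exist");
  } catch (NoSuchObjectException expected) {
    // expected: the partition has been renamed away (or was never created)
  }
}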