Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache.
From the class TestHiveMetaStore, method createTableForTestFilter.
private Table createTableForTestFilter(String dbName, String tableName, String owner,
    int lastAccessTime, boolean hasSecondParam) throws Exception {
  Map<String, String> tableParams = new HashMap<>();
  tableParams.put("test_param_1", "hi");
  if (hasSecondParam) {
    tableParams.put("test_param_2", "50");
  }
  Table tbl = new TableBuilder()
      .setDbName(dbName)
      .setTableName(tableName)
      .addCol("name", ColumnType.STRING_TYPE_NAME)
      .addCol("income", ColumnType.INT_TYPE_NAME)
      .addPartCol("ds", ColumnType.STRING_TYPE_NAME)
      .addPartCol("hr", ColumnType.INT_TYPE_NAME)
      .setTableParams(tableParams)
      .setOwner(owner)
      .setLastAccessTime(lastAccessTime)
      .create(client, conf);
  if (isThriftClient) {
    // the createTable() above does not update the location in the 'tbl'
    // object when the client is a thrift client and the code below relies
    // on the location being present in the 'tbl' object - so get the table
    // from the metastore
    tbl = client.getTable(dbName, tableName);
  }
  return tbl;
}
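In the filter tests, tables created by this helper are typically queried back through listTableNamesByFilter using the metastore filter language. A minimal sketch of that pairing, assuming the standard hive_metastoreConstants filter fields; the owner value and filter strings here are illustrative, not the test's verbatim body:

// Illustrative sketch only: query tables created by createTableForTestFilter.
// "testOwner1" is an assumed owner value, not taken from this excerpt.
String ownerFilter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants
    .HIVE_FILTER_FIELD_OWNER + " = \"testOwner1\"";
List<String> byOwner = client.listTableNamesByFilter(dbName, ownerFilter, (short) -1);

// Filter on a table parameter set above; the parameter key is appended to the constant.
String paramFilter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants
    .HIVE_FILTER_FIELD_PARAMS + "test_param_2 = \"50\"";
List<String> byParam = client.listTableNamesByFilter(dbName, paramFilter, (short) -1);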
Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache.
From the class TestHiveMetaStore, method testSimpleTable.
@Test
public void testSimpleTable() throws Exception {
  try {
    String dbName = "simpdb";
    String tblName = "simptbl";
    String tblName2 = "simptbl2";
    String typeName = "Person";
    client.dropTable(dbName, tblName);
    silentDropDatabase(dbName);
    new DatabaseBuilder().setName(dbName).create(client, conf);
    client.dropType(typeName);
    Type typ1 = new Type();
    typ1.setName(typeName);
    typ1.setFields(new ArrayList<>(2));
    typ1.getFields().add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
    typ1.getFields().add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
    client.createType(typ1);
    Table tbl = new TableBuilder()
        .setDbName(dbName)
        .setTableName(tblName)
        .setCols(typ1.getFields())
        .setNumBuckets(1)
        .addBucketCol("name")
        .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
        .create(client, conf);
    if (isThriftClient) {
      // the createTable() above does not update the location in the 'tbl'
      // object when the client is a thrift client and the code below relies
      // on the location being present in the 'tbl' object - so get the table
      // from the metastore
      tbl = client.getTable(dbName, tblName);
    }
    Table tbl2 = client.getTable(dbName, tblName);
    assertNotNull(tbl2);
    Assert.assertTrue(tbl2.isSetId());
    assertEquals(tbl2.getDbName(), dbName);
    assertEquals(tbl2.getTableName(), tblName);
    assertEquals(tbl2.getSd().getCols().size(), typ1.getFields().size());
    assertEquals(tbl2.getSd().isCompressed(), false);
    assertEquals(tbl2.getSd().getNumBuckets(), 1);
    assertEquals(tbl2.getSd().getLocation(), tbl.getSd().getLocation());
    assertNotNull(tbl2.getSd().getSerdeInfo());
    tbl.getSd().getSerdeInfo().setParameters(new HashMap<>());
    tbl.getSd().getSerdeInfo().getParameters().put(ColumnType.SERIALIZATION_FORMAT, "1");
    tbl2.setTableName(tblName2);
    tbl2.setParameters(new HashMap<>());
    tbl2.getParameters().put("EXTERNAL", "TRUE");
    tbl2.getSd().setLocation(tbl.getSd().getLocation() + "-2");
    List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
    assertNotNull(fieldSchemas);
    assertEquals(fieldSchemas.size(), tbl.getSd().getCols().size());
    for (FieldSchema fs : tbl.getSd().getCols()) {
      assertTrue(fieldSchemas.contains(fs));
    }
    List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
    assertNotNull(fieldSchemasFull);
    assertEquals(fieldSchemasFull.size(),
        tbl.getSd().getCols().size() + tbl.getPartitionKeys().size());
    for (FieldSchema fs : tbl.getSd().getCols()) {
      assertTrue(fieldSchemasFull.contains(fs));
    }
    for (FieldSchema fs : tbl.getPartitionKeys()) {
      assertTrue(fieldSchemasFull.contains(fs));
    }
    tbl2.unsetId();
    client.createTable(tbl2);
    if (isThriftClient) {
      tbl2 = client.getTable(tbl2.getDbName(), tbl2.getTableName());
    }
    Table tbl3 = client.getTable(dbName, tblName2);
    assertNotNull(tbl3);
    assertEquals(tbl3.getDbName(), dbName);
    assertEquals(tbl3.getTableName(), tblName2);
    assertEquals(tbl3.getSd().getCols().size(), typ1.getFields().size());
    assertEquals(tbl3.getSd().isCompressed(), false);
    assertEquals(tbl3.getSd().getNumBuckets(), 1);
    assertEquals(tbl3.getSd().getLocation(), tbl2.getSd().getLocation());
    assertEquals(tbl3.getParameters(), tbl2.getParameters());
    fieldSchemas = client.getFields(dbName, tblName2);
    assertNotNull(fieldSchemas);
    assertEquals(fieldSchemas.size(), tbl2.getSd().getCols().size());
    for (FieldSchema fs : tbl2.getSd().getCols()) {
      assertTrue(fieldSchemas.contains(fs));
    }
    fieldSchemasFull = client.getSchema(dbName, tblName2);
    assertNotNull(fieldSchemasFull);
    assertEquals(fieldSchemasFull.size(),
        tbl2.getSd().getCols().size() + tbl2.getPartitionKeys().size());
    for (FieldSchema fs : tbl2.getSd().getCols()) {
      assertTrue(fieldSchemasFull.contains(fs));
    }
    for (FieldSchema fs : tbl2.getPartitionKeys()) {
      assertTrue(fieldSchemasFull.contains(fs));
    }
    assertEquals("Use this for comments etc", tbl2.getSd().getParameters().get("test_param_1"));
    assertEquals("name", tbl2.getSd().getBucketCols().get(0));
    assertTrue("Partition key list is not empty",
        (tbl2.getPartitionKeys() == null) || (tbl2.getPartitionKeys().size() == 0));
    // test get_table_objects_by_name functionality
    ArrayList<String> tableNames = new ArrayList<>();
    tableNames.add(tblName2);
    tableNames.add(tblName);
    tableNames.add(tblName2);
    List<Table> foundTables = client.getTableObjectsByName(dbName, tableNames);
    assertEquals(2, foundTables.size());
    for (Table t : foundTables) {
      if (t.getTableName().equals(tblName2)) {
        assertEquals(t.getSd().getLocation(), tbl2.getSd().getLocation());
      } else {
        assertEquals(t.getTableName(), tblName);
        assertEquals(t.getSd().getLocation(), tbl.getSd().getLocation());
      }
      assertEquals(t.getSd().getCols().size(), typ1.getFields().size());
      assertEquals(t.getSd().isCompressed(), false);
      assertEquals(foundTables.get(0).getSd().getNumBuckets(), 1);
      assertNotNull(t.getSd().getSerdeInfo());
      assertEquals(t.getDbName(), dbName);
    }
    tableNames.add(1, "table_that_doesnt_exist");
    foundTables = client.getTableObjectsByName(dbName, tableNames);
    assertEquals(foundTables.size(), 2);
    InvalidOperationException ioe = null;
    try {
      foundTables = client.getTableObjectsByName(dbName, null);
    } catch (InvalidOperationException e) {
      ioe = e;
    }
    assertNotNull(ioe);
    assertTrue("Table not found", ioe.getMessage().contains("null tables"));
    UnknownDBException udbe = null;
    try {
      foundTables = client.getTableObjectsByName("db_that_doesnt_exist", tableNames);
    } catch (UnknownDBException e) {
      udbe = e;
    }
    assertNotNull(udbe);
    assertTrue("DB not found", udbe.getMessage().contains("not find database hive.db_that_doesnt_exist"));
    udbe = null;
    try {
      foundTables = client.getTableObjectsByName("", tableNames);
    } catch (UnknownDBException e) {
      udbe = e;
    }
    assertNotNull(udbe);
    assertTrue("DB not found", udbe.getMessage().contains("is null or empty"));
    FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), conf);
    client.dropTable(dbName, tblName);
    assertFalse(fs.exists(new Path(tbl.getSd().getLocation())));
    client.dropTable(dbName, tblName2);
    assertTrue(fs.exists(new Path(tbl2.getSd().getLocation())));
    client.dropType(typeName);
    client.dropDatabase(dbName);
  } catch (Exception e) {
    System.err.println(StringUtils.stringifyException(e));
    System.err.println("testSimpleTable() failed.");
    throw e;
  }
}
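Stripped of the assertions, the TableBuilder pattern this test exercises reduces to the short sketch below; the database and table names are placeholders, and every builder call is one already shown in the test above:

// Minimal TableBuilder usage distilled from the test (placeholder names).
Table t = new TableBuilder()
    .setDbName("somedb")
    .setTableName("sometbl")
    .addCol("name", ColumnType.STRING_TYPE_NAME)
    .setNumBuckets(1)
    .addBucketCol("name")
    .create(client, conf);  // creates the table in the metastore and returns it
// With a Thrift client, re-fetch to pick up the server-assigned location:
t = client.getTable("somedb", "sometbl");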
Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache.
From the class TestHiveMetaStore, method partitionTester.
private static void partitionTester(HiveMetaStoreClient client, Configuration conf) throws Exception {
  try {
    String dbName = "compdb";
    String tblName = "comptbl";
    String typeName = "Person";
    List<String> vals = makeVals("2008-07-01 14:13:12", "14");
    List<String> vals2 = makeVals("2008-07-01 14:13:12", "15");
    List<String> vals3 = makeVals("2008-07-02 14:13:12", "15");
    List<String> vals4 = makeVals("2008-07-03 14:13:12", "151");
    client.dropTable(dbName, tblName);
    silentDropDatabase(dbName);
    new DatabaseBuilder().setName(dbName).create(client, conf);
    Database db = client.getDatabase(dbName);
    Path dbPath = new Path(db.getLocationUri());
    FileSystem fs = FileSystem.get(dbPath.toUri(), conf);
    client.dropType(typeName);
    Type typ1 = new Type();
    typ1.setName(typeName);
    typ1.setFields(new ArrayList<>(2));
    typ1.getFields().add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
    typ1.getFields().add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
    client.createType(typ1);
    List<String> skewedColValue = Collections.singletonList("1");
    Table tbl = new TableBuilder()
        .setDbName(dbName)
        .setTableName(tblName)
        .setCols(typ1.getFields())
        .setNumBuckets(1)
        .addBucketCol("name")
        .addTableParam("test_param_1", "Use this for comments etc")
        .addSerdeParam(ColumnType.SERIALIZATION_FORMAT, "1")
        .addSkewedColName("name")
        .setSkewedColValues(Collections.singletonList(skewedColValue))
        .setSkewedColValueLocationMaps(Collections.singletonMap(skewedColValue, "location1"))
        .addPartCol("ds", ColumnType.STRING_TYPE_NAME)
        .addPartCol("hr", ColumnType.STRING_TYPE_NAME)
        .create(client, conf);
    if (isThriftClient) {
      // the createTable() above does not update the location in the 'tbl'
      // object when the client is a thrift client and the code below relies
      // on the location being present in the 'tbl' object - so get the table
      // from the metastore
      tbl = client.getTable(dbName, tblName);
    }
    Assert.assertTrue(tbl.isSetId());
    tbl.unsetId();
    Partition part = makePartitionObject(dbName, tblName, vals, tbl, "/part1");
    Partition part2 = makePartitionObject(dbName, tblName, vals2, tbl, "/part2");
    Partition part3 = makePartitionObject(dbName, tblName, vals3, tbl, "/part3");
    Partition part4 = makePartitionObject(dbName, tblName, vals4, tbl, "/part4");
    // check if the partition exists (it shouldn't)
    boolean exceptionThrown = false;
    try {
      Partition p = client.getPartition(dbName, tblName, vals);
    } catch (Exception e) {
      assertEquals("partition should not have existed", NoSuchObjectException.class, e.getClass());
      exceptionThrown = true;
    }
    assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);
    Partition retp = client.add_partition(part);
    assertNotNull("Unable to create partition " + part, retp);
    Partition retp2 = client.add_partition(part2);
    assertNotNull("Unable to create partition " + part2, retp2);
    Partition retp3 = client.add_partition(part3);
    assertNotNull("Unable to create partition " + part3, retp3);
    Partition retp4 = client.add_partition(part4);
    assertNotNull("Unable to create partition " + part4, retp4);
    Partition part_get = client.getPartition(dbName, tblName, part.getValues());
    // since we are using thrift, 'part' will not have the create time and
    // last DDL time set since it does not get updated in the add_partition()
    // call - likewise part2 and part3 - set it correctly so that equals check
    // doesn't fail
    adjust(client, part, dbName, tblName, isThriftClient);
    adjust(client, part2, dbName, tblName, isThriftClient);
    adjust(client, part3, dbName, tblName, isThriftClient);
    assertTrue("Partitions are not same", part.equals(part_get));
    // check null cols schemas for a partition
    List<String> vals6 = makeVals("2016-02-22 00:00:00", "16");
    Partition part6 = makePartitionObject(dbName, tblName, vals6, tbl, "/part5");
    part6.getSd().setCols(null);
LOG.info("Creating partition will null field schema");
    client.add_partition(part6);
    LOG.info("Listing all partitions for table " + dbName + "." + tblName);
    final List<Partition> partitions = client.listPartitions(dbName, tblName, (short) -1);
    boolean foundPart = false;
    for (Partition p : partitions) {
      if (p.getValues().equals(vals6)) {
        assertNull(p.getSd().getCols());
        LOG.info("Found partition " + p + " having null field schema");
        foundPart = true;
      }
    }
    assertTrue(foundPart);
    String partName = "ds=" + FileUtils.escapePathName("2008-07-01 14:13:12") + "/hr=14";
    String part2Name = "ds=" + FileUtils.escapePathName("2008-07-01 14:13:12") + "/hr=15";
    String part3Name = "ds=" + FileUtils.escapePathName("2008-07-02 14:13:12") + "/hr=15";
    String part4Name = "ds=" + FileUtils.escapePathName("2008-07-03 14:13:12") + "/hr=151";
    part_get = client.getPartition(dbName, tblName, partName);
    assertTrue("Partitions are not the same", part.equals(part_get));
    // Test partition listing with a partial spec - ds is specified but hr is not
    List<String> partialVals = new ArrayList<>();
    partialVals.add(vals.get(0));
    Set<Partition> parts = new HashSet<>();
    parts.add(part);
    parts.add(part2);
    List<Partition> partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
    assertTrue("Should have returned 2 partitions", partial.size() == 2);
    assertTrue("Not all parts returned", partial.containsAll(parts));
    Set<String> partNames = new HashSet<>();
    partNames.add(partName);
    partNames.add(part2Name);
    List<String> partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
    assertTrue("Should have returned 2 partition names", partialNames.size() == 2);
    assertTrue("Not all part names returned", partialNames.containsAll(partNames));
    partNames.add(part3Name);
    partNames.add(part4Name);
    partialVals.clear();
    partialVals.add("");
    partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
    assertTrue("Should have returned 5 partition names", partialNames.size() == 5);
    assertTrue("Not all part names returned", partialNames.containsAll(partNames));
    // Test partition listing with a partial spec - hr is specified but ds is not
    parts.clear();
    parts.add(part2);
    parts.add(part3);
    partialVals.clear();
    partialVals.add("");
    partialVals.add(vals2.get(1));
    partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
    assertEquals("Should have returned 2 partitions", 2, partial.size());
    assertTrue("Not all parts returned", partial.containsAll(parts));
    partNames.clear();
    partNames.add(part2Name);
    partNames.add(part3Name);
    partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
    assertEquals("Should have returned 2 partition names", 2, partialNames.size());
    assertTrue("Not all part names returned", partialNames.containsAll(partNames));
    // Verify escaped partition names don't return partitions
    exceptionThrown = false;
    try {
      String badPartName = "ds=2008-07-01 14%3A13%3A12/hrs=14";
      client.getPartition(dbName, tblName, badPartName);
    } catch (NoSuchObjectException e) {
      exceptionThrown = true;
    }
    assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);
    Path partPath = new Path(part.getSd().getLocation());
    assertTrue(fs.exists(partPath));
    client.dropPartition(dbName, tblName, part.getValues(), true);
    assertFalse(fs.exists(partPath));
    // Test append_partition_by_name
    client.appendPartition(dbName, tblName, partName);
    Partition part5 = client.getPartition(dbName, tblName, part.getValues());
    assertTrue("Append partition by name failed", part5.getValues().equals(vals));
    Path part5Path = new Path(part5.getSd().getLocation());
    assertTrue(fs.exists(part5Path));
    // Test drop_partition_by_name
    assertTrue("Drop partition by name failed", client.dropPartition(dbName, tblName, partName, true));
    assertFalse(fs.exists(part5Path));
    // add the partition again so that drop table with a partition can be
    // tested
    retp = client.add_partition(part);
    assertNotNull("Unable to create partition " + part, retp);
    // test add_partitions
    List<String> mvals1 = makeVals("2008-07-04 14:13:12", "14641");
    List<String> mvals2 = makeVals("2008-07-04 14:13:12", "14642");
    List<String> mvals3 = makeVals("2008-07-04 14:13:12", "14643");
    // mvals4 duplicates mvals3 ("14643"), to trigger the duplicate-keyvals failure below
    List<String> mvals4 = makeVals("2008-07-04 14:13:12", "14643");
    List<String> mvals5 = makeVals("2008-07-04 14:13:12", "14645");
    Exception savedException;
    // add_partitions(empty list) : ok, normal operation
    client.add_partitions(new ArrayList<>());
    // add_partitions(1,2,3) : ok, normal operation
    Partition mpart1 = makePartitionObject(dbName, tblName, mvals1, tbl, "/mpart1");
    Partition mpart2 = makePartitionObject(dbName, tblName, mvals2, tbl, "/mpart2");
    Partition mpart3 = makePartitionObject(dbName, tblName, mvals3, tbl, "/mpart3");
    client.add_partitions(Arrays.asList(mpart1, mpart2, mpart3));
    // do DDL time munging if thrift mode
    adjust(client, mpart1, dbName, tblName, isThriftClient);
    adjust(client, mpart2, dbName, tblName, isThriftClient);
    adjust(client, mpart3, dbName, tblName, isThriftClient);
    verifyPartitionsPublished(client, dbName, tblName, Arrays.asList(mvals1.get(0)),
        Arrays.asList(mpart1, mpart2, mpart3));
    Partition mpart4 = makePartitionObject(dbName, tblName, mvals4, tbl, "/mpart4");
    Partition mpart5 = makePartitionObject(dbName, tblName, mvals5, tbl, "/mpart5");
    // create dir for /mpart5
    Path mp5Path = new Path(mpart5.getSd().getLocation());
    warehouse.mkdirs(mp5Path);
    assertTrue(fs.exists(mp5Path));
    // add_partitions(5,4) : err = duplicate keyvals on mpart4
    savedException = null;
    try {
      client.add_partitions(Arrays.asList(mpart5, mpart4));
    } catch (Exception e) {
      savedException = e;
    } finally {
      assertNotNull(savedException);
    }
    // check that /mpart4 does not exist, but /mpart5 still does.
    assertTrue(fs.exists(mp5Path));
    assertFalse(fs.exists(new Path(mpart4.getSd().getLocation())));
    // add_partitions(5) : ok
    client.add_partitions(Arrays.asList(mpart5));
    // do DDL time munging if thrift mode
    adjust(client, mpart5, dbName, tblName, isThriftClient);
    verifyPartitionsPublished(client, dbName, tblName, Arrays.asList(mvals1.get(0)),
        Arrays.asList(mpart1, mpart2, mpart3, mpart5));
    // end add_partitions tests
    client.dropTable(dbName, tblName);
    client.dropType(typeName);
    // recreate table as external, drop partition and it should
    // still exist
    tbl.setParameters(new HashMap<>());
    tbl.getParameters().put("EXTERNAL", "TRUE");
    client.createTable(tbl);
    retp = client.add_partition(part);
    assertTrue(fs.exists(partPath));
    client.dropPartition(dbName, tblName, part.getValues(), true);
    assertTrue(fs.exists(partPath));
    for (String tableName : client.getTables(dbName, "*")) {
      client.dropTable(dbName, tableName);
    }
    client.dropDatabase(dbName);
  } catch (Exception e) {
    System.err.println(StringUtils.stringifyException(e));
    System.err.println("testPartition() failed.");
    throw e;
  }
}
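The test leans on two small helpers, makeVals and makePartitionObject, that are not shown in this excerpt. Plausible sketches follow, assuming the PartitionBuilder from the same client.builder package; the actual helpers in TestHiveMetaStore may differ in detail:

// Assumed shapes of the unshown helpers, not the verbatim source.
private static List<String> makeVals(String ds, String hr) {
  List<String> vals = new ArrayList<>(2);
  vals.add(ds);  // value for partition column 'ds'
  vals.add(hr);  // value for partition column 'hr'
  return vals;
}

private static Partition makePartitionObject(String dbName, String tblName,
    List<String> ptnVals, Table tbl, String ptnLocationSuffix) throws MetaException {
  return new PartitionBuilder()
      .inTable(tbl)  // copies db/table name and storage descriptor from the table
      .setValues(ptnVals)
      .setLocation(tbl.getSd().getLocation() + ptnLocationSuffix)
      .build(conf);  // builds the Partition object without adding it to the metastore
}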
Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache.
From the class TestHiveMetaStore, method testPartitionFilterLike.
/**
 * Test "like" filtering on a table with a single string partition column.
 */
@Test
public void testPartitionFilterLike() throws Exception {
  String dbName = "filterdb";
  String tblName = "filtertbl";
  List<String> vals = new ArrayList<>(1);
  vals.add("abc");
  List<String> vals2 = new ArrayList<>(1);
  vals2.add("d_\\\\%ae");
  List<String> vals3 = new ArrayList<>(1);
  vals3.add("af%");
  silentDropDatabase(dbName);
  new DatabaseBuilder().setName(dbName).create(client, conf);
  Table tbl = new TableBuilder()
      .setDbName(dbName)
      .setTableName(tblName)
      .addCol("c1", ColumnType.STRING_TYPE_NAME)
      .addCol("c2", ColumnType.INT_TYPE_NAME)
      .addPartCol("p1", ColumnType.STRING_TYPE_NAME)
      .create(client, conf);
  tbl = client.getTable(dbName, tblName);
  add_partition(client, tbl, vals, "part1");
  add_partition(client, tbl, vals2, "part2");
  add_partition(client, tbl, vals3, "part3");
  checkFilter(client, dbName, tblName, "p1 like \"a%\"", 2);
  checkFilter(client, dbName, tblName, "p1 like \"%a%\"", 3);
  checkFilter(client, dbName, tblName, "p1 like \"%a\"", 0);
  checkFilter(client, dbName, tblName, "p1 like \"a%c\"", 1);
  checkFilter(client, dbName, tblName, "p1 like \"%a%c%\"", 1);
  checkFilter(client, dbName, tblName, "p1 like \"%_b%\"", 1);
  checkFilter(client, dbName, tblName, "p1 like \"%b_\"", 1);
  checkFilter(client, dbName, tblName, "p1 like \"%c\"", 1);
  checkFilter(client, dbName, tblName, "p1 like \"%c%\"", 1);
  checkFilter(client, dbName, tblName, "p1 like \"c%\"", 0);
  checkFilter(client, dbName, tblName, "p1 like \"%\\_%\"", 1);
  checkFilter(client, dbName, tblName, "p1 like \"%\\%%\"", 2);
  checkFilter(client, dbName, tblName, "p1 like \"abc\"", 1);
  checkFilter(client, dbName, tblName, "p1 like \"%\"", 3);
  checkFilter(client, dbName, tblName, "p1 like \"_\"", 0);
  checkFilter(client, dbName, tblName, "p1 like \"___\"", 2);
  checkFilter(client, dbName, tblName, "p1 like \"%%%\"", 3);
  checkFilter(client, dbName, tblName, "p1 like \"%\\\\\\\\%\"", 1);
  client.dropTable(dbName, tblName);
  client.dropDatabase(dbName);
}
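checkFilter and add_partition are helpers defined elsewhere in TestHiveMetaStore. The sketches below show what they plausibly do - a listPartitionsByFilter call checked against an expected count, and a PartitionBuilder-backed insert; both bodies are assumed shapes, not the verbatim source:

// Assumed shapes of the helpers used above, not the verbatim source.
private void checkFilter(HiveMetaStoreClient client, String dbName, String tblName,
    String filter, int expectedCount) throws TException {
  List<Partition> partitions =
      client.listPartitionsByFilter(dbName, tblName, filter, (short) -1);
  assertEquals("Partition count for filter: " + filter, expectedCount, partitions.size());
}

private void add_partition(HiveMetaStoreClient client, Table table,
    List<String> vals, String location) throws TException {
  new PartitionBuilder()
      .inTable(table)
      .setValues(vals)
      .setLocation(table.getSd().getLocation() + "/" + location)  // assumed path join
      .addToTable(client, conf);  // builds the Partition and adds it via the client
}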
Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache.
From the class TestGetPartitionsUsingProjectionAndFilterSpecs, method createTestTables.
private void createTestTables() throws TException {
  if (client.tableExists(dbName, tblName)) {
    LOG.info("Table already exists. Dropping it and then recreating");
    client.dropTable(dbName, tblName);
  }
  new TableBuilder()
      .setTableName(tblName)
      .setDbName(dbName)
      .setCols(Arrays.asList(
          new FieldSchema("col1", "string", "c1 comment"),
          new FieldSchema("col2", "int", "c2 comment")))
      .setPartCols(Arrays.asList(
          new FieldSchema("state", "string", "state comment"),
          new FieldSchema("city", "string", "city comment")))
      .setTableParams(new HashMap<String, String>(2) {
        {
          put("tableparam1", "tableval1");
          put("tableparam2", "tableval2");
        }
      })
      .setBucketCols(Collections.singletonList("col1"))
      .addSortCol("col2", 1)
      .addSerdeParam(SERIALIZATION_FORMAT, "1")
      .setSerdeName(tblName)
      .setSerdeLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")
      .setInputFormat("org.apache.hadoop.hive.ql.io.HiveInputFormat")
      .setOutputFormat("org.apache.hadoop.hive.ql.io.HiveOutputFormat")
      .create(client, conf);
  Table table = client.getTable(dbName, tblName);
  Assert.assertTrue("Table " + dbName + "." + tblName + " does not exist",
      client.tableExists(dbName, tblName));
  List<Partition> partitions = new ArrayList<>();
  partitions.add(createPartition(Arrays.asList("CA", "SanFrancisco"), table));
  partitions.add(createPartition(Arrays.asList("CA", "PaloAlto"), table));
  partitions.add(createPartition(Arrays.asList("WA", "Seattle"), table));
  partitions.add(createPartition(Arrays.asList("AZ", "Phoenix"), table));
  client.add_partitions(partitions);
}
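createPartition is another unshown helper. A minimal sketch, assuming it builds each Partition from the table via PartitionBuilder and leaves the actual insert to the add_partitions call above; the body is an assumed shape, not the verbatim source:

// Assumed sketch of the createPartition helper used above.
private Partition createPartition(List<String> vals, Table table) throws TException {
  return new PartitionBuilder()
      .inTable(table)    // copies db/table name and storage descriptor
      .setValues(vals)   // values for ("state", "city"), in partition-key order
      .build(conf);      // builds the object; client.add_partitions() persists it
}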