Use of org.apache.hive.hcatalog.data.schema.HCatFieldSchema in project hive by apache: class HCatTable, method toHiveTable().
Table toHiveTable() throws HCatException {
  Table newTable = new Table();
  newTable.setDbName(dbName);
  newTable.setTableName(tableName);
  if (tblProps != null) {
    newTable.setParameters(tblProps);
  }
  if (isExternal) {
    newTable.putToParameters("EXTERNAL", "TRUE");
    newTable.setTableType(TableType.EXTERNAL_TABLE.toString());
  } else {
    newTable.setTableType(TableType.MANAGED_TABLE.toString());
  }
  if (StringUtils.isNotBlank(this.comment)) {
    newTable.putToParameters("comment", comment);
  }
  newTable.setSd(sd);
  if (partCols != null) {
    ArrayList<FieldSchema> hivePtnCols = new ArrayList<FieldSchema>();
    for (HCatFieldSchema fs : partCols) {
      hivePtnCols.add(HCatSchemaUtils.getFieldSchema(fs));
    }
    newTable.setPartitionKeys(hivePtnCols);
  }
  newTable.setCreateTime((int) (System.currentTimeMillis() / 1000));
  newTable.setLastAccessTimeIsSet(false);
  try {
    // TODO: Verify that this works for systems using UGI.doAs() (e.g. Oozie).
    newTable.setOwner(owner == null ? getConf().getUser() : owner);
  } catch (Exception exception) {
    throw new HCatException("Unable to determine owner of table (" + dbName + "." + tableName + ") from HiveConf.");
  }
  return newTable;
}
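The method above leans on HCatSchemaUtils to translate each HCat partition column into a metastore FieldSchema. A minimal standalone sketch of that conversion follows; the column name, type, and comment are made up for illustration.

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hive.hcatalog.common.HCatException;
import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
import org.apache.hive.hcatalog.data.schema.HCatFieldSchema.Type;
import org.apache.hive.hcatalog.data.schema.HCatSchemaUtils;

public class PartColConversionSketch {
  public static void main(String[] args) throws HCatException {
    // Hypothetical partition column, similar to what toHiveTable() iterates over.
    HCatFieldSchema partCol = new HCatFieldSchema("dt", Type.STRING, "date partition column");
    // Convert the HCat column into the metastore's FieldSchema representation.
    FieldSchema hiveCol = HCatSchemaUtils.getFieldSchema(partCol);
    System.out.println(hiveCol.getName() + " : " + hiveCol.getType());
  }
}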
Use of org.apache.hive.hcatalog.data.schema.HCatFieldSchema in project hive by apache: class TestHCatPartitionPublish, method testPartitionPublish().
@Test
public void testPartitionPublish() throws Exception {
  String dbName = "default";
  String tableName = "testHCatPartitionedTable";
  createTable(null, tableName);
  Map<String, String> partitionMap = new HashMap<String, String>();
  partitionMap.put("part1", "p1value1");
  partitionMap.put("part0", "p0value1");
  ArrayList<HCatFieldSchema> hcatTableColumns = new ArrayList<HCatFieldSchema>();
  for (FieldSchema fs : getTableColumns()) {
    hcatTableColumns.add(HCatSchemaUtils.getHCatFieldSchema(fs));
  }
  runMRCreateFail(dbName, tableName, partitionMap, hcatTableColumns);
  List<String> ptns = msc.listPartitionNames(dbName, tableName, (short) 10);
  Assert.assertEquals(0, ptns.size());
  Table table = msc.getTable(dbName, tableName);
  Assert.assertTrue(table != null);
  Path path = new Path(table.getSd().getLocation() + "/part1=p1value1/part0=p0value1");
  Assert.assertFalse(path.getFileSystem(conf).exists(path));
}
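This test builds its HCat column list by converting metastore FieldSchema objects with HCatSchemaUtils.getHCatFieldSchema(), the reverse of the previous snippet. A minimal sketch of that direction, with hypothetical column names standing in for getTableColumns():

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hive.hcatalog.common.HCatException;
import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
import org.apache.hive.hcatalog.data.schema.HCatSchemaUtils;

public class MetastoreToHCatColumnsSketch {
  public static void main(String[] args) throws HCatException {
    // Hypothetical metastore columns standing in for the test's getTableColumns().
    List<FieldSchema> metastoreCols = Arrays.asList(
        new FieldSchema("userid", "int", ""),
        new FieldSchema("pageurl", "string", ""));
    // Convert each metastore column into its HCat counterpart.
    List<HCatFieldSchema> hcatCols = new ArrayList<HCatFieldSchema>();
    for (FieldSchema fs : metastoreCols) {
      hcatCols.add(HCatSchemaUtils.getHCatFieldSchema(fs));
    }
    System.out.println(hcatCols);
  }
}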
Use of org.apache.hive.hcatalog.data.schema.HCatFieldSchema in project hive by apache: class TestHCatClient, method testGetPartitionsWithPartialSpec().
@Test
public void testGetPartitionsWithPartialSpec() throws Exception {
  try {
    HCatClient client = HCatClient.create(new Configuration(hcatConf));
    final String dbName = "myDb";
    final String tableName = "myTable";
    client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
    client.createDatabase(HCatCreateDBDesc.create(dbName).build());
    List<HCatFieldSchema> columnSchema = Arrays.asList(new HCatFieldSchema("foo", Type.INT, ""), new HCatFieldSchema("bar", Type.STRING, ""));
    List<HCatFieldSchema> partitionSchema = Arrays.asList(new HCatFieldSchema("dt", Type.STRING, ""), new HCatFieldSchema("grid", Type.STRING, ""));
    HCatTable table = new HCatTable(dbName, tableName).cols(columnSchema).partCols(partitionSchema);
    client.createTable(HCatCreateTableDesc.create(table, false).build());
    // Verify that the table was created successfully.
    table = client.getTable(dbName, tableName);
    assertNotNull("The table created just now can't be null.", table);
    Map<String, String> partitionSpec = new HashMap<String, String>();
    partitionSpec.put("grid", "AB");
    partitionSpec.put("dt", "2011_12_31");
    client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, makePartLocation(table, partitionSpec))).build());
    partitionSpec.put("grid", "AB");
    partitionSpec.put("dt", "2012_01_01");
    client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, makePartLocation(table, partitionSpec))).build());
    partitionSpec.put("dt", "2012_01_01");
    partitionSpec.put("grid", "OB");
    client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, makePartLocation(table, partitionSpec))).build());
    partitionSpec.put("dt", "2012_01_01");
    partitionSpec.put("grid", "XB");
    client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, makePartLocation(table, partitionSpec))).build());
    Map<String, String> partialPartitionSpec = new HashMap<String, String>();
    partialPartitionSpec.put("dt", "2012_01_01");
    List<HCatPartition> partitions = client.getPartitions(dbName, tableName, partialPartitionSpec);
    assertEquals("Unexpected number of partitions.", 3, partitions.size());
    assertArrayEquals("Mismatched partition.", new String[] { "2012_01_01", "AB" }, partitions.get(0).getValues().toArray());
    assertArrayEquals("Mismatched partition.", new String[] { "2012_01_01", "OB" }, partitions.get(1).getValues().toArray());
    assertArrayEquals("Mismatched partition.", new String[] { "2012_01_01", "XB" }, partitions.get(2).getValues().toArray());
    client.dropDatabase(dbName, false, HCatClient.DropDBMode.CASCADE);
  } catch (Exception unexpected) {
    LOG.error("Unexpected exception!", unexpected);
    assertTrue("Unexpected exception! " + unexpected.getMessage(), false);
  }
}
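The point of the test is that HCatClient.getPartitions() accepts a partial key/value spec and returns every partition matching it. A minimal sketch of that lookup, assuming a client that is already connected and a table "myDb.myTable" partitioned by dt and grid as above:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hive.hcatalog.api.HCatClient;
import org.apache.hive.hcatalog.api.HCatPartition;
import org.apache.hive.hcatalog.common.HCatException;

public class PartialSpecLookupSketch {
  // 'client' is assumed to be an already-connected HCatClient.
  public static void listByDate(HCatClient client) throws HCatException {
    Map<String, String> partialSpec = new HashMap<String, String>();
    // Only the "dt" key is supplied; "grid" is left unconstrained.
    partialSpec.put("dt", "2012_01_01");
    List<HCatPartition> partitions = client.getPartitions("myDb", "myTable", partialSpec);
    for (HCatPartition partition : partitions) {
      // Each partition's key values come back in partition-column order.
      System.out.println(partition.getValues());
    }
  }
}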
Use of org.apache.hive.hcatalog.data.schema.HCatFieldSchema in project hive by apache: class TestHCatClient, method testPartitionsHCatClientImpl().
@Test
public void testPartitionsHCatClientImpl() throws Exception {
  HCatClient client = HCatClient.create(new Configuration(hcatConf));
  String dbName = "ptnDB";
  String tableName = "pageView";
  client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
  HCatCreateDBDesc dbDesc = HCatCreateDBDesc.create(dbName).ifNotExists(true).build();
  client.createDatabase(dbDesc);
  ArrayList<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
  cols.add(new HCatFieldSchema("userid", Type.INT, "id columns"));
  cols.add(new HCatFieldSchema("viewtime", Type.BIGINT, "view time columns"));
  cols.add(new HCatFieldSchema("pageurl", Type.STRING, ""));
  cols.add(new HCatFieldSchema("ip", Type.STRING, "IP Address of the User"));
  ArrayList<HCatFieldSchema> ptnCols = new ArrayList<HCatFieldSchema>();
  ptnCols.add(new HCatFieldSchema("dt", Type.STRING, "date column"));
  ptnCols.add(new HCatFieldSchema("country", Type.STRING, "country column"));
  HCatTable table = new HCatTable(dbName, tableName).cols(cols).partCols(ptnCols).fileFormat("sequenceFile");
  HCatCreateTableDesc tableDesc = HCatCreateTableDesc.create(table, false).build();
  client.createTable(tableDesc);
  // Verify that the table is created successfully.
  table = client.getTable(dbName, tableName);
  Map<String, String> firstPtn = new HashMap<String, String>();
  firstPtn.put("dt", "04/30/2012");
  firstPtn.put("country", "usa");
  // Test the new HCatAddPartitionDesc API.
  HCatAddPartitionDesc addPtn = HCatAddPartitionDesc.create(new HCatPartition(table, firstPtn, null)).build();
  client.addPartition(addPtn);
  Map<String, String> secondPtn = new HashMap<String, String>();
  secondPtn.put("dt", "04/12/2012");
  secondPtn.put("country", "brazil");
  // Test the deprecated HCatAddPartitionDesc API.
  HCatAddPartitionDesc addPtn2 = HCatAddPartitionDesc.create(dbName, tableName, null, secondPtn).build();
  client.addPartition(addPtn2);
  Map<String, String> thirdPtn = new HashMap<String, String>();
  thirdPtn.put("dt", "04/13/2012");
  thirdPtn.put("country", "argentina");
  // Test the deprecated HCatAddPartitionDesc API.
  HCatAddPartitionDesc addPtn3 = HCatAddPartitionDesc.create(dbName, tableName, null, thirdPtn).build();
  client.addPartition(addPtn3);
  List<HCatPartition> ptnList = client.listPartitionsByFilter(dbName, tableName, null);
  assertTrue(ptnList.size() == 3);
  HCatPartition ptn = client.getPartition(dbName, tableName, firstPtn);
  assertTrue(ptn != null);
  client.dropPartitions(dbName, tableName, firstPtn, true);
  ptnList = client.listPartitionsByFilter(dbName, tableName, null);
  assertTrue(ptnList.size() == 2);
  List<HCatPartition> ptnListTwo = client.listPartitionsByFilter(dbName, tableName, "country = \"argentina\"");
  assertTrue(ptnListTwo.size() == 1);
  client.markPartitionForEvent(dbName, tableName, thirdPtn, PartitionEventType.LOAD_DONE);
  boolean isMarked = client.isPartitionMarkedForEvent(dbName, tableName, thirdPtn, PartitionEventType.LOAD_DONE);
  assertTrue(isMarked);
  client.close();
}
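The non-deprecated path in the test builds an HCatAddPartitionDesc from an HCatPartition rather than from raw db/table names. A minimal sketch of that pattern, assuming 'client' is an already-connected HCatClient and 'table' is an existing partitioned HCatTable (for example, one fetched with client.getTable()); the partition key values are made up for illustration:

import java.util.HashMap;
import java.util.Map;

import org.apache.hive.hcatalog.api.HCatAddPartitionDesc;
import org.apache.hive.hcatalog.api.HCatClient;
import org.apache.hive.hcatalog.api.HCatPartition;
import org.apache.hive.hcatalog.api.HCatTable;
import org.apache.hive.hcatalog.common.HCatException;

public class AddPartitionSketch {
  public static void addDatePartition(HCatClient client, HCatTable table) throws HCatException {
    // Hypothetical partition spec; keys must match the table's partition columns.
    Map<String, String> partSpec = new HashMap<String, String>();
    partSpec.put("dt", "04/30/2012");
    partSpec.put("country", "usa");
    // Passing null as the location lets the metastore choose the default partition path.
    HCatAddPartitionDesc desc = HCatAddPartitionDesc.create(new HCatPartition(table, partSpec, null)).build();
    client.addPartition(desc);
  }
}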
Use of org.apache.hive.hcatalog.data.schema.HCatFieldSchema in project hive by apache: class TestHCatClient, method testOtherFailure().
@Test
public void testOtherFailure() throws Exception {
  HCatClient client = HCatClient.create(new Configuration(hcatConf));
  String tableName = "Temptable";
  boolean isExceptionCaught = false;
  client.dropTable(null, tableName, true);
  ArrayList<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
  cols.add(new HCatFieldSchema("id", Type.INT, "id columns"));
  cols.add(new HCatFieldSchema("value", Type.STRING, "id columns"));
  try {
    HCatCreateTableDesc tableDesc = HCatCreateTableDesc.create(null, tableName, cols).fileFormat("rcfile").build();
    client.createTable(tableDesc);
    // The DB foo is non-existent.
    client.getTable("foo", tableName);
  } catch (Exception exp) {
    isExceptionCaught = true;
    assertTrue(exp instanceof HCatException);
    String newName = "goodTable";
    client.dropTable(null, newName, true);
    HCatCreateTableDesc tableDesc2 = HCatCreateTableDesc.create(null, newName, cols).fileFormat("rcfile").build();
    client.createTable(tableDesc2);
    HCatTable newTable = client.getTable(null, newName);
    assertTrue(newTable != null);
    assertTrue(newTable.getTableName().equalsIgnoreCase(newName));
  } finally {
    client.close();
    assertTrue("The expected exception was never thrown.", isExceptionCaught);
  }
}
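The failure the test relies on is that looking up a table in a non-existent database surfaces as an HCatException on the client. A minimal sketch of just that check, assuming 'client' is an already-connected HCatClient and that the database "foo" does not exist:

import org.apache.hive.hcatalog.api.HCatClient;
import org.apache.hive.hcatalog.common.HCatException;

public class MissingDbLookupSketch {
  public static boolean lookupFailsForMissingDb(HCatClient client) {
    try {
      // "foo" is assumed not to exist, mirroring the test above.
      client.getTable("foo", "Temptable");
      return false;
    } catch (HCatException expected) {
      // Client-side lookup failures surface as HCatException (or a subclass of it).
      return true;
    }
  }
}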