use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.
the class TestTablesCreateDropAlterTruncate method setUp.
@Before
public void setUp() throws Exception {
  // Get new client
  client = metaStore.getClient();
  // Clean up the database
  client.dropDatabase(OTHER_DATABASE, true, true, true);
  // Drop every table in the default database
  for (String tableName : client.getAllTables(DEFAULT_DATABASE)) {
    client.dropTable(DEFAULT_DATABASE, tableName, true, true, true);
  }
  // Clean up trash
  metaStore.cleanWarehouseDirs();
  testTables[0] = new TableBuilder()
      .setDbName(DEFAULT_DATABASE)
      .setTableName("test_table")
      .addCol("test_col", "int")
      .build();
  testTables[1] = new TableBuilder()
      .setDbName(DEFAULT_DATABASE)
      .setTableName("test_view")
      .addCol("test_col", "int")
      .setType("VIRTUAL_VIEW")
      .build();
  testTables[2] = new TableBuilder()
      .setDbName(DEFAULT_DATABASE)
      .setTableName("test_table_to_find_1")
      .addCol("test_col", "int")
      .build();
  testTables[3] = new TableBuilder()
      .setDbName(DEFAULT_DATABASE)
      .setTableName("test_partitioned_table")
      .addCol("test_col1", "int")
      .addCol("test_col2", "int")
      .addPartCol("test_part_col", "int")
      .build();
  testTables[4] = new TableBuilder()
      .setDbName(DEFAULT_DATABASE)
      .setTableName("external_table_for_test")
      .addCol("test_col", "int")
      .setLocation(metaStore.getWarehouseRoot() + "/external/table_dir")
      .addTableParam("EXTERNAL", "TRUE")
      .setType("EXTERNAL_TABLE")
      .build();
  client.createDatabase(new DatabaseBuilder().setName(OTHER_DATABASE).build());
  testTables[5] = new TableBuilder()
      .setDbName(OTHER_DATABASE)
      .setTableName("test_table")
      .addCol("test_col", "int")
      .build();
  // Create the tables in the MetaStore
  for (int i = 0; i < testTables.length; i++) {
    client.createTable(testTables[i]);
  }
  // Create partitions for the partitioned table
  for (int i = 0; i < 3; i++) {
    Partition partition = new PartitionBuilder()
        .fromTable(testTables[3])
        .addValue("a" + i)
        .build();
    client.add_partition(partition);
  }
  // Add data files to the partitioned table
  List<Partition> partitions = client.listPartitions(testTables[3].getDbName(),
      testTables[3].getTableName(), (short) -1);
  for (Partition partition : partitions) {
    Path dataFile = new Path(partition.getSd().getLocation() + "/dataFile");
    metaStore.createFile(dataFile, "100");
  }
  // Reload tables from the MetaStore, and create data files
  for (int i = 0; i < testTables.length; i++) {
    testTables[i] = client.getTable(testTables[i].getDbName(), testTables[i].getTableName());
    if (testTables[i].getPartitionKeys().isEmpty() && testTables[i].getSd().getLocation() != null) {
      Path dataFile = new Path(testTables[i].getSd().getLocation() + "/dataFile");
      metaStore.createFile(dataFile, "100");
    }
  }
  partitionedTable = testTables[3];
  externalTable = testTables[4];
}
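The key call in the partition loop above is fromTable, which seeds the builder with the database name, table name, and storage information of an existing Table, so that only the partition values remain to be supplied. A minimal sketch of the same pattern (a hypothetical helper, not part of the test; assumes a table with a single partition column):

// Hypothetical helper illustrating the PartitionBuilder pattern above:
// creates one partition per value for a table with one partition column.
private void addPartitions(IMetaStoreClient client, Table table, String... values)
    throws Exception {
  for (String value : values) {
    Partition partition = new PartitionBuilder()
        .fromTable(table)  // seeds db name, table name, and storage info from the table
        .addValue(value)   // the value for the single partition column
        .build();
    client.add_partition(partition);
  }
}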
use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.
the class TestDropPartitions method createPartition.
private Partition createPartition(List<String> values, List<FieldSchema> partCols) throws Exception {
  Partition partition = new PartitionBuilder()
      .setDbName(DB_NAME)
      .setTableName(TABLE_NAME)
      .setValues(values)
      .setCols(partCols)
      .build();
  client.add_partition(partition);
  partition = client.getPartition(DB_NAME, TABLE_NAME, values);
  return partition;
}
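Unlike the fromTable variant, this helper names the target table explicitly and takes both the partition values and the columns from the caller. A hypothetical invocation (the "year" column and the "2017" value are illustrative, not from the test) might look like:

// Hypothetical usage of createPartition, assuming TABLE_NAME is partitioned
// by a single string column named "year".
List<FieldSchema> partCols =
    Collections.singletonList(new FieldSchema("year", "string", "year partition column"));
Partition created = createPartition(Collections.singletonList("2017"), partCols);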
use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.
the class SmokeTest method runTest.
private void runTest(IMetaStoreClient client) throws TException {
  LOG.info("Starting smoke test");
  File dbDir = new File(System.getProperty("java.io.tmpdir"), "internal_smoke_test");
  if (!dbDir.mkdir()) {
    throw new RuntimeException("Unable to create directory " + dbDir.getAbsolutePath());
  }
  dbDir.deleteOnExit();
  LOG.info("Going to create database " + dbName);
  Database db = new DatabaseBuilder()
      .setName(dbName)
      .setLocation(dbDir.getAbsolutePath())
      .build();
  client.createDatabase(db);
  LOG.info("Going to create table " + tableName);
  Table table = new TableBuilder()
      .setDbName(db)
      .setTableName(tableName)
      .addCol("col1", ColumnType.INT_TYPE_NAME)
      .addCol("col2", ColumnType.TIMESTAMP_TYPE_NAME)
      .addPartCol("pcol1", ColumnType.STRING_TYPE_NAME)
      .build();
  client.createTable(table);
  LOG.info("Going to create partition with value " + partValue);
  Partition part = new PartitionBuilder()
      .fromTable(table)
      .addValue(partValue)
      .build();
  client.add_partition(part);
  LOG.info("Going to list the partitions");
  List<Partition> parts = client.listPartitions(dbName, tableName, (short) -1);
  LOG.info("Fetched: {" + parts + "}");
  LOG.info("Going to drop database");
  client.dropDatabase(dbName, true, false, true);
  LOG.info("Completed smoke test");
}
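A driver for this test only needs a connected client; a minimal sketch of wiring one up (an assumption, not code from the snippet; it presumes the metastore is reachable with default MetastoreConf settings):

// Minimal sketch of driving runTest(); assumes a metastore reachable with
// the default MetastoreConf configuration.
public static void main(String[] args) throws Exception {
  Configuration conf = MetastoreConf.newMetastoreConf();
  IMetaStoreClient client = new HiveMetaStoreClient(conf);
  new SmokeTest().runTest(client);
}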
use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.
the class TestFilterHooks method setUp.
@BeforeClass
public static void setUp() throws Exception {
  DummyMetaStoreFilterHookImpl.blockResults = false;
  conf = MetastoreConf.newMetastoreConf();
  MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
  MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
  MetastoreConf.setClass(conf, ConfVars.FILTER_HOOK, DummyMetaStoreFilterHookImpl.class,
      MetaStoreFilterHook.class);
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
  msc = new HiveMetaStoreClient(conf);
  msc.dropDatabase(DBNAME1, true, true, true);
  msc.dropDatabase(DBNAME2, true, true, true);
  Database db1 = new DatabaseBuilder().setName(DBNAME1).build();
  msc.createDatabase(db1);
  Database db2 = new DatabaseBuilder().setName(DBNAME2).build();
  msc.createDatabase(db2);
  Table tab1 = new TableBuilder()
      .setDbName(DBNAME1)
      .setTableName(TAB1)
      .addCol("id", "int")
      .addCol("name", "string")
      .build();
  msc.createTable(tab1);
  Table tab2 = new TableBuilder()
      .setDbName(DBNAME1)
      .setTableName(TAB2)
      .addCol("id", "int")
      .addPartCol("name", "string")
      .build();
  msc.createTable(tab2);
  Partition part1 = new PartitionBuilder().fromTable(tab2).addValue("value1").build();
  msc.add_partition(part1);
  Partition part2 = new PartitionBuilder().fromTable(tab2).addValue("value2").build();
  msc.add_partition(part2);
}
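The interesting part of this setup is ConfVars.FILTER_HOOK: every metadata result the client returns first passes through the configured MetaStoreFilterHook, and the test flips DummyMetaStoreFilterHookImpl.blockResults to switch between pass-through and blocked behavior. A hedged sketch of what such a hook could look like (the test's actual implementation may differ; this assumes it extends the default pass-through hook and empties results when the flag is set):

// Hedged sketch of a blocking filter hook: passes results through unless
// blockResults is set, in which case list results come back empty.
public static class DummyMetaStoreFilterHookImpl extends DefaultMetaStoreFilterHookImpl {
  public static volatile boolean blockResults = false;

  public DummyMetaStoreFilterHookImpl(Configuration conf) {
    super(conf);
  }

  @Override
  public List<String> filterDatabases(List<String> dbList) throws MetaException {
    return blockResults ? new ArrayList<>() : super.filterDatabases(dbList);
  }
}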
use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.
the class TestHiveMetaStoreWithEnvironmentContext method setUp.
@Before
public void setUp() throws Exception {
  System.setProperty("hive.metastore.event.listeners", DummyListener.class.getName());
  conf = MetastoreConf.newMetastoreConf();
  MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
  MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
  msc = new HiveMetaStoreClient(conf);
  msc.dropDatabase(dbName, true, true);
  Map<String, String> envProperties = new HashMap<>();
  envProperties.put("hadoop.job.ugi", "test_user");
  envContext = new EnvironmentContext(envProperties);
  db.setName(dbName);
  table = new TableBuilder()
      .setDbName(dbName)
      .setTableName(tblName)
      .addTableParam("a", "string")
      .addPartCol("b", "string")
      .addCol("a", "string")
      .addCol("b", "string")
      .build();
  partition = new PartitionBuilder().fromTable(table).addValue("2011").build();
  DummyListener.notifyList.clear();
}
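The envContext prepared here is what the test is really about: the test methods pass it along with each metastore call and then assert that DummyListener observed it on the fired event. A hedged sketch of that flow (the event lookup and assertion are illustrative, not copied from the test):

// Illustrative sketch: pass the context with the operation, then check that
// the listener-captured event carries the same EnvironmentContext.
msc.createTable(table, envContext);
ListenerEvent event = DummyListener.notifyList.get(DummyListener.notifyList.size() - 1);
Assert.assertEquals(envContext, event.getEnvironmentContext());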