Use of org.apache.hadoop.hive.metastore.Warehouse in project hive by apache.
The class SemanticAnalyzer, method handleLineage:
private void handleLineage(LoadTableDesc ltd, Operator output) throws SemanticException {
  if (ltd != null) {
    queryState.getLineageState().mapDirToOp(ltd.getSourcePath(), output);
  }
  if (queryState.getCommandType().equals(HiveOperation.CREATETABLE_AS_SELECT.getOperationName())) {
    Path tlocation = null;
    String tName = Utilities.getDbTableName(tableDesc.getDbTableName())[1];
    try {
      Warehouse wh = new Warehouse(conf);
      tlocation = wh.getDefaultTablePath(db.getDatabase(tableDesc.getDatabaseName()), tName, tableDesc.isExternal());
    } catch (MetaException | HiveException e) {
      throw new SemanticException(e);
    }
    queryState.getLineageState().mapDirToOp(tlocation, output);
  } else if (queryState.getCommandType().equals(HiveOperation.CREATE_MATERIALIZED_VIEW.getOperationName())) {
    Path tlocation;
    String[] dbTable = Utilities.getDbTableName(createVwDesc.getViewName());
    try {
      Warehouse wh = new Warehouse(conf);
      Map<String, String> tblProps = createVwDesc.getTblProps();
      tlocation = wh.getDefaultTablePath(db.getDatabase(dbTable[0]), dbTable[1], tblProps == null || !AcidUtils.isTablePropertyTransactional(tblProps));
    } catch (MetaException | HiveException e) {
      throw new SemanticException(e);
    }
    queryState.getLineageState().mapDirToOp(tlocation, output);
  }
}
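For reference, a minimal standalone sketch (not from the Hive source) of the same Warehouse call. The database name "default" and table name "t1" are hypothetical; the String-based getDefaultTablePath overload is the one used by LoadTable below.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class DefaultTablePathSketch {
  public static void main(String[] args) throws MetaException {
    // Warehouse resolves default table locations from the configured
    // warehouse root (hive.metastore.warehouse.dir).
    HiveConf conf = new HiveConf();
    Warehouse wh = new Warehouse(conf);
    // "default" and "t1" are placeholder names; the last argument marks
    // the table as external, which newer Hive versions resolve under a
    // separate external warehouse root.
    Path location = wh.getDefaultTablePath("default", "t1", false);
    System.out.println(location);
  }
}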
Use of org.apache.hadoop.hive.metastore.Warehouse in project hive by apache.
The class LoadTable, method tableLocation:
static TableLocationTuple tableLocation(ImportTableDesc tblDesc, Database parentDb, TableContext tableContext, Context context) throws MetaException, SemanticException {
  Warehouse wh = context.warehouse;
  Path defaultTablePath;
  if (parentDb == null) {
    defaultTablePath = wh.getDefaultTablePath(tblDesc.getDatabaseName(), tblDesc.getTableName(), tblDesc.isExternal());
  } else {
    defaultTablePath = wh.getDefaultTablePath(parentDb, tblDesc.getTableName(), tblDesc.isExternal());
  }
  // don't use TableType.EXTERNAL_TABLE.equals(tblDesc.tableType()) since this always comes in as managed for tables.
  if (tblDesc.isExternal()) {
    if (tblDesc.getLocation() == null) {
      // related rules to be applied to replicated tables across different versions of Hive.
      return new TableLocationTuple(wh.getDnsPath(defaultTablePath).toString(), true);
    }
    String currentLocation = new Path(tblDesc.getLocation()).toUri().getPath();
    String newLocation = ReplExternalTables.externalTableLocation(context.hiveConf, currentLocation);
    LOG.debug("external table {} data location is: {}", tblDesc.getTableName(), newLocation);
    return new TableLocationTuple(newLocation, false);
  }
  Path path = tableContext.waitOnPrecursor() ? wh.getDnsPath(defaultTablePath) : wh.getDefaultTablePath(parentDb, tblDesc.getTableName(), tblDesc.isExternal());
  return new TableLocationTuple(path.toString(), false);
}
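The getDnsPath calls above qualify a bare path with the filesystem's scheme and authority, so locations compare and replicate consistently. A hedged helper sketch, assuming an already-configured Configuration; the class and method names are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.MetaException;

class DnsPathSketch {
  // Qualify a raw warehouse path with the filesystem's scheme and authority,
  // e.g. /user/hive/warehouse/t1 -> hdfs://namenode:8020/user/hive/warehouse/t1.
  static Path qualify(Configuration conf, String rawPath) throws MetaException {
    Warehouse wh = new Warehouse(conf);
    return wh.getDnsPath(new Path(rawPath));
  }
}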
Use of org.apache.hadoop.hive.metastore.Warehouse in project hive by apache.
The class TestSessionHiveMetastoreClientExchangePartitionsTempTable, method setUp:
@Before
@Override
public void setUp() throws Exception {
  initHiveConf();
  wh = new Warehouse(conf);
  SessionState.start(conf);
  setClient(Hive.get(conf).getMSC());
  getClient().dropDatabase(DB_NAME, true, true, true);
  getMetaStore().cleanWarehouseDirs();
  createTestTables();
}
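The dropDatabase call above passes three booleans. A hedged sketch of that cleanup pattern with the flags spelled out, assuming the usual IMetaStoreClient.dropDatabase(name, deleteData, ignoreUnknownDb, cascade) argument order; the class and database name are hypothetical:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.ql.metadata.Hive;

class DropDbSketch {
  static void dropTestDb(HiveConf conf, String dbName) throws Exception {
    IMetaStoreClient client = Hive.get(conf).getMSC();
    client.dropDatabase(dbName,
        true,   // deleteData: also remove the database directory contents
        true,   // ignoreUnknownDb: no error if the database does not exist
        true);  // cascade: drop any tables the database still contains
  }
}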
Use of org.apache.hadoop.hive.metastore.Warehouse in project hive by apache.
The class TestCachedStore, method setUp:
@Before
public void setUp() throws Exception {
  Deadline.registerIfNot(10000000);
  Deadline.startTimer("");
  Configuration conf = MetastoreConf.newMetastoreConf();
  MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  ObjectStore objectStore = new ObjectStore();
  objectStore.setConf(conf);
  // Create the 'hive' catalog
  HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf));
  // Create 2 database objects
  db1 = createDatabaseObject("cs_db1", "user1");
  objectStore.createDatabase(db1);
  db2 = createDatabaseObject("cs_db2", "user1");
  objectStore.createDatabase(db2);
  // For each database object, create one partitioned and one unpartitioned table
  db1Utbl1 = createUnpartitionedTableObject(db1);
  objectStore.createTable(db1Utbl1);
  db1Ptbl1 = createPartitionedTableObject(db1);
  objectStore.createTable(db1Ptbl1);
  db2Utbl1 = createUnpartitionedTableObject(db2);
  objectStore.createTable(db2Utbl1);
  db2Ptbl1 = createPartitionedTableObject(db2);
  objectStore.createTable(db2Ptbl1);
  // Create partitions for cs_db1's partitioned table
  db1Ptbl1Ptns = createPartitionObjects(db1Ptbl1).getPartitions();
  db1Ptbl1PtnNames = createPartitionObjects(db1Ptbl1).getPartitionNames();
  objectStore.addPartitions(db1Ptbl1.getCatName(), db1Ptbl1.getDbName(), db1Ptbl1.getTableName(), db1Ptbl1Ptns);
  // Create partitions for cs_db2's partitioned table
  db2Ptbl1Ptns = createPartitionObjects(db2Ptbl1).getPartitions();
  db2Ptbl1PtnNames = createPartitionObjects(db2Ptbl1).getPartitionNames();
  objectStore.addPartitions(db2Ptbl1.getCatName(), db2Ptbl1.getDbName(), db2Ptbl1.getTableName(), db2Ptbl1Ptns);
  objectStore.shutdown();
}
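The two Deadline calls at the top matter: ObjectStore enforces a per-thread deadline on metadata operations, and partition calls fail without one. A sketch of the pattern in isolation, wrapped in a hypothetical helper:

import org.apache.hadoop.hive.metastore.Deadline;
import org.apache.hadoop.hive.metastore.api.MetaException;

class DeadlineSketch {
  static void prepare() throws MetaException {
    // Register a deadline for the current thread if none exists yet,
    // then start the timer; ObjectStore checks it during partition calls.
    Deadline.registerIfNot(10000000);
    Deadline.startTimer("");
  }
}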
Use of org.apache.hadoop.hive.metastore.Warehouse in project hive by apache.
The class TestCatalogCaching, method createObjectStore:
@Before
public void createObjectStore() throws MetaException, InvalidOperationException {
  conf = MetastoreConf.newMetastoreConf();
  MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  objectStore = new ObjectStore();
  objectStore.setConf(conf);
  // Create three catalogs: the default 'hive' catalog plus two test catalogs
  HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf));
  Catalog cat1 = new CatalogBuilder().setName(CAT1_NAME).setLocation("/tmp/cat1").build();
  objectStore.createCatalog(cat1);
  Catalog cat2 = new CatalogBuilder().setName(CAT2_NAME).setLocation("/tmp/cat2").build();
  objectStore.createCatalog(cat2);
}
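A hypothetical follow-up, not part of the original test: catalogs created this way can be read back through the same ObjectStore for a round-trip check, assuming the RawStore getCatalog method and the test's own fields.

// Hypothetical round-trip check using the test's objectStore and CAT1_NAME;
// getCatalog throws NoSuchObjectException/MetaException if lookup fails.
Catalog fetched = objectStore.getCatalog(CAT1_NAME);
Assert.assertEquals("/tmp/cat1", fetched.getLocationUri());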