use of org.apache.hadoop.hive.metastore.api.GetTableRequest in project hive by apache.
the class Hive method getTable.
/**
 * Returns metadata of the table.
 *
 * @param dbName
 *          the name of the database
 * @param tableName
 *          the name of the table
 * @param metaTableName
 *          the name of the metadata table
 * @param throwException
 *          controls whether an exception is thrown or a null value is returned
 *          when the table does not exist
 * @param checkTransactional
 *          checks whether the table metadata and stats are valid for (i.e.
 *          compliant with the snapshot isolation of) the current transaction
 * @param getColumnStats
 *          get column statistics if available
 * @return the table, or a null value if throwException is false and the table
 *         is not found
 * @throws HiveException
 */
public Table getTable(final String dbName, final String tableName, String metaTableName,
    boolean throwException, boolean checkTransactional, boolean getColumnStats) throws HiveException {
  if (tableName == null || tableName.equals("")) {
    throw new HiveException("empty table creation??");
  }
  // Get the table from the metastore
  org.apache.hadoop.hive.metastore.api.Table tTable = null;
  try {
    // Note: getColumnStats is currently passed as true from StatsOptimizer only.
    GetTableRequest request = new GetTableRequest(dbName, tableName);
    request.setCatName(getDefaultCatalog(conf));
    request.setGetColumnStats(getColumnStats);
    request.setEngine(Constants.HIVE_ENGINE);
    if (checkTransactional) {
      ValidWriteIdList validWriteIdList = null;
      long txnId = SessionState.get() != null && SessionState.get().getTxnMgr() != null
          ? SessionState.get().getTxnMgr().getCurrentTxnId() : 0;
      if (txnId > 0) {
        validWriteIdList = AcidUtils.getTableValidWriteIdListWithTxnList(conf, dbName, tableName);
      }
      request.setValidWriteIdList(validWriteIdList != null ? validWriteIdList.toString() : null);
    }
    tTable = getMSC().getTable(request);
  } catch (NoSuchObjectException e) {
    if (throwException) {
      throw new InvalidTableException(tableName);
    }
    return null;
  } catch (Exception e) {
    throw new HiveException("Unable to fetch table " + tableName + ". " + e.getMessage(), e);
  }
  // For non-views, we need to do some extra fixes
  if (!TableType.VIRTUAL_VIEW.toString().equals(tTable.getTableType())) {
    // Fix the non-printable chars
    Map<String, String> parameters = tTable.getSd().getParameters();
    String sf = parameters != null ? parameters.get(SERIALIZATION_FORMAT) : null;
    if (sf != null) {
      char[] b = sf.toCharArray();
      if ((b.length == 1) && (b[0] < 10)) {
        // ^A, ^B, ^C, ^D, \t
        parameters.put(SERIALIZATION_FORMAT, Integer.toString(b[0]));
      }
    }
    // Use LazySimpleSerDe in place of MetadataTypedColumnsetSerDe for simple
    // (non-nested) tables created by an earlier version of Hive.
    if (org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe.class.getName()
        .equals(tTable.getSd().getSerdeInfo().getSerializationLib())
        && tTable.getSd().getColsSize() > 0
        && tTable.getSd().getCols().get(0).getType().indexOf('<') == -1) {
      tTable.getSd().getSerdeInfo().setSerializationLib(
          org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
    }
  }
  Table t = new Table(tTable);
  if (metaTableName != null) {
    if (t.getStorageHandler() == null || !t.getStorageHandler().isMetadataTableSupported()) {
      throw new SemanticException(ErrorMsg.METADATA_TABLE_NOT_SUPPORTED, t.getTableName());
    }
    if (!t.getStorageHandler().isValidMetadataTable(metaTableName)) {
      throw new SemanticException(ErrorMsg.INVALID_METADATA_TABLE_NAME, metaTableName);
    }
  }
  t.setMetaTable(metaTableName);
  return t;
}
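For orientation, a call site might look like the following. This is a minimal sketch, not taken from the Hive sources; the hive instance and the database/table names are assumptions for illustration.

// Look up table metadata with column statistics, validated against the
// current transaction's snapshot; with throwException = false a missing
// table yields null instead of an InvalidTableException.
Table table = hive.getTable("default", "orders", /* metaTableName */ null,
    /* throwException */ false, /* checkTransactional */ true, /* getColumnStats */ true);
if (table != null) {
  System.out.println("Resolved table: " + table.getTableName());
}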
use of org.apache.hadoop.hive.metastore.api.GetTableRequest in project hive by apache.
the class SessionHiveMetaStoreClient method getTable.
// Need to override this one too, or dropTable breaks because it cannot find
// the table when it checks for existence before the drop.
@Override
public org.apache.hadoop.hive.metastore.api.Table getTable(String catName, String dbName,
    String tableName, boolean getColStats, String engine) throws TException {
  GetTableRequest getTableRequest = new GetTableRequest(dbName, tableName);
  getTableRequest.setGetColumnStats(getColStats);
  getTableRequest.setEngine(engine);
  if (!DEFAULT_CATALOG_NAME.equals(catName)) {
    getTableRequest.setCatName(catName);
    return super.getTable(getTableRequest);
  } else {
    return getTable(getTableRequest);
  }
}
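For comparison, the same request shape can be issued directly against a standalone metastore client. A minimal sketch, assuming a reachable metastore configured via HiveConf; the database and table names are hypothetical:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.GetTableRequest;
import org.apache.hadoop.hive.metastore.api.Table;

public class GetTableRequestExample {
  public static void main(String[] args) throws Exception {
    IMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    try {
      // Build the request the same way the override above does.
      GetTableRequest request = new GetTableRequest("default", "orders"); // hypothetical names
      request.setGetColumnStats(true);
      request.setEngine("hive"); // the value of Constants.HIVE_ENGINE
      Table table = client.getTable(request);
      System.out.println(table.getDbName() + "." + table.getTableName());
    } finally {
      client.close();
    }
  }
}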
use of org.apache.hadoop.hive.metastore.api.GetTableRequest in project hive by apache.
the class TestHiveMetaStoreClient method getTable.
public Table getTable(String dbName, String tableName, boolean getColumnStats, String engine)
    throws MetaException, TException, NoSuchObjectException {
  GetTableRequest getTableRequest = new GetTableRequest(dbName, tableName);
  getTableRequest.setGetColumnStats(getColumnStats);
  getTableRequest.setEngine(engine);
  return getTable(getTableRequest);
}
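Callers of such a helper typically handle the missing-table case explicitly. A hedged sketch, where client is an assumed instance exposing the method above and the names are hypothetical:

try {
  Table t = client.getTable("default", "orders", true, "hive");
  // use t ...
} catch (NoSuchObjectException e) {
  // The table does not exist; decide whether to create it or fail.
}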
use of org.apache.hadoop.hive.metastore.api.GetTableRequest in project hive by apache.
the class TestCleaner method NoCleanupAfterMajorCompaction.
@Test
public void NoCleanupAfterMajorCompaction() throws Exception {
  Map<String, String> parameters = new HashMap<>();
  // With no_cleanup = true, the cleaner must leave all files in place
  parameters.put("no_cleanup", "true");
  Table t = newTable("default", "dcamc", false, parameters);
  addBaseFile(t, null, 20L, 20);
  addDeltaFile(t, null, 21L, 22L, 2);
  addDeltaFile(t, null, 23L, 24L, 2);
  addBaseFile(t, null, 25L, 25);
  burnThroughTransactions("default", "dcamc", 25);
  CompactionRequest rqst = new CompactionRequest("default", "dcamc", CompactionType.MAJOR);
  compactInTxn(rqst);
  startCleaner();
  // Check that the compaction request is recorded as succeeded
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals(1, rsp.getCompactsSize());
  Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());
  // Check that the files are not removed
  List<Path> paths = getDirectories(conf, t, null);
  Assert.assertEquals(4, paths.size());
  // With no_cleanup = false, the cleaner must remove the obsolete files
  t = ms.getTable(new GetTableRequest("default", "dcamc"));
  t.getParameters().put("no_cleanup", "false");
  ms.alter_table("default", "dcamc", t);
  rqst = new CompactionRequest("default", "dcamc", CompactionType.MAJOR);
  compactInTxn(rqst);
  startCleaner();
  // Check that the second compaction request is recorded as succeeded
  rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals(2, rsp.getCompactsSize());
  Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());
  // Check that the obsolete files are removed and only the new base remains
  paths = getDirectories(conf, t, null);
  Assert.assertEquals(1, paths.size());
  Assert.assertEquals("base_25", paths.get(0).getName());
}
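The parameter toggle in the middle of the test is a general read-modify-write pattern against the metastore: fetch the thrift Table with a GetTableRequest, mutate a table parameter, and persist it with alter_table. A minimal standalone sketch, assuming a reachable metastore; the table name mirrors the test's hypothetical one:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.GetTableRequest;
import org.apache.hadoop.hive.metastore.api.Table;

public class ToggleNoCleanup {
  public static void main(String[] args) throws Exception {
    IMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    try {
      // Read-modify-write: fetch the table, flip the flag, persist the change.
      Table t = client.getTable(new GetTableRequest("default", "dcamc"));
      t.getParameters().put("no_cleanup", "false"); // let the cleaner remove obsolete files
      client.alter_table("default", "dcamc", t);
    } finally {
      client.close();
    }
  }
}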