Use of org.apache.hadoop.hive.ql.io.AcidUtils.TableSnapshot in project hive by apache.
From the class Hive, method alterPartitions.
/**
 * Updates the existing partitions' metadata with the new metadata.
 *
 * @param tblName
 * name of the existing table, possibly qualified as "db.table"
 * @param newParts
 * new partitions; all must belong to the table named by {@code tblName}
 * @param environmentContext
 * context forwarded to the metastore alter call (may be null)
 * @param transactional
 * Need to generate and save a table snapshot into the metastore?
 * @throws InvalidOperationException
 * if the changes in metadata are not acceptable
 * @throws HiveException
 */
public void alterPartitions(String tblName, List<Partition> newParts, EnvironmentContext environmentContext, boolean transactional) throws InvalidOperationException, HiveException {
String[] names = Utilities.getDbTableName(tblName);
List<org.apache.hadoop.hive.metastore.api.Partition> newTParts = new ArrayList<>();
try {
AcidUtils.TableSnapshot tableSnapshot = null;
if (transactional) {
// All partitions belong to the same table, so one snapshot covers every alter.
tableSnapshot = AcidUtils.getTableSnapshot(conf, newParts.get(0).getTable(), true);
}
// Remove the DDL time so that it gets refreshed
for (Partition tmpPart : newParts) {
if (tmpPart.getParameters() != null) {
tmpPart.getParameters().remove(hive_metastoreConstants.DDL_TIME);
}
String location = tmpPart.getLocation();
if (location != null) {
// Normalize to a fully-qualified path before handing it to the metastore.
location = Utilities.getQualifiedPath(conf, new Path(location));
tmpPart.setLocation(location);
}
newTParts.add(tmpPart.getTPartition());
}
getMSC().alter_partitions(names[0], names[1], newTParts, environmentContext, tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null, tableSnapshot != null ? tableSnapshot.getWriteId() : -1);
} catch (TException e) {
// MetaException is a TException subtype; one catch replaces the two
// previously duplicated handlers with identical bodies.
throw new HiveException("Unable to alter partition. " + e.getMessage(), e);
}
}
Use of org.apache.hadoop.hive.ql.io.AcidUtils.TableSnapshot in project hive by apache.
From the class Hive, method getTableColumnStatistics.
/**
 * Fetches column statistics for the given columns of a table from the metastore.
 * When {@code checkTransactional} is set, the request is scoped to the table's
 * current valid-write-id snapshot so transactional tables return consistent stats.
 *
 * @param dbName database name
 * @param tableName table name
 * @param colNames columns whose statistics are requested
 * @param checkTransactional whether to resolve and pass a table snapshot
 * @return the column statistics objects returned by the metastore
 * @throws HiveException wrapping any failure from the metastore call
 */
public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName, List<String> colNames, boolean checkTransactional) throws HiveException {
PerfLogger perfLogger = SessionState.getPerfLogger();
perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_TABLE_COLUMN_STATS);
try {
if (!checkTransactional) {
// Non-transactional path: no snapshot needed.
return getMSC().getTableColumnStatistics(dbName, tableName, colNames, Constants.HIVE_ENGINE);
}
Table tbl = getTable(dbName, tableName);
AcidUtils.TableSnapshot snapshot = AcidUtils.getTableSnapshot(conf, tbl);
String validWriteIds = (snapshot == null) ? null : snapshot.getValidWriteIdList();
return getMSC().getTableColumnStatistics(dbName, tableName, colNames, Constants.HIVE_ENGINE, validWriteIds);
} catch (Exception e) {
LOG.debug("Failed getTableColumnStatistics", e);
throw new HiveException(e);
} finally {
perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_TABLE_COLUMN_STATS, "HS2-cache");
}
}
Use of org.apache.hadoop.hive.ql.io.AcidUtils.TableSnapshot in project hive by apache.
From the class Hive, method getTableSnapshot.
/**
 * Builds a snapshot for the given table: from an explicit positive write id
 * when one is supplied, otherwise resolved through AcidUtils from the current
 * transaction state.
 *
 * @param tbl the table to snapshot
 * @param writeId an already-allocated write id, or null/non-positive to resolve one
 * @return the table snapshot
 * @throws LockException if the write-id list cannot be obtained
 */
private TableSnapshot getTableSnapshot(Table tbl, Long writeId) throws LockException {
if (writeId == null || writeId <= 0) {
// Make sure we pass in the names, so we can get the correct snapshot for rename table.
return AcidUtils.getTableSnapshot(conf, tbl, tbl.getDbName(), tbl.getTableName(), true);
}
ValidWriteIdList validIds = AcidUtils.getTableValidWriteIdListWithTxnList(conf, tbl.getDbName(), tbl.getTableName());
return new TableSnapshot(writeId, validIds.writeToString());
}
Use of org.apache.hadoop.hive.ql.io.AcidUtils.TableSnapshot in project hive by apache.
From the class Hive, method getAggrColStatsFor.
/**
 * Retrieves aggregated column statistics across the given partitions.
 * Statistics are best-effort: any failure is logged at debug level and an
 * empty aggregate is returned instead of propagating the error.
 *
 * @param dbName database name
 * @param tblName table name
 * @param colNames columns whose statistics are requested
 * @param partName partition names to aggregate over
 * @param checkTransactional whether to scope the request to the table's snapshot
 * @return the aggregated statistics, or an empty AggrStats on failure
 */
public AggrStats getAggrColStatsFor(String dbName, String tblName, List<String> colNames, List<String> partName, boolean checkTransactional) {
PerfLogger perfLogger = SessionState.getPerfLogger();
perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_AGGR_COL_STATS);
try {
String writeIdList = null;
if (checkTransactional) {
AcidUtils.TableSnapshot snapshot = AcidUtils.getTableSnapshot(conf, getTable(dbName, tblName));
if (snapshot != null) {
writeIdList = snapshot.getValidWriteIdList();
}
}
return getMSC().getAggrColStatsFor(dbName, tblName, colNames, partName, Constants.HIVE_ENGINE, writeIdList);
} catch (Exception e) {
// Deliberate fallback: missing stats must not fail the caller.
LOG.debug("Failed getAggrColStatsFor", e);
return new AggrStats(new ArrayList<ColumnStatisticsObj>(), 0);
} finally {
perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_AGGR_COL_STATS, "HS2-cache");
}
}
Use of org.apache.hadoop.hive.ql.io.AcidUtils.TableSnapshot in project hive by apache.
From the class Hive, method getPartitionColumnStatistics.
/**
 * Fetches per-partition column statistics from the metastore, keyed by
 * partition name. When {@code checkTransactional} is set, the request is
 * scoped to the table's current valid-write-id snapshot.
 *
 * @param dbName database name
 * @param tableName table name
 * @param partNames partitions whose statistics are requested
 * @param colNames columns whose statistics are requested
 * @param checkTransactional whether to resolve and pass a table snapshot
 * @return a map from partition name to its column statistics
 * @throws HiveException wrapping any failure from the metastore call
 */
public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(String dbName, String tableName, List<String> partNames, List<String> colNames, boolean checkTransactional) throws HiveException {
try {
String writeIdList = null;
if (checkTransactional) {
AcidUtils.TableSnapshot snapshot = AcidUtils.getTableSnapshot(conf, getTable(dbName, tableName));
if (snapshot != null) {
writeIdList = snapshot.getValidWriteIdList();
}
}
return getMSC().getPartitionColumnStatistics(dbName, tableName, partNames, colNames, Constants.HIVE_ENGINE, writeIdList);
} catch (Exception e) {
LOG.debug("Failed getPartitionColumnStatistics", e);
throw new HiveException(e);
}
}
Aggregations