Use of org.apache.hadoop.hive.metastore.api.EnvironmentContext in project hive by apache.
From the class StatsNoJobTask, method updatePartitions:
private int updatePartitions(Hive db) throws InvalidOperationException, HiveException {
  if (!partUpdates.isEmpty()) {
    List<Partition> updatedParts = Lists.newArrayList(partUpdates.values());
    if (updatedParts.contains(null) && work.isStatsReliable()) {
      LOG.debug("Stats requested to be reliable. Empty stats found and hence failing the task.");
      return -1;
    } else {
      LOG.debug("Bulk updating partitions..");
      EnvironmentContext environmentContext = new EnvironmentContext();
      environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
      db.alterPartitions(tableFullName, Lists.newArrayList(partUpdates.values()), environmentContext);
      LOG.debug("Bulk updated " + partUpdates.values().size() + " partitions.");
    }
  }
  return 0;
}
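The key detail above is the STATS_GENERATED marker: it tells the metastore that the statistics being written were produced by a stats task, so it should accept them rather than invalidate them on alter. A minimal, self-contained sketch of building such a context (assuming the same StatsSetupConst constants used in the snippet):

import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

public final class StatsContexts {
  // Marks basic stats as generated by a stats task so the metastore
  // treats them as accurate instead of resetting them on alter.
  public static EnvironmentContext taskGeneratedStats() {
    EnvironmentContext ctx = new EnvironmentContext();
    ctx.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
    return ctx;
  }
}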
Use of org.apache.hadoop.hive.metastore.api.EnvironmentContext in project hive by apache.
From the class Hive, method alterTable:
public void alterTable(String tblName, Table newTbl, boolean cascade, EnvironmentContext environmentContext) throws InvalidOperationException, HiveException {
  String[] names = Utilities.getDbTableName(tblName);
  try {
    // Remove the DDL_TIME so it gets refreshed
    if (newTbl.getParameters() != null) {
      newTbl.getParameters().remove(hive_metastoreConstants.DDL_TIME);
    }
    newTbl.checkValidity(conf);
    if (environmentContext == null) {
      environmentContext = new EnvironmentContext();
    }
    if (cascade) {
      environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
    }
    getMSC().alter_table_with_environmentContext(names[0], names[1], newTbl.getTTable(), environmentContext);
  } catch (MetaException e) {
    throw new HiveException("Unable to alter table. " + e.getMessage(), e);
  } catch (TException e) {
    throw new HiveException("Unable to alter table. " + e.getMessage(), e);
  }
}
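A hedged usage sketch: how a caller might invoke this overload to propagate a change to all existing partitions via CASCADE. The table name and property are placeholders, and the exact set of alterTable overloads varies across Hive versions:

// Hypothetical caller; "default.sales" is a placeholder table name.
Hive db = Hive.get(conf);                          // assumes an initialized HiveConf in scope
Table tbl = db.getTable("default.sales");
tbl.getParameters().put("note", "schema touched"); // illustrative change only
// cascade=true: alterTable adds StatsSetupConst.CASCADE to the context,
// so the metastore applies the change to existing partitions as well.
db.alterTable("default.sales", tbl, true, null);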
Use of org.apache.hadoop.hive.metastore.api.EnvironmentContext in project hive by apache.
From the class Hive, method setStatsPropAndAlterPartition:
private void setStatsPropAndAlterPartition(boolean hasFollowingStatsTask, Table tbl, Partition newTPart) throws MetaException, TException {
  EnvironmentContext environmentContext = null;
  if (hasFollowingStatsTask) {
    environmentContext = new EnvironmentContext();
    environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
  }
  LOG.debug("Altering existing partition " + newTPart.getSpec());
  getSychronizedMSC().alter_partition(tbl.getDbName(), tbl.getTableName(), newTPart.getTPartition(), environmentContext);
}
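The receiving side of the DO_NOT_UPDATE_STATS flag is the metastore's alter handler. A hedged sketch of how a receiver could test for it (mirroring, not quoting, the metastore's check; the real logic differs across versions):

// Returns true when the caller asked the metastore to leave stats alone,
// typically because a dedicated stats task will recompute them later.
static boolean statsUpdateSuppressed(EnvironmentContext ctx) {
  return ctx != null
      && ctx.getProperties() != null
      && StatsSetupConst.TRUE.equals(
          ctx.getProperties().get(StatsSetupConst.DO_NOT_UPDATE_STATS));
}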
Use of org.apache.hadoop.hive.metastore.api.EnvironmentContext in project hive by apache.
From the class Hive, method loadTable:
/**
 * Load a directory into a Hive table. Alters the existing content of the
 * table with the contents of loadPath. If the table does not exist, an
 * exception is thrown. Files in loadPath are moved into Hive, but the
 * source directory itself is not removed.
 *
 * @param loadPath
 *          directory containing files to load into the table
 * @param tableName
 *          name of the table to be loaded
 * @param replace
 *          if true, replace files in the table; otherwise add files to the table
 * @param isSrcLocal
 *          if the source directory is LOCAL
 * @param isSkewedStoreAsSubdir
 *          if list bucketing is enabled
 * @param isAcid
 *          true if this is an ACID-based write
 * @param hasFollowingStatsTask
 *          if there is any following stats task
 */
public void loadTable(Path loadPath, String tableName, boolean replace, boolean isSrcLocal, boolean isSkewedStoreAsSubdir, boolean isAcid, boolean hasFollowingStatsTask) throws HiveException {
  List<Path> newFiles = null;
  Table tbl = getTable(tableName);
  HiveConf sessionConf = SessionState.getSessionConf();
  if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) {
    newFiles = Collections.synchronizedList(new ArrayList<Path>());
  }
  if (replace) {
    Path tableDest = tbl.getPath();
    replaceFiles(tableDest, loadPath, tableDest, tableDest, sessionConf, isSrcLocal);
  } else {
    FileSystem fs;
    try {
      fs = tbl.getDataLocation().getFileSystem(sessionConf);
      copyFiles(sessionConf, loadPath, tbl.getPath(), fs, isSrcLocal, isAcid, newFiles);
    } catch (IOException e) {
      throw new HiveException("addFiles: filesystem error in check phase", e);
    }
  }
  if (!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
    StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
  }
  // column stats will be inaccurate
  StatsSetupConst.clearColumnStatsState(tbl.getParameters());
  try {
    if (isSkewedStoreAsSubdir) {
      SkewedInfo skewedInfo = tbl.getSkewedInfo();
      // Construct list bucketing location mappings from sub-directory name.
      Map<List<String>, String> skewedColValueLocationMaps = constructListBucketingLocationMap(tbl.getPath(), skewedInfo);
      // Add list bucketing location mappings.
      skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps);
    }
  } catch (IOException e) {
    LOG.error(StringUtils.stringifyException(e));
    throw new HiveException(e);
  }
  EnvironmentContext environmentContext = null;
  if (hasFollowingStatsTask) {
    environmentContext = new EnvironmentContext();
    environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
  }
  try {
    alterTable(tableName, tbl, environmentContext);
  } catch (InvalidOperationException e) {
    throw new HiveException(e);
  }
  fireInsertEvent(tbl, null, newFiles);
}
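A hedged invocation sketch for loadTable; the path and table name are placeholders, and the argument order follows the signature above rather than the javadoc:

import org.apache.hadoop.fs.Path;

// Hypothetical call: move staged files into an existing table, replacing
// its current contents, with a stats task scheduled to run afterwards.
Hive db = Hive.get(conf);                    // assumes an initialized HiveConf
db.loadTable(new Path("/tmp/staging/sales"), // loadPath: directory with files to move
    "default.sales",                         // tableName (placeholder)
    true,                                    // replace existing files
    false,                                   // isSrcLocal: source is in HDFS, not local
    false,                                   // isSkewedStoreAsSubdir: no list bucketing
    false,                                   // isAcid: not an ACID write
    true);                                   // hasFollowingStatsTask: suppress stats updates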
Use of org.apache.hadoop.hive.metastore.api.EnvironmentContext in project hive by apache.
From the class DDLTask, method getLocations:
private List<Path> getLocations(Hive db, Table table, Map<String, String> partSpec) throws HiveException, InvalidOperationException {
  List<Path> locations = new ArrayList<Path>();
  if (partSpec == null) {
    if (table.isPartitioned()) {
      for (Partition partition : db.getPartitions(table)) {
        locations.add(partition.getDataLocation());
        EnvironmentContext environmentContext = new EnvironmentContext();
        if (needToUpdateStats(partition.getParameters(), environmentContext)) {
          db.alterPartition(table.getDbName(), table.getTableName(), partition, environmentContext);
        }
      }
    } else {
      locations.add(table.getPath());
      EnvironmentContext environmentContext = new EnvironmentContext();
      if (needToUpdateStats(table.getParameters(), environmentContext)) {
        db.alterTable(table.getDbName() + "." + table.getTableName(), table, environmentContext);
      }
    }
  } else {
    for (Partition partition : db.getPartitionsByNames(table, partSpec)) {
      locations.add(partition.getDataLocation());
      EnvironmentContext environmentContext = new EnvironmentContext();
      if (needToUpdateStats(partition.getParameters(), environmentContext)) {
        db.alterPartition(table.getDbName(), table.getTableName(), partition, environmentContext);
      }
    }
  }
  return locations;
}
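getLocations depends on the private helper needToUpdateStats, which is not shown in this listing. A hedged sketch of what such a helper plausibly does, inferred from the constants used above (the actual DDLTask implementation may differ):

// Sketch: report whether any basic stats were present, zero them out,
// and mark the context so the metastore accepts the new (zeroed) values.
private boolean needToUpdateStats(Map<String, String> props, EnvironmentContext ctx) {
  if (props == null) {
    return false;
  }
  boolean statsPresent = false;
  for (String stat : StatsSetupConst.supportedStats) {
    String statVal = props.get(stat);
    if (statVal != null && Long.parseLong(statVal) > 0) {
      statsPresent = true;
      props.put(stat, "0"); // reset the stale value
    }
  }
  StatsSetupConst.setBasicStatsState(props, StatsSetupConst.TRUE);
  ctx.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
  StatsSetupConst.clearColumnStatsState(props); // column stats are now stale
  return statsPresent;
}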