Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache: class ZooKeeperHiveLockManager, method setContext.
/**
 * Start the ZooKeeper client based on the ZooKeeper cluster specified in the conf.
 *
 * @param ctx the lock manager context (containing the Hive configuration)
 **/
@Override
public void setContext(HiveLockManagerCtx ctx) throws LockException {
  this.ctx = ctx;
  HiveConf conf = ctx.getConf();
  sleepTime = conf.getTimeVar(HiveConf.ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES, TimeUnit.MILLISECONDS);
  numRetriesForLock = conf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_NUMRETRIES);
  numRetriesForUnLock = conf.getIntVar(HiveConf.ConfVars.HIVE_UNLOCK_NUMRETRIES);
  try {
    curatorFramework = CuratorFrameworkSingleton.getInstance(conf);
    parent = conf.getVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_NAMESPACE);
    try {
      curatorFramework.create().withMode(CreateMode.PERSISTENT).forPath("/" + parent, new byte[0]);
    } catch (Exception e) {
      // ignore if the parent already exists
      if (!(e instanceof KeeperException) || ((KeeperException) e).code() != KeeperException.Code.NODEEXISTS) {
        LOG.warn("Unexpected ZK exception when creating parent node /" + parent, e);
      }
    }
  } catch (Exception e) {
    LOG.error("Failed to create curatorFramework object: ", e);
    throw new LockException(ErrorMsg.ZOOKEEPER_CLIENT_COULD_NOT_BE_INITIALIZED.getMsg());
  }
}
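The three accessor styles above (getTimeVar, getIntVar, getVar) cover the typed lookups HiveConf offers for these settings. A minimal standalone sketch, assuming a default-constructed HiveConf instead of the lock manager's context (the probe class itself is hypothetical):

// Hypothetical standalone probe, not from the Hive source; the ConfVars
// names are the ones used by setContext() above.
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hive.conf.HiveConf;

public class LockConfProbe {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf(); // picks up hive-site.xml from the classpath
    long sleepMs = conf.getTimeVar(
        HiveConf.ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES, TimeUnit.MILLISECONDS);
    int lockRetries = conf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_NUMRETRIES);
    String zkNamespace = conf.getVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_NAMESPACE);
    System.out.println("sleep=" + sleepMs + "ms, retries=" + lockRetries
        + ", parent znode=/" + zkNamespace);
  }
}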
Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache: class MapJoinProcessor, method generateMapJoinOperator.
public MapJoinOperator generateMapJoinOperator(ParseContext pctx, JoinOperator op, int mapJoinPos)
    throws SemanticException {
  HiveConf hiveConf = pctx.getConf();
  boolean noCheckOuterJoin = HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTSORTMERGEBUCKETMAPJOIN)
      && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTBUCKETMAPJOIN);
  MapJoinOperator mapJoinOp = convertMapJoin(pctx.getConf(), op, op.getConf().isLeftInputJoin(),
      op.getConf().getBaseSrc(), op.getConf().getMapAliases(), mapJoinPos, noCheckOuterJoin, true);
  // create a dummy select to select all columns
  genSelectPlan(pctx, mapJoinOp);
  return mapJoinOp;
}
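This snippet uses the static HiveConf.getBoolVar(conf, var) form; HiveConf also offers the equivalent instance form conf.getBoolVar(var). A hedged sketch of the flag combination behind noCheckOuterJoin, assuming a freshly constructed HiveConf (the probe class is hypothetical):

// Hypothetical sketch, not Hive source: noCheckOuterJoin above is true only
// when both bucket-mapjoin flags are enabled.
import org.apache.hadoop.hive.conf.HiveConf;

public class BucketJoinFlags {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    conf.setBoolVar(HiveConf.ConfVars.HIVEOPTSORTMERGEBUCKETMAPJOIN, true);
    conf.setBoolVar(HiveConf.ConfVars.HIVEOPTBUCKETMAPJOIN, true);
    boolean noCheckOuterJoin =
        HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEOPTSORTMERGEBUCKETMAPJOIN)
            && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEOPTBUCKETMAPJOIN);
    System.out.println("noCheckOuterJoin=" + noCheckOuterJoin); // prints true
  }
}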
Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache: class TableSizeBasedBigTableSelectorForAutoSMJ, method getBigTablePosition.
public int getBigTablePosition(ParseContext parseCtx, JoinOperator joinOp, Set<Integer> bigTableCandidates)
    throws SemanticException {
  int bigTablePos = -1;
  long maxSize = -1;
  HiveConf conf = parseCtx.getConf();
  try {
    List<TableScanOperator> topOps = new ArrayList<TableScanOperator>();
    getListTopOps(joinOp, topOps);
    int currentPos = 0;
    for (TableScanOperator topOp : topOps) {
      if (topOp == null) {
        return -1;
      }
      if (!bigTableCandidates.contains(currentPos)) {
        currentPos++;
        continue;
      }
      Table table = topOp.getConf().getTableMetadata();
      long currentSize = 0;
      if (!table.isPartitioned()) {
        currentSize = getSize(conf, table);
      } else {
        // For partitioned tables, get the size of all the partitions
        PrunedPartitionList partsList = PartitionPruner.prune(topOp, parseCtx, null);
        for (Partition part : partsList.getNotDeniedPartns()) {
          currentSize += getSize(conf, part);
        }
      }
      if (currentSize > maxSize) {
        maxSize = currentSize;
        bigTablePos = currentPos;
      }
      currentPos++;
    }
  } catch (HiveException e) {
    throw new SemanticException(e.getMessage());
  }
  return bigTablePos;
}
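Stripped of Hive's operator types, the selection policy above reduces to: among the candidate positions, pick the input with the largest size. A self-contained sketch of that loop (hypothetical class, plain Java collections):

// Hypothetical illustration of the same selection policy, detached from Hive.
import java.util.List;
import java.util.Set;

public class BigTableChooser {
  static int choose(List<Long> sizes, Set<Integer> candidates) {
    int bigTablePos = -1;
    long maxSize = -1;
    for (int pos = 0; pos < sizes.size(); pos++) {
      if (!candidates.contains(pos)) {
        continue; // this position is not eligible to be the big table
      }
      if (sizes.get(pos) > maxSize) {
        maxSize = sizes.get(pos);
        bigTablePos = pos;
      }
    }
    return bigTablePos;
  }

  public static void main(String[] args) {
    // Position 1 is the largest eligible input, so it is chosen.
    System.out.println(choose(List.of(10L, 300L, 42L), Set.of(0, 1)));
  }
}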
Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache: class MetaDataExportListener, method export_meta_data.
/** Export the metadata to a given path, and then move it to the user's trash */
private void export_meta_data(PreDropTableEvent tableEvent) throws MetaException {
  FileSystem fs = null;
  Table tbl = tableEvent.getTable();
  String name = tbl.getTableName();
  org.apache.hadoop.hive.ql.metadata.Table mTbl = new org.apache.hadoop.hive.ql.metadata.Table(tbl);
  HMSHandler handler = tableEvent.getHandler();
  HiveConf hiveconf = handler.getHiveConf();
  Warehouse wh = new Warehouse(hiveconf);
  Path tblPath = new Path(tbl.getSd().getLocation());
  fs = wh.getFs(tblPath);
  Date now = new Date();
  SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd-HH-mm-ss");
  String dateString = sdf.format(now);
  String exportPathString = hiveconf.getVar(HiveConf.ConfVars.METADATA_EXPORT_LOCATION);
  boolean moveMetadataToTrash = hiveconf.getBoolVar(HiveConf.ConfVars.MOVE_EXPORTED_METADATA_TO_TRASH);
  Path exportPath = null;
  if (exportPathString != null && exportPathString.length() == 0) {
    exportPath = fs.getHomeDirectory();
  } else {
    exportPath = new Path(exportPathString);
  }
  Path metaPath = new Path(exportPath, name + "." + dateString);
  LOG.info("Exporting the metadata of table " + tbl.toString() + " to path " + metaPath.toString());
  try {
    fs.mkdirs(metaPath);
  } catch (IOException e) {
    throw new MetaException(e.getMessage());
  }
  Path outFile = new Path(metaPath, name + EximUtil.METADATA_NAME);
  try {
    SessionState.getConsole().printInfo("Beginning metadata export");
    EximUtil.createExportDump(fs, outFile, mTbl, null, null);
    if (moveMetadataToTrash) {
      wh.deleteDir(metaPath, true);
    }
  } catch (IOException e) {
    throw new MetaException(e.getMessage());
  } catch (SemanticException e) {
    throw new MetaException(e.getMessage());
  }
}
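One subtlety above: a METADATA_EXPORT_LOCATION that is set but empty falls back to the user's home directory on the table's filesystem. A hypothetical helper isolating just that rule (it mirrors the original null/empty check, quirks included):

// Hypothetical helper, not Hive source: the export-path fallback rule used
// by export_meta_data() above.
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;

public class ExportPathResolver {
  static Path resolve(HiveConf conf, FileSystem fs) {
    String loc = conf.getVar(HiveConf.ConfVars.METADATA_EXPORT_LOCATION);
    // A set-but-empty location means "use the home directory on this filesystem".
    return (loc != null && loc.isEmpty()) ? fs.getHomeDirectory() : new Path(loc);
  }
}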
Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache: class GenSparkUtils, method processFileSink.
public void processFileSink(GenSparkProcContext context, FileSinkOperator fileSink) throws SemanticException {
  ParseContext parseContext = context.parseContext;
  // is INSERT OVERWRITE TABLE
  boolean isInsertTable = GenMapRedUtils.isInsertInto(parseContext, fileSink);
  HiveConf hconf = parseContext.getConf();
  boolean chDir = GenMapRedUtils.isMergeRequired(context.moveTask, hconf, fileSink,
      context.currentTask, isInsertTable);
  // Set stats config for FileSinkOperators which are cloned from the fileSink
  List<FileSinkOperator> fileSinkList = context.fileSinkMap.get(fileSink);
  if (fileSinkList != null) {
    for (FileSinkOperator fsOp : fileSinkList) {
      fsOp.getConf().setGatherStats(fileSink.getConf().isGatherStats());
      fsOp.getConf().setStatsReliable(fileSink.getConf().isStatsReliable());
    }
  }
  Path finalName = GenMapRedUtils.createMoveTask(context.currentTask, chDir, fileSink, parseContext,
      context.moveTask, hconf, context.dependencyTask);
  if (chDir) {
    // Merge the files in the destination table/partitions by creating a map-only merge job.
    // If the underlying data is RCFile, an RCFileBlockMerge task would be created.
    LOG.info("using CombineHiveInputformat for the merge job");
    GenMapRedUtils.createMRWorkForMergingFiles(fileSink, finalName, context.dependencyTask,
        context.moveTask, hconf, context.currentTask);
  }
  FetchTask fetchTask = parseContext.getFetchTask();
  if (fetchTask != null && context.currentTask.getNumChild() == 0) {
    if (fetchTask.isFetchFrom(fileSink.getConf())) {
      context.currentTask.setFetchSource(true);
    }
  }
}
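The chDir decision is delegated to GenMapRedUtils.isMergeRequired, which consults merge-related HiveConf flags. A minimal sketch reading one such flag; HIVEMERGEMAPFILES is a real ConfVars entry, but its exact role in the Spark merge decision is an assumption here, and the probe class is hypothetical:

// Hypothetical probe, not Hive source: reads one of the small-file-merge flags.
import org.apache.hadoop.hive.conf.HiveConf;

public class MergeFlagProbe {
  public static void main(String[] args) {
    HiveConf hconf = new HiveConf();
    // HIVEMERGEMAPFILES ("hive.merge.mapfiles") controls merging of small
    // output files; the Spark execution path has an analogous setting.
    boolean mergeMapFiles = HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEMERGEMAPFILES);
    System.out.println("hive.merge.mapfiles=" + mergeMapFiles);
  }
}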