Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
The class PostExecOrcFileDump, method run:
@Override
public void run(HookContext hookContext) throws Exception {
  assert (hookContext.getHookType() == HookContext.HookType.POST_EXEC_HOOK);
  HiveConf conf = hookContext.getConf();
  LOG.info("Executing post execution hook to print orc file dump..");
  QueryPlan plan = hookContext.getQueryPlan();
  if (plan == null) {
    return;
  }
  FetchTask fetchTask = plan.getFetchTask();
  if (fetchTask != null) {
    SessionState ss = SessionState.get();
    SessionState.LogHelper console = ss.getConsole();
    // file dump should write to the session state console's error stream
    PrintStream old = System.out;
    System.setOut(console.getErrStream());
    FetchWork fetchWork = fetchTask.getWork();
    boolean partitionedTable = fetchWork.isPartitioned();
    List<Path> directories;
    if (partitionedTable) {
      LOG.info("Printing orc file dump for files from partitioned directory..");
      directories = fetchWork.getPartDir();
    } else {
      LOG.info("Printing orc file dump for files from table directory..");
      directories = Lists.newArrayList();
      directories.add(fetchWork.getTblDir());
    }
    for (Path dir : directories) {
      FileSystem fs = dir.getFileSystem(conf);
      List<FileStatus> fileList = HdfsUtils.listLocatedStatus(fs, dir, hiddenFileFilter);
      for (FileStatus fileStatus : fileList) {
        LOG.info("Printing orc file dump for " + fileStatus.getPath());
        if (fileStatus.getLen() > 0) {
          try {
            // just creating the ORC reader runs the sanity checks that confirm it's a valid ORC file
            OrcFile.createReader(fs, fileStatus.getPath());
            console.printError("-- BEGIN ORC FILE DUMP --");
            FileDump.main(new String[] { fileStatus.getPath().toString(), "--rowindex=*" });
            console.printError("-- END ORC FILE DUMP --");
          } catch (FileFormatException e) {
            LOG.warn("File " + fileStatus.getPath() + " is not ORC. Skip printing orc file dump");
          } catch (IOException e) {
            LOG.warn("Skip printing orc file dump. Exception: " + e.getMessage());
          }
        } else {
          LOG.warn("Zero length file encountered. Skip printing orc file dump.");
        }
      }
    }
    // restore the old out stream
    System.out.flush();
    System.setOut(old);
  }
}
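The central trick in this hook is temporarily swapping System.out for the console's error stream, so that FileDump (which writes to standard out) lands its output where the session console expects it, and restoring the original stream afterwards. A minimal, self-contained sketch of that swap-and-restore pattern in plain Java (no Hive classes involved; the class name StdoutSwapSketch is made up for illustration) could look like this:

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

public class StdoutSwapSketch {
  public static void main(String[] args) {
    PrintStream old = System.out;                   // remember the original stream
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    System.setOut(new PrintStream(buffer, true));   // redirect stdout to the buffer
    try {
      System.out.println("captured line");          // goes into the buffer, not the console
    } finally {
      System.out.flush();
      System.setOut(old);                           // always restore the original stream
    }
    System.out.println("captured: " + buffer.toString().trim());
  }
}

Unlike the hook above, the sketch restores the stream in a finally block; either way, the point is that the swap is strictly scoped and undone before the method returns. A post-execution hook such as PostExecOrcFileDump is enabled by listing its class name in hive.exec.post.hooks.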
Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
The class PostExecTezSummaryPrinter, method run:
@Override
public void run(HookContext hookContext) throws Exception {
  assert (hookContext.getHookType() == HookContext.HookType.POST_EXEC_HOOK);
  HiveConf conf = hookContext.getConf();
  if (!"tez".equals(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE))) {
    return;
  }
  LOG.info("Executing post execution hook to print tez summary..");
  SessionState ss = SessionState.get();
  SessionState.LogHelper console = ss.getConsole();
  QueryPlan plan = hookContext.getQueryPlan();
  if (plan == null) {
    return;
  }
  List<TezTask> rootTasks = Utilities.getTezTasks(plan.getRootTasks());
  for (TezTask tezTask : rootTasks) {
    LOG.info("Printing summary for tez task: " + tezTask.getName());
    TezCounters counters = tezTask.getTezCounters();
    if (counters != null) {
      String hiveCountersGroup = HiveConf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP);
      for (CounterGroup group : counters) {
        if (hiveCountersGroup.equals(group.getDisplayName())) {
          console.printError(tezTask.getId() + " HIVE COUNTERS:");
          for (TezCounter counter : group) {
            console.printError(" " + counter.getDisplayName() + ": " + counter.getValue());
          }
        } else if (group.getName().equals(FileSystemCounter.class.getName())) {
          console.printError(tezTask.getId() + " FILE SYSTEM COUNTERS:");
          for (TezCounter counter : group) {
            // only print HDFS counters; local file system counters are skipped
            if (counter.getName().contains("HDFS")) {
              console.printError(" " + counter.getDisplayName() + ": " + counter.getValue());
            }
          }
        } else if (group.getName().equals(LlapIOCounters.class.getName())) {
          console.printError(tezTask.getId() + " LLAP IO COUNTERS:");
          List<String> testSafeCounters = LlapIOCounters.testSafeCounterNames();
          for (TezCounter counter : group) {
            if (testSafeCounters.contains(counter.getDisplayName())) {
              console.printError(" " + counter.getDisplayName() + ": " + counter.getValue());
            }
          }
        }
      }
    }
  }
}
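The early return shows the usual way to read a HiveConf string setting: the static HiveConf.getVar with a ConfVars enum constant. A small sketch of that lookup outside of any hook, assuming hive-common and its Hadoop dependencies are on the classpath (the class name EngineCheckSketch is made up), might be:

import org.apache.hadoop.hive.conf.HiveConf;

public class EngineCheckSketch {
  public static void main(String[] args) {
    // HiveConf picks up hive-site.xml from the classpath if one is present
    HiveConf conf = new HiveConf();
    String engine = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE);
    System.out.println("hive.execution.engine = " + engine);
    if (!"tez".equals(engine)) {
      // the hook above returns at this point, since the summary only exists for Tez
      System.out.println("Not running on Tez; the summary printer would bail out here.");
    }
  }
}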
Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
The class EmbeddedLockManager, method refresh:
@Override
public void refresh() {
  HiveConf conf = ctx.getConf();
  sleepTime = conf.getTimeVar(HiveConf.ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES, TimeUnit.MILLISECONDS);
  numRetriesForLock = conf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_NUMRETRIES);
  numRetriesForUnLock = conf.getIntVar(HiveConf.ConfVars.HIVE_UNLOCK_NUMRETRIES);
}
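This refresh method illustrates the two typed accessors Hive uses for numeric settings: getTimeVar, which parses a duration-style value and converts it to the requested TimeUnit, and getIntVar for plain integers. A standalone sketch reading the same lock settings, assuming hive-common on the classpath (LockRetrySketch is an illustrative class name), could be:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hive.conf.HiveConf;

public class LockRetrySketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // a configured value such as "60s" is parsed and converted to milliseconds here
    long sleepMs = conf.getTimeVar(HiveConf.ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES, TimeUnit.MILLISECONDS);
    int lockRetries = conf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_NUMRETRIES);
    int unlockRetries = conf.getIntVar(HiveConf.ConfVars.HIVE_UNLOCK_NUMRETRIES);
    System.out.println("sleep=" + sleepMs + "ms, lock retries=" + lockRetries
        + ", unlock retries=" + unlockRetries);
  }
}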
Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
The class CuratorFrameworkSingleton, method getInstance:
public static synchronized CuratorFramework getInstance(HiveConf hiveConf) {
  if (sharedClient == null) {
    // Create a client instance
    if (hiveConf == null) {
      conf = new HiveConf();
    } else {
      conf = hiveConf;
    }
    int sessionTimeout = (int) conf.getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT, TimeUnit.MILLISECONDS);
    int baseSleepTime = (int) conf.getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME, TimeUnit.MILLISECONDS);
    int maxRetries = conf.getIntVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES);
    String quorumServers = ZooKeeperHiveHelper.getQuorumServers(conf);
    sharedClient = CuratorFrameworkFactory.builder()
        .connectString(quorumServers)
        .sessionTimeoutMs(sessionTimeout)
        .retryPolicy(new ExponentialBackoffRetry(baseSleepTime, maxRetries))
        .build();
    sharedClient.start();
  }
  return sharedClient;
}
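getInstance is a lazily initialized, synchronized singleton: the first caller's HiveConf (or a freshly constructed one) decides the ZooKeeper settings, and every later caller receives the same started client no matter what it passes in. A plain-Java sketch of that pattern, with a hypothetical ExpensiveClient standing in for the CuratorFramework instance, could look like this:

public class LazySingletonSketch {
  private static ExpensiveClient shared;

  public static synchronized ExpensiveClient getInstance(String connectString) {
    if (shared == null) {
      // built exactly once; later callers' arguments are ignored,
      // just like the HiveConf parameter in the method above
      shared = new ExpensiveClient(connectString == null ? "localhost:2181" : connectString);
    }
    return shared;
  }

  static final class ExpensiveClient {
    private final String connectString;
    ExpensiveClient(String connectString) { this.connectString = connectString; }
    @Override public String toString() { return "client(" + connectString + ")"; }
  }
}

The consequence is worth noting: a second call with a different HiveConf does not reconfigure the shared client, so callers cannot rely on their own configuration once the singleton exists.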
Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
The class PartialScanTask, method main:
public static void main(String[] args) {
  String inputPathStr = null;
  String outputDir = null;
  String jobConfFileName = null;
  try {
    for (int i = 0; i < args.length; i++) {
      if (args[i].equals("-input")) {
        inputPathStr = args[++i];
      } else if (args[i].equals("-jobconffile")) {
        jobConfFileName = args[++i];
      } else if (args[i].equals("-outputDir")) {
        outputDir = args[++i];
      }
    }
  } catch (IndexOutOfBoundsException e) {
    System.err.println("Missing argument to option");
    printUsage();
  }
  if (inputPathStr == null || outputDir == null || outputDir.trim().equals("")) {
    printUsage();
  }
  List<Path> inputPaths = new ArrayList<Path>();
  String[] paths = inputPathStr.split(INPUT_SEPERATOR);
  if (paths == null || paths.length == 0) {
    printUsage();
  }
  FileSystem fs = null;
  JobConf conf = new JobConf(PartialScanTask.class);
  for (String path : paths) {
    try {
      Path pathObj = new Path(path);
      if (fs == null) {
        fs = FileSystem.get(pathObj.toUri(), conf);
      }
      FileStatus fstatus = fs.getFileStatus(pathObj);
      if (fstatus.isDir()) {
        FileStatus[] fileStatus = fs.listStatus(pathObj);
        for (FileStatus st : fileStatus) {
          inputPaths.add(st.getPath());
        }
      } else {
        inputPaths.add(fstatus.getPath());
      }
    } catch (IOException e) {
      e.printStackTrace(System.err);
    }
  }
  if (jobConfFileName != null) {
    conf.addResource(new Path(jobConfFileName));
  }
  org.slf4j.Logger LOG = LoggerFactory.getLogger(PartialScanTask.class.getName());
  boolean isSilent = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESESSIONSILENT);
  LogHelper console = new LogHelper(LOG, isSilent);
  // print the execution log location so that it's easy to find the reason for local mode execution failures
  for (Appender appender : ((Logger) LogManager.getRootLogger()).getAppenders().values()) {
    if (appender instanceof FileAppender) {
      console.printInfo("Execution log at: " + ((FileAppender) appender).getFileName());
    } else if (appender instanceof RollingFileAppender) {
      console.printInfo("Execution log at: " + ((RollingFileAppender) appender).getFileName());
    }
  }
  QueryState queryState = new QueryState(new HiveConf(conf, PartialScanTask.class));
  PartialScanWork mergeWork = new PartialScanWork(inputPaths);
  DriverContext driverCxt = new DriverContext();
  PartialScanTask taskExec = new PartialScanTask();
  taskExec.initialize(queryState, null, driverCxt, new CompilationOpContext());
  taskExec.setWork(mergeWork);
  int ret = taskExec.execute(driverCxt);
  if (ret != 0) {
    System.exit(2);
  }
}
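Before any HiveConf is consulted, main walks the argument array with the classic "-flag value" pattern, consuming each option's value with args[++i] and treating a trailing flag with no value as a usage error. A self-contained sketch of just that parsing loop, using hypothetical option names and a made-up class name, could be:

public class ArgParseSketch {
  public static void main(String[] args) {
    String input = null;
    String output = null;
    try {
      for (int i = 0; i < args.length; i++) {
        if (args[i].equals("-input")) {
          input = args[++i];            // ++i consumes the option's value
        } else if (args[i].equals("-outputDir")) {
          output = args[++i];
        }
      }
    } catch (IndexOutOfBoundsException e) {
      // a flag was the last token, so its value is missing
      System.err.println("Missing argument to option");
      System.exit(1);
    }
    if (input == null || output == null || output.trim().isEmpty()) {
      System.err.println("Usage: ArgParseSketch -input <paths> -outputDir <dir>");
      System.exit(1);
    }
    System.out.println("input=" + input + ", outputDir=" + output);
  }
}

Unlike PartialScanTask, the sketch exits immediately on bad input; in the original, printUsage() is expected to terminate the process, which is why execution appears to fall through after it is called.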