Use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.
The class HCatCli, method processCmd.
private static int processCmd(String cmd) {
  SessionState ss = SessionState.get();
  long start = System.currentTimeMillis();
  cmd = cmd.trim();
  String firstToken = cmd.split("\\s+")[0].trim();
  if (firstToken.equalsIgnoreCase("set")) {
    return new SetProcessor().run(cmd.substring(firstToken.length()).trim()).getResponseCode();
  } else if (firstToken.equalsIgnoreCase("dfs")) {
    return new DfsProcessor(ss.getConf()).run(cmd.substring(firstToken.length()).trim()).getResponseCode();
  }
  HCatDriver driver = new HCatDriver(ss.getConf());
  int ret = driver.run(cmd).getResponseCode();
  if (ret != 0) {
    driver.close();
    sysExit(ss, ret);
  }
  ArrayList<String> res = new ArrayList<String>();
  try {
    while (driver.getResults(res)) {
      for (String r : res) {
        ss.out.println(r);
      }
      res.clear();
    }
  } catch (IOException e) {
    ss.err.println("Failed with exception " + e.getClass().getName() + ":" + e.getMessage() + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
    ret = 1;
  }
  int cret = driver.close();
  if (ret == 0) {
    ret = cret;
  }
  long end = System.currentTimeMillis();
  if (end > start) {
    double timeTaken = (end - start) / 1000.0;
    ss.err.println("Time taken: " + timeTaken + " seconds");
  }
  return ret;
}
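The method above routes each command by its first whitespace-delimited token: set and dfs go to lightweight processors, everything else is compiled and run by HCatDriver, with SessionState supplying the shared configuration and output streams. Below is a minimal, JDK-only sketch of that routing; the printed "processor" behavior and the class name are illustrative stand-ins, not part of HCatalog.

import java.util.Locale;

public class CommandDispatchSketch {

  static int dispatch(String cmd) {
    cmd = cmd.trim();
    String firstToken = cmd.split("\\s+")[0];
    String rest = cmd.substring(firstToken.length()).trim();
    switch (firstToken.toLowerCase(Locale.ROOT)) {
      case "set":
        System.out.println("a SetProcessor-style handler would take: " + rest);
        return 0;
      case "dfs":
        System.out.println("a DfsProcessor-style handler would take: " + rest);
        return 0;
      default:
        System.out.println("the driver would compile and run: " + cmd);
        return 0;
    }
  }

  public static void main(String[] args) {
    dispatch("set hive.execution.engine=tez");
    dispatch("dfs -ls /tmp");
    dispatch("show tables");
  }
}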
Use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.
The class HCatDriver, method run.
public CommandProcessorResponse run(String command) {
  CommandProcessorResponse cpr = null;
  cpr = driver.run(command);
  SessionState ss = SessionState.get();
  if (cpr.getResponseCode() == 0) {
    // Only attempt to do this, if cmd was successful.
    // FIXME: it would be probably better to move this to an after-execution
    int rc = setFSPermsNGrp(ss, driver.getConf());
    cpr = new CommandProcessorResponse(rc);
  }
  // reset conf vars
  ss.getConf().set(HCatConstants.HCAT_CREATE_DB_NAME, "");
  ss.getConf().set(HCatConstants.HCAT_CREATE_TBL_NAME, "");
  return cpr;
}
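Note that the two HCatConstants keys are cleared on every run, successful or not, so one command's CREATE DATABASE/TABLE state cannot leak into the next. A small self-contained sketch of that run-then-reset pattern follows, with a plain Map standing in for HiveConf and hypothetical key names; unlike the original, it uses try/finally, which makes the same guarantee explicit even if execution throws.

import java.util.HashMap;
import java.util.Map;

public class RunThenResetSketch {
  // Hypothetical stand-ins for HCatConstants.HCAT_CREATE_DB_NAME / HCAT_CREATE_TBL_NAME.
  static final String CREATE_DB_KEY = "hcat.create.db.name";
  static final String CREATE_TBL_KEY = "hcat.create.table.name";

  static int run(Map<String, String> conf, String command) {
    try {
      // ... execute the command; post-processing would run only on success ...
      return 0;
    } finally {
      // Reset per-command state even if execution threw.
      conf.put(CREATE_DB_KEY, "");
      conf.put(CREATE_TBL_KEY, "");
    }
  }

  public static void main(String[] args) {
    Map<String, String> conf = new HashMap<>();
    conf.put(CREATE_DB_KEY, "mydb");
    System.out.println("rc=" + run(conf, "create database mydb"));
    System.out.println("db key after run: '" + conf.get(CREATE_DB_KEY) + "'");
  }
}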
Use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.
The class PostExecTezSummaryPrinter, method run.
@Override
public void run(HookContext hookContext) throws Exception {
  assert (hookContext.getHookType() == HookContext.HookType.POST_EXEC_HOOK);
  HiveConf conf = hookContext.getConf();
  if (!"tez".equals(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE))) {
    return;
  }
  LOG.info("Executing post execution hook to print tez summary..");
  SessionState ss = SessionState.get();
  SessionState.LogHelper console = ss.getConsole();
  QueryPlan plan = hookContext.getQueryPlan();
  if (plan == null) {
    return;
  }
  List<TezTask> rootTasks = Utilities.getTezTasks(plan.getRootTasks());
  for (TezTask tezTask : rootTasks) {
    LOG.info("Printing summary for tez task: " + tezTask.getName());
    TezCounters counters = tezTask.getTezCounters();
    if (counters != null) {
      String hiveCountersGroup = HiveConf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP);
      for (CounterGroup group : counters) {
        if (hiveCountersGroup.equals(group.getDisplayName())) {
          console.printInfo(tezTask.getId() + " HIVE COUNTERS:", false);
          for (TezCounter counter : group) {
            console.printInfo(" " + counter.getDisplayName() + ": " + counter.getValue(), false);
          }
        } else if (group.getName().equals(HiveInputCounters.class.getName())) {
          console.printInfo(tezTask.getId() + " INPUT COUNTERS:", false);
          for (TezCounter counter : group) {
            console.printInfo(" " + counter.getDisplayName() + ": " + counter.getValue(), false);
          }
        } else if (group.getName().equals(FileSystemCounter.class.getName())) {
          console.printInfo(tezTask.getId() + " FILE SYSTEM COUNTERS:", false);
          for (TezCounter counter : group) {
            // print only the HDFS counters; local file system counters are skipped
            if (counter.getName().contains("HDFS")) {
              console.printInfo(" " + counter.getDisplayName() + ": " + counter.getValue(), false);
            }
          }
        } else if (group.getName().equals(LlapIOCounters.class.getName())) {
          console.printInfo(tezTask.getId() + " LLAP IO COUNTERS:", false);
          List<String> testSafeCounters = LlapIOCounters.testSafeCounterNames();
          for (TezCounter counter : group) {
            if (testSafeCounters.contains(counter.getDisplayName())) {
              console.printInfo(" " + counter.getDisplayName() + ": " + counter.getValue(), false);
            }
          }
        }
      }
    }
  }
}
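Hooks like this one are wired in through the hive.exec.post.hooks configuration property, a comma-separated list of class names that Hive instantiates after each query. A minimal sketch of a post-execution hook with the same early engine check is shown below, assuming the standard ExecuteWithHookContext interface; the class name and the printed message are illustrative.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;

public class MinimalPostExecHook implements ExecuteWithHookContext {

  @Override
  public void run(HookContext hookContext) throws Exception {
    HiveConf conf = hookContext.getConf();
    // Bail out unless the query actually ran on tez, mirroring the check above.
    if (!"tez".equals(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE))) {
      return;
    }
    // The plan can be null for some commands, so guard as the original does.
    if (hookContext.getQueryPlan() != null) {
      System.out.println("tez query finished: " + hookContext.getQueryPlan().getQueryId());
    }
  }
}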
Use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.
The class Hive, method copyFiles.
private static void copyFiles(final HiveConf conf, final FileSystem destFs, FileStatus[] srcs,
    final FileSystem srcFs, final Path destf, final boolean isSrcLocal, boolean isOverwrite,
    final List<Path> newFiles, boolean acidRename) throws HiveException {
  final HdfsUtils.HadoopFileStatus fullDestStatus;
  try {
    fullDestStatus = new HdfsUtils.HadoopFileStatus(conf, destFs, destf);
  } catch (IOException e1) {
    throw new HiveException(e1);
  }
  if (!fullDestStatus.getFileStatus().isDirectory()) {
    throw new HiveException(destf + " is not a directory.");
  }
  final List<Future<ObjectPair<Path, Path>>> futures = new LinkedList<>();
  final ExecutorService pool = conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25) > 0
      ? Executors.newFixedThreadPool(conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25),
          new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Move-Thread-%d").build())
      : null;
  // For ACID non-bucketed case, the filenames have to be in the format consistent with INSERT/UPDATE/DELETE Ops,
  // i.e, like 000000_0, 000001_0_copy_1, 000002_0.gz etc.
  // The extension is only maintained for files which are compressed.
  int taskId = 0;
  // Sort the files
  Arrays.sort(srcs);
  for (FileStatus src : srcs) {
    FileStatus[] files;
    if (src.isDirectory()) {
      try {
        files = srcFs.listStatus(src.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER);
      } catch (IOException e) {
        // the pool is null when the configured thread count is zero, so guard the shutdown
        if (null != pool) {
          pool.shutdownNow();
        }
        throw new HiveException(e);
      }
    } else {
      files = new FileStatus[] { src };
    }
    final SessionState parentSession = SessionState.get();
    // Sort the files
    Arrays.sort(files);
    for (final FileStatus srcFile : files) {
      final Path srcP = srcFile.getPath();
      final boolean needToCopy = needToCopy(srcP, destf, srcFs, destFs);
      final boolean isRenameAllowed = !needToCopy && !isSrcLocal;
      final String msg = "Unable to move source " + srcP + " to destination " + destf;
      // copy from source to destination, we will inherit the destination's parent group ownership.
      if (null == pool) {
        try {
          Path destPath = mvFile(conf, srcFs, srcP, destFs, destf, isSrcLocal, isOverwrite, isRenameAllowed, acidRename ? taskId++ : -1);
          if (null != newFiles) {
            newFiles.add(destPath);
          }
        } catch (Exception e) {
          throw getHiveException(e, msg, "Failed to move: {}");
        }
      } else {
        // future only takes final or seemingly final values. Make a final copy of taskId
        final int finalTaskId = acidRename ? taskId++ : -1;
        futures.add(pool.submit(new Callable<ObjectPair<Path, Path>>() {
          @Override
          public ObjectPair<Path, Path> call() throws HiveException {
            SessionState.setCurrentSessionState(parentSession);
            try {
              Path destPath = mvFile(conf, srcFs, srcP, destFs, destf, isSrcLocal, isOverwrite, isRenameAllowed, finalTaskId);
              if (null != newFiles) {
                newFiles.add(destPath);
              }
              return ObjectPair.create(srcP, destPath);
            } catch (Exception e) {
              throw getHiveException(e, msg);
            }
          }
        }));
      }
    }
  }
  if (null != pool) {
    pool.shutdown();
    for (Future<ObjectPair<Path, Path>> future : futures) {
      try {
        ObjectPair<Path, Path> pair = future.get();
        LOG.debug("Moved src: {}, to dest: {}", pair.getFirst().toString(), pair.getSecond().toString());
      } catch (Exception e) {
        throw handlePoolException(pool, e);
      }
    }
  }
}
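The part of this method that concerns SessionState is small but easy to miss: SessionState is tracked per thread, so the submitting thread captures SessionState.get() once and every pooled Callable re-attaches it with SessionState.setCurrentSessionState before doing any Hive work. A sketch isolating just that hand-off; the class name and the Boolean task body are placeholders.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

import org.apache.hadoop.hive.ql.session.SessionState;

public class SessionPropagationSketch {

  static Future<Boolean> submitWithSession(ExecutorService pool) {
    // Capture on the submitting thread, where SessionState.get() is attached.
    final SessionState parentSession = SessionState.get();
    return pool.submit(new Callable<Boolean>() {
      @Override
      public Boolean call() {
        // Re-attach before any Hive call; without this, SessionState.get()
        // in the pool thread would not see the parent's session.
        SessionState.setCurrentSessionState(parentSession);
        return SessionState.get() == parentSession;
      }
    });
  }
}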
Use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.
The class Hive, method trashFiles.
/**
 * Trashes or deletes all files under a directory. Leaves the directory as is.
 * @param fs FileSystem to use
 * @param statuses fileStatuses of files to be deleted
 * @param conf hive configuration
 * @param purge delete the files permanently, skipping the trash, when true
 * @return true if deletion successful
 * @throws IOException
 */
public static boolean trashFiles(final FileSystem fs, final FileStatus[] statuses,
    final Configuration conf, final boolean purge) throws IOException {
  boolean result = true;
  if (statuses == null || statuses.length == 0) {
    return false;
  }
  final List<Future<Boolean>> futures = new LinkedList<>();
  final ExecutorService pool = conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25) > 0
      ? Executors.newFixedThreadPool(conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25),
          new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Delete-Thread-%d").build())
      : null;
  final SessionState parentSession = SessionState.get();
  for (final FileStatus status : statuses) {
    if (null == pool) {
      result &= FileUtils.moveToTrash(fs, status.getPath(), conf, purge);
    } else {
      futures.add(pool.submit(new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
          // propagate the caller's SessionState into the pool thread
          SessionState.setCurrentSessionState(parentSession);
          return FileUtils.moveToTrash(fs, status.getPath(), conf, purge);
        }
      }));
    }
  }
  if (null != pool) {
    pool.shutdown();
    for (Future<Boolean> future : futures) {
      try {
        result &= future.get();
      } catch (InterruptedException | ExecutionException e) {
        LOG.error("Failed to delete: ", e);
        pool.shutdownNow();
        throw new IOException(e);
      }
    }
  }
  return result;
}
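Both copyFiles and trashFiles share the same structural switch: when ConfVars.HIVE_MOVE_FILES_THREAD_COUNT resolves to zero the pool is null and the work runs inline; otherwise it fans out to a daemon pool and the futures are drained so worker failures surface in the caller. A self-contained sketch of that switch, with placeholder per-item work and an illustrative class name:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class SerialOrPooledSketch {

  static boolean processAll(List<String> items, int threadCount) throws Exception {
    final ExecutorService pool = threadCount > 0
        ? Executors.newFixedThreadPool(threadCount,
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Delete-Thread-%d").build())
        : null;
    boolean result = true;
    List<Future<Boolean>> futures = new ArrayList<>();
    for (final String item : items) {
      if (pool == null) {
        result &= doWork(item); // run inline when the pool is disabled
      } else {
        futures.add(pool.submit(() -> doWork(item)));
      }
    }
    if (pool != null) {
      pool.shutdown();
      for (Future<Boolean> f : futures) {
        result &= f.get(); // surfaces any worker exception to the caller
      }
    }
    return result;
  }

  static boolean doWork(String item) {
    return true; // placeholder for moveToTrash-style per-item work
  }

  public static void main(String[] args) throws Exception {
    System.out.println(processAll(Arrays.asList("a", "b"), 0)); // serial
    System.out.println(processAll(Arrays.asList("a", "b"), 4)); // pooled
  }
}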