Use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.
From the class Hive, method loadDynamicPartitions:
/**
 * Given a source directory name of the load path, load all dynamically generated partitions
 * into the specified table and return a map from each full partition specification to the
 * corresponding Partition object.
 * @param loadPath source directory containing the dynamically generated partition directories
 * @param tableName name of the table to load into
 * @param partSpec partition specification supplied with the load; dynamic partition columns are
 *          filled in from each directory name
 * @param replace whether existing partition contents should be replaced
 * @param numDP number of dynamic partitions
 * @param listBucketingEnabled whether list bucketing is enabled for the table
 * @param isAcid true if this is an ACID operation
 * @param txnId txnId, can be 0 unless isAcid == true
 * @param hasFollowingStatsTask true if a stats-gathering task runs after this load
 * @param operation the ACID data operation type being performed
 * @return partition map details (PartitionSpec and Partition)
 * @throws HiveException
 */
public Map<Map<String, String>, Partition> loadDynamicPartitions(final Path loadPath, final String tableName,
    final Map<String, String> partSpec, final boolean replace, final int numDP, final boolean listBucketingEnabled,
    final boolean isAcid, final long txnId, final boolean hasFollowingStatsTask,
    final AcidUtils.Operation operation) throws HiveException {
  final Map<Map<String, String>, Partition> partitionsMap =
      Collections.synchronizedMap(new LinkedHashMap<Map<String, String>, Partition>());
  int poolSize = conf.getInt(ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT.varname, 1);
  final ExecutorService pool = Executors.newFixedThreadPool(poolSize,
      new ThreadFactoryBuilder().setDaemon(true).setNameFormat("load-dynamic-partitions-%d").build());
  // Get all valid partition paths and existing partitions for them (if any)
  final Table tbl = getTable(tableName);
  final Set<Path> validPartitions = getValidPartitionsInPath(numDP, loadPath);
  final int partsToLoad = validPartitions.size();
  final AtomicInteger partitionsLoaded = new AtomicInteger(0);
  final boolean inPlaceEligible = conf.getLong("fs.trash.interval", 0) <= 0
      && InPlaceUpdate.canRenderInPlace(conf) && !SessionState.getConsole().getIsSilent();
  final PrintStream ps = (inPlaceEligible) ? SessionState.getConsole().getInfoStream() : null;
  final SessionState parentSession = SessionState.get();
  final List<Future<Void>> futures = Lists.newLinkedList();
  try {
    // for each dynamically created DP directory, construct a full partition spec
    // and load the partition based on that
    for (final Path partPath : validPartitions) {
      // generate a full partition specification
      final LinkedHashMap<String, String> fullPartSpec = Maps.newLinkedHashMap(partSpec);
      Warehouse.makeSpecFromName(fullPartSpec, partPath);
      futures.add(pool.submit(new Callable<Void>() {

        @Override
        public Void call() throws Exception {
          try {
            // move file would require session details (needCopy() invokes SessionState.get)
            SessionState.setCurrentSessionState(parentSession);
            LOG.info("New loading path = " + partPath + " with partSpec " + fullPartSpec);
            // load the partition
            Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, replace, true,
                listBucketingEnabled, false, isAcid, hasFollowingStatsTask);
            partitionsMap.put(fullPartSpec, newPartition);
            if (inPlaceEligible) {
              synchronized (ps) {
                InPlaceUpdate.rePositionCursor(ps);
                partitionsLoaded.incrementAndGet();
                InPlaceUpdate.reprintLine(ps, "Loaded : " + partitionsLoaded.get() + "/" + partsToLoad
                    + " partitions.");
              }
            }
            return null;
          } catch (Exception t) {
            LOG.error("Exception when loading partition with parameters " + " partPath=" + partPath + ", "
                + " table=" + tbl.getTableName() + ", " + " partSpec=" + fullPartSpec + ", "
                + " replace=" + replace + ", " + " listBucketingEnabled=" + listBucketingEnabled + ", "
                + " isAcid=" + isAcid + ", " + " hasFollowingStatsTask=" + hasFollowingStatsTask, t);
            throw t;
          }
        }
      }));
    }
    pool.shutdown();
    LOG.debug("Number of partitions to be added is " + futures.size());
    for (Future future : futures) {
      future.get();
    }
  } catch (InterruptedException | ExecutionException e) {
    LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks");
    // cancel other futures
    for (Future future : futures) {
      future.cancel(true);
    }
    throw new HiveException("Exception when loading " + partsToLoad + " in table " + tbl.getTableName()
        + " with loadPath=" + loadPath, e);
  }
  try {
    if (isAcid) {
      List<String> partNames = new ArrayList<>(partitionsMap.size());
      for (Partition p : partitionsMap.values()) {
        partNames.add(p.getName());
      }
      getMSC().addDynamicPartitions(txnId, tbl.getDbName(), tbl.getTableName(), partNames,
          AcidUtils.toDataOperationType(operation));
    }
    LOG.info("Loaded " + partitionsMap.size() + " partitions");
    return partitionsMap;
  } catch (TException te) {
    throw new HiveException("Exception updating metastore for acid table " + tableName + " with partitions "
        + partitionsMap.values(), te);
  }
}
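The SessionState detail worth noting above is the hand-off at the top of call(): the session lives in a thread-local, so the pool threads created for dynamic partition loading would otherwise see no session at all, and the code inside loadPartition that calls SessionState.get() would come up empty. The following is a minimal, self-contained sketch of that propagation pattern, not taken from Hive itself; the HiveConf setup and the doSessionScopedWork() helper are illustrative assumptions, and SessionState.start() is assumed to succeed against a usable local scratch directory.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.session.SessionState;

public class SessionPropagationSketch {

  public static void main(String[] args) throws Exception {
    // Start a session on the calling thread; SessionState.start installs it
    // in the thread-local that SessionState.get() reads.
    HiveConf conf = new HiveConf();
    SessionState parent = SessionState.start(new SessionState(conf));

    ExecutorService pool = Executors.newFixedThreadPool(2);
    try {
      pool.submit(new Callable<Void>() {
        @Override
        public Void call() {
          // Without this line, SessionState.get() would return null on the pool thread.
          SessionState.setCurrentSessionState(parent);
          doSessionScopedWork();
          return null;
        }
      }).get();
    } finally {
      pool.shutdown();
    }
  }

  // Hypothetical helper standing in for code such as Hive.loadPartition()
  // that internally relies on SessionState.get().
  private static void doSessionScopedWork() {
    SessionState ss = SessionState.get();
    System.out.println("Session visible on worker thread: " + (ss != null));
  }
}

Capturing the parent session before submitting work, rather than calling SessionState.get() inside the worker, is what keeps the issuing session's configuration visible to every loader thread.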
Use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.
From the class DriverTestHook, method preDriverRun:
@Override
public void preDriverRun(HiveDriverRunHookContext hookContext) throws Exception {
  SessionState sess = new SessionState((HiveConf) hookContext.getConf());
  PrintStream stream = sess.getConsole().getOutStream();
  stream.println(hookContext.getCommand());
}
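One point worth noting here: new SessionState(conf) only constructs the object, it does not install it as the current session for the thread; that is done by SessionState.start(). The hook above therefore writes through a console backed by a session the driver thread never attaches. A small hedged sketch of that distinction follows; the printed booleans are illustrative and assume no other session has been started on the calling thread, and start() is assumed to succeed against a usable scratch directory.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.session.SessionState;

public class SessionStateAttachSketch {

  public static void main(String[] args) {
    HiveConf conf = new HiveConf(SessionState.class);

    // Constructing a SessionState does not make it the current session.
    SessionState detached = new SessionState(conf);
    System.out.println(SessionState.get() == detached);   // false: thread-local is still empty

    // SessionState.start(...) installs the session in the thread-local.
    SessionState attached = SessionState.start(new SessionState(conf));
    System.out.println(SessionState.get() == attached);   // true on this thread
  }
}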
Use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.
From the class MetadataOperation, method authorizeMetaGets:
protected void authorizeMetaGets(HiveOperationType opType, List<HivePrivilegeObject> inpObjs, String cmdString)
    throws HiveSQLException {
  SessionState ss = SessionState.get();
  HiveAuthzContext.Builder ctxBuilder = new HiveAuthzContext.Builder();
  ctxBuilder.setUserIpAddress(ss.getUserIpAddress());
  ctxBuilder.setForwardedAddresses(ss.getForwardedAddresses());
  ctxBuilder.setCommandString(cmdString);
  try {
    ss.getAuthorizerV2().checkPrivileges(opType, inpObjs, null, ctxBuilder.build());
  } catch (HiveAuthzPluginException | HiveAccessControlException e) {
    throw new HiveSQLException(e.getMessage(), e);
  }
}
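Callers of this helper are MetadataOperation subclasses that describe what they are about to read. A hedged fragment of such a call site is sketched below; it assumes it sits inside one of those subclasses, and the database name, the command string, and the choice of HiveOperationType.GET_TABLES are placeholders for whatever the concrete operation actually uses.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType;

// Illustrative call site, as it might appear in a metadata operation that
// lists the tables of a single database; "default" is a placeholder.
List<HivePrivilegeObject> privObjs = Arrays.asList(
    new HivePrivilegeObject(HivePrivilegeObjectType.DATABASE, "default", null));
authorizeMetaGets(HiveOperationType.GET_TABLES, privObjs, "get tables in default");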
Use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.
From the class CLIService, method progressUpdateLog:
private JobProgressUpdate progressUpdateLog(boolean isProgressLogRequested, Operation operation, HiveConf conf) {
  if (!isProgressLogRequested || !ServiceUtils.canProvideProgressLog(conf)
      || !OperationType.EXECUTE_STATEMENT.equals(operation.getType())) {
    return new JobProgressUpdate(ProgressMonitor.NULL);
  }
  SessionState sessionState = operation.getParentSession().getSessionState();
  long startTime = System.nanoTime();
  int timeOutMs = 8;
  try {
    while (sessionState.getProgressMonitor() == null && !operation.isDone()) {
      long remainingMs = (PROGRESS_MAX_WAIT_NS - (System.nanoTime() - startTime)) / 1000000L;
      if (remainingMs <= 0) {
        LOG.debug("timed out and hence returning progress log as NULL");
        return new JobProgressUpdate(ProgressMonitor.NULL);
      }
      Thread.sleep(Math.min(remainingMs, timeOutMs));
      timeOutMs <<= 1;
    }
  } catch (InterruptedException e) {
    LOG.warn("Error while getting progress update", e);
  }
  ProgressMonitor pm = sessionState.getProgressMonitor();
  return new JobProgressUpdate(pm != null ? pm : ProgressMonitor.NULL);
}
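The loop above is a capped exponential backoff: it starts with an 8 ms sleep, doubles it each round, and never sleeps past the remaining PROGRESS_MAX_WAIT_NS budget before giving up on the session's progress monitor. The same timing logic can be written generically; the sketch below is not Hive code, and the 30-second budget and the Supplier-based probe are illustrative assumptions.

import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public final class BackoffPoll {

  // Illustrative overall budget, playing the role of PROGRESS_MAX_WAIT_NS above.
  private static final long MAX_WAIT_NS = TimeUnit.SECONDS.toNanos(30);

  /** Polls {@code probe} until it returns non-null or the deadline expires; null on timeout. */
  public static <T> T poll(Supplier<T> probe) throws InterruptedException {
    long startTime = System.nanoTime();
    long sleepMs = 8;                       // initial interval, doubled each round
    T value;
    while ((value = probe.get()) == null) {
      long remainingMs = TimeUnit.NANOSECONDS.toMillis(MAX_WAIT_NS - (System.nanoTime() - startTime));
      if (remainingMs <= 0) {
        return null;                        // timed out; the caller falls back to a NULL monitor
      }
      Thread.sleep(Math.min(remainingMs, sleepMs));
      sleepMs <<= 1;
    }
    return value;
  }
}

CLIService additionally stops waiting as soon as the operation is done; a probe can emulate that early exit by returning a non-null sentinel once it observes completion.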
Use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.
From the class TestHiveHistory, method testQueryloglocParentDirNotExist:
public void testQueryloglocParentDirNotExist() throws Exception {
  String parentTmpDir = tmpdir + "/HIVE2654";
  Path parentDirPath = new Path(parentTmpDir);
  try {
    fs.delete(parentDirPath, true);
  } catch (Exception e) {
    // ignore: the parent directory may not exist yet
  }
  try {
    String actualDir = parentTmpDir + "/test";
    HiveConf conf = new HiveConf(SessionState.class);
    conf.set(HiveConf.ConfVars.HIVEHISTORYFILELOC.toString(), actualDir);
    SessionState ss = new CliSessionState(conf);
    HiveHistory hiveHistory = new HiveHistoryImpl(ss);
    Path actualPath = new Path(actualDir);
    if (!fs.exists(actualPath)) {
      fail("Query log location path does not exist: " + actualPath.toString());
    }
  } finally {
    try {
      fs.delete(parentDirPath, true);
    } catch (Exception e) {
      // ignore cleanup failures
    }
  }
}
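A natural follow-up assertion is that the session's history file itself lands under the configured location. The sketch below is an illustrative extension rather than existing Hive test code: it assumes the same tmpdir field and JUnit asserts as the test class above, and it relies on getHistFileName(), which HiveHistory exposes for the file backing the current session's history.

public void testHistoryFileUnderConfiguredLocation() throws Exception {
  String actualDir = tmpdir + "/HIVE2654/test";
  HiveConf conf = new HiveConf(SessionState.class);
  conf.set(HiveConf.ConfVars.HIVEHISTORYFILELOC.toString(), actualDir);
  SessionState ss = new CliSessionState(conf);
  HiveHistory hiveHistory = new HiveHistoryImpl(ss);
  // HiveHistoryImpl opens its log file under the location configured via
  // HIVEHISTORYFILELOC, so the reported file name should start with it.
  String histFile = hiveHistory.getHistFileName();
  assertTrue("History file " + histFile + " should live under " + actualDir, histFile.startsWith(actualDir));
}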