Use of org.apache.hadoop.hive.ql.exec.Utilities.PartitionDetails in project hive by apache.
The example is taken from the class Hive, method loadDynamicPartitions.
/**
* Given a source directory name of the load path, load all dynamically generated partitions
* into the specified table and return a list of strings that represent the dynamic partition
* paths.
* @param tbd table descriptor
* @param numLB number of list bucketing levels (0 when list bucketing is not used)
* @param isAcid true if this is an ACID operation
* @param writeId writeId, can be 0 unless isAcid == true
* @param stmtId statementId
* @param resetStatistics if true, reset the partition statistics; otherwise leave them unchanged
* @param operation ACID operation type
* @param partitionDetailsMap map from each dynamic partition's load path to its PartitionDetails (full partition spec, existing partition, new files, etc.)
* @return map from each loaded partition's spec (partition column name to value) to the corresponding Partition
* @throws HiveException
*/
public Map<Map<String, String>, Partition> loadDynamicPartitions(final LoadTableDesc tbd, final int numLB,
    final boolean isAcid, final long writeId, final int stmtId, final boolean resetStatistics,
    final AcidUtils.Operation operation, Map<Path, PartitionDetails> partitionDetailsMap)
    throws HiveException {
  PerfLogger perfLogger = SessionState.getPerfLogger();
  perfLogger.perfLogBegin("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS);

  final Path loadPath = tbd.getSourcePath();
  final Table tbl = getTable(tbd.getTable().getTableName());
  final Map<String, String> partSpec = tbd.getPartitionSpec();
  final AtomicInteger partitionsLoaded = new AtomicInteger(0);
  final boolean inPlaceEligible = conf.getLong("fs.trash.interval", 0) <= 0
      && InPlaceUpdate.canRenderInPlace(conf) && !SessionState.getConsole().getIsSilent();
  final PrintStream ps = (inPlaceEligible) ? SessionState.getConsole().getInfoStream() : null;

  final SessionState parentSession = SessionState.get();
  List<Callable<Partition>> tasks = Lists.newLinkedList();

  boolean fetchPartitionInfo = true;
  final boolean scanPartitionsByName =
      HiveConf.getBoolVar(conf, HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS);
  // for every dynamic partition
  if (scanPartitionsByName && !tbd.isDirectInsert() && !AcidUtils.isTransactionalTable(tbl)) {
    // Fetch only relevant partitions from HMS for checking old partitions
    List<String> partitionNames = new LinkedList<>();
    for (PartitionDetails details : partitionDetailsMap.values()) {
      if (details.fullSpec != null && !details.fullSpec.isEmpty()) {
        partitionNames.add(Warehouse.makeDynamicPartNameNoTrailingSeperator(details.fullSpec));
      }
    }
    List<Partition> partitions = Hive.get().getPartitionsByNames(tbl, partitionNames);
    for (Partition partition : partitions) {
      LOG.debug("HMS partition spec: {}", partition.getSpec());
      partitionDetailsMap.entrySet().parallelStream()
          .filter(entry -> entry.getValue().fullSpec.equals(partition.getSpec()))
          .findAny().ifPresent(entry -> {
            entry.getValue().partition = partition;
            entry.getValue().hasOldPartition = true;
          });
    }
    // no need to fetch partition again in tasks since we have already fetched partitions
    // info in getPartitionsByNames()
    fetchPartitionInfo = false;
  }
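  // For transactional tables, resolve the table snapshot (write id) once and reuse it for every partition.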
  boolean isTxnTable = AcidUtils.isTransactionalTable(tbl);
  AcidUtils.TableSnapshot tableSnapshot = isTxnTable ? getTableSnapshot(tbl, writeId) : null;
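  // Build one Callable per dynamic partition; the tasks run in parallel on the executor created below.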
  for (Entry<Path, PartitionDetails> entry : partitionDetailsMap.entrySet()) {
    boolean getPartitionFromHms = fetchPartitionInfo;
    tasks.add(() -> {
      PartitionDetails partitionDetails = entry.getValue();
      Map<String, String> fullPartSpec = partitionDetails.fullSpec;
      try {
        SessionState.setCurrentSessionState(parentSession);
        if (getPartitionFromHms) {
          // didn't fetch partition info from HMS. Getting from HMS now.
          Partition existing = getPartition(tbl, fullPartSpec, false);
          if (existing != null) {
            partitionDetails.partition = existing;
            partitionDetails.hasOldPartition = true;
          }
        }
        LOG.info("New loading path = " + entry.getKey() + " withPartSpec " + fullPartSpec);

        Partition oldPartition = partitionDetails.partition;
        List<FileStatus> newFiles = null;
        if (partitionDetails.newFiles != null) {
          // If we already know the files from the direct insert manifest, use them
          newFiles = partitionDetails.newFiles;
        } else if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()
            && oldPartition == null) {
          // Otherwise only collect them, if we are going to fire write notifications
          newFiles = Collections.synchronizedList(new ArrayList<>());
        }
        // load the partition
        Partition partition = loadPartitionInternal(entry.getKey(), tbl, fullPartSpec, oldPartition,
            tbd.getLoadFileType(), true, false, numLB > 0, false, isAcid, resetStatistics, writeId,
            stmtId, tbd.isInsertOverwrite(), isTxnTable, newFiles, tbd.isDirectInsert());
        // metastore
        if (tableSnapshot != null) {
          partition.getTPartition().setWriteId(tableSnapshot.getWriteId());
        }
        partitionDetails.tableSnapshot = tableSnapshot;
        if (oldPartition == null) {
          partitionDetails.newFiles = newFiles;
          partitionDetails.partition = partition;
        }
        if (inPlaceEligible) {
          synchronized (ps) {
            InPlaceUpdate.rePositionCursor(ps);
            partitionsLoaded.incrementAndGet();
            InPlaceUpdate.reprintLine(ps, "Loaded : " + partitionsLoaded.get() + "/"
                + partitionDetailsMap.size() + " partitions.");
          }
        }
        return partition;
      } catch (Exception e) {
        LOG.error("Exception when loading partition with parameters "
            + " partPath=" + entry.getKey() + ", "
            + " table=" + tbl.getTableName() + ", "
            + " partSpec=" + fullPartSpec + ", "
            + " loadFileType=" + tbd.getLoadFileType().toString() + ", "
            + " listBucketingLevel=" + numLB + ", "
            + " isAcid=" + isAcid + ", "
            + " resetStatistics=" + resetStatistics, e);
        throw e;
      } finally {
        // get(conf).getMSC() can be called in this task; close the HMS connection right after use,
        // do not wait for the finalizer to close it.
        closeCurrent();
      }
    });
  }
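  // Run the per-partition load tasks on a bounded thread pool sized by HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT.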
  int poolSize = conf.getInt(ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT.varname, 1);
  ExecutorService executor = Executors.newFixedThreadPool(poolSize,
      new ThreadFactoryBuilder().setDaemon(true).setNameFormat("load-dynamic-partitionsToAdd-%d").build());

  List<Future<Partition>> futures = Lists.newLinkedList();
  Map<Map<String, String>, Partition> result = Maps.newLinkedHashMap();
  try {
    futures = executor.invokeAll(tasks);
    LOG.info("Number of partitionsToAdd to be added is " + futures.size());
    for (Future<Partition> future : futures) {
      Partition partition = future.get();
      result.put(partition.getSpec(), partition);
    }
    // add new partitions in batch
    addPartitionsToMetastore(
        partitionDetailsMap.entrySet().stream()
            .filter(entry -> !entry.getValue().hasOldPartition)
            .map(entry -> entry.getValue().partition)
            .collect(Collectors.toList()),
        resetStatistics, tbl,
        partitionDetailsMap.entrySet().stream()
            .filter(entry -> !entry.getValue().hasOldPartition)
            .map(entry -> entry.getValue().tableSnapshot)
            .collect(Collectors.toList()));
    // For acid table, add the acid_write event with file list at the time of load itself. But
    // it should be done after partition is created.
    List<WriteNotificationLogRequest> requestList = new ArrayList<>();
    int maxBatchSize = conf.getIntVar(HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE);
    for (Entry<Path, PartitionDetails> entry : partitionDetailsMap.entrySet()) {
      PartitionDetails partitionDetails = entry.getValue();
      if (isTxnTable && partitionDetails.newFiles != null) {
        addWriteNotificationLog(tbl, partitionDetails.fullSpec, partitionDetails.newFiles,
            writeId, requestList);
        if (requestList != null && requestList.size() >= maxBatchSize) {
          // If the first call returns that the HMS does not support batching, avoid batching
          // for later requests.
          boolean batchSupported = addWriteNotificationLogInBatch(tbl, requestList);
          if (batchSupported) {
            requestList.clear();
          } else {
            requestList = null;
          }
        }
      }
    }
    if (requestList != null && requestList.size() > 0) {
      addWriteNotificationLogInBatch(tbl, requestList);
    }
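    // Partitions that already existed in HMS are altered in place to refresh their statistics and metadata.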
    setStatsPropAndAlterPartitions(resetStatistics, tbl,
        partitionDetailsMap.entrySet().stream()
            .filter(entry -> entry.getValue().hasOldPartition)
            .map(entry -> entry.getValue().partition)
            .collect(Collectors.toList()),
        tableSnapshot);
  } catch (InterruptedException | ExecutionException e) {
    throw new HiveException("Exception when loading " + partitionDetailsMap.size() + " partitions"
        + " in table " + tbl.getTableName() + " with loadPath=" + loadPath, e);
  } catch (TException e) {
    LOG.error("Failed loadDynamicPartitions", e);
    throw new HiveException(e);
  } catch (Exception e) {
    StringBuffer logMsg = new StringBuffer();
    logMsg.append("Exception when loading partitionsToAdd with parameters ");
    logMsg.append("partPaths=");
    partitionDetailsMap.keySet().forEach(path -> logMsg.append(path + ", "));
    logMsg.append("table=" + tbl.getTableName() + ", ")
        .append("partSpec=" + partSpec + ", ")
        .append("loadFileType=" + tbd.getLoadFileType().toString() + ", ")
        .append("listBucketingLevel=" + numLB + ", ")
        .append("isAcid=" + isAcid + ", ")
        .append("resetStatistics=" + resetStatistics);
    LOG.error(logMsg.toString(), e);
    throw e;
  } finally {
    LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks");
    executor.shutdownNow();
  }
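  // Test-only hook: optionally fail the load here so error handling can be exercised in tests.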
  if (HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)
      && HiveConf.getBoolVar(conf, ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION)) {
    throw new HiveException(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION.name() + "=true");
  }
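  // For ACID tables, register the dynamically created partitions with the current transaction.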
  try {
    if (isTxnTable) {
      List<String> partNames =
          result.values().stream().map(Partition::getName).collect(Collectors.toList());
      getMSC().addDynamicPartitions(parentSession.getTxnMgr().getCurrentTxnId(), writeId,
          tbl.getDbName(), tbl.getTableName(), partNames, AcidUtils.toDataOperationType(operation));
    }
    LOG.info("Loaded " + result.size() + " partitionsToAdd");
    perfLogger.perfLogEnd("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS);
    return result;
  } catch (TException te) {
    LOG.error("Failed loadDynamicPartitions", te);
    throw new HiveException("Exception updating metastore for acid table "
        + tbd.getTable().getTableName() + " with partitions " + result.values(), te);
  }
}
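For context, here is a minimal caller sketch. It is not taken from the Hive sources: db, tbd and partitionDetailsMap are assumed to have been prepared by the surrounding move/load task, and the parameter values are purely illustrative of a plain (non-ACID) load.

// Hypothetical caller sketch; db (a Hive metadata object), tbd (a LoadTableDesc) and
// partitionDetailsMap (one PartitionDetails per dynamically generated partition path)
// are assumed to be built elsewhere.
Map<Map<String, String>, Partition> loaded = db.loadDynamicPartitions(
    tbd,                           // LoadTableDesc with source path and dynamic partition spec
    0,                             // numLB: no list bucketing
    false,                         // isAcid: plain (non-ACID) load
    0L,                            // writeId: 0 is allowed when isAcid == false
    0,                             // stmtId
    false,                         // resetStatistics: keep existing statistics
    AcidUtils.Operation.NOT_ACID,  // ACID operation type
    partitionDetailsMap);
loaded.forEach((spec, partition) ->
    LOG.info("Loaded dynamic partition " + spec + " as " + partition.getName()));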