
Example 1 with HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE

Use of org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE in project hive by apache.

Source: the class Hive, method loadDynamicPartitions.
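
HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE caps how many WriteNotificationLogRequest objects loadDynamicPartitions accumulates before flushing them to the metastore in one batched call. As a minimal sketch of how the setting might be tuned before a load (the ConfVars constant and the getIntVar/setIntVar accessors are HiveConf members; the value 256 is an arbitrary example, not a recommendation):

import org.apache.hadoop.hive.conf.HiveConf;

public class WriteNotificationBatchSizeExample {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Flush write-notification events to HMS in batches of up to 256 requests.
        conf.setIntVar(HiveConf.ConfVars.HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE, 256);
        System.out.println(conf.getIntVar(HiveConf.ConfVars.HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE));
    }
}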

/**
 * Given the source directory of the load path, load all dynamically generated partitions
 * into the specified table and return a map from each dynamic partition's full spec to the
 * corresponding Partition object.
 * @param tbd table descriptor
 * @param numLB number of buckets
 * @param isAcid true if this is an ACID operation
 * @param writeId writeId, can be 0 unless isAcid == true
 * @param stmtId statementId
 * @param resetStatistics if true, reset statistics. Do not reset statistics otherwise.
 * @param operation ACID operation type
 * @param partitionDetailsMap full dynamic partition specification
 * @return partition map details (PartitionSpec and Partition)
 * @throws HiveException
 */
public Map<Map<String, String>, Partition> loadDynamicPartitions(final LoadTableDesc tbd, final int numLB,
        final boolean isAcid, final long writeId, final int stmtId, final boolean resetStatistics,
        final AcidUtils.Operation operation, Map<Path, PartitionDetails> partitionDetailsMap) throws HiveException {
    PerfLogger perfLogger = SessionState.getPerfLogger();
    perfLogger.perfLogBegin("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS);
    final Path loadPath = tbd.getSourcePath();
    final Table tbl = getTable(tbd.getTable().getTableName());
    final Map<String, String> partSpec = tbd.getPartitionSpec();
    final AtomicInteger partitionsLoaded = new AtomicInteger(0);
    final boolean inPlaceEligible = conf.getLong("fs.trash.interval", 0) <= 0 && InPlaceUpdate.canRenderInPlace(conf) && !SessionState.getConsole().getIsSilent();
    final PrintStream ps = (inPlaceEligible) ? SessionState.getConsole().getInfoStream() : null;
    final SessionState parentSession = SessionState.get();
    List<Callable<Partition>> tasks = Lists.newLinkedList();
    boolean fetchPartitionInfo = true;
    final boolean scanPartitionsByName = HiveConf.getBoolVar(conf, HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS);
    if (scanPartitionsByName && !tbd.isDirectInsert() && !AcidUtils.isTransactionalTable(tbl)) {
        // Fetch only relevant partitions from HMS for checking old partitions
        List<String> partitionNames = new LinkedList<>();
        for (PartitionDetails details : partitionDetailsMap.values()) {
            if (details.fullSpec != null && !details.fullSpec.isEmpty()) {
                partitionNames.add(Warehouse.makeDynamicPartNameNoTrailingSeperator(details.fullSpec));
            }
        }
        List<Partition> partitions = Hive.get().getPartitionsByNames(tbl, partitionNames);
        for (Partition partition : partitions) {
            LOG.debug("HMS partition spec: {}", partition.getSpec());
            partitionDetailsMap.entrySet().parallelStream().filter(entry -> entry.getValue().fullSpec.equals(partition.getSpec())).findAny().ifPresent(entry -> {
                entry.getValue().partition = partition;
                entry.getValue().hasOldPartition = true;
            });
        }
        // no need to fetch partition again in tasks since we have already fetched partitions
        // info in getPartitionsByNames()
        fetchPartitionInfo = false;
    }
    boolean isTxnTable = AcidUtils.isTransactionalTable(tbl);
    AcidUtils.TableSnapshot tableSnapshot = isTxnTable ? getTableSnapshot(tbl, writeId) : null;
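    // for every dynamic partition, build one task that loads it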
    for (Entry<Path, PartitionDetails> entry : partitionDetailsMap.entrySet()) {
        boolean getPartitionFromHms = fetchPartitionInfo;
        tasks.add(() -> {
            PartitionDetails partitionDetails = entry.getValue();
            Map<String, String> fullPartSpec = partitionDetails.fullSpec;
            try {
                SessionState.setCurrentSessionState(parentSession);
                if (getPartitionFromHms) {
                    // didn't fetch partition info from HMS. Getting from HMS now.
                    Partition existing = getPartition(tbl, fullPartSpec, false);
                    if (existing != null) {
                        partitionDetails.partition = existing;
                        partitionDetails.hasOldPartition = true;
                    }
                }
                LOG.info("New loading path = " + entry.getKey() + " withPartSpec " + fullPartSpec);
                Partition oldPartition = partitionDetails.partition;
                List<FileStatus> newFiles = null;
                if (partitionDetails.newFiles != null) {
                    // If we already know the files from the direct insert manifest, use them
                    newFiles = partitionDetails.newFiles;
                } else if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && oldPartition == null) {
                    // Otherwise only collect them, if we are going to fire write notifications
                    newFiles = Collections.synchronizedList(new ArrayList<>());
                }
                // load the partition
                Partition partition = loadPartitionInternal(entry.getKey(), tbl, fullPartSpec, oldPartition,
                        tbd.getLoadFileType(), true, false, numLB > 0, false, isAcid, resetStatistics, writeId,
                        stmtId, tbd.isInsertOverwrite(), isTxnTable, newFiles, tbd.isDirectInsert());
                // record the table snapshot's write id on the partition before it is persisted to the metastore
                if (tableSnapshot != null) {
                    partition.getTPartition().setWriteId(tableSnapshot.getWriteId());
                }
                partitionDetails.tableSnapshot = tableSnapshot;
                if (oldPartition == null) {
                    partitionDetails.newFiles = newFiles;
                    partitionDetails.partition = partition;
                }
                if (inPlaceEligible) {
                    synchronized (ps) {
                        InPlaceUpdate.rePositionCursor(ps);
                        partitionsLoaded.incrementAndGet();
                        InPlaceUpdate.reprintLine(ps, "Loaded : " + partitionsLoaded.get() + "/" + partitionDetailsMap.size() + " partitions.");
                    }
                }
                return partition;
            } catch (Exception e) {
                LOG.error("Exception when loading partition with parameters " + " partPath=" + entry.getKey() + ", " + " table=" + tbl.getTableName() + ", " + " partSpec=" + fullPartSpec + ", " + " loadFileType=" + tbd.getLoadFileType().toString() + ", " + " listBucketingLevel=" + numLB + ", " + " isAcid=" + isAcid + ", " + " resetStatistics=" + resetStatistics, e);
                throw e;
            } finally {
                // get(conf).getMSC() can be called in this task; close the HMS connection right after use
                // instead of waiting for the finalizer to close it.
                closeCurrent();
            }
        });
    }
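    // Run the loading tasks on a bounded daemon thread pool; the size comes from HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT.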
    int poolSize = conf.getInt(ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT.varname, 1);
    ExecutorService executor = Executors.newFixedThreadPool(poolSize, new ThreadFactoryBuilder().setDaemon(true).setNameFormat("load-dynamic-partitionsToAdd-%d").build());
    List<Future<Partition>> futures = Lists.newLinkedList();
    Map<Map<String, String>, Partition> result = Maps.newLinkedHashMap();
    try {
        futures = executor.invokeAll(tasks);
        LOG.info("Number of partitionsToAdd to be added is " + futures.size());
        for (Future<Partition> future : futures) {
            Partition partition = future.get();
            result.put(partition.getSpec(), partition);
        }
        // add new partitions in batch
        addPartitionsToMetastore(
                partitionDetailsMap.entrySet().stream()
                        .filter(entry -> !entry.getValue().hasOldPartition)
                        .map(entry -> entry.getValue().partition).collect(Collectors.toList()),
                resetStatistics, tbl,
                partitionDetailsMap.entrySet().stream()
                        .filter(entry -> !entry.getValue().hasOldPartition)
                        .map(entry -> entry.getValue().tableSnapshot).collect(Collectors.toList()));
        // For ACID tables, add the acid_write event with the file list at load time itself,
        // but only after the partitions have been created.
        List<WriteNotificationLogRequest> requestList = new ArrayList<>();
        int maxBatchSize = conf.getIntVar(HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE);
        for (Entry<Path, PartitionDetails> entry : partitionDetailsMap.entrySet()) {
            PartitionDetails partitionDetails = entry.getValue();
            if (isTxnTable && partitionDetails.newFiles != null) {
                addWriteNotificationLog(tbl, partitionDetails.fullSpec, partitionDetails.newFiles, writeId, requestList);
                if (requestList != null && requestList.size() >= maxBatchSize) {
                    // If the first call reports that the HMS does not support batching, avoid batching
                    // for later requests.
                    boolean batchSupported = addWriteNotificationLogInBatch(tbl, requestList);
                    if (batchSupported) {
                        requestList.clear();
                    } else {
                        requestList = null;
                    }
                }
            }
        }
        if (requestList != null && requestList.size() > 0) {
            addWriteNotificationLogInBatch(tbl, requestList);
        }
        setStatsPropAndAlterPartitions(resetStatistics, tbl,
                partitionDetailsMap.entrySet().stream()
                        .filter(entry -> entry.getValue().hasOldPartition)
                        .map(entry -> entry.getValue().partition).collect(Collectors.toList()),
                tableSnapshot);
    } catch (InterruptedException | ExecutionException e) {
        throw new HiveException("Exception when loading " + partitionDetailsMap.size() + " partitions" + " in table " + tbl.getTableName() + " with loadPath=" + loadPath, e);
    } catch (TException e) {
        LOG.error("Failed loadDynamicPartitions", e);
        throw new HiveException(e);
    } catch (Exception e) {
        StringBuffer logMsg = new StringBuffer();
        logMsg.append("Exception when loading partitionsToAdd with parameters ");
        logMsg.append("partPaths=");
        partitionDetailsMap.keySet().forEach(path -> logMsg.append(path + ", "));
        logMsg.append("table=" + tbl.getTableName() + ", ").append("partSpec=" + partSpec + ", ").append("loadFileType=" + tbd.getLoadFileType().toString() + ", ").append("listBucketingLevel=" + numLB + ", ").append("isAcid=" + isAcid + ", ").append("resetStatistics=" + resetStatistics);
        LOG.error(logMsg.toString(), e);
        throw e;
    } finally {
        LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks");
        executor.shutdownNow();
    }
    if (HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) && HiveConf.getBoolVar(conf, ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION)) {
        throw new HiveException(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION.name() + "=true");
    }
    try {
        if (isTxnTable) {
            List<String> partNames = result.values().stream().map(Partition::getName).collect(Collectors.toList());
            getMSC().addDynamicPartitions(parentSession.getTxnMgr().getCurrentTxnId(), writeId, tbl.getDbName(), tbl.getTableName(), partNames, AcidUtils.toDataOperationType(operation));
        }
        LOG.info("Loaded " + result.size() + "partitionsToAdd");
        perfLogger.perfLogEnd("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS);
        return result;
    } catch (TException te) {
        LOG.error("Failed loadDynamicPartitions", te);
        throw new HiveException("Exception updating metastore for acid table " + tbd.getTable().getTableName() + " with partitions " + result.values(), te);
    }
}
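
The write-notification handling above follows an accumulate-and-flush pattern: one request is built per partition, the list is flushed to HMS whenever it reaches HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE, batching is abandoned (requestList is set to null) if the first batched call reports that the metastore does not support it, and any remainder is flushed at the end. Below is a stripped-down, self-contained sketch of that pattern; Request, sendBatch and sendSingle are hypothetical stand-ins, not Hive APIs.

import java.util.ArrayList;
import java.util.List;

public class NotificationBatchingSketch {

    // Hypothetical stand-in for WriteNotificationLogRequest.
    static class Request {
        final String partition;
        Request(String partition) { this.partition = partition; }
    }

    // Hypothetical stand-in for addWriteNotificationLogInBatch: returns false if the
    // server cannot accept batched requests.
    static boolean sendBatch(List<Request> batch) {
        System.out.println("sending batch of " + batch.size() + " requests");
        return true;
    }

    // Hypothetical per-request fallback used once batching is known to be unsupported.
    static void sendSingle(Request request) {
        System.out.println("sending single request for " + request.partition);
    }

    public static void main(String[] args) {
        int maxBatchSize = 3; // stands in for HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE
        List<Request> requestList = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            Request request = new Request("part=" + i);
            if (requestList == null) {
                sendSingle(request); // batching turned out to be unsupported earlier
                continue;
            }
            requestList.add(request);
            if (requestList.size() >= maxBatchSize) {
                if (sendBatch(requestList)) {
                    requestList.clear();
                } else {
                    requestList = null; // stop batching for later requests
                }
            }
        }
        if (requestList != null && !requestList.isEmpty()) {
            sendBatch(requestList); // flush the remainder
        }
    }
}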
