Example 16 with PartitionSpec

use of org.apache.hadoop.hive.metastore.api.PartitionSpec in project hive by apache.

the class TestMetaStoreServerUtils method testGetPartitionspecsGroupedBySDonePartitionCombined.

/**
 * Test getPartitionspecsGroupedByStorageDescriptor() with multiple partitions:
 * <ul>
 *   <li>Partition with null SD</li>
 *   <li>Two partitions under the table location</li>
 *   <li>One partition outside of table location</li>
 * </ul>
 */
@Test
public void testGetPartitionspecsGroupedBySDonePartitionCombined() throws MetaException {
    // Create a table and four partitions with differing storage descriptors
    String sharedInputFormat = "foo1";
    Table tbl = new TableBuilder().setDbName(DB_NAME).setTableName(TABLE_NAME).addCol("id", "int").setLocation("/foo").build(null);
    Partition p1 = new PartitionBuilder().setDbName(DB_NAME).setTableName(TABLE_NAME).setLocation("/foo/bar").addCol("a1", "int").addValue("val1").setInputFormat(sharedInputFormat).build(null);
    Partition p2 = new PartitionBuilder().setDbName(DB_NAME).setTableName(TABLE_NAME).setLocation("/a/b").addCol("a2", "int").addValue("val2").setInputFormat("foo2").build(null);
    Partition p3 = new PartitionBuilder().setDbName(DB_NAME).setTableName(TABLE_NAME).addCol("a3", "int").addValue("val3").setInputFormat("foo3").build(null);
    Partition p4 = new PartitionBuilder().setDbName(DB_NAME).setTableName(TABLE_NAME).setLocation("/foo/baz").addCol("a1", "int").addValue("val4").setInputFormat(sharedInputFormat).build(null);
    p3.unsetSd();
    List<PartitionSpec> result = MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(tbl, Arrays.asList(p1, p2, p3, p4));
    assertThat(result.size(), is(3));
    PartitionSpec ps1 = result.get(0);
    assertThat(ps1.getRootPath(), is((String) null));
    assertThat(ps1.getPartitionList(), is((List<Partition>) null));
    PartitionSpecWithSharedSD partSpec = ps1.getSharedSDPartitionSpec();
    List<PartitionWithoutSD> partitions1 = partSpec.getPartitions();
    assertThat(partitions1.size(), is(1));
    PartitionWithoutSD partition1 = partitions1.get(0);
    assertThat(partition1.getRelativePath(), is((String) null));
    assertThat(partition1.getValues(), is(Collections.singletonList("val3")));
    PartitionSpec ps2 = result.get(1);
    assertThat(ps2.getRootPath(), is(tbl.getSd().getLocation()));
    assertThat(ps2.getPartitionList(), is((List<Partition>) null));
    List<PartitionWithoutSD> partitions2 = ps2.getSharedSDPartitionSpec().getPartitions();
    assertThat(partitions2.size(), is(2));
    PartitionWithoutSD partition2_1 = partitions2.get(0);
    PartitionWithoutSD partition2_2 = partitions2.get(1);
    if (partition2_1.getRelativePath().equals("/baz")) {
        // The grouping makes no ordering guarantee; swap so partition2_1 is the /bar partition
        PartitionWithoutSD tmp = partition2_1;
        partition2_1 = partition2_2;
        partition2_2 = tmp;
    }
    assertThat(partition2_1.getRelativePath(), is("/bar"));
    assertThat(partition2_1.getValues(), is(Collections.singletonList("val1")));
    assertThat(partition2_2.getRelativePath(), is("/baz"));
    assertThat(partition2_2.getValues(), is(Collections.singletonList("val4")));
    PartitionSpec ps4 = result.get(2);
    assertThat(ps4.getRootPath(), is((String) null));
    assertThat(ps4.getSharedSDPartitionSpec(), is((PartitionSpecWithSharedSD) null));
    List<Partition> partitions = ps4.getPartitionList().getPartitions();
    assertThat(partitions.size(), is(1));
    Partition partition = partitions.get(0);
    assertThat(partition.getSd().getLocation(), is("/a/b"));
    assertThat(partition.getValues(), is(Collections.singletonList("val2")));
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) PartitionWithoutSD(org.apache.hadoop.hive.metastore.api.PartitionWithoutSD) List(java.util.List) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) PartitionSpec(org.apache.hadoop.hive.metastore.api.PartitionSpec) PartitionSpecWithSharedSD(org.apache.hadoop.hive.metastore.api.PartitionSpecWithSharedSD) MetastoreUnitTest(org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest) Test(org.junit.Test)
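For orientation, a minimal consumption sketch (not part of the test above, and hedged accordingly): it walks a grouped result using only the Thrift-generated PartitionSpec accessors exercised by the assertions, plus the isSet checks Thrift generates for optional fields.

private static void printGroupedSpecs(List<PartitionSpec> specs) {
    for (PartitionSpec spec : specs) {
        if (spec.isSetSharedSDPartitionSpec()) {
            // Partitions sharing one storage descriptor travel as PartitionWithoutSD,
            // addressed by a path relative to the spec's root path.
            for (PartitionWithoutSD p : spec.getSharedSDPartitionSpec().getPartitions()) {
                System.out.println(spec.getRootPath() + " + " + p.getRelativePath() + " -> " + p.getValues());
            }
        } else if (spec.isSetPartitionList()) {
            // Partitions with unrelated locations travel as full Partition objects.
            for (Partition p : spec.getPartitionList().getPartitions()) {
                System.out.println(p.getSd().getLocation() + " -> " + p.getValues());
            }
        }
    }
}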

Example 17 with PartitionSpec

use of org.apache.hadoop.hive.metastore.api.PartitionSpec in project metacat by Netflix.

the class CatalogThriftHiveMetastore method get_part_specs_by_filter.

/**
 * {@inheritDoc}
 */
@Override
public List<PartitionSpec> get_part_specs_by_filter(final String dbName, final String tblName, final String filter, final int maxParts) throws TException {
    // TODO: Handle the use case of grouping
    return requestWrapper("get_partitions_pspec", new Object[] { dbName, tblName, filter, maxParts }, () -> {
        final String databaseName = normalizeIdentifier(dbName);
        final String tableName = normalizeIdentifier(tblName);
        final TableDto tableDto = v1.getTable(catalogName, databaseName, tableName, true, false, false);
        final GetPartitionsRequestDto dto = new GetPartitionsRequestDto(filter, null, true, false);
        final List<PartitionDto> metacatPartitions = partV1.getPartitionsForRequest(catalogName, dbName, tblName, null, null, null, maxParts, false, dto);
        final List<Partition> partitions = Lists.newArrayListWithCapacity(metacatPartitions.size());
        for (PartitionDto partition : metacatPartitions) {
            partitions.add(hiveConverters.metacatToHivePartition(partition, tableDto));
        }
        final PartitionSpec pSpec = new PartitionSpec();
        pSpec.setPartitionList(new PartitionListComposingSpec(partitions));
        pSpec.setDbName(dbName);
        pSpec.setTableName(tblName);
        if (tableDto != null && tableDto.getSerde() != null) {
            pSpec.setRootPath(tableDto.getSerde().getUri());
        }
        return Arrays.asList(pSpec);
    });
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) PartitionListComposingSpec(org.apache.hadoop.hive.metastore.api.PartitionListComposingSpec) PartitionDto(com.netflix.metacat.common.dto.PartitionDto) GetPartitionsRequestDto(com.netflix.metacat.common.dto.GetPartitionsRequestDto) TableDto(com.netflix.metacat.common.dto.TableDto) PartitionSpec(org.apache.hadoop.hive.metastore.api.PartitionSpec)
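Note the TODO above: unlike the Hive-side grouping in Example 16, this Metacat shim never groups partitions by shared storage descriptor. Every converted partition is wrapped in one PartitionSpec backed by a PartitionListComposingSpec, and the root path is set only when the table DTO exposes a serde URI.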

Example 18 with PartitionSpec

use of org.apache.hadoop.hive.metastore.api.PartitionSpec in project hive by apache.

the class SessionHiveMetaStoreClient method getPartitionSpecProxy.

private PartitionSpecProxy getPartitionSpecProxy(org.apache.hadoop.hive.metastore.api.Table table, List<Partition> partitions, int maxParts) throws MetaException {
    List<PartitionSpec> partitionSpecs;
    PartitionSpec partitionSpec = new PartitionSpec();
    PartitionListComposingSpec partitionListComposingSpec = new PartitionListComposingSpec(new ArrayList<>());
    // A negative maxParts means "no limit"; otherwise clamp to the partition count.
    int limit = (maxParts < 0 || maxParts > partitions.size()) ? partitions.size() : maxParts;
    for (int i = 0; i < limit; i++) {
        partitionListComposingSpec.addToPartitions(deepCopy(partitions.get(i)));
    }
    partitionSpec.setCatName(table.getCatName());
    partitionSpec.setDbName(table.getDbName());
    partitionSpec.setTableName(table.getTableName());
    partitionSpec.setRootPath(table.getSd().getLocation());
    partitionSpec.setPartitionList(partitionListComposingSpec);
    partitionSpecs = Arrays.asList(partitionSpec);
    return PartitionSpecProxy.Factory.get(partitionSpecs);
}
Also used : PartitionListComposingSpec(org.apache.hadoop.hive.metastore.api.PartitionListComposingSpec) PartitionSpec(org.apache.hadoop.hive.metastore.api.PartitionSpec)
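A hedged usage sketch for the proxy this helper returns, assuming the getPartitionIterator() accessor on org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy (not shown in this example):

PartitionSpecProxy proxy = getPartitionSpecProxy(table, partitions, maxParts);
PartitionSpecProxy.PartitionIterator iterator = proxy.getPartitionIterator();
while (iterator.hasNext()) {
    // yields each deep-copied Partition, at most maxParts of them
    Partition partition = iterator.next();
}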

Example 19 with PartitionSpec

use of org.apache.hadoop.hive.metastore.api.PartitionSpec in project hive by apache.

the class Hive method loadDynamicPartitions.

/**
 * Given a source directory name of the load path, load all dynamically generated partitions
 * into the specified table and return the loaded partitions keyed by their full partition
 * specs.
 * @param tbd table descriptor
 * @param numLB number of buckets
 * @param isAcid true if this is an ACID operation
 * @param writeId writeId, can be 0 unless isAcid == true
 * @param stmtId statementId
 * @param resetStatistics if true, reset statistics; otherwise leave existing statistics in place.
 * @param operation ACID operation type
 * @param partitionDetailsMap full dynamic partition specification
 * @return a map from each dynamic partition's full spec to the corresponding loaded Partition
 * @throws HiveException
 */
public Map<Map<String, String>, Partition> loadDynamicPartitions(final LoadTableDesc tbd, final int numLB, final boolean isAcid, final long writeId, final int stmtId, final boolean resetStatistics, final AcidUtils.Operation operation, Map<Path, PartitionDetails> partitionDetailsMap) throws HiveException {
    PerfLogger perfLogger = SessionState.getPerfLogger();
    perfLogger.perfLogBegin("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS);
    final Path loadPath = tbd.getSourcePath();
    final Table tbl = getTable(tbd.getTable().getTableName());
    final Map<String, String> partSpec = tbd.getPartitionSpec();
    final AtomicInteger partitionsLoaded = new AtomicInteger(0);
    final boolean inPlaceEligible = conf.getLong("fs.trash.interval", 0) <= 0 && InPlaceUpdate.canRenderInPlace(conf) && !SessionState.getConsole().getIsSilent();
    final PrintStream ps = (inPlaceEligible) ? SessionState.getConsole().getInfoStream() : null;
    final SessionState parentSession = SessionState.get();
    List<Callable<Partition>> tasks = Lists.newLinkedList();
    boolean fetchPartitionInfo = true;
    final boolean scanPartitionsByName = HiveConf.getBoolVar(conf, HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS);
    if (scanPartitionsByName && !tbd.isDirectInsert() && !AcidUtils.isTransactionalTable(tbl)) {
        // Fetch only relevant partitions from HMS for checking old partitions
        List<String> partitionNames = new LinkedList<>();
        for (PartitionDetails details : partitionDetailsMap.values()) {
            if (details.fullSpec != null && !details.fullSpec.isEmpty()) {
                partitionNames.add(Warehouse.makeDynamicPartNameNoTrailingSeperator(details.fullSpec));
            }
        }
        List<Partition> partitions = Hive.get().getPartitionsByNames(tbl, partitionNames);
        for (Partition partition : partitions) {
            LOG.debug("HMS partition spec: {}", partition.getSpec());
            partitionDetailsMap.entrySet().parallelStream().filter(entry -> entry.getValue().fullSpec.equals(partition.getSpec())).findAny().ifPresent(entry -> {
                entry.getValue().partition = partition;
                entry.getValue().hasOldPartition = true;
            });
        }
        // no need to fetch partition again in tasks since we have already fetched partitions
        // info in getPartitionsByNames()
        fetchPartitionInfo = false;
    }
    boolean isTxnTable = AcidUtils.isTransactionalTable(tbl);
    AcidUtils.TableSnapshot tableSnapshot = isTxnTable ? getTableSnapshot(tbl, writeId) : null;
    // for every dynamic partition
    for (Entry<Path, PartitionDetails> entry : partitionDetailsMap.entrySet()) {
        boolean getPartitionFromHms = fetchPartitionInfo;
        tasks.add(() -> {
            PartitionDetails partitionDetails = entry.getValue();
            Map<String, String> fullPartSpec = partitionDetails.fullSpec;
            try {
                SessionState.setCurrentSessionState(parentSession);
                if (getPartitionFromHms) {
                    // didn't fetch partition info from HMS. Getting from HMS now.
                    Partition existing = getPartition(tbl, fullPartSpec, false);
                    if (existing != null) {
                        partitionDetails.partition = existing;
                        partitionDetails.hasOldPartition = true;
                    }
                }
                LOG.info("New loading path = " + entry.getKey() + " withPartSpec " + fullPartSpec);
                Partition oldPartition = partitionDetails.partition;
                List<FileStatus> newFiles = null;
                if (partitionDetails.newFiles != null) {
                    // If we already know the files from the direct insert manifest, use them
                    newFiles = partitionDetails.newFiles;
                } else if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && oldPartition == null) {
                    // Otherwise only collect them, if we are going to fire write notifications
                    newFiles = Collections.synchronizedList(new ArrayList<>());
                }
                // load the partition
                Partition partition = loadPartitionInternal(entry.getKey(), tbl, fullPartSpec, oldPartition, tbd.getLoadFileType(), true, false, numLB > 0, false, isAcid, resetStatistics, writeId, stmtId, tbd.isInsertOverwrite(), isTxnTable, newFiles, tbd.isDirectInsert());
                // stamp the partition with the table snapshot's write id before it reaches the metastore
                if (tableSnapshot != null) {
                    partition.getTPartition().setWriteId(tableSnapshot.getWriteId());
                }
                partitionDetails.tableSnapshot = tableSnapshot;
                if (oldPartition == null) {
                    partitionDetails.newFiles = newFiles;
                    partitionDetails.partition = partition;
                }
                if (inPlaceEligible) {
                    synchronized (ps) {
                        InPlaceUpdate.rePositionCursor(ps);
                        partitionsLoaded.incrementAndGet();
                        InPlaceUpdate.reprintLine(ps, "Loaded : " + partitionsLoaded.get() + "/" + partitionDetailsMap.size() + " partitions.");
                    }
                }
                return partition;
            } catch (Exception e) {
                LOG.error("Exception when loading partition with parameters " + " partPath=" + entry.getKey() + ", " + " table=" + tbl.getTableName() + ", " + " partSpec=" + fullPartSpec + ", " + " loadFileType=" + tbd.getLoadFileType().toString() + ", " + " listBucketingLevel=" + numLB + ", " + " isAcid=" + isAcid + ", " + " resetStatistics=" + resetStatistics, e);
                throw e;
            } finally {
                // get(conf).getMSC can be called in this task, Close the HMS connection right after use, do not wait for finalizer to close it.
                closeCurrent();
            }
        });
    }
    int poolSize = conf.getInt(ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT.varname, 1);
    ExecutorService executor = Executors.newFixedThreadPool(poolSize, new ThreadFactoryBuilder().setDaemon(true).setNameFormat("load-dynamic-partitionsToAdd-%d").build());
    List<Future<Partition>> futures = Lists.newLinkedList();
    Map<Map<String, String>, Partition> result = Maps.newLinkedHashMap();
    try {
        futures = executor.invokeAll(tasks);
        LOG.info("Number of partitionsToAdd to be added is " + futures.size());
        for (Future<Partition> future : futures) {
            Partition partition = future.get();
            result.put(partition.getSpec(), partition);
        }
        // add new partitions in batch
        addPartitionsToMetastore(partitionDetailsMap.entrySet().stream().filter(entry -> !entry.getValue().hasOldPartition).map(entry -> entry.getValue().partition).collect(Collectors.toList()), resetStatistics, tbl, partitionDetailsMap.entrySet().stream().filter(entry -> !entry.getValue().hasOldPartition).map(entry -> entry.getValue().tableSnapshot).collect(Collectors.toList()));
        // For acid table, add the acid_write event with file list at the time of load itself. But
        // it should be done after partition is created.
        List<WriteNotificationLogRequest> requestList = new ArrayList<>();
        int maxBatchSize = conf.getIntVar(HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE);
        for (Entry<Path, PartitionDetails> entry : partitionDetailsMap.entrySet()) {
            PartitionDetails partitionDetails = entry.getValue();
            if (isTxnTable && partitionDetails.newFiles != null) {
                addWriteNotificationLog(tbl, partitionDetails.fullSpec, partitionDetails.newFiles, writeId, requestList);
                if (requestList != null && requestList.size() >= maxBatchSize) {
                    // If the first call reports that the HMS does not support batching, avoid batching
                    // for later requests.
                    boolean batchSupported = addWriteNotificationLogInBatch(tbl, requestList);
                    if (batchSupported) {
                        requestList.clear();
                    } else {
                        requestList = null;
                    }
                }
            }
        }
        if (requestList != null && requestList.size() > 0) {
            addWriteNotificationLogInBatch(tbl, requestList);
        }
        setStatsPropAndAlterPartitions(resetStatistics, tbl, partitionDetailsMap.entrySet().stream().filter(entry -> entry.getValue().hasOldPartition).map(entry -> entry.getValue().partition).collect(Collectors.toList()), tableSnapshot);
    } catch (InterruptedException | ExecutionException e) {
        throw new HiveException("Exception when loading " + partitionDetailsMap.size() + " partitions" + " in table " + tbl.getTableName() + " with loadPath=" + loadPath, e);
    } catch (TException e) {
        LOG.error("Failed loadDynamicPartitions", e);
        throw new HiveException(e);
    } catch (Exception e) {
        StringBuffer logMsg = new StringBuffer();
        logMsg.append("Exception when loading partitionsToAdd with parameters ");
        logMsg.append("partPaths=");
        partitionDetailsMap.keySet().forEach(path -> logMsg.append(path + ", "));
        logMsg.append("table=" + tbl.getTableName() + ", ").append("partSpec=" + partSpec + ", ").append("loadFileType=" + tbd.getLoadFileType().toString() + ", ").append("listBucketingLevel=" + numLB + ", ").append("isAcid=" + isAcid + ", ").append("resetStatistics=" + resetStatistics);
        LOG.error(logMsg.toString(), e);
        throw e;
    } finally {
        LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks");
        executor.shutdownNow();
    }
    if (HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) && HiveConf.getBoolVar(conf, ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION)) {
        throw new HiveException(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION.name() + "=true");
    }
    try {
        if (isTxnTable) {
            List<String> partNames = result.values().stream().map(Partition::getName).collect(Collectors.toList());
            getMSC().addDynamicPartitions(parentSession.getTxnMgr().getCurrentTxnId(), writeId, tbl.getDbName(), tbl.getTableName(), partNames, AcidUtils.toDataOperationType(operation));
        }
        LOG.info("Loaded " + result.size() + "partitionsToAdd");
        perfLogger.perfLogEnd("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS);
        return result;
    } catch (TException te) {
        LOG.error("Failed loadDynamicPartitions", te);
        throw new HiveException("Exception updating metastore for acid table " + tbd.getTable().getTableName() + " with partitions " + result.values(), te);
    }
}
Also used : MetaStoreUtils.getDefaultCatalog(org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog) CALCITE(org.apache.hadoop.hive.ql.metadata.HiveRelOptMaterialization.RewriteAlgorithm.CALCITE) FileSystem(org.apache.hadoop.fs.FileSystem) FileStatus(org.apache.hadoop.fs.FileStatus) FunctionRegistry(org.apache.hadoop.hive.ql.exec.FunctionRegistry) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) Future(java.util.concurrent.Future) StatsSetupConst(org.apache.hadoop.hive.common.StatsSetupConst) Pair(org.apache.commons.lang3.tuple.Pair) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) GetPartitionRequest(org.apache.hadoop.hive.metastore.api.GetPartitionRequest) DefaultConstraintsRequest(org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest) EnumSet(java.util.EnumSet) SQLAllTableConstraints(org.apache.hadoop.hive.metastore.api.SQLAllTableConstraints) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) HadoopShims(org.apache.hadoop.hive.shims.HadoopShims) FileChecksum(org.apache.hadoop.fs.FileChecksum) CheckConstraintsRequest(org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest) WMMapping(org.apache.hadoop.hive.metastore.api.WMMapping) HiveMaterializedViewUtils(org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewUtils) AcidUtils.getFullTableName(org.apache.hadoop.hive.ql.io.AcidUtils.getFullTableName) SetPartitionsStatsRequest(org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest) MetastoreConf(org.apache.hadoop.hive.metastore.conf.MetastoreConf) ForeignKeysRequest(org.apache.hadoop.hive.metastore.api.ForeignKeysRequest) FileUtils(org.apache.hadoop.hive.common.FileUtils) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) ErrorMsg(org.apache.hadoop.hive.ql.ErrorMsg) WMPool(org.apache.hadoop.hive.metastore.api.WMPool) HdfsUtils(org.apache.hadoop.hive.ql.io.HdfsUtils) InsertEventRequestData(org.apache.hadoop.hive.metastore.api.InsertEventRequestData) Role(org.apache.hadoop.hive.metastore.api.Role) CollectionUtils(org.apache.commons.collections4.CollectionUtils) CmRecycleRequest(org.apache.hadoop.hive.metastore.api.CmRecycleRequest) Lists(com.google.common.collect.Lists) MATERIALIZED_VIEW_REWRITING_TIME_WINDOW(org.apache.hadoop.hive.conf.Constants.MATERIALIZED_VIEW_REWRITING_TIME_WINDOW) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Unstable(org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) Constants(org.apache.hadoop.hive.conf.Constants) ReplChangeManager(org.apache.hadoop.hive.metastore.ReplChangeManager) GetOpenTxnsInfoResponse(org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse) LazySimpleSerDe(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe) AllTableConstraintsRequest(org.apache.hadoop.hive.metastore.api.AllTableConstraintsRequest) EnvironmentContext(org.apache.hadoop.hive.metastore.api.EnvironmentContext) IOException(java.io.IOException) PrincipalType(org.apache.hadoop.hive.metastore.api.PrincipalType) LoadFileType(org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType) UnknownHostException(java.net.UnknownHostException) ExecutionException(java.util.concurrent.ExecutionException) PartitionWithoutSD(org.apache.hadoop.hive.metastore.api.PartitionWithoutSD) MetaStoreServerUtils(org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils) 
Deserializer(org.apache.hadoop.hive.serde2.Deserializer) Preconditions(com.google.common.base.Preconditions) org.apache.hadoop.hive.metastore.api.hive_metastoreConstants(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) ValidWriteIdList(org.apache.hadoop.hive.common.ValidWriteIdList) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) PrimaryKeysRequest(org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest) ConfVars(org.apache.hadoop.hive.conf.HiveConf.ConfVars) WMFullResourcePlan(org.apache.hadoop.hive.metastore.api.WMFullResourcePlan) HiveMetaException(org.apache.hadoop.hive.metastore.HiveMetaException) SerializationUtilities(org.apache.hadoop.hive.ql.exec.SerializationUtilities) HiveObjectType(org.apache.hadoop.hive.metastore.api.HiveObjectType) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) CompactionType(org.apache.hadoop.hive.metastore.api.CompactionType) GetPartitionNamesPsResponse(org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsResponse) ColumnStatisticsDesc(org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc) SynchronizedMetaStoreClient(org.apache.hadoop.hive.metastore.SynchronizedMetaStoreClient) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) InPlaceUpdate(org.apache.hadoop.hive.common.log.InPlaceUpdate) ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger) GetPartitionsByNamesRequest(org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest) WMNullableResourcePlan(org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) FireEventRequest(org.apache.hadoop.hive.metastore.api.FireEventRequest) SQLPrimaryKey(org.apache.hadoop.hive.metastore.api.SQLPrimaryKey) Collectors(java.util.stream.Collectors) SessionState(org.apache.hadoop.hive.ql.session.SessionState) CompactionResponse(org.apache.hadoop.hive.metastore.api.CompactionResponse) IMetaStoreClient(org.apache.hadoop.hive.metastore.IMetaStoreClient) DataConnector(org.apache.hadoop.hive.metastore.api.DataConnector) Entry(java.util.Map.Entry) RetryingMetaStoreClient(org.apache.hadoop.hive.metastore.RetryingMetaStoreClient) AcidUtils(org.apache.hadoop.hive.ql.io.AcidUtils) FilenameUtils(org.apache.commons.io.FilenameUtils) MoreExecutors(com.google.common.util.concurrent.MoreExecutors) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) WMNullablePool(org.apache.hadoop.hive.metastore.api.WMNullablePool) PathFilter(org.apache.hadoop.fs.PathFilter) WriteNotificationLogBatchRequest(org.apache.hadoop.hive.metastore.api.WriteNotificationLogBatchRequest) HashSet(java.util.HashSet) ListBucketingPrunerUtils(org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils) PartitionsByExprRequest(org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest) SERIALIZATION_FORMAT(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT) ImmutableList(com.google.common.collect.ImmutableList) UpdateTransactionalStatsRequest(org.apache.hadoop.hive.metastore.api.UpdateTransactionalStatsRequest) StringUtils(org.apache.hadoop.util.StringUtils) LinkedList(java.util.LinkedList) TApplicationException(org.apache.thrift.TApplicationException) ExecutorService(java.util.concurrent.ExecutorService) AbstractFileMergeOperator(org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator) PrintStream(java.io.PrintStream) 
DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Logger(org.slf4j.Logger) HiveMetaHookLoader(org.apache.hadoop.hive.metastore.HiveMetaHookLoader) HiveConf(org.apache.hadoop.hive.conf.HiveConf) HiveVersionInfo(org.apache.hive.common.util.HiveVersionInfo) HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS(org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) ShimLoader(org.apache.hadoop.hive.shims.ShimLoader) BitSet(java.util.BitSet) PartitionDropOptions(org.apache.hadoop.hive.metastore.PartitionDropOptions) NotNullConstraintsRequest(org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest) Arrays(java.util.Arrays) WMResourcePlan(org.apache.hadoop.hive.metastore.api.WMResourcePlan) GetPartitionResponse(org.apache.hadoop.hive.metastore.api.GetPartitionResponse) CreateTableAutomaticGrant(org.apache.hadoop.hive.ql.session.CreateTableAutomaticGrant) HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE(org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE) InetAddress(java.net.InetAddress) UniqueConstraintsRequest(org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) HiveMaterializedViewUtils.extractTable(org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewUtils.extractTable) GetPartitionsPsWithAuthResponse(org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthResponse) Set(java.util.Set) STRING_TYPE_NAME(org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME) Executors(java.util.concurrent.Executors) HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager) HiveMetaStoreClient(org.apache.hadoop.hive.metastore.HiveMetaStoreClient) Callable(java.util.concurrent.Callable) SQLForeignKey(org.apache.hadoop.hive.metastore.api.SQLForeignKey) FsAction(org.apache.hadoop.fs.permission.FsAction) ArrayList(java.util.ArrayList) LinkedHashSet(java.util.LinkedHashSet) Nullable(javax.annotation.Nullable) PrincipalPrivilegeSet(org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet) Materialization(org.apache.hadoop.hive.metastore.api.Materialization) GetPartitionsPsWithAuthRequest(org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthRequest) SOFT_DELETE_TABLE(org.apache.hadoop.hive.common.AcidConstants.SOFT_DELETE_TABLE) FunctionUtils(org.apache.hadoop.hive.ql.exec.FunctionUtils) TException(org.apache.thrift.TException) ValidTxnWriteIdList(org.apache.hadoop.hive.common.ValidTxnWriteIdList) ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) TableType(org.apache.hadoop.hive.metastore.TableType) HiveObjectRef(org.apache.hadoop.hive.metastore.api.HiveObjectRef) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) ExprNodeGenericFuncDesc(org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc) GetRoleGrantsForPrincipalResponse(org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse) LoggerFactory(org.slf4j.LoggerFactory) RolePrincipalGrant(org.apache.hadoop.hive.metastore.api.RolePrincipalGrant) ByteBuffer(java.nio.ByteBuffer) Warehouse(org.apache.hadoop.hive.metastore.Warehouse) ALL(org.apache.hadoop.hive.ql.metadata.HiveRelOptMaterialization.RewriteAlgorithm.ALL) HiveStatsUtils(org.apache.hadoop.hive.common.HiveStatsUtils) InputFormat(org.apache.hadoop.mapred.InputFormat) Path(org.apache.hadoop.fs.Path) ValidReaderWriteIdList(org.apache.hadoop.hive.common.ValidReaderWriteIdList) 
FireEventRequestData(org.apache.hadoop.hive.metastore.api.FireEventRequestData) Splitter(com.google.common.base.Splitter) PrivilegeBag(org.apache.hadoop.hive.metastore.api.PrivilegeBag) MetaStoreUtils(org.apache.hadoop.hive.metastore.utils.MetaStoreUtils) ImmutableMap(com.google.common.collect.ImmutableMap) ColumnStatisticsObj(org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj) HiveMetaHook(org.apache.hadoop.hive.metastore.HiveMetaHook) AggrStats(org.apache.hadoop.hive.metastore.api.AggrStats) Sets(com.google.common.collect.Sets) FileNotFoundException(java.io.FileNotFoundException) ColumnStatistics(org.apache.hadoop.hive.metastore.api.ColumnStatistics) List(java.util.List) DbTxnManager(org.apache.hadoop.hive.ql.lockmgr.DbTxnManager) DFSUtilClient(org.apache.hadoop.hdfs.DFSUtilClient) MetadataPpdResult(org.apache.hadoop.hive.metastore.api.MetadataPpdResult) Optional(java.util.Optional) HiveMetaStoreUtils(org.apache.hadoop.hive.metastore.HiveMetaStoreUtils) PartitionSpec(org.apache.hadoop.hive.metastore.api.PartitionSpec) HiveObjectPrivilege(org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) GetRoleGrantsForPrincipalRequest(org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest) HashMap(java.util.HashMap) GetTableRequest(org.apache.hadoop.hive.metastore.api.GetTableRequest) LimitedPrivate(org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate) Utilities(org.apache.hadoop.hive.ql.exec.Utilities) TableSnapshot(org.apache.hadoop.hive.ql.io.AcidUtils.TableSnapshot) JDODataStoreException(javax.jdo.JDODataStoreException) ObjectUtils(org.apache.commons.lang3.ObjectUtils) TableName(org.apache.hadoop.hive.common.TableName) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) WMTrigger(org.apache.hadoop.hive.metastore.api.WMTrigger) Iterator(java.util.Iterator) WMValidateResourcePlanResponse(org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse) PartitionDetails(org.apache.hadoop.hive.ql.exec.Utilities.PartitionDetails) SkewedInfo(org.apache.hadoop.hive.metastore.api.SkewedInfo) Maps(com.google.common.collect.Maps) TimeUnit(java.util.concurrent.TimeUnit) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) META_TABLE_STORAGE(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE) GetPartitionNamesPsRequest(org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsRequest) WriteNotificationLogRequest(org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Collections(java.util.Collections) Database(org.apache.hadoop.hive.metastore.api.Database) Function(org.apache.hadoop.hive.metastore.api.Function)
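The concurrency skeleton above is easy to lose in the detail, so here is a distilled sketch of the same fan-out pattern (a simplification under stated assumptions, not the Hive method itself; assume the enclosing method declares throws InterruptedException, ExecutionException):

// One Callable<Partition> per dynamic partition has already been added to `tasks`.
ExecutorService executor = Executors.newFixedThreadPool(poolSize,
        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("load-dynamic-partitions-%d").build());
Map<Map<String, String>, Partition> loaded = Maps.newLinkedHashMap();
try {
    for (Future<Partition> future : executor.invokeAll(tasks)) {
        // get() rethrows any task failure as an ExecutionException
        Partition partition = future.get();
        loaded.put(partition.getSpec(), partition);
    }
} finally {
    // cancel anything still pending, whether we succeeded or failed
    executor.shutdownNow();
}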

Example 20 with PartitionSpec

use of org.apache.hadoop.hive.metastore.api.PartitionSpec in project hive by apache.

the class TestSessionHiveMetastoreClientListPartitionsTempTable method testListPartitionsSpecByExprHighMaxParts.

@Test
public void testListPartitionsSpecByExprHighMaxParts() throws Exception {
    Table t = createTable4PartColsParts(getClient()).table;
    TestMetastoreExpr.ExprBuilder e = new TestMetastoreExpr.ExprBuilder(TABLE_NAME);
    List<PartitionSpec> result = new ArrayList<>();
    PartitionsByExprRequest req = new PartitionsByExprRequest(DB_NAME, TABLE_NAME, ByteBuffer.wrap(SerializationUtilities.serializeExpressionToKryo(e.strCol("yyyy").val("2017").pred(">=", 2).build())));
    req.setMaxParts((short) 100);
    req.setId(t.getId());
    getClient().listPartitionsSpecByExpr(req, result);
    assertEquals(4, result.iterator().next().getSharedSDPartitionSpec().getPartitionsSize());
}
Also used : Table(org.apache.hadoop.hive.metastore.api.Table) PartitionsByExprRequest(org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest) ArrayList(java.util.ArrayList) PartitionSpec(org.apache.hadoop.hive.metastore.api.PartitionSpec) TestMetastoreExpr(org.apache.hadoop.hive.metastore.TestMetastoreExpr) Test(org.junit.Test) MetastoreCheckinTest(org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)
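A hedged follow-up sketch: the test assumes all four matching partitions land in the first returned spec, but a caller that cannot rely on grouping can count across every spec, shared-SD or list-composing, using the Thrift-generated size accessors:

int total = 0;
for (PartitionSpec spec : result) {
    if (spec.isSetSharedSDPartitionSpec()) {
        total += spec.getSharedSDPartitionSpec().getPartitionsSize();
    } else if (spec.isSetPartitionList()) {
        total += spec.getPartitionList().getPartitionsSize();
    }
}
assertEquals(4, total);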

Aggregations

PartitionSpec (org.apache.hadoop.hive.metastore.api.PartitionSpec): 27 uses
Test (org.junit.Test): 14 uses
Partition (org.apache.hadoop.hive.metastore.api.Partition): 13 uses
PartitionWithoutSD (org.apache.hadoop.hive.metastore.api.PartitionWithoutSD): 12 uses
ArrayList (java.util.ArrayList): 11 uses
MetastoreCheckinTest (org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest): 10 uses
PartitionSpecWithSharedSD (org.apache.hadoop.hive.metastore.api.PartitionSpecWithSharedSD): 10 uses
Table (org.apache.hadoop.hive.metastore.api.Table): 9 uses
PartitionListComposingSpec (org.apache.hadoop.hive.metastore.api.PartitionListComposingSpec): 6 uses
StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor): 6 uses
PartitionsByExprRequest (org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest): 5 uses
List (java.util.List): 4 uses
MetastoreUnitTest (org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest): 4 uses
PartitionBuilder (org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder): 4 uses
TableBuilder (org.apache.hadoop.hive.metastore.client.builder.TableBuilder): 4 uses
PartitionSpecProxy (org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy): 4 uses
HashMap (java.util.HashMap): 3 uses
GetPartitionsRequest (org.apache.hadoop.hive.metastore.api.GetPartitionsRequest): 3 uses
GetPartitionsResponse (org.apache.hadoop.hive.metastore.api.GetPartitionsResponse): 3 uses
GetProjectionsSpec (org.apache.hadoop.hive.metastore.api.GetProjectionsSpec): 3 uses