Example 16 with IndexFilter

use of org.apache.carbondata.core.index.IndexFilter in project carbondata by apache.

the class CarbonCompactionExecutor method processTableBlocks.

/**
 * Processes the table blocks for compaction.
 *
 * @return Map of String with Carbon iterators
 * Map has 2 elements: UNSORTED and SORTED
 * Map(UNSORTED) = List of iterators which yield unsorted data
 * Map(SORTED) = List of iterators which yield sorted data
 * In Range Column compaction a filter expression is supplied to process
 */
public Map<String, List<RawResultIterator>> processTableBlocks(Configuration configuration, Expression filterExpr) throws IOException {
    Map<String, List<RawResultIterator>> resultList = new HashMap<>(2);
    resultList.put(CarbonCompactionUtil.UNSORTED_IDX, new ArrayList<RawResultIterator>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE));
    resultList.put(CarbonCompactionUtil.SORTED_IDX, new ArrayList<RawResultIterator>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE));
    List<TableBlockInfo> tableBlockInfos = null;
    QueryModelBuilder builder = null;
    if (null == filterExpr) {
        builder = new QueryModelBuilder(carbonTable).projectAllColumns().dataConverter(dataTypeConverter).enableForcedDetailRawQuery();
    } else {
        builder = new QueryModelBuilder(carbonTable).projectAllColumns().filterExpression(new IndexFilter(carbonTable, filterExpr)).dataConverter(dataTypeConverter).enableForcedDetailRawQuery().convertToRangeFilter(false);
    }
    if (enablePageLevelReaderForCompaction()) {
        builder.enableReadPageByPage();
    }
    queryModel = builder.build();
    // iterate each seg ID
    for (Map.Entry<String, TaskBlockInfo> taskMap : segmentMapping.entrySet()) {
        String segmentId = taskMap.getKey();
        List<DataFileFooter> listMetadata = dataFileMetadataSegMapping.get(segmentId);
        // for each segment get taskblock info
        TaskBlockInfo taskBlockInfo = taskMap.getValue();
        Set<String> taskBlockListMapping = taskBlockInfo.getTaskSet();
        // Check if block needs sorting or not
        boolean sortingRequired = !CarbonCompactionUtil.isSortedByCurrentSortColumns(carbonTable, listMetadata.get(0));
        for (String task : taskBlockListMapping) {
            tableBlockInfos = taskBlockInfo.getTableBlockInfoList(task);
            // during update there may be a chance that the cardinality may change within the segment
            // which may lead to failure while converting the row, so get all the blocks present in a
            // task and then split into multiple lists of same column values and create separate
            // RawResultIterator for each tableBlockInfo of same column values. If all the blocks have
            // same column values, then make a single RawResultIterator for all the blocks
            List<List<TableBlockInfo>> listOfTableBlocksBasedOnKeyLength = getListOfTableBlocksBasedOnColumnValueSize(tableBlockInfos);
            for (List<TableBlockInfo> tableBlockInfoList : listOfTableBlocksBasedOnKeyLength) {
                Collections.sort(tableBlockInfoList);
                LOGGER.info("for task -" + task + "- in segment id -" + segmentId + "- block size is -" + tableBlockInfos.size());
                queryModel.setTableBlockInfos(tableBlockInfoList);
                if (sortingRequired) {
                    resultList.get(CarbonCompactionUtil.UNSORTED_IDX).add(getRawResultIterator(configuration, segmentId, task, tableBlockInfoList));
                } else {
                    resultList.get(CarbonCompactionUtil.SORTED_IDX).add(getRawResultIterator(configuration, segmentId, task, tableBlockInfoList));
                }
            }
        }
    }
    return resultList;
}
Also used : TableBlockInfo(org.apache.carbondata.core.datastore.block.TableBlockInfo) RawResultIterator(org.apache.carbondata.core.scan.result.iterator.RawResultIterator) ColumnDriftRawResultIterator(org.apache.carbondata.core.scan.result.iterator.ColumnDriftRawResultIterator) HashMap(java.util.HashMap) QueryModelBuilder(org.apache.carbondata.core.scan.model.QueryModelBuilder) DataFileFooter(org.apache.carbondata.core.metadata.blocklet.DataFileFooter) ArrayList(java.util.ArrayList) List(java.util.List) IndexFilter(org.apache.carbondata.core.index.IndexFilter) TaskBlockInfo(org.apache.carbondata.core.datastore.block.TaskBlockInfo) HashMap(java.util.HashMap) Map(java.util.Map)
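For orientation, here is a minimal sketch of the filtered branch above, building a hypothetical range expression on an INT column named id and feeding it through IndexFilter into the QueryModelBuilder. carbonTable and dataTypeConverter are assumed to be in scope, and the column name and bounds are illustrative only.

// Hypothetical range [1000, 2000) on the range column "id" (illustrative values).
ColumnExpression rangeCol = new ColumnExpression("id", DataTypes.INT);
Expression rangeExpr = new AndExpression(
    new GreaterThanEqualToExpression(rangeCol, new LiteralExpression(1000, DataTypes.INT)),
    new LessThanExpression(rangeCol, new LiteralExpression(2000, DataTypes.INT)));
// Mirrors the else-branch above: wrap the expression in an IndexFilter and disable
// range-filter conversion so compaction evaluates the expression as given.
QueryModel rangeQueryModel = new QueryModelBuilder(carbonTable)
    .projectAllColumns()
    .filterExpression(new IndexFilter(carbonTable, rangeExpr))
    .dataConverter(dataTypeConverter)
    .enableForcedDetailRawQuery()
    .convertToRangeFilter(false)
    .build();

Also used : ColumnExpression(org.apache.carbondata.core.scan.expression.ColumnExpression) LiteralExpression(org.apache.carbondata.core.scan.expression.LiteralExpression) GreaterThanEqualToExpression(org.apache.carbondata.core.scan.expression.conditional.GreaterThanEqualToExpression) LessThanExpression(org.apache.carbondata.core.scan.expression.conditional.LessThanExpression) AndExpression(org.apache.carbondata.core.scan.expression.logical.AndExpression) DataTypes(org.apache.carbondata.core.metadata.datatype.DataTypes) QueryModel(org.apache.carbondata.core.scan.model.QueryModel)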

Example 17 with IndexFilter

use of org.apache.carbondata.core.index.IndexFilter in project carbondata by apache.

the class CarbonTableReader method getInputSplits.

/**
 * Get the Carbon multi-block input splits.
 *
 * @param tableCacheModel cached table
 * @param filters CarbonData filters
 * @param filteredPartitions matched PartitionSpec list for the filter
 * @param config Hadoop configuration
 * @return list of multi-block splits
 * @throws IOException if split enumeration fails
 */
public List<CarbonLocalMultiBlockSplit> getInputSplits(CarbonTableCacheModel tableCacheModel, Expression filters, List<PartitionSpec> filteredPartitions, Configuration config) throws IOException {
    List<CarbonLocalInputSplit> result = new ArrayList<>();
    List<CarbonLocalMultiBlockSplit> multiBlockSplitList = new ArrayList<>();
    CarbonTable carbonTable = tableCacheModel.getCarbonTable();
    TableInfo tableInfo = tableCacheModel.getCarbonTable().getTableInfo();
    config.set("presto.cli.query.id", prestoQueryId);
    config.set(CarbonTableInputFormat.INPUT_SEGMENT_NUMBERS, "");
    String carbonTablePath = carbonTable.getAbsoluteTableIdentifier().getTablePath();
    config.set(CarbonTableInputFormat.INPUT_DIR, carbonTablePath);
    config.set(CarbonTableInputFormat.DATABASE_NAME, carbonTable.getDatabaseName());
    config.set(CarbonTableInputFormat.TABLE_NAME, carbonTable.getTableName());
    config.set("query.id", queryId);
    CarbonInputFormat.setTransactionalTable(config, carbonTable.isTransactionalTable());
    CarbonInputFormat.setTableInfo(config, carbonTable.getTableInfo());
    if (CarbonProperties.getInstance().isCoarseGrainSecondaryIndex(tableInfo.getDatabaseName(), tableInfo.getFactTable().getTableName(), "true")) {
        CarbonInputFormat.checkAndSetSecondaryIndexPruning(carbonTable.getTableInfo(), filters, config);
    }
    JobConf jobConf = new JobConf(config);
    try {
        CarbonTableInputFormat.setTableInfo(config, tableInfo);
        CarbonTableInputFormat<Object> carbonTableInputFormat = createInputFormat(jobConf, carbonTable.getAbsoluteTableIdentifier(), new IndexFilter(carbonTable, filters, true), filteredPartitions);
        Job job = Job.getInstance(jobConf);
        List<InputSplit> splits = carbonTableInputFormat.getSplits(job);
        Gson gson = new Gson();
        if (splits != null && splits.size() > 0) {
            for (InputSplit inputSplit : splits) {
                CarbonInputSplit carbonInputSplit = (CarbonInputSplit) inputSplit;
                result.add(new CarbonLocalInputSplit(carbonInputSplit.getSegmentId(), carbonInputSplit.getPath().toString(), carbonInputSplit.getStart(), carbonInputSplit.getLength(), Arrays.asList(carbonInputSplit.getLocations()), carbonInputSplit.getNumberOfBlocklets(), carbonInputSplit.getVersion().number(), carbonInputSplit.getDeleteDeltaFiles(), carbonInputSplit.getBlockletId(), gson.toJson(carbonInputSplit.getDetailInfo()), carbonInputSplit.getFileFormat().ordinal()));
            }
            // Use block distribution
            List<List<CarbonLocalInputSplit>> inputSplits = new ArrayList<>(result.stream().collect(Collectors.groupingBy(carbonInput -> {
                if (FileFormat.ROW_V1.equals(carbonInput.getFileFormat())) {
                    return carbonInput.getSegmentId().concat(carbonInput.getPath()).concat(carbonInput.getStart() + "");
                }
                return carbonInput.getSegmentId().concat(carbonInput.getPath());
            })).values());
            // TODO: try to optimize the below logic as it may slow down for huge splits
            for (int j = 0; j < inputSplits.size(); j++) {
                multiBlockSplitList.add(new CarbonLocalMultiBlockSplit(inputSplits.get(j), inputSplits.get(j).stream().flatMap(f -> Arrays.stream(getLocations(f))).distinct().toArray(String[]::new)));
            }
            LOGGER.error("Size fo MultiblockList   " + multiBlockSplitList.size());
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    return multiBlockSplitList;
}
Also used : CarbonMetadata(org.apache.carbondata.core.metadata.CarbonMetadata) Arrays(java.util.Arrays) Inject(com.google.inject.Inject) HiveColumnHandle(io.prestosql.plugin.hive.HiveColumnHandle) CarbonCommonConstants(org.apache.carbondata.core.constants.CarbonCommonConstants) SegmentFileStore(org.apache.carbondata.core.metadata.SegmentFileStore) Logger(org.apache.log4j.Logger) Gson(com.google.gson.Gson) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) TBase(org.apache.thrift.TBase) ACCESS_KEY(org.apache.hadoop.fs.s3a.Constants.ACCESS_KEY) IndexMetadata(org.apache.carbondata.core.metadata.schema.indextable.IndexMetadata) CarbonTable(org.apache.carbondata.core.metadata.schema.table.CarbonTable) Expression(org.apache.carbondata.core.scan.expression.Expression) ThriftWrapperSchemaConverterImpl(org.apache.carbondata.core.metadata.converter.ThriftWrapperSchemaConverterImpl) PrestoFilterUtil(org.apache.carbondata.presto.PrestoFilterUtil) CarbonInputFormat(org.apache.carbondata.hadoop.api.CarbonInputFormat) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) Collectors(java.util.stream.Collectors) IndexStatus(org.apache.carbondata.core.index.status.IndexStatus) IndexType(org.apache.carbondata.core.metadata.index.IndexType) Objects(java.util.Objects) List(java.util.List) CarbonTableInputFormat(org.apache.carbondata.hadoop.api.CarbonTableInputFormat) ThriftReader(org.apache.carbondata.core.reader.ThriftReader) Job(org.apache.hadoop.mapreduce.Job) CarbonProperties(org.apache.carbondata.core.util.CarbonProperties) CarbonUtil(org.apache.carbondata.core.util.CarbonUtil) IndexTableInfo(org.apache.carbondata.core.metadata.schema.indextable.IndexTableInfo) TableInfo(org.apache.carbondata.core.metadata.schema.table.TableInfo) HashMap(java.util.HashMap) FileFactory(org.apache.carbondata.core.datastore.impl.FileFactory) AtomicReference(java.util.concurrent.atomic.AtomicReference) LoadMetadataDetails(org.apache.carbondata.core.statusmanager.LoadMetadataDetails) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) SchemaTableName(io.prestosql.spi.connector.SchemaTableName) CollectionUtils(org.apache.commons.collections.CollectionUtils) LogServiceFactory(org.apache.carbondata.common.logging.LogServiceFactory) IndexStoreManager(org.apache.carbondata.core.index.IndexStoreManager) CarbonFile(org.apache.carbondata.core.datastore.filesystem.CarbonFile) InputSplit(org.apache.hadoop.mapreduce.InputSplit) CarbonTablePath(org.apache.carbondata.core.util.path.CarbonTablePath) TupleDomain(io.prestosql.spi.predicate.TupleDomain) IOException(java.io.IOException) JobConf(org.apache.hadoop.mapred.JobConf) PartitionSpec(org.apache.carbondata.core.indexstore.PartitionSpec) CarbonInputSplit(org.apache.carbondata.hadoop.CarbonInputSplit) FileFormat(org.apache.carbondata.core.statusmanager.FileFormat) AbsoluteTableIdentifier(org.apache.carbondata.core.metadata.AbsoluteTableIdentifier) SECRET_KEY(org.apache.hadoop.fs.s3a.Constants.SECRET_KEY) IndexFilter(org.apache.carbondata.core.index.IndexFilter) ENDPOINT(org.apache.hadoop.fs.s3a.Constants.ENDPOINT) SchemaConverter(org.apache.carbondata.core.metadata.converter.SchemaConverter) ArrayList(java.util.ArrayList) Gson(com.google.gson.Gson) CarbonInputSplit(org.apache.carbondata.hadoop.CarbonInputSplit) IOException(java.io.IOException) CarbonTable(org.apache.carbondata.core.metadata.schema.table.CarbonTable) IndexTableInfo(org.apache.carbondata.core.metadata.schema.indextable.IndexTableInfo) TableInfo(org.apache.carbondata.core.metadata.schema.table.TableInfo) List(java.util.List) ArrayList(java.util.ArrayList) IndexFilter(org.apache.carbondata.core.index.IndexFilter) Job(org.apache.hadoop.mapreduce.Job) JobConf(org.apache.hadoop.mapred.JobConf) InputSplit(org.apache.hadoop.mapreduce.InputSplit) CarbonInputSplit(org.apache.carbondata.hadoop.CarbonInputSplit)
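Outside the Presto connector, the same split planning can be driven directly through the Hadoop API. A hedged sketch, assuming CarbonInputFormat.setFilterPredicates(Configuration, IndexFilter) is available as in recent CarbonData releases; the configuration keys mirror those set in getInputSplits above.

public List<InputSplit> planSplits(CarbonTable table, Expression filterExpr) throws IOException {
    Configuration conf = new Configuration();
    conf.set(CarbonTableInputFormat.INPUT_DIR, table.getTablePath());
    conf.set(CarbonTableInputFormat.DATABASE_NAME, table.getDatabaseName());
    conf.set(CarbonTableInputFormat.TABLE_NAME, table.getTableName());
    CarbonInputFormat.setTransactionalTable(conf, table.isTransactionalTable());
    CarbonInputFormat.setTableInfo(conf, table.getTableInfo());
    // lazyResolve = true defers filter resolution, matching the three-argument
    // IndexFilter constructor used in the example above (assumption).
    CarbonInputFormat.setFilterPredicates(conf, new IndexFilter(table, filterExpr, true));
    return new CarbonTableInputFormat<Object>().getSplits(Job.getInstance(conf));
}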

Example 18 with IndexFilter

use of org.apache.carbondata.core.index.IndexFilter in project carbondata by apache.

the class CarbondataPageSource method createQueryModel.

/**
 * Builds the query model for reading one Carbon multi-block split.
 *
 * @param carbondataSplit Hive split whose schema property "carbonSplit" holds the
 * serialized Carbon multi-block split
 * @param tableHandle table handle supplying the compact effective predicate
 * @param columns projected column handles
 * @param conf Hadoop configuration
 * @return the query model for the split
 */
private QueryModel createQueryModel(HiveSplit carbondataSplit, ConnectorTableHandle tableHandle, List<? extends ColumnHandle> columns, Configuration conf) {
    try {
        CarbonProjection carbonProjection = getCarbonProjection(columns);
        conf.set(CarbonTableInputFormat.INPUT_SEGMENT_NUMBERS, "");
        String carbonTablePath = carbonTable.getAbsoluteTableIdentifier().getTablePath();
        CarbonTableInputFormat.setTransactionalTable(conf, carbonTable.getTableInfo().isTransactionalTable());
        CarbonTableInputFormat.setTableInfo(conf, carbonTable.getTableInfo());
        conf.set(CarbonTableInputFormat.INPUT_DIR, carbonTablePath);
        conf.set("query.id", queryId);
        JobConf jobConf = new JobConf(conf);
        HiveTableHandle hiveTable = (HiveTableHandle) tableHandle;
        CarbonTableInputFormat carbonTableInputFormat = createInputFormat(jobConf, carbonTable, new IndexFilter(carbonTable, PrestoFilterUtil.parseFilterExpression(hiveTable.getCompactEffectivePredicate())), carbonProjection);
        TaskAttemptContextImpl hadoopAttemptContext = new TaskAttemptContextImpl(jobConf, new TaskAttemptID("", 1, TaskType.MAP, 0, 0));
        CarbonMultiBlockSplit carbonInputSplit = CarbonLocalMultiBlockSplit.convertSplit(carbondataSplit.getSchema().getProperty("carbonSplit"));
        QueryModel queryModel = carbonTableInputFormat.createQueryModel(carbonInputSplit, hadoopAttemptContext);
        queryModel.setQueryId(queryId);
        queryModel.setVectorReader(true);
        queryModel.setStatisticsRecorder(CarbonTimeStatisticsFactory.createExecutorRecorder(queryModel.getQueryId()));
        List<TableBlockInfo> tableBlockInfoList = CarbonInputSplit.createBlocks(carbonInputSplit.getAllSplits());
        queryModel.setTableBlockInfos(tableBlockInfoList);
        return queryModel;
    } catch (IOException e) {
        throw new RuntimeException("Unable to get the Query Model ", e);
    }
}
Also used : TableBlockInfo(org.apache.carbondata.core.datastore.block.TableBlockInfo) TaskAttemptID(org.apache.hadoop.mapreduce.TaskAttemptID) IOException(java.io.IOException) QueryModel(org.apache.carbondata.core.scan.model.QueryModel) CarbonProjection(org.apache.carbondata.hadoop.CarbonProjection) HiveTableHandle(io.prestosql.plugin.hive.HiveTableHandle) CarbonMultiBlockSplit(org.apache.carbondata.hadoop.CarbonMultiBlockSplit) TaskAttemptContextImpl(org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl) CarbonTableInputFormat(org.apache.carbondata.hadoop.api.CarbonTableInputFormat) IndexFilter(org.apache.carbondata.core.index.IndexFilter) JobConf(org.apache.hadoop.mapred.JobConf)
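Examples 18 and 19 differ only in where the predicate comes from: the table handle's compact effective predicate here, the split's effective predicate below. The conversion step can be isolated in a small helper; a sketch, assuming parseFilterExpression returns null when nothing is pushable, in which case IndexFilter simply performs no pruning.

// Converts a Presto effective predicate into a CarbonData IndexFilter (sketch).
private IndexFilter toIndexFilter(CarbonTable table, TupleDomain<HiveColumnHandle> predicate) {
    // Assumption: a null expression (no pushable conjuncts) is accepted by IndexFilter.
    Expression filterExpr = PrestoFilterUtil.parseFilterExpression(predicate);
    return new IndexFilter(table, filterExpr);
}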

Example 19 with IndexFilter

use of org.apache.carbondata.core.index.IndexFilter in project carbondata by apache.

the class CarbondataPageSource method createQueryModel.

/**
 * Builds the query model for reading one Carbon multi-block split.
 *
 * @param carbondataSplit Hive split supplying the effective predicate and, via its
 * schema property "carbonSplit", the serialized Carbon multi-block split
 * @param columns projected column handles
 * @param conf Hadoop configuration
 * @return the query model for the split
 */
private QueryModel createQueryModel(HiveSplit carbondataSplit, List<? extends ColumnHandle> columns, Configuration conf) {
    try {
        CarbonProjection carbonProjection = getCarbonProjection(columns);
        conf.set(CarbonTableInputFormat.INPUT_SEGMENT_NUMBERS, "");
        String carbonTablePath = carbonTable.getAbsoluteTableIdentifier().getTablePath();
        CarbonTableInputFormat.setTransactionalTable(conf, carbonTable.getTableInfo().isTransactionalTable());
        CarbonTableInputFormat.setTableInfo(conf, carbonTable.getTableInfo());
        conf.set(CarbonTableInputFormat.INPUT_DIR, carbonTablePath);
        conf.set("query.id", queryId);
        JobConf jobConf = new JobConf(conf);
        CarbonTableInputFormat carbonTableInputFormat = createInputFormat(jobConf, carbonTable, new IndexFilter(carbonTable, PrestoFilterUtil.parseFilterExpression(carbondataSplit.getEffectivePredicate())), carbonProjection);
        TaskAttemptContextImpl hadoopAttemptContext = new TaskAttemptContextImpl(jobConf, new TaskAttemptID("", 1, TaskType.MAP, 0, 0));
        CarbonMultiBlockSplit carbonInputSplit = CarbonLocalMultiBlockSplit.convertSplit(carbondataSplit.getSchema().getProperty("carbonSplit"));
        QueryModel queryModel = carbonTableInputFormat.createQueryModel(carbonInputSplit, hadoopAttemptContext);
        queryModel.setQueryId(queryId);
        queryModel.setVectorReader(true);
        queryModel.setStatisticsRecorder(CarbonTimeStatisticsFactory.createExecutorRecorder(queryModel.getQueryId()));
        List<TableBlockInfo> tableBlockInfoList = CarbonInputSplit.createBlocks(carbonInputSplit.getAllSplits());
        queryModel.setTableBlockInfos(tableBlockInfoList);
        return queryModel;
    } catch (IOException e) {
        throw new RuntimeException("Unable to get the Query Model ", e);
    }
}
Also used : TableBlockInfo(org.apache.carbondata.core.datastore.block.TableBlockInfo) TaskAttemptID(org.apache.hadoop.mapreduce.TaskAttemptID) IOException(java.io.IOException) QueryModel(org.apache.carbondata.core.scan.model.QueryModel) CarbonProjection(org.apache.carbondata.hadoop.CarbonProjection) CarbonMultiBlockSplit(org.apache.carbondata.hadoop.CarbonMultiBlockSplit) TaskAttemptContextImpl(org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl) CarbonTableInputFormat(org.apache.carbondata.hadoop.api.CarbonTableInputFormat) IndexFilter(org.apache.carbondata.core.index.IndexFilter) JobConf(org.apache.hadoop.mapred.JobConf)

Example 20 with IndexFilter

use of org.apache.carbondata.core.index.IndexFilter in project carbondata by apache.

the class BlockletIndexFactory method getTableBlockIndexUniqueIdentifierUsingSegmentMinMax.

/**
 * Using block-level min/max values, identify whether the segment has to be added for
 * further pruning and its index info loaded to cache.
 * @param segment segment to be checked before loading its block indexes
 * @param segmentMetaDataInfo block-level min/max values of the segment's columns
 * @param filter filter expression
 * @param identifiers tableBlockIndexUniqueIdentifiers
 * @param tableBlockIndexUniqueIdentifierWrappers output list to collect the wrapped
 * tableBlockIndexUniqueIdentifiers
 */
private void getTableBlockIndexUniqueIdentifierUsingSegmentMinMax(Segment segment, SegmentMetaDataInfo segmentMetaDataInfo, IndexFilter filter, Set<TableBlockIndexUniqueIdentifier> identifiers, List<TableBlockIndexUniqueIdentifierWrapper> tableBlockIndexUniqueIdentifierWrappers) {
    boolean isScanRequired = false;
    Map<String, SegmentColumnMetaDataInfo> segmentColumnMetaDataInfoMap = segmentMetaDataInfo.getSegmentColumnMetaDataInfoMap();
    int length = segmentColumnMetaDataInfoMap.size();
    // Add columnSchemas based on the columns present in segment
    List<ColumnSchema> columnSchemas = new ArrayList<>();
    byte[][] min = new byte[length][];
    byte[][] max = new byte[length][];
    boolean[] minMaxFlag = new boolean[length];
    int i = 0;
    // get current columnSchema list for the table
    Map<String, ColumnSchema> tableColumnSchemas = this.getCarbonTable().getTableInfo().getFactTable().getListOfColumns().stream().collect(Collectors.toMap(ColumnSchema::getColumnUniqueId, ColumnSchema::clone));
    // fill min,max and columnSchema values
    for (Map.Entry<String, SegmentColumnMetaDataInfo> columnMetaData : segmentColumnMetaDataInfoMap.entrySet()) {
        ColumnSchema columnSchema = tableColumnSchemas.get(columnMetaData.getKey());
        if (null != columnSchema) {
            // get segment sort column and column drift info
            boolean isSortColumnInSegment = columnMetaData.getValue().isSortColumn();
            boolean isColumnDriftInSegment = columnMetaData.getValue().isColumnDrift();
            if (null != columnSchema.getColumnProperties()) {
                // get current sort column and column drift info from current columnSchema
                String isSortColumn = columnSchema.getColumnProperties().get(CarbonCommonConstants.SORT_COLUMNS);
                String isColumnDrift = columnSchema.getColumnProperties().get(CarbonCommonConstants.COLUMN_DRIFT);
                if (null != isSortColumn) {
                    if (isSortColumn.equalsIgnoreCase("true") && !isSortColumnInSegment) {
                        // Unset current column schema column properties
                        modifyColumnSchemaForSortColumn(columnSchema, isColumnDriftInSegment, isColumnDrift, false);
                    } else if (isSortColumn.equalsIgnoreCase("false") && isSortColumnInSegment) {
                        // set sort column to true in current column schema column properties
                        modifyColumnSchemaForSortColumn(columnSchema, isColumnDriftInSegment, isColumnDrift, true);
                    }
                } else {
                    modifyColumnSchemaForSortColumn(columnSchema, isColumnDriftInSegment, isColumnDrift, false);
                }
            }
            columnSchemas.add(columnSchema);
            min[i] = columnMetaData.getValue().getColumnMinValue();
            max[i] = columnMetaData.getValue().getColumnMaxValue();
            minMaxFlag[i] = min[i].length != 0 && max[i].length != 0;
            i++;
        }
    }
    // get segmentProperties using created columnSchemas list
    SegmentProperties segmentProperties = SegmentPropertiesAndSchemaHolder.getInstance().addSegmentProperties(this.getCarbonTable(), columnSchemas, segment.getSegmentNo()).getSegmentProperties();
    FilterResolverIntf resolver = new IndexFilter(segmentProperties, this.getCarbonTable(), filter.getExpression()).getResolver();
    // prepare filter executor using IndexFilter resolver
    FilterExecutor filterExecutor = FilterUtil.getFilterExecutorTree(resolver, segmentProperties, null, null, false);
    // check if block has to be pruned based on segment minmax
    BitSet scanRequired = filterExecutor.isScanRequired(max, min, minMaxFlag);
    if (!scanRequired.isEmpty()) {
        isScanRequired = true;
    }
    if (isScanRequired) {
        for (TableBlockIndexUniqueIdentifier tableBlockIndexUniqueIdentifier : identifiers) {
            tableBlockIndexUniqueIdentifierWrappers.add(new TableBlockIndexUniqueIdentifierWrapper(tableBlockIndexUniqueIdentifier, this.getCarbonTable()));
        }
    }
}
Also used : FilterExecutor(org.apache.carbondata.core.scan.filter.executer.FilterExecutor) ArrayList(java.util.ArrayList) BitSet(java.util.BitSet) TableBlockIndexUniqueIdentifier(org.apache.carbondata.core.indexstore.TableBlockIndexUniqueIdentifier) ColumnSchema(org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema) TableBlockIndexUniqueIdentifierWrapper(org.apache.carbondata.core.indexstore.TableBlockIndexUniqueIdentifierWrapper) SegmentProperties(org.apache.carbondata.core.datastore.block.SegmentProperties) IndexFilter(org.apache.carbondata.core.index.IndexFilter) SegmentColumnMetaDataInfo(org.apache.carbondata.core.segmentmeta.SegmentColumnMetaDataInfo) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) FilterResolverIntf(org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf)
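The pruning decision itself condenses to three calls, shown here with the same APIs as above; segmentProperties, filter, and the min, max, and minMaxFlag arrays are assumed to be prepared exactly as in the method body.

// Resolve the filter against the segment's reconstructed schema, build the
// filter executor tree, and test the segment-level min/max envelope.
FilterResolverIntf resolver =
    new IndexFilter(segmentProperties, getCarbonTable(), filter.getExpression()).getResolver();
FilterExecutor filterExecutor =
    FilterUtil.getFilterExecutorTree(resolver, segmentProperties, null, null, false);
// A non-empty bit set means some min/max range may satisfy the filter, so the
// segment's block indexes must be loaded; an empty one lets the segment be skipped.
boolean scanRequired = !filterExecutor.isScanRequired(max, min, minMaxFlag).isEmpty();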

Aggregations

IndexFilter (org.apache.carbondata.core.index.IndexFilter): 27
Configuration (org.apache.hadoop.conf.Configuration): 16
InputSplit (org.apache.hadoop.mapreduce.InputSplit): 16
JobConf (org.apache.hadoop.mapred.JobConf): 15
Job (org.apache.hadoop.mapreduce.Job): 15
ExprNodeGenericFuncDesc (org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc): 12
Test (org.junit.Test): 12
CarbonFileInputFormat (org.apache.carbondata.hadoop.api.CarbonFileInputFormat): 11
ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc): 11
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc): 11
IOException (java.io.IOException): 9
ExprNodeConstantDesc (org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc): 9
ArrayList (java.util.ArrayList): 8
List (java.util.List): 5
CarbonTable (org.apache.carbondata.core.metadata.schema.table.CarbonTable): 5
CarbonInputSplit (org.apache.carbondata.hadoop.CarbonInputSplit): 5
CarbonTableInputFormat (org.apache.carbondata.hadoop.api.CarbonTableInputFormat): 5
HashMap (java.util.HashMap): 4
CarbonTablePath (org.apache.carbondata.core.util.path.CarbonTablePath): 4
Map (java.util.Map): 3