Usage of org.apache.carbondata.core.metadata.datatype.DataType in the Apache CarbonData project: the CarbonUtil class, method getValueCompressionModel.
/**
 * Builds the value compression model for the measure data chunks described
 * by the given encoder metadata list.
 *
 * @param encodeMetaList per-measure value encoder metadata
 * @return value compression model for the write path
 */
public static WriterCompressModel getValueCompressionModel(List<ValueEncoderMeta> encodeMetaList) {
int measureCount = encodeMetaList.size();
Object[] maxValue = new Object[measureCount];
Object[] minValue = new Object[measureCount];
Object[] uniqueValue = new Object[measureCount];
int[] decimal = new int[measureCount];
DataType[] type = new DataType[measureCount];
byte[] dataTypeSelected = new byte[measureCount];
// copy each measure's encoder metadata into the parallel arrays
// expected by MeasureMetaDataModel
int index = 0;
for (ValueEncoderMeta meta : encodeMetaList) {
maxValue[index] = meta.getMaxValue();
minValue[index] = meta.getMinValue();
uniqueValue[index] = meta.getUniqueValue();
decimal[index] = meta.getDecimal();
type[index] = meta.getType();
dataTypeSelected[index] = meta.getDataTypeSelected();
index++;
}
MeasureMetaDataModel measureMetadataModel = new MeasureMetaDataModel(minValue, maxValue, decimal, measureCount, uniqueValue, type, dataTypeSelected);
return ValueCompressionUtil.getWriterCompressModel(measureMetadataModel);
}
Usage of org.apache.carbondata.core.metadata.datatype.DataType in the Apache CarbonData project: the AbstractQueryExecutor class, method initQuery.
/**
 * Fills the executor {@code queryProperties} from the given query model:
 * resolves the model, loads (or fetches from cache) the table block indexes,
 * collects the measure data types and the dictionary mappings needed during
 * query execution, recording statistics along the way.
 *
 * @param queryModel query model describing the query to be executed
 * @throws IOException if loading the table blocks or dictionaries fails
 */
protected void initQuery(QueryModel queryModel) throws IOException {
// tag the current thread with partition id + query id for log correlation
StandardLogService.setThreadName(StandardLogService.getPartitionID(queryModel.getAbsoluteTableIdentifier().getCarbonTableIdentifier().getTableName()), queryModel.getQueryId());
LOGGER.info("Query will be executed on table: " + queryModel.getAbsoluteTableIdentifier().getCarbonTableIdentifier().getTableName());
// add executor service for query execution
queryProperties.executorService = Executors.newCachedThreadPool();
// Initializing statistics list to record the query statistics
// creating copy on write to handle concurrent scenario
queryProperties.queryStatisticsRecorder = CarbonTimeStatisticsFactory.createExecutorRecorder(queryModel.getQueryId());
queryModel.setStatisticsRecorder(queryProperties.queryStatisticsRecorder);
QueryUtil.resolveQueryModel(queryModel);
QueryStatistic queryStatistic = new QueryStatistic();
// sort the block info
// so block will be loaded in sorted order this will be required for
// query execution
Collections.sort(queryModel.getTableBlockInfos());
// get the table blocks
CacheProvider cacheProvider = CacheProvider.getInstance();
BlockIndexStore<TableBlockUniqueIdentifier, AbstractIndex> cache = (BlockIndexStore) cacheProvider.createCache(CacheType.EXECUTOR_BTREE, queryModel.getTable().getStorePath());
// remove the invalid table blocks, block which is deleted or compacted
cache.removeTableBlocks(queryModel.getInvalidSegmentIds(), queryModel.getAbsoluteTableIdentifier());
List<TableBlockUniqueIdentifier> tableBlockUniqueIdentifiers = prepareTableBlockUniqueIdentifier(queryModel.getTableBlockInfos(), queryModel.getAbsoluteTableIdentifier());
// stale entries must be dropped BEFORE getAll so updated blocks are reloaded
cache.removeTableBlocksIfHorizontalCompactionDone(queryModel);
queryProperties.dataBlocks = cache.getAll(tableBlockUniqueIdentifiers);
// record how long loading the block indexes took
queryStatistic.addStatistics(QueryStatisticsConstants.LOAD_BLOCKS_EXECUTOR, System.currentTimeMillis());
queryProperties.queryStatisticsRecorder.recordStatistics(queryStatistic);
// calculating the total number of aggregated columns
int aggTypeCount = queryModel.getQueryMeasures().size();
int currentIndex = 0;
DataType[] dataTypes = new DataType[aggTypeCount];
for (QueryMeasure carbonMeasure : queryModel.getQueryMeasures()) {
// adding the data type and aggregation type of all the measure this
// can be used
// to select the aggregator
dataTypes[currentIndex] = carbonMeasure.getMeasure().getDataType();
currentIndex++;
}
queryProperties.measureDataTypes = dataTypes;
// as aggregation will be executed in following order
// 1.aggregate dimension expression
// 2. expression
// 3. query measure
// so calculating the index of the expression start index
// and measure column start index
queryProperties.filterMeasures = new HashSet<>();
queryProperties.complexFilterDimension = new HashSet<>();
QueryUtil.getAllFilterDimensions(queryModel.getFilterExpressionResolverTree(), queryProperties.complexFilterDimension, queryProperties.filterMeasures);
queryStatistic = new QueryStatistic();
// dictionary column unique column id to dictionary mapping
// which will be used to get column actual data
queryProperties.columnToDictionayMapping = QueryUtil.getDimensionDictionaryDetail(queryModel.getQueryDimension(), queryProperties.complexFilterDimension, queryModel.getAbsoluteTableIdentifier());
// record how long the dictionary lookup took
queryStatistic.addStatistics(QueryStatisticsConstants.LOAD_DICTIONARY, System.currentTimeMillis());
queryProperties.queryStatisticsRecorder.recordStatistics(queryStatistic);
queryModel.setColumnToDictionaryMapping(queryProperties.columnToDictionayMapping);
}
Usage of org.apache.carbondata.core.metadata.datatype.DataType in the Apache CarbonData project: the ValueCompressionUtil class, method getWriterCompressModel.
/**
 * Assembles the {@link WriterCompressModel} used on the write path from the
 * measure metadata (min/max/unique values, mantissa and selected data types).
 *
 * @param measureMDMdl per-measure metadata collected while writing
 * @return fully populated write-path compression model
 */
public static WriterCompressModel getWriterCompressModel(MeasureMetaDataModel measureMDMdl) {
int measureCount = measureMDMdl.getMeasureCount();
Object[] minValue = measureMDMdl.getMinValue();
Object[] maxValue = measureMDMdl.getMaxValue();
Object[] uniqueValue = measureMDMdl.getUniqueValue();
int[] mantissa = measureMDMdl.getMantissa();
DataType[] type = measureMDMdl.getType();
byte[] dataTypeSelected = measureMDMdl.getDataTypeSelected();
DataType[] actualType = new DataType[measureCount];
DataType[] convertedType = new DataType[measureCount];
CompressionFinder[] compressionFinders = new CompressionFinder[measureCount];
// resolve, per measure, the compression scheme plus the actual and
// converted data types it implies
for (int i = 0; i < measureCount; i++) {
CompressionFinder finder = ValueCompressionUtil.getCompressionFinder(maxValue[i], minValue[i], mantissa[i], type[i], dataTypeSelected[i]);
compressionFinders[i] = finder;
actualType[i] = finder.getActualDataType();
convertedType[i] = finder.getConvertedDataType();
}
WriterCompressModel compressionModel = new WriterCompressModel();
compressionModel.setCompressionFinders(compressionFinders);
compressionModel.setMaxValue(maxValue);
compressionModel.setMantissa(mantissa);
compressionModel.setConvertedDataType(convertedType);
compressionModel.setActualDataType(actualType);
compressionModel.setMinValue(minValue);
compressionModel.setUniqueValue(uniqueValue);
compressionModel.setType(type);
compressionModel.setDataTypeSelected(dataTypeSelected);
ValueCompressionHolder[] values = ValueCompressionUtil.getValueCompressionHolder(compressionFinders);
compressionModel.setValueCompressionHolder(values);
return compressionModel;
}
Usage of org.apache.carbondata.core.metadata.datatype.DataType in the Apache CarbonData project: the ValueCompressionUtil class, method getLongCompressorFinder.
/**
 * Chooses between adaptive and delta compression for a long measure by
 * comparing the storage size needed for the raw max value against the size
 * needed for the max-min delta, picking whichever is narrower.
 */
private static CompressionFinder getLongCompressorFinder(Object maxValue, Object minValue, int mantissa, byte dataTypeSelected, DataType measureStoreType) {
DataType adaptiveDataType = getDataType((long) maxValue, mantissa, dataTypeSelected);
// When the adaptive type is already LONG, (max - min) may overflow, so
// it is not possible to determine the compression type; fall back to LONG.
DataType deltaDataType = (adaptiveDataType == DataType.LONG)
    ? DataType.LONG
    : getDataType((long) maxValue - (long) minValue, mantissa, dataTypeSelected);
int adaptiveSize = getSize(adaptiveDataType);
int deltaSize = getSize(deltaDataType);
if (adaptiveSize > deltaSize) {
// delta encoding stores the values in the narrower delta type
return new CompressionFinder(COMPRESSION_TYPE.DELTA_DOUBLE, DataType.LONG, deltaDataType, measureStoreType);
}
// NOTE(review): when adaptiveSize < deltaSize the original passes
// deltaDataType (not adaptiveDataType) with ADAPTIVE — looks suspicious
// but is preserved as-is; confirm against getCompressionFinder callers.
DataType changedType = (adaptiveSize < deltaSize) ? deltaDataType : adaptiveDataType;
return new CompressionFinder(COMPRESSION_TYPE.ADAPTIVE, DataType.LONG, changedType, measureStoreType);
}
Usage of org.apache.carbondata.core.metadata.datatype.DataType in the Apache CarbonData project: the ExpressionResult class, method equals.
/**
 * Compares two {@link ExpressionResult}s by value. The comparison is done in
 * the data type with the higher precedence so the narrower value is widened
 * first (e.g. INT widened to LONG); comparing the other way round would throw.
 * Any {@link FilterIllegalMemberException} during conversion means not equal.
 */
@Override
public boolean equals(Object obj) {
if (!(obj instanceof ExpressionResult)) {
// also rejects null
return false;
}
if (this == obj) {
return true;
}
if (getClass() != obj.getClass()) {
return false;
}
ExpressionResult other = (ExpressionResult) obj;
// same reference (or both null values) — trivially equal
if (this.value == other.value) {
return true;
}
if (this.isNull() || other.isNull()) {
return false;
}
// pick the higher-precedence data type for the comparison
DataType compareType;
if (this.getDataType().getPrecedenceOrder() > other.getDataType().getPrecedenceOrder()) {
compareType = this.getDataType();
} else {
compareType = other.getDataType();
}
try {
switch(compareType) {
case STRING:
return this.getString().equals(other.getString());
case SHORT:
return this.getShort().equals(other.getShort());
case INT:
return this.getInt().equals(other.getInt());
case LONG:
case DATE:
case TIMESTAMP:
// temporal types are compared via their long representation
return this.getLong().equals(other.getLong());
case DOUBLE:
return this.getDouble().equals(other.getDouble());
case DECIMAL:
return this.getDecimal().equals(other.getDecimal());
default:
// unhandled data types compare as not equal (matches original)
return false;
}
} catch (FilterIllegalMemberException ex) {
return false;
}
}
Aggregations