use of org.apache.druid.segment.column.ColumnCapabilities in project druid by druid-io.
the class LookupSegmentTest method test_asStorageAdapter_getColumnCapabilitiesK.
@Test
public void test_asStorageAdapter_getColumnCapabilitiesK() {
  final ColumnCapabilities capabilities = LOOKUP_SEGMENT.asStorageAdapter().getColumnCapabilities("k");
  Assert.assertEquals(ValueType.STRING, capabilities.getType());
  // Note: the "k" column does not actually have multiple values, but the RowBasedStorageAdapter doesn't allow
  // reporting complete single-valued capabilities. It would be good to change this in the future, so query engines
  // running on top of lookups can take advantage of singly-valued optimizations.
  Assert.assertTrue(capabilities.hasMultipleValues().isUnknown());
  Assert.assertFalse(capabilities.isDictionaryEncoded().isTrue());
}
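Because hasMultipleValues() is tri-state (TRUE, FALSE, or UNKNOWN) rather than a plain boolean, an engine reading these capabilities generally has to treat UNKNOWN as potentially multi-valued. A minimal illustrative helper along those lines, using only the Capable methods exercised by the test (not part of the Druid codebase):

import org.apache.druid.segment.column.ColumnCapabilities;

// Conservatively decide whether a consumer must be prepared for multi-valued rows.
static boolean mustHandleMultipleValues(ColumnCapabilities capabilities) {
  // A null or UNKNOWN answer cannot prove single-valuedness, so assume the worst.
  return capabilities == null
         || capabilities.hasMultipleValues().isUnknown()
         || capabilities.hasMultipleValues().isTrue();
}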
use of org.apache.druid.segment.column.ColumnCapabilities in project druid by druid-io.
the class Sink method makeNewCurrIndex.
private FireHydrant makeNewCurrIndex(long minTimestamp, DataSchema schema) {
  final IncrementalIndexSchema indexSchema = new IncrementalIndexSchema.Builder()
      .withMinTimestamp(minTimestamp)
      .withTimestampSpec(schema.getTimestampSpec())
      .withQueryGranularity(schema.getGranularitySpec().getQueryGranularity())
      .withDimensionsSpec(schema.getDimensionsSpec())
      .withMetrics(schema.getAggregators())
      .withRollup(schema.getGranularitySpec().isRollup())
      .build();

  // Build the incremental index according to the spec that was chosen by the user.
  final IncrementalIndex newIndex = appendableIndexSpec.builder()
      .setIndexSchema(indexSchema)
      .setMaxRowCount(maxRowsInMemory)
      .setMaxBytesInMemory(maxBytesInMemory)
      .setUseMaxMemoryEstimates(useMaxMemoryEstimates)
      .build();

  final FireHydrant old;
  synchronized (hydrantLock) {
    if (writable) {
      old = currHydrant;
      int newCount = 0;
      int numHydrants = hydrants.size();
      if (numHydrants > 0) {
        FireHydrant lastHydrant = hydrants.get(numHydrants - 1);
        newCount = lastHydrant.getCount() + 1;
        if (!indexSchema.getDimensionsSpec().hasCustomDimensions()) {
          Map<String, ColumnCapabilities> oldCapabilities;
          if (lastHydrant.hasSwapped()) {
            // The last hydrant has been persisted; read capabilities from its QueryableIndex.
            oldCapabilities = new HashMap<>();
            ReferenceCountingSegment segment = lastHydrant.getIncrementedSegment();
            try {
              QueryableIndex oldIndex = segment.asQueryableIndex();
              for (String dim : oldIndex.getAvailableDimensions()) {
                dimOrder.add(dim);
                oldCapabilities.put(dim, oldIndex.getColumnHolder(dim).getCapabilities());
              }
            } finally {
              segment.decrement();
            }
          } else {
            // The last hydrant is still in memory; read its state directly.
            IncrementalIndex oldIndex = lastHydrant.getIndex();
            dimOrder.addAll(oldIndex.getDimensionOrder());
            oldCapabilities = oldIndex.getColumnCapabilities();
          }
          newIndex.loadDimensionIterable(dimOrder, oldCapabilities);
        }
      }
      currHydrant = new FireHydrant(newIndex, newCount, getSegment().getId());
      if (old != null) {
        numRowsExcludingCurrIndex.addAndGet(old.getIndex().size());
      }
      hydrants.add(currHydrant);
    } else {
      // Oops, someone called finishWriting while we were making this new index.
      newIndex.close();
      throw new ISE("finishWriting() called during swap");
    }
  }
  return old;
}
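The two builder chains above are the generic path: the schema comes from the user's DataSchema, and appendableIndexSpec.builder() yields whichever AppendableIndexBuilder the ingestion spec configured. As a standalone sketch, assuming the on-heap implementation (OnheapIncrementalIndex.Builder) and hypothetical limits in place of the Sink's fields:

import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.segment.incremental.IncrementalIndex;
import org.apache.druid.segment.incremental.IncrementalIndexSchema;
import org.apache.druid.segment.incremental.OnheapIncrementalIndex;

// Hypothetical values; a real Sink derives all of these from the ingestion DataSchema.
final IncrementalIndexSchema indexSchema = new IncrementalIndexSchema.Builder()
    .withMinTimestamp(0L)                      // rows with earlier timestamps are rejected
    .withQueryGranularity(Granularities.NONE)  // do not truncate timestamps
    .withRollup(false)                         // keep rows as-is, no aggregation at ingest
    .build();

final IncrementalIndex index = new OnheapIncrementalIndex.Builder()
    .setIndexSchema(indexSchema)
    .setMaxRowCount(75_000)                    // stands in for maxRowsInMemory above
    .build();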
use of org.apache.druid.segment.column.ColumnCapabilities in project druid by druid-io.
the class DimensionHandlerUtils method getColumnValueSelectorFromDimensionSpec.
private static ColumnValueSelector<?> getColumnValueSelectorFromDimensionSpec(
    DimensionSpec dimSpec,
    ColumnSelectorFactory columnSelectorFactory
) {
  String dimName = dimSpec.getDimension();
  ColumnCapabilities capabilities = columnSelectorFactory.getColumnCapabilities(dimName);
  capabilities = getEffectiveCapabilities(dimSpec, capabilities);
  if (capabilities.is(ValueType.STRING)) {
    return columnSelectorFactory.makeDimensionSelector(dimSpec);
  }
  return columnSelectorFactory.makeColumnValueSelector(dimSpec.getDimension());
}
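getColumnValueSelectorFromDimensionSpec is private, so callers only see this dispatch indirectly; the same logic can be written inline against any ColumnSelectorFactory. A minimal sketch, where the column name "country" and the factory variable are hypothetical, relying on DimensionSelector being a ColumnValueSelector:

import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.dimension.DimensionSpec;
import org.apache.druid.segment.ColumnValueSelector;
import org.apache.druid.segment.column.ColumnCapabilities;
import org.apache.druid.segment.column.ValueType;

final DimensionSpec dimSpec = DefaultDimensionSpec.of("country"); // hypothetical column
final ColumnCapabilities caps = factory.getColumnCapabilities(dimSpec.getDimension());
// String columns get the dimension selector; everything else the generic value selector.
final ColumnValueSelector<?> selector = (caps != null && caps.is(ValueType.STRING))
    ? factory.makeDimensionSelector(dimSpec)
    : factory.makeColumnValueSelector(dimSpec.getDimension());

Note that the private method routes capabilities through getEffectiveCapabilities() before the type check; the null guard here is a simplification of that step.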
use of org.apache.druid.segment.column.ColumnCapabilities in project druid by druid-io.
the class ColumnProcessors method makeProcessorInternal.
/**
 * Creates "column processors", which are objects that wrap a single input column and provide some
 * functionality on top of it.
 *
 * @param inputCapabilitiesFn   function that returns capabilities of the column being processed. The type provided
 *                              by these capabilities will be used to determine what kind of selector to create. If
 *                              this function returns null, then processorFactory.defaultType() will be used to
 *                              construct a set of assumed capabilities.
 * @param dimensionSelectorFn   function that creates a DimensionSelector for the column being processed. Will be
 *                              called if the column type is string.
 * @param valueSelectorFunction function that creates a ColumnValueSelector for the column being processed. Will be
 *                              called if the column type is long, float, double, or complex.
 * @param processorFactory      object that encapsulates the knowledge about how to create processors
 * @param selectorFactory       column selector factory used for creating the processor
 */
private static <T> T makeProcessorInternal(
    final Function<ColumnSelectorFactory, ColumnCapabilities> inputCapabilitiesFn,
    final Function<ColumnSelectorFactory, DimensionSelector> dimensionSelectorFn,
    final Function<ColumnSelectorFactory, ColumnValueSelector<?>> valueSelectorFunction,
    final ColumnProcessorFactory<T> processorFactory,
    final ColumnSelectorFactory selectorFactory
) {
  final ColumnCapabilities capabilities = inputCapabilitiesFn.apply(selectorFactory);
  final TypeSignature<ValueType> effectiveType = capabilities != null ? capabilities : processorFactory.defaultType();
  switch (effectiveType.getType()) {
    case STRING:
      return processorFactory.makeDimensionProcessor(dimensionSelectorFn.apply(selectorFactory), mayBeMultiValue(capabilities));
    case LONG:
      return processorFactory.makeLongProcessor(valueSelectorFunction.apply(selectorFactory));
    case FLOAT:
      return processorFactory.makeFloatProcessor(valueSelectorFunction.apply(selectorFactory));
    case DOUBLE:
      return processorFactory.makeDoubleProcessor(valueSelectorFunction.apply(selectorFactory));
    case COMPLEX:
      return processorFactory.makeComplexProcessor(valueSelectorFunction.apply(selectorFactory));
    default:
      throw new ISE("Unsupported type[%s]", effectiveType.asTypeString());
  }
}
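To make the dispatch concrete, here is an illustrative ColumnProcessorFactory whose "processor" is simply a Supplier that reads the current row's value. The method set and parameter types follow the calls visible in makeProcessorInternal above; treat the exact signatures (the Base*ColumnValueSelector parameters and ColumnType defaultType()) as assumptions about this Druid version rather than a definitive rendering of the interface:

import java.util.function.Supplier;
import org.apache.druid.segment.BaseDoubleColumnValueSelector;
import org.apache.druid.segment.BaseFloatColumnValueSelector;
import org.apache.druid.segment.BaseLongColumnValueSelector;
import org.apache.druid.segment.BaseObjectColumnValueSelector;
import org.apache.druid.segment.ColumnProcessorFactory;
import org.apache.druid.segment.DimensionSelector;
import org.apache.druid.segment.column.ColumnType;

// Illustrative only: each "processor" reads the current row's value as an Object.
class CurrentValueReaderFactory implements ColumnProcessorFactory<Supplier<Object>> {
  @Override
  public ColumnType defaultType() {
    // Assumed when the input column's capabilities are unknown.
    return ColumnType.STRING;
  }

  @Override
  public Supplier<Object> makeDimensionProcessor(DimensionSelector selector, boolean multiValue) {
    return selector::getObject;
  }

  @Override
  public Supplier<Object> makeLongProcessor(BaseLongColumnValueSelector selector) {
    return () -> selector.isNull() ? null : selector.getLong();
  }

  @Override
  public Supplier<Object> makeFloatProcessor(BaseFloatColumnValueSelector selector) {
    return () -> selector.isNull() ? null : selector.getFloat();
  }

  @Override
  public Supplier<Object> makeDoubleProcessor(BaseDoubleColumnValueSelector selector) {
    return () -> selector.isNull() ? null : selector.getDouble();
  }

  @Override
  public Supplier<Object> makeComplexProcessor(BaseObjectColumnValueSelector<?> selector) {
    return selector::getObject;
  }
}

Such a factory would be handed to one of the public ColumnProcessors.makeProcessor(...) entry points, which funnel into the switch above.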
use of org.apache.druid.segment.column.ColumnCapabilities in project druid by druid-io.
the class IndexIO method validateRowValues.
private static void validateRowValues(
    RowPointer rp1,
    IndexableAdapter adapter1,
    RowPointer rp2,
    IndexableAdapter adapter2
) {
  if (rp1.getTimestamp() != rp2.getTimestamp()) {
    throw new SegmentValidationException("Timestamp mismatch. Expected %d found %d", rp1.getTimestamp(), rp2.getTimestamp());
  }
  final List<Object> dims1 = rp1.getDimensionValuesForDebug();
  final List<Object> dims2 = rp2.getDimensionValuesForDebug();
  if (dims1.size() != dims2.size()) {
    throw new SegmentValidationException("Dim lengths not equal %s vs %s", dims1, dims2);
  }
  final List<String> dim1Names = adapter1.getDimensionNames();
  final List<String> dim2Names = adapter2.getDimensionNames();
  int dimCount = dims1.size();
  for (int i = 0; i < dimCount; ++i) {
    final String dim1Name = dim1Names.get(i);
    final String dim2Name = dim2Names.get(i);
    ColumnCapabilities capabilities1 = adapter1.getCapabilities(dim1Name);
    ColumnCapabilities capabilities2 = adapter2.getCapabilities(dim2Name);
    ColumnType dim1Type = capabilities1.toColumnType();
    ColumnType dim2Type = capabilities2.toColumnType();
    if (!Objects.equals(dim1Type, dim2Type)) {
throw new SegmentValidationException("Dim [%s] types not equal. Expected %d found %d", dim1Name, dim1Type, dim2Type);
    }
    Object vals1 = dims1.get(i);
    Object vals2 = dims2.get(i);
    if (isNullRow(vals1) ^ isNullRow(vals2)) {
      throw notEqualValidationException(dim1Name, vals1, vals2);
    }
    boolean vals1IsList = vals1 instanceof List;
    boolean vals2IsList = vals2 instanceof List;
    if (vals1IsList ^ vals2IsList) {
      if (vals1IsList) {
        if (((List) vals1).size() != 1 || !Objects.equals(((List) vals1).get(0), vals2)) {
          throw notEqualValidationException(dim1Name, vals1, vals2);
        }
      } else {
        if (((List) vals2).size() != 1 || !Objects.equals(((List) vals2).get(0), vals1)) {
          throw notEqualValidationException(dim1Name, vals1, vals2);
        }
      }
    } else {
      if (!Objects.equals(vals1, vals2)) {
        throw notEqualValidationException(dim1Name, vals1, vals2);
      }
    }
  }
}
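The list-versus-scalar branch encodes an equivalence rule: a one-element List matches the bare scalar it wraps, since different adapters may represent single-valued rows either way. Restated as a standalone predicate (illustrative, not Druid API):

import java.util.List;
import java.util.Objects;

// A single-element list and the scalar it wraps count as the same dimension value.
static boolean dimensionValuesMatch(Object vals1, Object vals2) {
  final boolean vals1IsList = vals1 instanceof List;
  final boolean vals2IsList = vals2 instanceof List;
  if (vals1IsList ^ vals2IsList) {
    // Mixed representation: equal only if the list holds exactly one element equal to the scalar.
    final List<?> list = (List<?>) (vals1IsList ? vals1 : vals2);
    final Object scalar = vals1IsList ? vals2 : vals1;
    return list.size() == 1 && Objects.equals(list.get(0), scalar);
  }
  // Same representation on both sides: plain equality suffices.
  return Objects.equals(vals1, vals2);
}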