Use of com.hazelcast.sql.impl.schema.map.MapTableIndex in the hazelcast project (hazelcast/hazelcast).
The class IMapSqlConnector, method createTable:
@Nonnull
@Override
public Table createTable(@Nonnull NodeEngine nodeEngine, @Nonnull String schemaName, @Nonnull String mappingName, @Nonnull String externalName, @Nonnull Map<String, String> options, @Nonnull List<MappingField> resolvedFields) {
    InternalSerializationService serializationService = (InternalSerializationService) nodeEngine.getSerializationService();

    // Resolve key and value descriptors independently; the table schema is the
    // key fields followed by the value fields.
    KvMetadata keyMetadata = METADATA_RESOLVERS.resolveMetadata(true, resolvedFields, options, serializationService);
    KvMetadata valueMetadata = METADATA_RESOLVERS.resolveMetadata(false, resolvedFields, options, serializationService);
    List<TableField> tableFields = concat(keyMetadata.getFields().stream(), valueMetadata.getFields().stream())
            .collect(toList());

    MapService mapService = nodeEngine.getService(MapService.SERVICE_NAME);
    MapServiceContext mapServiceContext = mapService.getMapServiceContext();
    // The container may be absent on this member (map not created yet); all
    // container-derived attributes must tolerate null.
    MapContainer mapContainer = mapServiceContext.getExistingMapContainer(externalName);

    long rowCountEstimate = estimatePartitionedMapRowCount(nodeEngine, mapServiceContext, externalName);
    boolean nativeMemory = mapContainer != null
            && mapContainer.getMapConfig().getInMemoryFormat() == InMemoryFormat.NATIVE;
    List<MapTableIndex> resolvedIndexes = mapContainer == null
            ? emptyList()
            : MapTableUtils.getPartitionedMapIndexes(mapContainer, tableFields);

    return new PartitionedMapTable(
            schemaName,
            mappingName,
            externalName,
            tableFields,
            new ConstantTableStatistics(rowCountEstimate),
            keyMetadata.getQueryTargetDescriptor(),
            valueMetadata.getQueryTargetDescriptor(),
            keyMetadata.getUpsertTargetDescriptor(),
            valueMetadata.getUpsertTargetDescriptor(),
            resolvedIndexes,
            nativeMemory);
}
Use of com.hazelcast.sql.impl.schema.map.MapTableIndex in the hazelcast project (hazelcast/hazelcast).
The class IndexResolver, method buildCollationTrait:
/**
 * Builds a collation with collation fields re-mapped according to the table projections.
 *
 * @param scan the logical map scan
 * @param index the index
 * @param ascs the collation of index fields
 * @return the new collation trait
 */
private static RelCollation buildCollationTrait(FullScanLogicalRel scan, MapTableIndex index, List<Boolean> ascs) {
    // Only SORTED indexes impose an ordering; everything else yields an empty collation.
    if (index.getType() != SORTED) {
        return RelCollations.of(Collections.emptyList());
    }

    HazelcastTable hazelcastTable = OptUtils.extractHazelcastTable(scan);
    // Extract those projections that are direct input field references — only
    // those can be used for index access.
    List<Integer> projectedInputOrdinals = hazelcastTable.getProjects().stream()
            .filter(RexInputRef.class::isInstance)
            .map(project -> ((RexInputRef) project).getIndex())
            .collect(Collectors.toList());

    List<RelFieldCollation> collationFields = new ArrayList<>(index.getFieldOrdinals().size());
    for (int pos = 0; pos < index.getFieldOrdinals().size(); pos++) {
        int remappedOrdinal = projectedInputOrdinals.indexOf(index.getFieldOrdinals().get(pos));
        if (remappedOrdinal < 0) {
            // The index field is not projected by the query: the usable collation
            // prefix ends here.
            break;
        }
        Direction direction = ascs.get(pos) ? ASCENDING : DESCENDING;
        collationFields.add(new RelFieldCollation(remappedOrdinal, direction));
    }
    return RelCollations.of(collationFields);
}
Use of com.hazelcast.sql.impl.schema.map.MapTableIndex in the hazelcast project (hazelcast/hazelcast).
The class SqlIndexResolutionTest, method checkIndex:
private void checkIndex(IMap<?, ?> map, List<QueryDataType> expectedFieldConverterTypes) {
    String mapName = map.getName();
    // Exactly one PartitionedMapTable must have been resolved for this map.
    List<PartitionedMapTable> matchingTables = resolver.getTables().stream()
            .filter(PartitionedMapTable.class::isInstance)
            .map(PartitionedMapTable.class::cast)
            .filter(candidate -> candidate.getMapName().equals(mapName))
            .collect(Collectors.toList());
    assertEquals(1, matchingTables.size());
    PartitionedMapTable table = matchingTables.get(0);

    // The table carries a single index, matching the configured name and type.
    assertEquals(1, table.getIndexes().size());
    MapTableIndex index = table.getIndexes().get(0);
    assertEquals(indexName, index.getName());
    assertEquals(indexType, index.getType());
    // Components count depends on the index attribute count.
    assertEquals(composite ? 2 : 1, index.getComponentsCount());

    int field1Ordinal = findFieldOrdinal(table, "field1");
    int field2Ordinal = findFieldOrdinal(table, "field2");

    // Check resolved field converter types. We do not test more than two components.
    assertTrue(expectedFieldConverterTypes.size() <= 2);
    assertEquals(expectedFieldConverterTypes, index.getFieldConverterTypes());

    // Resolved field ordinals depend on the number of resolved converter types.
    switch (expectedFieldConverterTypes.size()) {
        case 0:
            assertTrue(index.getFieldOrdinals().isEmpty());
            break;
        case 1:
            assertEquals(singletonList(field1Ordinal), index.getFieldOrdinals());
            break;
        default:
            assertEquals(Arrays.asList(field1Ordinal, field2Ordinal), index.getFieldOrdinals());
    }
}
Use of com.hazelcast.sql.impl.schema.map.MapTableIndex in the hazelcast project (hazelcast/hazelcast).
The class IndexResolver, method createIndexScans:
/**
 * The main entry point for index planning.
 * <p>
 * Analyzes the filter of the input scan operator, and produces zero, one or more {@link IndexScanMapPhysicalRel}
 * operators.
 * <p>
 * First, the full index scans are created and the covered (prefix-based) scans are excluded.
 * Second, the lookups are created and if lookup's collation is equal to the full scan's collation,
 * the latter one is excluded.
 *
 * @param scan scan operator to be analyzed
 * @param indexes indexes available on the map being scanned
 * @return zero, one or more index scan rels
 */
@SuppressWarnings({ "checkstyle:CyclomaticComplexity", "checkstyle:NPathComplexity", "checkstyle:MethodLength" })
public static Collection<RelNode> createIndexScans(FullScanLogicalRel scan, List<MapTableIndex> indexes) {
    RexNode filter = OptUtils.extractHazelcastTable(scan).getFilter();
    // Filter out unsupported indexes. Only SORTED and HASH indexes are supported.
    List<MapTableIndex> supportedIndexes = new ArrayList<>(indexes.size());
    // Union of field ordinals covered by any supported index; used below to discard
    // filter candidates on columns no index could serve.
    Set<Integer> allIndexedFieldOrdinals = new HashSet<>();
    for (MapTableIndex index : indexes) {
        if (isIndexSupported(index)) {
            supportedIndexes.add(index);
            allIndexedFieldOrdinals.addAll(index.getFieldOrdinals());
        }
    }
    // Early return if there are no indexes to consider.
    if (supportedIndexes.isEmpty()) {
        return Collections.emptyList();
    }
    List<RelNode> fullScanRels = new ArrayList<>(supportedIndexes.size());
    // possible ORDER BY clause on the upper level
    for (MapTableIndex index : supportedIndexes) {
        if (index.getType() == SORTED) {
            // Only for SORTED index create full index scans that might be potentially
            // utilized by sorting operator.
            List<Boolean> ascs = buildFieldDirections(index, true);
            RelNode relAscending = createFullIndexScan(scan, index, ascs, true);
            if (relAscending != null) {
                fullScanRels.add(relAscending);
                // Also offer the mirrored (descending) ordering of the same scan.
                RelNode relDescending = replaceCollationDirection(relAscending, DESCENDING);
                fullScanRels.add(relDescending);
            }
        }
    }
    // Keyed by collation so that later index lookups with the same collation can
    // displace the redundant full scan.
    Map<RelCollation, RelNode> fullScanRelsMap = excludeCoveredCollations(fullScanRels);
    if (filter == null) {
        // Exclude prefix-based covered index scans
        return fullScanRelsMap.values();
    }
    // Convert expression into CNF. Examples:
    // - {a=1 AND b=2} is converted into {a=1}, {b=2}
    // - {a=1 OR b=2} is unchanged
    List<RexNode> conjunctions = createConjunctiveFilter(filter);
    // Create a map from a column to a list of expressions that could be used by indexes.
    // For example, for the expression {a>1 AND a<3 AND b=5 AND c>d}, three candidates will be created:
    // a -> {>1}, {<3}
    // b -> {=5}
    Map<Integer, List<IndexComponentCandidate>> candidates = prepareSingleColumnCandidates(conjunctions, getCluster(scan).getParameterMetadata(), allIndexedFieldOrdinals);
    if (candidates.isEmpty()) {
        // No usable per-column predicates: fall back to the surviving full scans only.
        return fullScanRelsMap.values();
    }
    List<RelNode> rels = new ArrayList<>(supportedIndexes.size());
    for (MapTableIndex index : supportedIndexes) {
        // Create index scan based on candidates, if possible. Candidates could be merged into more complex
        // filters whenever possible.
        List<Boolean> ascs = buildFieldDirections(index, true);
        RelNode relAscending = createIndexScan(scan, index, conjunctions, candidates, ascs);
        if (relAscending != null) {
            RelCollation relAscCollation = getCollation(relAscending);
            // Exclude a full scan that has the same collation
            fullScanRelsMap.remove(relAscCollation);
            rels.add(relAscending);
            // A non-empty collation can also serve the reversed ordering; emit the
            // descending twin and retire its matching full scan as well.
            if (relAscCollation.getFieldCollations().size() > 0) {
                RelNode relDescending = replaceCollationDirection(relAscending, DESCENDING);
                rels.add(relDescending);
                RelCollation relDescCollation = getCollation(relDescending);
                // Exclude a full scan that has the same collation
                fullScanRelsMap.remove(relDescCollation);
            }
        }
    }
    // Whatever full scans were not displaced by an equivalent lookup remain candidates.
    rels.addAll(fullScanRelsMap.values());
    return rels;
}
Aggregations