Use of com.hazelcast.sql.impl.schema.map.PartitionedMapTable in project hazelcast by hazelcast.
The class InsertMapPhysicalRel, method entriesFn:
public Function<ExpressionEvalContext, List<Entry<Object, Object>>> entriesFn() {
    PartitionedMapTable table = table();
    ExpressionValues values = this.values;
    return evalContext -> {
        KvProjector projector = KvProjector.supplier(
                table.paths(),
                table.types(),
                (UpsertTargetDescriptor) table.getKeyJetMetadata(),
                (UpsertTargetDescriptor) table.getValueJetMetadata(),
                true
        ).get(evalContext.getSerializationService());
        return values.toValues(evalContext).map(projector::project).collect(toList());
    };
}
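Note the shape of the method: the table and values are captured into locals before the lambda is built, so the returned function closes over plain metadata rather than the physical rel, and the projector is only constructed once an evaluation context is available at execution time. The same capture-then-defer pattern can be shown without the Hazelcast internals; a minimal standalone sketch, where Row, EvalContext, and Projector are hypothetical stand-ins for the types above:

import java.util.AbstractMap.SimpleEntry;
import java.util.List;
import java.util.Map.Entry;
import java.util.function.Function;
import static java.util.stream.Collectors.toList;

public class DeferredEntriesSketch {

    record Row(Object key, Object value) {}
    record EvalContext(String name) {}

    interface Projector {
        Entry<Object, Object> project(Row row);
    }

    // Capture the inputs eagerly; build the projector only when the
    // function is finally applied with a concrete context.
    static Function<EvalContext, List<Entry<Object, Object>>> entriesFn(List<Row> rows) {
        return ctx -> {
            Projector projector = row -> new SimpleEntry<>(row.key(), row.value());
            return rows.stream().map(projector::project).collect(toList());
        };
    }

    public static void main(String[] args) {
        Function<EvalContext, List<Entry<Object, Object>>> fn =
                entriesFn(List.of(new Row("k1", "v1"), new Row("k2", "v2")));
        System.out.println(fn.apply(new EvalContext("test"))); // [k1=v1, k2=v2]
    }
}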
Use of com.hazelcast.sql.impl.schema.map.PartitionedMapTable in project hazelcast by hazelcast.
The class ParserOperationsTest, method createContext:
private static OptimizerContext createContext() {
    PartitionedMapTable partitionedMapTable = new PartitionedMapTable(
            "public", "t", "t",
            Arrays.asList(field("a"), field("b")),
            new ConstantTableStatistics(100L),
            null, null, null, null, null, false);
    TableResolver resolver = TestTableResolver.create("public", partitionedMapTable);
    List<TableResolver> tableResolvers = Collections.singletonList(resolver);
    List<List<String>> searchPaths = QueryUtils.prepareSearchPaths(emptyList(), tableResolvers);
    return OptimizerContext.create(new SqlCatalog(tableResolvers), searchPaths, emptyList(), 1, name -> null);
}
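This test harness wires a single PartitionedMapTable under the "public" schema into a catalog, derives the schema search paths from the registered resolvers, and hands both to the optimizer so unqualified table names can be resolved during parsing. The search-path lookup itself is simple to illustrate in isolation; a minimal sketch, assuming a catalog keyed by schema-qualified names (the CATALOG map and resolve method are hypothetical helpers, not Hazelcast's API):

import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;

public class SearchPathSketch {

    // Tables keyed by "schema.table"; a stand-in for the SqlCatalog above.
    static final Map<String, String> CATALOG = Map.of("public.t", "PartitionedMapTable[t]");

    // Try each search path in order and return the first match,
    // mirroring how an unqualified table name is resolved.
    static Optional<String> resolve(String tableName, List<List<String>> searchPaths) {
        return searchPaths.stream()
                .map(path -> path.get(path.size() - 1) + "." + tableName)
                .map(CATALOG::get)
                .filter(Objects::nonNull)
                .findFirst();
    }

    public static void main(String[] args) {
        List<List<String>> searchPaths = List.of(List.of("hazelcast", "public"));
        System.out.println(resolve("t", searchPaths));       // Optional[PartitionedMapTable[t]]
        System.out.println(resolve("missing", searchPaths)); // Optional.empty
    }
}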
Use of com.hazelcast.sql.impl.schema.map.PartitionedMapTable in project hazelcast by hazelcast.
The class ParserNameResolutionTest, method createContext:
private static OptimizerContext createContext() {
    PartitionedMapTable table1 = new PartitionedMapTable(
            SCHEMA_1, TABLE_1, TABLE_1,
            Arrays.asList(field(FIELD_1), field(FIELD_2)),
            new ConstantTableStatistics(100L),
            null, null, null, null, null, false);
    PartitionedMapTable table2 = new PartitionedMapTable(
            SCHEMA_2, TABLE_2, TABLE_2,
            Arrays.asList(field(FIELD_1), field(FIELD_2)),
            new ConstantTableStatistics(100L),
            null, null, null, null, null, false);
    TableResolver resolver1 = TestTableResolver.create(SCHEMA_1, table1);
    TableResolver resolver2 = TestTableResolver.create(SCHEMA_2, table2);
    List<TableResolver> tableResolvers = Arrays.asList(resolver1, resolver2);
    List<List<String>> searchPaths = QueryUtils.prepareSearchPaths(emptyList(), tableResolvers);
    return OptimizerContext.create(new SqlCatalog(tableResolvers), searchPaths, emptyList(), 1, name -> null);
}
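Relative to the previous test, this context registers two tables with identical column names under two different schemas, which lets name-resolution tests distinguish a schema-qualified reference (found regardless of the search paths) from an unqualified one (found only if its schema is on a search path). A hedged standalone sketch of that distinction, with placeholder values for the SCHEMA_*/TABLE_* constants, whose real values are not shown above:

import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;

public class QualifiedNameSketch {

    static final Map<String, String> CATALOG = Map.of(
            "schema1.table1", "t1",
            "schema2.table2", "t2");

    // A qualified name skips the search paths entirely; an unqualified
    // name is tried against each search path in order.
    static Optional<String> resolve(String name, List<List<String>> searchPaths) {
        if (name.contains(".")) {
            return Optional.ofNullable(CATALOG.get(name));
        }
        return searchPaths.stream()
                .map(path -> path.get(path.size() - 1) + "." + name)
                .map(CATALOG::get)
                .filter(Objects::nonNull)
                .findFirst();
    }

    public static void main(String[] args) {
        List<List<String>> paths = List.of(List.of("hazelcast", "schema1"));
        System.out.println(resolve("table1", paths));         // found via search path
        System.out.println(resolve("schema2.table2", paths)); // found via qualification
        System.out.println(resolve("table2", paths));         // empty: schema2 not on the path
    }
}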
Use of com.hazelcast.sql.impl.schema.map.PartitionedMapTable in project hazelcast by hazelcast.
The class SqlIndexResolutionTest, method checkIndex:
private void checkIndex(IMap<?, ?> map, List<QueryDataType> expectedFieldConverterTypes) {
    String mapName = map.getName();
    List<PartitionedMapTable> tables = resolver.getTables().stream()
            .filter(t -> t instanceof PartitionedMapTable)
            .map(t -> (PartitionedMapTable) t)
            .filter(t -> t.getMapName().equals(mapName))
            .collect(Collectors.toList());
    assertEquals(1, tables.size());
    PartitionedMapTable table = tables.get(0);
    assertEquals(1, table.getIndexes().size());
    MapTableIndex index = table.getIndexes().get(0);
    assertEquals(indexName, index.getName());
    assertEquals(indexType, index.getType());
    // The component count depends on the number of index attributes
    assertEquals(composite ? 2 : 1, index.getComponentsCount());
    int field1Ordinal = findFieldOrdinal(table, "field1");
    int field2Ordinal = findFieldOrdinal(table, "field2");
    // Check the resolved field converter types; no more than two components are tested
    assertTrue(expectedFieldConverterTypes.size() <= 2);
    assertEquals(expectedFieldConverterTypes, index.getFieldConverterTypes());
    // The resolved field ordinals depend on the number of resolved converter types
    if (expectedFieldConverterTypes.isEmpty()) {
        assertTrue(index.getFieldOrdinals().isEmpty());
    } else if (expectedFieldConverterTypes.size() == 1) {
        assertEquals(singletonList(field1Ordinal), index.getFieldOrdinals());
    } else {
        assertEquals(Arrays.asList(field1Ordinal, field2Ordinal), index.getFieldOrdinals());
    }
}
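The findFieldOrdinal helper is not shown above. A plausible sketch, assuming only that the table exposes getFieldCount() and getField(int) with named fields; the exact signature and the failure behavior are assumptions:

// Plausible shape of the helper used above: scan the table's fields
// and return the position of the first one with a matching name.
private static int findFieldOrdinal(Table table, String fieldName) {
    for (int i = 0; i < table.getFieldCount(); i++) {
        if (table.getField(i).getName().equals(fieldName)) {
            return i;
        }
    }
    throw new AssertionError("field not found: " + fieldName);
}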
Use of com.hazelcast.sql.impl.schema.map.PartitionedMapTable in project hazelcast by hazelcast.
The class IMapSqlConnector, method fullScanReader:
@Nonnull
@Override
public Vertex fullScanReader(
        @Nonnull DAG dag,
        @Nonnull Table table0,
        @Nullable Expression<Boolean> filter,
        @Nonnull List<Expression<?>> projection,
        @Nullable FunctionEx<ExpressionEvalContext, EventTimePolicy<JetSqlRow>> eventTimePolicyProvider
) {
    if (eventTimePolicyProvider != null) {
        throw QueryException.error("Ordering functions are not supported on top of " + TYPE_NAME + " mappings");
    }
    PartitionedMapTable table = (PartitionedMapTable) table0;
    Vertex vStart = dag.newUniqueVertex(toString(table), SourceProcessors.readMapP(table.getMapName()));
    Vertex vEnd = dag.newUniqueVertex(
            "Project(" + toString(table) + ")",
            rowProjector(table.paths(), table.types(), table.getKeyDescriptor(), table.getValueDescriptor(), filter, projection));
    dag.edge(Edge.from(vStart).to(vEnd).isolated());
    return vEnd;
}