
Example 6 with PartitionedMapTable

Use of com.hazelcast.sql.impl.schema.map.PartitionedMapTable in project hazelcast (by hazelcast).

From the class InsertMapPhysicalRel, the method entriesFn:

public Function<ExpressionEvalContext, List<Entry<Object, Object>>> entriesFn() {
    PartitionedMapTable table = table();
    ExpressionValues values = this.values;
    return evalContext -> {
        // Build a key/value projector from the table metadata, using the eval context's serialization service.
        KvProjector projector = KvProjector.supplier(
                table.paths(),
                table.types(),
                (UpsertTargetDescriptor) table.getKeyJetMetadata(),
                (UpsertTargetDescriptor) table.getValueJetMetadata(),
                true
        ).get(evalContext.getSerializationService());
        // Evaluate the row expressions and project each row into a map entry.
        return values.toValues(evalContext).map(projector::project).collect(toList());
    };
}
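The returned value is a plain java.util.function.Function, so whichever component ends up holding an ExpressionEvalContext can simply apply it. A minimal, hypothetical call-site sketch (the function and the eval context are assumed to be supplied by the surrounding planner/execution code):

import com.hazelcast.sql.impl.expression.ExpressionEvalContext;
import java.util.List;
import java.util.Map.Entry;
import java.util.function.Function;

// Hypothetical helper: applies the function produced by entriesFn() to a given eval context.
static List<Entry<Object, Object>> evaluateEntries(
        Function<ExpressionEvalContext, List<Entry<Object, Object>>> entriesFn,
        ExpressionEvalContext evalContext) {
    return entriesFn.apply(evalContext);
}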
Also used: RelOptCluster(org.apache.calcite.plan.RelOptCluster), RelDataType(org.apache.calcite.rel.type.RelDataType), SqlKind(org.apache.calcite.sql.SqlKind), PlanNodeSchema(com.hazelcast.sql.impl.plan.node.PlanNodeSchema), AbstractRelNode(org.apache.calcite.rel.AbstractRelNode), ExpressionValues(com.hazelcast.jet.sql.impl.opt.ExpressionValues), PlanObjectKey(com.hazelcast.sql.impl.optimizer.PlanObjectKey), RelNode(org.apache.calcite.rel.RelNode), RelOptUtil(org.apache.calcite.plan.RelOptUtil), Function(java.util.function.Function), QueryParameterMetadata(com.hazelcast.sql.impl.QueryParameterMetadata), RelOptTable(org.apache.calcite.plan.RelOptTable), RelWriter(org.apache.calcite.rel.RelWriter), Vertex(com.hazelcast.jet.core.Vertex), HazelcastTable(com.hazelcast.jet.sql.impl.schema.HazelcastTable), List(java.util.List), Collectors.toList(java.util.stream.Collectors.toList), ExpressionEvalContext(com.hazelcast.sql.impl.expression.ExpressionEvalContext), KvProjector(com.hazelcast.jet.sql.impl.connector.keyvalue.KvProjector), UpsertTargetDescriptor(com.hazelcast.jet.sql.impl.inject.UpsertTargetDescriptor), Entry(java.util.Map.Entry), PartitionedMapTable(com.hazelcast.sql.impl.schema.map.PartitionedMapTable), RelTraitSet(org.apache.calcite.plan.RelTraitSet)

Example 7 with PartitionedMapTable

Use of com.hazelcast.sql.impl.schema.map.PartitionedMapTable in project hazelcast (by hazelcast).

From the class ParserOperationsTest, the method createContext:

private static OptimizerContext createContext() {
    // Only the schema/name, field list and statistics matter for this parser test;
    // the remaining constructor arguments are left null (and the final flag false).
    PartitionedMapTable partitionedMapTable = new PartitionedMapTable(
            "public", "t", "t",
            Arrays.asList(field("a"), field("b")),
            new ConstantTableStatistics(100L),
            null, null, null, null, null,
            false);
    TableResolver resolver = TestTableResolver.create("public", partitionedMapTable);
    List<TableResolver> tableResolvers = Collections.singletonList(resolver);
    List<List<String>> searchPaths = QueryUtils.prepareSearchPaths(emptyList(), tableResolvers);
    return OptimizerContext.create(new SqlCatalog(tableResolvers), searchPaths, emptyList(), 1, name -> null);
}
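The field(...) helper is not shown in the snippet (it is also used in the next example). A plausible reconstruction, assuming MapTableField's constructor takes a name, a QueryDataType, a hidden flag and a QueryPath, with VARCHAR picked arbitrarily as the column type:

import com.hazelcast.sql.impl.extract.QueryPath;
import com.hazelcast.sql.impl.schema.map.MapTableField;
import com.hazelcast.sql.impl.type.QueryDataType;

// Hypothetical sketch of the field(...) helper: builds a simple top-level map field.
private static MapTableField field(String name) {
    // Assumption: QueryPath.create(name) resolves a plain field name to a value path.
    return new MapTableField(name, QueryDataType.VARCHAR, false, QueryPath.create(name));
}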
Also used: SqlCatalog(com.hazelcast.sql.impl.schema.SqlCatalog), TableResolver(com.hazelcast.sql.impl.schema.TableResolver), TestTableResolver(com.hazelcast.jet.sql.impl.TestTableResolver), PartitionedMapTable(com.hazelcast.sql.impl.schema.map.PartitionedMapTable), Collections.emptyList(java.util.Collections.emptyList), List(java.util.List), ConstantTableStatistics(com.hazelcast.sql.impl.schema.ConstantTableStatistics)

Example 8 with PartitionedMapTable

Use of com.hazelcast.sql.impl.schema.map.PartitionedMapTable in project hazelcast (by hazelcast).

From the class ParserNameResolutionTest, the method createContext:

private static OptimizerContext createContext() {
    PartitionedMapTable table1 = new PartitionedMapTable(
            SCHEMA_1, TABLE_1, TABLE_1,
            Arrays.asList(field(FIELD_1), field(FIELD_2)),
            new ConstantTableStatistics(100L),
            null, null, null, null, null,
            false);
    PartitionedMapTable table2 = new PartitionedMapTable(
            SCHEMA_2, TABLE_2, TABLE_2,
            Arrays.asList(field(FIELD_1), field(FIELD_2)),
            new ConstantTableStatistics(100L),
            null, null, null, null, null,
            false);
    TableResolver resolver1 = TestTableResolver.create(SCHEMA_1, table1);
    TableResolver resolver2 = TestTableResolver.create(SCHEMA_2, table2);
    List<TableResolver> tableResolvers = Arrays.asList(resolver1, resolver2);
    List<List<String>> searchPaths = QueryUtils.prepareSearchPaths(emptyList(), tableResolvers);
    return OptimizerContext.create(new SqlCatalog(tableResolvers), searchPaths, emptyList(), 1, name -> null);
}
Also used: SqlCatalog(com.hazelcast.sql.impl.schema.SqlCatalog), TableResolver(com.hazelcast.sql.impl.schema.TableResolver), TestTableResolver(com.hazelcast.jet.sql.impl.TestTableResolver), PartitionedMapTable(com.hazelcast.sql.impl.schema.map.PartitionedMapTable), Collections.emptyList(java.util.Collections.emptyList), List(java.util.List), SqlNodeList(org.apache.calcite.sql.SqlNodeList), ConstantTableStatistics(com.hazelcast.sql.impl.schema.ConstantTableStatistics)

Example 9 with PartitionedMapTable

Use of com.hazelcast.sql.impl.schema.map.PartitionedMapTable in project hazelcast (by hazelcast).

From the class SqlIndexResolutionTest, the method checkIndex:

private void checkIndex(IMap<?, ?> map, List<QueryDataType> expectedFieldConverterTypes) {
    String mapName = map.getName();
    List<PartitionedMapTable> tables = resolver.getTables().stream()
            .filter(t -> t instanceof PartitionedMapTable)
            .map(t -> (PartitionedMapTable) t)
            .filter(t -> t.getMapName().equals(mapName))
            .collect(Collectors.toList());
    assertEquals(1, tables.size());
    PartitionedMapTable table = tables.get(0);
    assertEquals(1, table.getIndexes().size());
    MapTableIndex index = table.getIndexes().get(0);
    assertEquals(indexName, index.getName());
    assertEquals(indexType, index.getType());
    // Components count depends on the index attribute count
    assertEquals(composite ? 2 : 1, index.getComponentsCount());
    int field1Ordinal = findFieldOrdinal(table, "field1");
    int field2Ordinal = findFieldOrdinal(table, "field2");
    // Check resolved field converter types. We do not test more than two components.
    assertTrue(expectedFieldConverterTypes.size() <= 2);
    assertEquals(expectedFieldConverterTypes, index.getFieldConverterTypes());
    // Resolved field ordinals depend on the number of resolved converter types
    if (expectedFieldConverterTypes.isEmpty()) {
        assertTrue(index.getFieldOrdinals().isEmpty());
    } else if (expectedFieldConverterTypes.size() == 1) {
        assertEquals(singletonList(field1Ordinal), index.getFieldOrdinals());
    } else {
        assertEquals(Arrays.asList(field1Ordinal, field2Ordinal), index.getFieldOrdinals());
    }
}
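The findFieldOrdinal(...) helper is not part of the snippet. A plausible reconstruction, assuming the Table base class exposes getFieldCount()/getField(int) and that TableField has getName():

import com.hazelcast.sql.impl.schema.map.PartitionedMapTable;

// Hypothetical sketch: returns the ordinal of the named field, or -1 if it is not present.
private static int findFieldOrdinal(PartitionedMapTable table, String fieldName) {
    for (int i = 0; i < table.getFieldCount(); i++) {
        if (table.getField(i).getName().equals(fieldName)) {
            return i;
        }
    }
    return -1;
}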
Also used: ParallelJVMTest(com.hazelcast.test.annotation.ParallelJVMTest), Arrays(java.util.Arrays), BeforeClass(org.junit.BeforeClass), QuickTest(com.hazelcast.test.annotation.QuickTest), SqlStatement(com.hazelcast.sql.SqlStatement), QueryDataType(com.hazelcast.sql.impl.type.QueryDataType), RunWith(org.junit.runner.RunWith), ExpressionBiValue(com.hazelcast.jet.sql.impl.support.expressions.ExpressionBiValue), SqlConnectorCache(com.hazelcast.jet.sql.impl.connector.SqlConnectorCache), ArrayList(java.util.ArrayList), Collections.singletonList(java.util.Collections.singletonList), HazelcastTable(com.hazelcast.jet.sql.impl.schema.HazelcastTable), ExpressionBiValue.createBiValue(com.hazelcast.jet.sql.impl.support.expressions.ExpressionBiValue.createBiValue), IndexType(com.hazelcast.config.IndexType), OptimizerTestSupport(com.hazelcast.jet.sql.impl.opt.OptimizerTestSupport), Arrays.asList(java.util.Arrays.asList), QueryPath(com.hazelcast.sql.impl.extract.QueryPath), Assert.fail(org.junit.Assert.fail), PartitionedMapTable(com.hazelcast.sql.impl.schema.map.PartitionedMapTable), Parameterized(org.junit.runners.Parameterized), Before(org.junit.Before), UseParametersRunnerFactory(org.junit.runners.Parameterized.UseParametersRunnerFactory), NodeEngine(com.hazelcast.spi.impl.NodeEngine), HazelcastParametrizedRunner(com.hazelcast.test.HazelcastParametrizedRunner), Assert.assertNotNull(org.junit.Assert.assertNotNull), Collection(java.util.Collection), TableResolverImpl(com.hazelcast.jet.sql.impl.schema.TableResolverImpl), MapTableUtils.getPartitionedMapIndexes(com.hazelcast.sql.impl.schema.map.MapTableUtils.getPartitionedMapIndexes), Assert.assertTrue(org.junit.Assert.assertTrue), HazelcastParallelParametersRunnerFactory(com.hazelcast.test.HazelcastParallelParametersRunnerFactory), Test(org.junit.Test), ExpressionBiValue.createBiClass(com.hazelcast.jet.sql.impl.support.expressions.ExpressionBiValue.createBiClass), IndexScanMapPhysicalRel(com.hazelcast.jet.sql.impl.opt.physical.IndexScanMapPhysicalRel), Category(org.junit.experimental.categories.Category), FullScanPhysicalRel(com.hazelcast.jet.sql.impl.opt.physical.FullScanPhysicalRel), Collectors(java.util.stream.Collectors), TableField(com.hazelcast.sql.impl.schema.TableField), TableResolver(com.hazelcast.sql.impl.schema.TableResolver), List(java.util.List), Assert.assertNull(org.junit.Assert.assertNull), Util.getNodeEngine(com.hazelcast.jet.impl.util.Util.getNodeEngine), FullScanLogicalRel(com.hazelcast.jet.sql.impl.opt.logical.FullScanLogicalRel), MapTableField(com.hazelcast.sql.impl.schema.map.MapTableField), MapTableIndex(com.hazelcast.sql.impl.schema.map.MapTableIndex), ExpressionType(com.hazelcast.jet.sql.impl.support.expressions.ExpressionType), TablesStorage(com.hazelcast.jet.sql.impl.schema.TablesStorage), Assert.assertEquals(org.junit.Assert.assertEquals), IMap(com.hazelcast.map.IMap)

Example 10 with PartitionedMapTable

Use of com.hazelcast.sql.impl.schema.map.PartitionedMapTable in project hazelcast (by hazelcast).

From the class IMapSqlConnector, the method fullScanReader:

@Nonnull
@Override
public Vertex fullScanReader(
        @Nonnull DAG dag,
        @Nonnull Table table0,
        @Nullable Expression<Boolean> filter,
        @Nonnull List<Expression<?>> projection,
        @Nullable FunctionEx<ExpressionEvalContext, EventTimePolicy<JetSqlRow>> eventTimePolicyProvider) {
    if (eventTimePolicyProvider != null) {
        throw QueryException.error("Ordering functions are not supported on top of " + TYPE_NAME + " mappings");
    }
    PartitionedMapTable table = (PartitionedMapTable) table0;
    // Source vertex: reads all entries of the backing IMap.
    Vertex vStart = dag.newUniqueVertex(
            toString(table),
            SourceProcessors.readMapP(table.getMapName()));
    // Projection vertex: applies the optional filter and the projection expressions to each entry.
    Vertex vEnd = dag.newUniqueVertex(
            "Project(" + toString(table) + ")",
            rowProjector(
                    table.paths(),
                    table.types(),
                    table.getKeyDescriptor(),
                    table.getValueDescriptor(),
                    filter,
                    projection));
    dag.edge(Edge.from(vStart).to(vEnd).isolated());
    return vEnd;
}
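Stripped of the connector specifics, this is the standard two-vertex wiring: a map-reading source feeding a mapping stage over an isolated edge, so each reader instance keeps its own local downstream processor. A self-contained sketch of the same pattern with placeholder names and a no-op projection (the real connector plugs rowProjector(...) in here instead):

import com.hazelcast.function.FunctionEx;
import com.hazelcast.jet.core.DAG;
import com.hazelcast.jet.core.Edge;
import com.hazelcast.jet.core.Vertex;
import com.hazelcast.jet.core.processor.Processors;
import com.hazelcast.jet.core.processor.SourceProcessors;

public class FullScanWiringSketch {
    public static DAG buildDag() {
        DAG dag = new DAG();
        // Source vertex: scans all entries of a placeholder IMap called "my-map".
        Vertex read = dag.newVertex("read-map", SourceProcessors.readMapP("my-map"));
        // Stand-in projection vertex: passes entries through unchanged.
        Vertex project = dag.newVertex("project", Processors.mapP(FunctionEx.identity()));
        // Isolated edge: one-to-one, local-only connections between reader and projector instances.
        dag.edge(Edge.from(read).to(project).isolated());
        return dag;
    }
}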
Also used: Vertex(com.hazelcast.jet.core.Vertex), PartitionedMapTable(com.hazelcast.sql.impl.schema.map.PartitionedMapTable), Nonnull(javax.annotation.Nonnull)

Aggregations

PartitionedMapTable (com.hazelcast.sql.impl.schema.map.PartitionedMapTable): 12
Vertex (com.hazelcast.jet.core.Vertex): 6
Nonnull (javax.annotation.Nonnull): 6
List (java.util.List): 5
UpsertTargetDescriptor (com.hazelcast.jet.sql.impl.inject.UpsertTargetDescriptor): 4
HazelcastTable (com.hazelcast.jet.sql.impl.schema.HazelcastTable): 3
ConstantTableStatistics (com.hazelcast.sql.impl.schema.ConstantTableStatistics): 3
TableResolver (com.hazelcast.sql.impl.schema.TableResolver): 3
RelNode (org.apache.calcite.rel.RelNode): 3
TestTableResolver (com.hazelcast.jet.sql.impl.TestTableResolver): 2
KvProjector (com.hazelcast.jet.sql.impl.connector.keyvalue.KvProjector): 2
ExpressionValues (com.hazelcast.jet.sql.impl.opt.ExpressionValues): 2
FullScanLogicalRel (com.hazelcast.jet.sql.impl.opt.logical.FullScanLogicalRel): 2
QueryParameterMetadata (com.hazelcast.sql.impl.QueryParameterMetadata): 2
ExpressionEvalContext (com.hazelcast.sql.impl.expression.ExpressionEvalContext): 2
PlanObjectKey (com.hazelcast.sql.impl.optimizer.PlanObjectKey): 2
PlanNodeSchema (com.hazelcast.sql.impl.plan.node.PlanNodeSchema): 2
SqlCatalog (com.hazelcast.sql.impl.schema.SqlCatalog): 2
TableField (com.hazelcast.sql.impl.schema.TableField): 2
MapTableIndex (com.hazelcast.sql.impl.schema.map.MapTableIndex): 2