Use of com.hazelcast.sql.impl.schema.SqlCatalog in project hazelcast by hazelcast.
In class SqlServiceImpl, method prepare:
    private SqlPlan prepare(String schema, String sql, List<Object> arguments, SqlExpectedResultType expectedResultType) {
        List<List<String>> searchPaths = prepareSearchPaths(schema);

        // The cache key combines the search paths and the SQL text: the same query
        // string may resolve to different tables under different schemas.
        PlanKey planKey = new PlanKey(searchPaths, sql);
        SqlPlan plan = planCache.get(planKey);

        if (plan == null) {
            // Cache miss: take a snapshot of the resolvable tables and optimize.
            SqlCatalog catalog = new SqlCatalog(optimizer.tableResolvers());
            plan = optimizer.prepare(new OptimizationTask(sql, arguments, searchPaths, catalog));

            // Only plans marked as reusable are stored back into the cache.
            if (plan.isCacheable()) {
                planCache.put(planKey, plan);
            }
        }

        checkReturnType(plan, expectedResultType);
        return plan;
    }
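The method above is a cache-aside lookup keyed by the search paths plus the SQL text: a cached plan is reused when present, otherwise a fresh SqlCatalog snapshot is built from the table resolvers and the optimizer produces a new plan, which is stored back only if it is cacheable. Below is a minimal, self-contained sketch of that caching pattern using only the JDK; the PlanKey, Plan, and compile names here are illustrative stand-ins, not the Hazelcast classes.

    import java.util.List;
    import java.util.Objects;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class PlanCacheSketch {

        // Illustrative cache key: the same SQL text may resolve differently under
        // different search paths, so both must be part of the key.
        static final class PlanKey {
            final List<List<String>> searchPaths;
            final String sql;

            PlanKey(List<List<String>> searchPaths, String sql) {
                this.searchPaths = searchPaths;
                this.sql = sql;
            }

            @Override
            public boolean equals(Object o) {
                if (!(o instanceof PlanKey)) {
                    return false;
                }
                PlanKey other = (PlanKey) o;
                return searchPaths.equals(other.searchPaths) && sql.equals(other.sql);
            }

            @Override
            public int hashCode() {
                return Objects.hash(searchPaths, sql);
            }
        }

        // Stand-in for a compiled plan.
        static final class Plan {
            final String compiledForm;

            Plan(String compiledForm) {
                this.compiledForm = compiledForm;
            }
        }

        private final ConcurrentMap<PlanKey, Plan> cache = new ConcurrentHashMap<>();

        // Cache-aside lookup: reuse an existing plan, otherwise compile and store it.
        Plan prepare(List<List<String>> searchPaths, String sql) {
            PlanKey key = new PlanKey(searchPaths, sql);
            Plan plan = cache.get(key);
            if (plan == null) {
                plan = compile(sql);
                cache.put(key, plan);
            }
            return plan;
        }

        // Stand-in for the optimizer; the real code builds an SqlCatalog snapshot
        // and hands it to the optimizer together with the optimization task.
        private Plan compile(String sql) {
            return new Plan("PLAN[" + sql + "]");
        }

        public static void main(String[] args) {
            PlanCacheSketch cache = new PlanCacheSketch();
            List<List<String>> paths = List.of(List.of("hazelcast", "public"));
            Plan first = cache.prepare(paths, "SELECT * FROM t");
            Plan second = cache.prepare(paths, "SELECT * FROM t");
            System.out.println(first == second); // true: the second call hits the cache
        }
    }

The real method additionally skips caching for non-cacheable plans and is called concurrently, which is why the production code uses a dedicated PlanCache rather than a bare map.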
Use of com.hazelcast.sql.impl.schema.SqlCatalog in project hazelcast by hazelcast.
In class ParserOperationsTest, method createContext:
    private static OptimizerContext createContext() {
        PartitionedMapTable partitionedMapTable = new PartitionedMapTable("public", "t", "t",
                Arrays.asList(field("a"), field("b")), new ConstantTableStatistics(100L),
                null, null, null, null, null, false);
        TableResolver resolver = TestTableResolver.create("public", partitionedMapTable);
        List<TableResolver> tableResolvers = Collections.singletonList(resolver);
        List<List<String>> searchPaths = QueryUtils.prepareSearchPaths(emptyList(), tableResolvers);
        return OptimizerContext.create(new SqlCatalog(tableResolvers), searchPaths, emptyList(), 1, name -> null);
    }
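This test wires a single table into a TestTableResolver and builds the SqlCatalog from it; the catalog ultimately exposes a two-level schema-to-tables view (the same shape PlanCacheChecker below walks via getSchemas()). The following is a simplified sketch of that two-level structure, assuming a stripped-down Table that holds only its schema and name rather than the real Hazelcast Table:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class CatalogSketch {

        // Simplified table descriptor; the real Table also carries fields, statistics, etc.
        static final class Table {
            final String schemaName;
            final String tableName;

            Table(String schemaName, String tableName) {
                this.schemaName = schemaName;
                this.tableName = tableName;
            }
        }

        // Build a schema -> (table name -> table) map from a flat list of tables,
        // mirroring the two-level structure exposed by SqlCatalog.getSchemas().
        static Map<String, Map<String, Table>> buildSchemas(List<Table> tables) {
            Map<String, Map<String, Table>> schemas = new HashMap<>();
            for (Table table : tables) {
                schemas.computeIfAbsent(table.schemaName, k -> new HashMap<>())
                       .put(table.tableName, table);
            }
            return schemas;
        }

        public static void main(String[] args) {
            Map<String, Map<String, Table>> schemas =
                    buildSchemas(Arrays.asList(new Table("public", "t")));
            System.out.println(schemas.get("public").containsKey("t")); // true
        }
    }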
Use of com.hazelcast.sql.impl.schema.SqlCatalog in project hazelcast by hazelcast.
In class ParserNameResolutionTest, method createContext:
    private static OptimizerContext createContext() {
        PartitionedMapTable table1 = new PartitionedMapTable(SCHEMA_1, TABLE_1, TABLE_1,
                Arrays.asList(field(FIELD_1), field(FIELD_2)), new ConstantTableStatistics(100L),
                null, null, null, null, null, false);
        PartitionedMapTable table2 = new PartitionedMapTable(SCHEMA_2, TABLE_2, TABLE_2,
                Arrays.asList(field(FIELD_1), field(FIELD_2)), new ConstantTableStatistics(100L),
                null, null, null, null, null, false);
        TableResolver resolver1 = TestTableResolver.create(SCHEMA_1, table1);
        TableResolver resolver2 = TestTableResolver.create(SCHEMA_2, table2);
        List<TableResolver> tableResolvers = Arrays.asList(resolver1, resolver2);
        List<List<String>> searchPaths = QueryUtils.prepareSearchPaths(emptyList(), tableResolvers);
        return OptimizerContext.create(new SqlCatalog(tableResolvers), searchPaths, emptyList(), 1, name -> null);
    }
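With two schemas each holding its own table, the prepared search paths decide how an unqualified name is resolved, while a schema-qualified name is looked up directly. The sketch below illustrates that resolution rule with plain maps and a hypothetical resolve helper; it is not the actual parser logic, only a model of the behaviour the test exercises.

    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;

    public class NameResolutionSketch {

        // Resolve a possibly unqualified table name: a qualified name ("schema.table")
        // is looked up directly, an unqualified one is tried against each search path
        // in order, which is why equally named tables in different schemas do not clash.
        static String resolve(Map<String, List<String>> schemaToTables,
                              List<List<String>> searchPaths,
                              String name) {
            if (name.contains(".")) {
                String[] parts = name.split("\\.", 2);
                return schemaToTables.getOrDefault(parts[0], List.of()).contains(parts[1])
                        ? parts[0] + "." + parts[1] : null;
            }
            for (List<String> path : searchPaths) {
                String schema = path.get(path.size() - 1); // last component names the schema
                if (schemaToTables.getOrDefault(schema, List.of()).contains(name)) {
                    return schema + "." + name;
                }
            }
            return null;
        }

        public static void main(String[] args) {
            Map<String, List<String>> schemas = Map.of(
                    "schema1", List.of("table1"),
                    "schema2", List.of("table2"));
            List<List<String>> searchPaths = Arrays.asList(
                    Arrays.asList("hazelcast", "schema1"),
                    Arrays.asList("hazelcast", "schema2"));
            System.out.println(resolve(schemas, searchPaths, "table2"));         // schema2.table2
            System.out.println(resolve(schemas, searchPaths, "schema1.table1")); // schema1.table1
        }
    }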
Use of com.hazelcast.sql.impl.schema.SqlCatalog in project hazelcast by hazelcast.
In class PlanCacheChecker, method check:
    public void check() {
        if (planCache.size() == 0) {
            return;
        }

        // Collect object IDs
        SqlCatalog catalog = new SqlCatalog(tableResolvers);
        Set<PlanObjectKey> objectKeys = new HashSet<>();
        for (Map<String, Table> tableMap : catalog.getSchemas().values()) {
            for (Table table : tableMap.values()) {
                PlanObjectKey objectKey = table.getObjectKey();
                if (objectKey != null) {
                    objectKeys.add(objectKey);
                }
            }
        }

        // Prepare partition distribution
        Map<UUID, PartitionIdSet> partitions = QueryUtils.createPartitionMap(nodeEngine, null, false);

        // Do check
        planCache.check(new PlanCheckContext(objectKeys, partitions));
    }
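The checker takes a fresh SqlCatalog snapshot, gathers the object keys of all currently resolvable tables, and asks the plan cache to evict entries that no longer match the catalog or partition layout. The following is a self-contained sketch of that invalidation idea, with string object keys and a hypothetical CachedPlan type standing in for the real PlanObjectKey and plan classes:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    public class PlanCacheCheckSketch {

        // Illustrative cached plan: remembers which catalog objects it was built against.
        static final class CachedPlan {
            final Set<String> referencedObjectKeys;

            CachedPlan(String... keys) {
                this.referencedObjectKeys = new HashSet<>(Arrays.asList(keys));
            }
        }

        private final Map<String, CachedPlan> cache = new ConcurrentHashMap<>();

        void put(String sql, CachedPlan plan) {
            cache.put(sql, plan);
        }

        // Invalidation pass: drop every plan that references an object key no longer
        // present in the current catalog snapshot, mirroring what PlanCacheChecker
        // delegates to planCache.check(...).
        void check(Set<String> currentObjectKeys) {
            cache.values().removeIf(plan -> !currentObjectKeys.containsAll(plan.referencedObjectKeys));
        }

        public static void main(String[] args) {
            PlanCacheCheckSketch checker = new PlanCacheCheckSketch();
            checker.put("SELECT * FROM t", new CachedPlan("map:t"));
            checker.put("SELECT * FROM dropped", new CachedPlan("map:dropped"));
            checker.check(new HashSet<>(Arrays.asList("map:t"))); // only "map:t" still exists
            System.out.println(checker.cache.keySet());           // [SELECT * FROM t]
        }
    }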