Use of com.hazelcast.sql.impl.row.JetSqlRow in project hazelcast by hazelcast.
From the class MapIndexScanPTest, method test_whenFilterExistsWithoutSpecificProjection_sorted:
@Test
public void test_whenFilterExistsWithoutSpecificProjection_sorted() {
    // Populate the map in descending key order and collect the rows the range
    // filter below is expected to return, in ascending order.
    List<JetSqlRow> expected = new ArrayList<>();
    for (int i = count; i > 0; i--) {
        map.put(i, new Person("value-" + i, i));
        if (i > count / 2) {
            expected.add(jetRow((count - i + 1), "value-" + (count - i + 1), (count - i + 1)));
        }
    }

    IndexConfig indexConfig = new IndexConfig(IndexType.SORTED, "age").setName(randomName());
    map.addIndex(indexConfig);

    // Range filter over the sorted "age" index: 0 <= age <= count / 2, both ends inclusive.
    IndexFilter filter = new IndexRangeFilter(intValue(0), true, intValue(count / 2), true);
    MapIndexScanMetadata metadata = metadata(indexConfig.getName(), filter, 2, false);

    TestSupport.verifyProcessor(adaptSupplier(MapIndexScanP.readMapIndexSupplier(metadata)))
            .hazelcastInstance(instance())
            .jobConfig(new JobConfig().setArgument(SQL_ARGUMENTS_KEY_NAME, emptyList()))
            .outputChecker(LENIENT_SAME_ITEMS_IN_ORDER)
            .disableSnapshots()
            .disableProgressAssertion()
            .expectOutput(expected);
}
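The test drives the index scan processor directly, but the sorted index it relies on is ordinary public API. Below is a minimal, self-contained sketch of creating a SORTED index on the "age" attribute of an IMap and filling it with entries; the Person class, the "people" map name, and the entry count are illustrative stand-ins for the test's fixtures.

import java.io.Serializable;

import com.hazelcast.config.IndexConfig;
import com.hazelcast.config.IndexType;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.map.IMap;

public class SortedIndexExample {

    // Illustrative value type standing in for the test's Person fixture.
    public static class Person implements Serializable {
        public String name;
        public int age;

        public Person(String name, int age) {
            this.name = name;
            this.age = age;
        }
    }

    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        IMap<Integer, Person> people = hz.getMap("people");

        // Sorted index on the "age" attribute, the same shape the test configures
        // before running the index scan.
        people.addIndex(new IndexConfig(IndexType.SORTED, "age"));

        for (int i = 10; i > 0; i--) {
            people.put(i, new Person("value-" + i, i));
        }

        hz.shutdown();
    }
}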
Use of com.hazelcast.sql.impl.row.JetSqlRow in project hazelcast by hazelcast.
From the class PlanExecutor, method execute (ExplainStatementPlan overload):
SqlResult execute(ExplainStatementPlan plan) {
    SqlRowMetadata metadata = new SqlRowMetadata(
            singletonList(new SqlColumnMetadata("rel", VARCHAR, false)));
    InternalSerializationService serializationService = Util.getSerializationService(hazelcastInstance);
    Stream<String> planRows = Arrays.stream(plan.getRel().explain().split(LE));
    return new SqlResultImpl(
            QueryId.create(hazelcastInstance.getLocalEndpoint().getUuid()),
            new StaticQueryResultProducerImpl(planRows
                    .map(rel -> new JetSqlRow(serializationService, new Object[]{rel}))
                    .iterator()),
            metadata,
            false);
}
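For reference, the rows this producer emits are what a client sees when it runs an EXPLAIN statement: a single VARCHAR column named "rel", one row per line of the plan. A minimal sketch against the public SQL API; the "people" mapping is an assumption and must already exist (for example via CREATE MAPPING) for the statement to compile.

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.sql.SqlResult;
import com.hazelcast.sql.SqlRow;

public class ExplainExample {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();

        // "people" is an illustrative mapping name; it has to be mapped beforehand.
        try (SqlResult result = hz.getSql().execute("EXPLAIN SELECT * FROM people WHERE age > 30")) {
            // Each plan line arrives as a separate row with a single "rel" column.
            for (SqlRow row : result) {
                System.out.println((String) row.getObject(0));
            }
        }

        hz.shutdown();
    }
}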
Use of com.hazelcast.sql.impl.row.JetSqlRow in project hazelcast by hazelcast.
From the class PlanExecutor, method execute (IMapSelectPlan overload):
SqlResult execute(IMapSelectPlan plan, QueryId queryId, List<Object> arguments, long timeout) {
    List<Object> args = prepareArguments(plan.parameterMetadata(), arguments);
    InternalSerializationService serializationService = Util.getSerializationService(hazelcastInstance);
    ExpressionEvalContext evalContext = new ExpressionEvalContext(args, serializationService);
    Object key = plan.keyCondition().eval(EmptyRow.INSTANCE, evalContext);
    // Fetch the single entry asynchronously and project it into a JetSqlRow.
    CompletableFuture<JetSqlRow> future = hazelcastInstance.getMap(plan.mapName())
            .getAsync(key)
            .toCompletableFuture()
            .thenApply(value -> plan.rowProjectorSupplier()
                    .get(evalContext, Extractors.newBuilder(serializationService).build())
                    .project(key, value));
    JetSqlRow row = await(future, timeout);
    return new SqlResultImpl(queryId, new StaticQueryResultProducerImpl(row), plan.rowMetadata(), false);
}
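This plan is the fast path for a SELECT whose predicate pins down the key exactly, so the engine can answer it with a single getAsync() instead of a scan. The sketch below shows the kind of statement that takes this path through the public SQL API; the "people" mapping, its key and value formats, and the sample entry are assumptions, and the CREATE MAPPING syntax follows Hazelcast 5.x.

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.sql.SqlResult;
import com.hazelcast.sql.SqlRow;

public class SelectByKeyExample {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();

        // Declare the mapping so SQL knows the key/value formats of the IMap.
        hz.getSql().execute(
                "CREATE MAPPING people TYPE IMap "
                        + "OPTIONS ('keyFormat'='int', 'valueFormat'='varchar')");
        hz.getMap("people").put(1, "Alice");

        // An exact __key predicate is the shape that can be served by a single key lookup.
        try (SqlResult result = hz.getSql().execute("SELECT * FROM people WHERE __key = ?", 1)) {
            for (SqlRow row : result) {
                System.out.println(row);
            }
        }

        hz.shutdown();
    }
}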
Use of com.hazelcast.sql.impl.row.JetSqlRow in project hazelcast by hazelcast.
From the class QueryResultProducerImpl, method consume:
public void consume(Inbox inbox) {
    ensureNotDone();
    if (limit <= 0) {
        done.compareAndSet(null, new ResultLimitReachedException());
        ensureNotDone();
    }
    // Skip the first `offset` rows.
    while (offset > 0 && inbox.poll() != null) {
        offset--;
    }
    // Move rows from the inbox into the result queue until the inbox is drained
    // or the queue stops accepting rows; stop early once the limit is exhausted.
    for (JetSqlRow row; (row = (JetSqlRow) inbox.peek()) != null && rows.offer(row); ) {
        inbox.remove();
        if (limit != Long.MAX_VALUE) {
            limit -= 1;
            if (limit < 1) {
                done.compareAndSet(null, new ResultLimitReachedException());
                ensureNotDone();
            }
        }
    }
}
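To make the bookkeeping easier to follow in isolation, here is a small self-contained sketch of the same offset-then-limit draining over plain java.util queues. The done/ensureNotDone signalling of the real class is reduced to a boolean flag, and all names here are hypothetical; only the control flow mirrors consume().

import java.util.ArrayDeque;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class OffsetLimitSketch {

    private long offset = 2;
    private long limit = 3;
    private boolean limitReached;
    private final BlockingQueue<String> rows = new ArrayBlockingQueue<>(16);

    // Drains `inbox` into `rows`, honoring offset and limit the way consume() does.
    void consume(Queue<String> inbox) {
        if (limit <= 0) {
            limitReached = true;
            return;
        }
        // Drop the first `offset` items.
        while (offset > 0 && inbox.poll() != null) {
            offset--;
        }
        // Transfer items until the inbox is empty or the output queue stops accepting.
        for (String row; (row = inbox.peek()) != null && rows.offer(row); ) {
            inbox.remove();
            if (limit != Long.MAX_VALUE && --limit < 1) {
                limitReached = true;
                return;
            }
        }
    }

    public static void main(String[] args) {
        OffsetLimitSketch sketch = new OffsetLimitSketch();
        Queue<String> inbox = new ArrayDeque<>(List.of("r1", "r2", "r3", "r4", "r5", "r6"));
        sketch.consume(inbox);
        System.out.println(sketch.rows);         // [r3, r4, r5]: offset 2 skipped, limit 3 kept
        System.out.println(sketch.limitReached); // true
    }
}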
Use of com.hazelcast.sql.impl.row.JetSqlRow in project hazelcast by hazelcast.
From the class CreateDagVisitor, method onCombine:
public Vertex onCombine(AggregateCombinePhysicalRel rel) {
    AggregateOperation<?, JetSqlRow> aggregateOperation = rel.aggrOp();
    Vertex vertex = dag.newUniqueVertex(
            "Combine",
            ProcessorMetaSupplier.forceTotalParallelismOne(
                    ProcessorSupplier.of(Processors.combineP(aggregateOperation)),
                    localMemberAddress));
    connectInput(rel.getInput(), vertex, edge -> edge.distributeTo(localMemberAddress).allToOne(""));
    return vertex;
}
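The vertex built here is the second half of a two-stage aggregation: upstream processors accumulate partial results on every member, and this single "Combine" vertex (total parallelism one, fed by an all-to-one edge distributed to the local member) merges them. The Pipeline API sketch below produces an analogous accumulate-then-combine shape for a global aggregation; it illustrates the execution pattern, not the code path the visitor itself generates, and the item values are arbitrary.

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.jet.aggregate.AggregateOperations;
import com.hazelcast.jet.pipeline.Pipeline;
import com.hazelcast.jet.pipeline.Sinks;
import com.hazelcast.jet.pipeline.test.TestSources;

public class GlobalAggregationExample {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();

        Pipeline p = Pipeline.create();
        // A non-grouped aggregation: members accumulate locally, then a single
        // combining vertex merges the partial results, the same split onCombine() builds for SQL.
        p.readFrom(TestSources.items(1, 2, 3, 4, 5))
         .aggregate(AggregateOperations.counting())
         .writeTo(Sinks.logger());

        hz.getJet().newJob(p).join();
        hz.shutdown();
    }
}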