Use of org.apache.commons.lang3.tuple.Pair in project asterixdb by apache: class RemoveLeftOuterUnnestForLeftOuterJoinRule, method checkNestedPlan.
// Checks that the group-by operator has exactly one nested plan with a
// single root; returns (true, root) if so, and (false, null) otherwise.
private Pair<Boolean, ILogicalOperator> checkNestedPlan(GroupByOperator gbyOperator) {
    List<ILogicalPlan> nestedPlans = gbyOperator.getNestedPlans();
    if (nestedPlans.size() > 1) {
        return new Pair<>(false, null);
    }
    ILogicalPlan plan = nestedPlans.get(0);
    List<Mutable<ILogicalOperator>> roots = plan.getRoots();
    if (roots.size() > 1) {
        return new Pair<>(false, null);
    }
    ILogicalOperator root = roots.get(0).getValue();
    return new Pair<>(true, root);
}
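A detail worth flagging: this method constructs the pair with new Pair<>(...), and the next snippet reads a public p.second field. That matches the Algebricks utility Pair rather than org.apache.commons.lang3.tuple.Pair, which is abstract, built with Pair.of, and read with getLeft()/getRight(). Below is a minimal, self-contained sketch of the same (valid flag, payload) idiom using the commons-lang3 Pair; PlanCheck and checkSingleRoot are illustrative names, not AsterixDB APIs.

import java.util.List;

import org.apache.commons.lang3.tuple.Pair;

public final class PlanCheck {

    // Returns (true, root) when there is exactly one root, else (false, null),
    // mirroring the shape check performed by checkNestedPlan above.
    static Pair<Boolean, String> checkSingleRoot(List<String> roots) {
        if (roots.size() != 1) {
            return Pair.of(false, null);
        }
        return Pair.of(true, roots.get(0));
    }

    public static void main(String[] args) {
        Pair<Boolean, String> result = checkSingleRoot(List.of("root0"));
        if (result.getLeft()) {
            System.out.println("single root: " + result.getRight());
        }
    }
}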
Use of org.apache.commons.lang3.tuple.Pair in project asterixdb by apache: class ExternalGroupByPOperator, method computeColumnSet.
// Collects into columnSet every group-by key whose expression is a plain
// variable reference; other expression kinds are skipped.
public void computeColumnSet(List<Pair<LogicalVariable, Mutable<ILogicalExpression>>> gbyList) {
    columnSet.clear();
    for (Pair<LogicalVariable, Mutable<ILogicalExpression>> p : gbyList) {
        ILogicalExpression expr = p.second.getValue();
        if (expr.getExpressionTag() == LogicalExpressionTag.VARIABLE) {
            VariableReferenceExpression v = (VariableReferenceExpression) expr;
            columnSet.add(v.getVariableReference());
        }
    }
}
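As a usage note, p.second here is a public field, again the Algebricks Pair; with commons-lang3 you would call p.getRight() (or p.getValue()). A self-contained sketch of the same tag-and-collect pattern using the commons-lang3 Pair, with simplified stand-in types (Tag and Expr are illustrative, not Algebricks classes):

import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.commons.lang3.tuple.Pair;

public class ColumnSetSketch {

    enum Tag { VARIABLE, CONSTANT }

    record Expr(Tag tag, String name) {}

    // Keep only the group-by entries whose expression is a plain variable
    // reference, mirroring the tag check in computeColumnSet above.
    static Set<String> computeColumnSet(List<Pair<String, Expr>> gbyList) {
        Set<String> columnSet = new HashSet<>();
        for (Pair<String, Expr> p : gbyList) {
            if (p.getRight().tag() == Tag.VARIABLE) {
                columnSet.add(p.getRight().name());
            }
        }
        return columnSet;
    }

    public static void main(String[] args) {
        List<Pair<String, Expr>> gbyList = List.of(
                Pair.of("g1", new Expr(Tag.VARIABLE, "$x")),
                Pair.of("g2", new Expr(Tag.CONSTANT, "42")));
        System.out.println(computeColumnSet(gbyList)); // prints [$x]
    }
}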
Use of org.apache.commons.lang3.tuple.Pair in project asterixdb by apache: class PartitionMatchMaker, method registerPartitionDescriptor.
public List<Pair<PartitionDescriptor, PartitionRequest>> registerPartitionDescriptor(PartitionDescriptor partitionDescriptor) {
    List<Pair<PartitionDescriptor, PartitionRequest>> matches = new ArrayList<Pair<PartitionDescriptor, PartitionRequest>>();
    PartitionId pid = partitionDescriptor.getPartitionId();
    boolean matched = false;
    List<PartitionRequest> requests = partitionRequests.get(pid);
    if (requests != null) {
        Iterator<PartitionRequest> i = requests.iterator();
        while (i.hasNext()) {
            PartitionRequest req = i.next();
            if (partitionDescriptor.getState().isAtLeast(req.getMinimumState())) {
                matches.add(Pair.<PartitionDescriptor, PartitionRequest>of(partitionDescriptor, req));
                i.remove();
                matched = true;
                // A non-reusable partition can satisfy at most one request.
                if (!partitionDescriptor.isReusable()) {
                    break;
                }
            }
        }
        if (requests.isEmpty()) {
            partitionRequests.remove(pid);
        }
    }
    if (!matched) {
        // No waiting request matched: park the descriptor until one arrives.
        List<PartitionDescriptor> descriptors = partitionDescriptors.get(pid);
        if (descriptors == null) {
            descriptors = new ArrayList<PartitionDescriptor>();
            partitionDescriptors.put(pid, descriptors);
        }
        descriptors.add(partitionDescriptor);
    }
    return matches;
}
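This one genuinely is the commons-lang3 Pair, built with the static factory; the explicit type witness in Pair.<PartitionDescriptor, PartitionRequest>of(...) is only needed under older inference rules, since a bare Pair.of(...) infers the same type from its arguments. A minimal, self-contained sketch of the same match-making idiom, with String stand-ins for the descriptor and request types (MatchMakerSketch and its members are illustrative, not Hyracks APIs):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import org.apache.commons.lang3.tuple.Pair;

public class MatchMakerSketch {

    private final Map<Integer, List<String>> waitingRequests = new HashMap<>();

    // Pair a newly arrived descriptor with every waiting request for the same
    // partition, consuming each request as it is matched.
    List<Pair<String, String>> register(int partitionId, String descriptor) {
        List<Pair<String, String>> matches = new ArrayList<>();
        List<String> requests = waitingRequests.get(partitionId);
        if (requests != null) {
            Iterator<String> it = requests.iterator();
            while (it.hasNext()) {
                matches.add(Pair.of(descriptor, it.next()));
                it.remove(); // each request is satisfied at most once
            }
            if (requests.isEmpty()) {
                waitingRequests.remove(partitionId);
            }
        }
        return matches;
    }
}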
Use of org.apache.commons.lang3.tuple.Pair in project asterixdb by apache: class FramewriterTest, method createWriters.
/**
 * @return an array of writers to test; these writers can be of the same type
 *         but behave differently based on the included mocks
 * @throws HyracksDataException
 */
public IFrameWriter[] createWriters() throws HyracksDataException {
    ArrayList<BTreeSearchOperatorNodePushable> writers = new ArrayList<>();
    Pair<IIndexDataflowHelperFactory, ISearchOperationCallbackFactory>[] pairs = pairs();
    IRecordDescriptorProvider[] recordDescProviders = mockRecDescProviders();
    int partition = 0;
    IHyracksTaskContext[] ctxs = mockIHyracksTaskContext();
    int[] keys = { 0 };
    boolean lowKeyInclusive = true;
    boolean highKeyInclusive = true;
    // Build one writer per combination of mocked dependencies.
    for (Pair<IIndexDataflowHelperFactory, ISearchOperationCallbackFactory> pair : pairs) {
        for (IRecordDescriptorProvider recordDescProvider : recordDescProviders) {
            for (IHyracksTaskContext ctx : ctxs) {
                BTreeSearchOperatorNodePushable writer = new BTreeSearchOperatorNodePushable(ctx, partition,
                        recordDescProvider.getInputRecordDescriptor(new ActivityId(new OperatorDescriptorId(0), 0), 0),
                        keys, keys, lowKeyInclusive, highKeyInclusive, keys, keys, pair.getLeft(), false, false, null,
                        pair.getRight(), false);
                writers.add(writer);
            }
        }
    }
    // Hand back the frame writers created from the mocks.
    return writers.toArray(new IFrameWriter[writers.size()]);
}
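Here the Pair simply carries two mocked factories through a cross product, read back with getLeft() and getRight(). A compact, self-contained sketch of that fan-out pattern (Widget, createAll, and CrossProductSketch are hypothetical stand-ins, not Hyracks types):

import java.util.ArrayList;
import java.util.List;

import org.apache.commons.lang3.tuple.Pair;

public class CrossProductSketch {

    record Widget(String helper, String callback, String ctx) {}

    // Build one object per combination of paired dependencies and contexts,
    // mirroring the nested loops in createWriters above.
    static List<Widget> createAll(List<Pair<String, String>> pairs, List<String> ctxs) {
        List<Widget> out = new ArrayList<>();
        for (Pair<String, String> pair : pairs) {
            for (String ctx : ctxs) {
                out.add(new Widget(pair.getLeft(), pair.getRight(), ctx));
            }
        }
        return out;
    }

    public static void main(String[] args) {
        System.out.println(createAll(
                List.of(Pair.of("helperA", "callbackA")),
                List.of("ctx1", "ctx2")));
    }
}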
Use of org.apache.commons.lang3.tuple.Pair in project beam by apache: class TranslationContext, method populateDAG.
public void populateDAG(DAG dag) {
    for (Map.Entry<String, Operator> nameAndOperator : this.operators.entrySet()) {
        dag.addOperator(nameAndOperator.getKey(), nameAndOperator.getValue());
    }
    int streamIndex = 0;
    for (Map.Entry<PCollection, Pair<OutputPortInfo, List<InputPortInfo>>> streamEntry : this.streams.entrySet()) {
        List<InputPortInfo> destInfo = streamEntry.getValue().getRight();
        InputPort[] sinks = new InputPort[destInfo.size()];
        for (int i = 0; i < sinks.length; i++) {
            sinks[i] = destInfo.get(i).port;
        }
        if (sinks.length > 0) {
            DAG.StreamMeta streamMeta = dag.addStream("stream" + streamIndex++,
                    streamEntry.getValue().getLeft().port, sinks);
            if (pipelineOptions.isParDoFusionEnabled()) {
                optimizeStreams(streamMeta, streamEntry);
            }
            for (InputPort port : sinks) {
                PCollection pc = streamEntry.getKey();
                Coder coder = pc.getCoder();
                if (pc.getWindowingStrategy() != null) {
                    // Wrap the element coder so it can carry windowing information.
                    coder = FullWindowedValueCoder.of(pc.getCoder(),
                            pc.getWindowingStrategy().getWindowFn().windowCoder());
                }
                Coder<Object> wrapperCoder = ApexStreamTuple.ApexStreamTupleCoder.of(coder);
                CoderAdapterStreamCodec streamCodec = new CoderAdapterStreamCodec(wrapperCoder);
                dag.setInputPortAttribute(port, PortContext.STREAM_CODEC, streamCodec);
            }
        }
    }
}
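Here the Pair keeps a stream's single output port and its list of input ports together as one map value, so the wiring loop can fetch both sides with getLeft() and getRight(). A simplified, self-contained sketch of that wiring pattern, with String ports standing in for the Apex port types (DagWiringSketch is illustrative, not a Beam or Apex API):

import java.util.List;
import java.util.Map;

import org.apache.commons.lang3.tuple.Pair;

public class DagWiringSketch {

    // Each map value pairs one source port with the sink ports it feeds.
    static void populate(Map<String, Pair<String, List<String>>> streams) {
        int streamIndex = 0;
        for (Map.Entry<String, Pair<String, List<String>>> e : streams.entrySet()) {
            String source = e.getValue().getLeft();
            List<String> sinks = e.getValue().getRight();
            if (!sinks.isEmpty()) {
                System.out.printf("stream%d: %s -> %s%n", streamIndex++, source, sinks);
            }
        }
    }

    public static void main(String[] args) {
        populate(Map.of("pc1", Pair.of("out1", List.of("in1", "in2"))));
    }
}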