Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.tuple.Pair in project asterixdb by apache.
The class CodeGenUtil, method generateScalarFunctionDescriptorBinary.
/**
 * Generates the byte code for a scalar function descriptor.
 *
 * @param packagePrefix
 *            the prefix of evaluators for code generation.
 * @param originalFuncDescriptorClassName
 *            the original class name of the function descriptor.
 * @param suffixForGeneratedClass
 *            the suffix for the generated class.
 * @param classLoader
 *            the class loader used to read the original class bytes.
 * @param action
 *            the customized action for the generated class definition bytes.
 * @return the mappings from original to generated class names.
 * @throws IOException
 * @throws ClassNotFoundException
 */
public static List<Pair<String, String>> generateScalarFunctionDescriptorBinary(String packagePrefix,
        String originalFuncDescriptorClassName, String suffixForGeneratedClass, ClassLoader classLoader,
        ClassByteCodeAction action) throws IOException, ClassNotFoundException {
    originalFuncDescriptorClassName = toInternalClassName(originalFuncDescriptorClassName);
    if (originalFuncDescriptorClassName.equals(DESCRIPTOR_SUPER_CLASS_NAME)) {
        return Collections.emptyList();
    }
    String targetFuncDescriptorClassName =
            getGeneratedFunctionDescriptorInternalClassName(originalFuncDescriptorClassName, suffixForGeneratedClass);
    // Adds the mapping of the old/new names of the function descriptor.
    List<Pair<String, String>> nameMappings = new ArrayList<>();
    // Generates code for super classes except java.lang.Object.
    Class<?> evaluatorClass =
            CodeGenUtil.class.getClassLoader().loadClass(toJdkStandardName(originalFuncDescriptorClassName));
    nameMappings.addAll(generateScalarFunctionDescriptorBinary(packagePrefix,
            evaluatorClass.getSuperclass().getName(), suffixForGeneratedClass, classLoader, action));
    nameMappings.add(Pair.of(originalFuncDescriptorClassName, targetFuncDescriptorClassName));
    nameMappings.add(Pair.of(toJdkStandardName(originalFuncDescriptorClassName),
            toJdkStandardName(targetFuncDescriptorClassName)));
    // Gathers evaluator factory classes that are created in the function descriptor.
    ClassReader reader = new ClassReader(getResourceStream(originalFuncDescriptorClassName, classLoader));
    GatherEvaluatorFactoryCreationVisitor evalFactoryCreationVisitor =
            new GatherEvaluatorFactoryCreationVisitor(toInternalClassName(packagePrefix));
    reader.accept(evalFactoryCreationVisitor, 0);
    Set<String> evaluatorFactoryClassNames = evalFactoryCreationVisitor.getCreatedEvaluatorFactoryClassNames();
    // Generates inner classes other than evaluator factories.
    generateNonEvalInnerClasses(reader, evaluatorFactoryClassNames, nameMappings, suffixForGeneratedClass,
            classLoader, action);
    // Generates evaluator factories that are created in the function descriptor.
    int evalFactoryCounter = 0;
    for (String evaluateFactoryClassName : evaluatorFactoryClassNames) {
        generateEvaluatorFactoryClassBinary(packagePrefix, evaluateFactoryClassName, suffixForGeneratedClass,
                evalFactoryCounter++, nameMappings, classLoader, action);
    }
    // Transforms the function descriptor class and outputs the generated class binary.
    ClassWriter writer = new ClassWriter(reader, 0);
    RenameClassVisitor renamingVisitor = new RenameClassVisitor(writer, nameMappings);
    reader.accept(renamingVisitor, 0);
    action.runAction(targetFuncDescriptorClassName, writer.toByteArray());
    return nameMappings;
}
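The method accumulates its result as (old name, new name) pairs. The following is a minimal, self-contained sketch of that pattern; it uses the plain org.apache.commons.lang3.tuple.Pair rather than the Beam-repackaged copy, and the class names are hypothetical, not taken from the AsterixDB codebase:

import java.util.ArrayList;
import java.util.List;

import org.apache.commons.lang3.tuple.Pair;

public class NameMappingDemo {
    public static void main(String[] args) {
        // Collect old/new class-name mappings, as the method above does.
        List<Pair<String, String>> nameMappings = new ArrayList<>();
        nameMappings.add(Pair.of("org/example/OldDescriptor", "org/example/OldDescriptor_Gen"));
        // Pair.of returns an immutable pair; read the components with getLeft()/getRight().
        for (Pair<String, String> mapping : nameMappings) {
            System.out.println(mapping.getLeft() + " -> " + mapping.getRight());
        }
    }
}

Because Pair.of creates an immutable pair, a mapping cannot be altered after it has been recorded, which suits an append-only mapping list like nameMappings.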
Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.tuple.Pair in project incubator-systemml by apache.
The class PlanSelectionFuseCostBased, method rGetPlanCosts.
private static double rGetPlanCosts(CPlanMemoTable memo, Hop current, HashSet<Pair<Long, Long>> visited,
        HashSet<Long> partition, ArrayList<Long> M, boolean[] plan, HashMap<Long, Double> computeCosts,
        CostVector costsCurrent, TemplateType currentType) {
    //memoization per hop id and cost vector to account for redundant
    //computation without double counting materialized results or compute
    //costs of complex operation DAGs within a single fused operator
    Pair<Long, Long> tag = Pair.of(current.getHopID(), (costsCurrent == null) ? 0 : costsCurrent.ID);
    if (visited.contains(tag))
        return 0;
    visited.add(tag);
    //open template if necessary, including memoization
    //under awareness of current plan choice
    MemoTableEntry best = null;
    boolean opened = false;
    if (memo.contains(current.getHopID())) {
        if (currentType == null) {
            best = memo.get(current.getHopID()).stream().filter(p -> isValid(p, current))
                    .filter(p -> hasNoRefToMaterialization(p, M, plan)).min(new BasicPlanComparator()).orElse(null);
            opened = true;
        } else {
            best = memo.get(current.getHopID()).stream()
                    .filter(p -> p.type == currentType || p.type == TemplateType.CellTpl)
                    .filter(p -> hasNoRefToMaterialization(p, M, plan))
                    .min(Comparator.comparing(p -> 7 - ((p.type == currentType) ? 4 : 0) - p.countPlanRefs()))
                    .orElse(null);
        }
    }
    //create new cost vector if opened, initialized with write costs
    CostVector costVect = !opened ? costsCurrent
            : new CostVector(Math.max(current.getDim1(), 1) * Math.max(current.getDim2(), 1));
    //add compute costs of current operator to costs vector
    if (partition.contains(current.getHopID()))
        costVect.computeCosts += computeCosts.get(current.getHopID());
    //process children recursively
    double costs = 0;
    for (int i = 0; i < current.getInput().size(); i++) {
        Hop c = current.getInput().get(i);
        if (best != null && best.isPlanRef(i))
            costs += rGetPlanCosts(memo, c, visited, partition, M, plan, computeCosts, costVect, best.type);
        else if (best != null && isImplicitlyFused(current, i, best.type))
            costVect.addInputSize(c.getInput().get(0).getHopID(),
                    Math.max(c.getDim1(), 1) * Math.max(c.getDim2(), 1));
        else {
            //include children and I/O costs
            costs += rGetPlanCosts(memo, c, visited, partition, M, plan, computeCosts, null, null);
            if (costVect != null && c.getDataType().isMatrix())
                costVect.addInputSize(c.getHopID(), Math.max(c.getDim1(), 1) * Math.max(c.getDim2(), 1));
        }
    }
    //add costs for opened fused operator
    if (partition.contains(current.getHopID())) {
        if (opened) {
            if (LOG.isTraceEnabled())
                LOG.trace("Cost vector for fused operator (hop " + current.getHopID() + "): " + costVect);
            //time for output write
            costs += costVect.outSize * 8 / WRITE_BANDWIDTH;
            costs += Math.max(costVect.computeCosts * costVect.getMaxInputSize() / COMPUTE_BANDWIDTH,
                    costVect.getSumInputSizes() * 8 / READ_BANDWIDTH);
        } else if (hasNonPartitionConsumer(current, partition)) {
            //add costs for non-partition read in the middle of fused operator
            costs += rGetPlanCosts(memo, current, visited, partition, M, plan, computeCosts, null, null);
        }
    }
    //sanity check non-negative costs
    if (costs < 0 || Double.isNaN(costs) || Double.isInfinite(costs))
        throw new RuntimeException("Wrong cost estimate: " + costs);
    return costs;
}
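The memoization above works because commons-lang3 pairs compare by value: equals() and hashCode() are defined over both components, which is what makes Pair<Long, Long> usable as a HashSet key for the (hop id, cost-vector id) tag. A small standalone illustration of that property:

import java.util.HashSet;
import java.util.Set;

import org.apache.commons.lang3.tuple.Pair;

public class MemoKeyDemo {
    public static void main(String[] args) {
        // Two pairs with equal members are equal, so a revisit is detected.
        Set<Pair<Long, Long>> visited = new HashSet<>();
        visited.add(Pair.of(42L, 7L));
        System.out.println(visited.contains(Pair.of(42L, 7L))); // true
        System.out.println(visited.contains(Pair.of(42L, 8L))); // false
    }
}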
Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.tuple.Pair in project asterixdb by apache.
The class FramewriterTest, method createWriters.
/**
 * @return a list of writers to test; these writers can be of the same type but behave differently based on the included mocks
 * @throws HyracksDataException
 */
public IFrameWriter[] createWriters() throws HyracksDataException {
    ArrayList<BTreeSearchOperatorNodePushable> writers = new ArrayList<>();
    Pair<IIndexDataflowHelperFactory, ISearchOperationCallbackFactory>[] pairs = pairs();
    IRecordDescriptorProvider[] recordDescProviders = mockRecDescProviders();
    int partition = 0;
    IHyracksTaskContext[] ctxs = mockIHyracksTaskContext();
    int[] keys = { 0 };
    boolean lowKeyInclusive = true;
    boolean highKeyInclusive = true;
    for (Pair<IIndexDataflowHelperFactory, ISearchOperationCallbackFactory> pair : pairs) {
        for (IRecordDescriptorProvider recordDescProvider : recordDescProviders) {
            for (IHyracksTaskContext ctx : ctxs) {
                BTreeSearchOperatorNodePushable writer = new BTreeSearchOperatorNodePushable(ctx, partition,
                        recordDescProvider.getInputRecordDescriptor(new ActivityId(new OperatorDescriptorId(0), 0), 0),
                        keys, keys, lowKeyInclusive, highKeyInclusive, keys, keys, pair.getLeft(), false, false, null,
                        pair.getRight(), false);
                writers.add(writer);
            }
        }
    }
    // Create the framewriter using the mocks
    return writers.toArray(new IFrameWriter[writers.size()]);
}
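The test enumerates writer configurations as a cross product, with each Pair bundling the two factory mocks that vary together. A simplified sketch of the same enumeration pattern, with placeholder element types standing in for the Hyracks factory interfaces:

import java.util.ArrayList;
import java.util.List;

import org.apache.commons.lang3.tuple.Pair;

public class CrossProductDemo {
    public static void main(String[] args) {
        // Pre-build all combinations as pairs, then iterate one flat list
        // instead of carrying two loop variables through the test body.
        List<Pair<String, Integer>> combos = new ArrayList<>();
        for (String left : new String[] { "helperA", "helperB" }) {
            for (int right : new int[] { 1, 2 }) {
                combos.add(Pair.of(left, right));
            }
        }
        combos.forEach(p -> System.out.println(p.getLeft() + " / " + p.getRight()));
    }
}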
Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.tuple.Pair in project beam by apache.
The class TranslationContext, method populateDAG.
public void populateDAG(DAG dag) {
    for (Map.Entry<String, Operator> nameAndOperator : this.operators.entrySet()) {
        dag.addOperator(nameAndOperator.getKey(), nameAndOperator.getValue());
    }
    int streamIndex = 0;
    for (Map.Entry<PCollection, Pair<OutputPortInfo, List<InputPortInfo>>> streamEntry : this.streams.entrySet()) {
        List<InputPortInfo> destInfo = streamEntry.getValue().getRight();
        InputPort[] sinks = new InputPort[destInfo.size()];
        for (int i = 0; i < sinks.length; i++) {
            sinks[i] = destInfo.get(i).port;
        }
        if (sinks.length > 0) {
            DAG.StreamMeta streamMeta =
                    dag.addStream("stream" + streamIndex++, streamEntry.getValue().getLeft().port, sinks);
            if (pipelineOptions.isParDoFusionEnabled()) {
                optimizeStreams(streamMeta, streamEntry);
            }
            for (InputPort port : sinks) {
                PCollection pc = streamEntry.getKey();
                Coder coder = pc.getCoder();
                if (pc.getWindowingStrategy() != null) {
                    coder = FullWindowedValueCoder.of(pc.getCoder(),
                            pc.getWindowingStrategy().getWindowFn().windowCoder());
                }
                Coder<Object> wrapperCoder = ApexStreamTuple.ApexStreamTupleCoder.of(coder);
                CoderAdapterStreamCodec streamCodec = new CoderAdapterStreamCodec(wrapperCoder);
                dag.setInputPortAttribute(port, PortContext.STREAM_CODEC, streamCodec);
            }
        }
    }
}
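Here each map value is a Pair holding one upstream output port on the left and the list of downstream input ports on the right, so a single stream can fan out to several consumers. A reduced sketch of that one-to-many wiring, with String placeholders instead of the Apex port types:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.lang3.tuple.Pair;

public class StreamMapDemo {
    public static void main(String[] args) {
        // One source port (left) feeds several sink ports (right),
        // mirroring Pair<OutputPortInfo, List<InputPortInfo>> above.
        Map<String, Pair<String, List<String>>> streams = new HashMap<>();
        streams.put("pcollection-1", Pair.of("sourcePort", List.of("sinkA", "sinkB")));
        streams.forEach((name, ports) ->
                System.out.println(name + ": " + ports.getLeft() + " -> " + ports.getRight()));
    }
}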
Use of org.apache.beam.repackaged.core.org.apache.commons.lang3.tuple.Pair in project disunity by ata4.
The class BundleInfo, method buildHeaderTable.
private Table<Integer, Integer, Object> buildHeaderTable(BundleHeader header) {
    TableBuilder table = new TableBuilder();
    table.row("Field", "Value");
    table.row("signature", header.signature());
    table.row("streamVersion", header.streamVersion());
    table.row("unityVersion", header.unityVersion());
    table.row("unityRevision", header.unityRevision());
    table.row("minimumStreamedBytes", header.minimumStreamedBytes());
    table.row("headerSize", header.headerSize());
    table.row("numberOfLevelsToDownload", header.numberOfLevelsToDownload());
    table.row("numberOfLevels", header.numberOfLevels());
    List<Pair<Long, Long>> levelByteEnds = header.levelByteEnd();
    for (int i = 0; i < levelByteEnds.size(); i++) {
        Pair<Long, Long> levelByteEnd = levelByteEnds.get(i);
        table.row("levelByteEnd[" + i + "][0]", levelByteEnd.getLeft());
        table.row("levelByteEnd[" + i + "][1]", levelByteEnd.getRight());
    }
    if (header.streamVersion() >= 2) {
        table.row("completeFileSize", header.completeFileSize());
    }
    if (header.streamVersion() >= 3) {
        table.row("dataHeaderSize", header.dataHeaderSize());
    }
    return table.get();
}
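levelByteEnd() returns one Pair of offsets per level, and the loop flattens each pair into two table rows. A standalone sketch with made-up offsets; reading the two components as compressed and uncompressed end positions is an assumption for illustration, not taken from the disunity source:

import java.util.List;

import org.apache.commons.lang3.tuple.Pair;

public class LevelByteEndDemo {
    public static void main(String[] args) {
        // Hypothetical offsets; left/right are assumed to be the compressed
        // and uncompressed end positions of each level.
        List<Pair<Long, Long>> levelByteEnds = List.of(Pair.of(1024L, 4096L), Pair.of(2048L, 8192L));
        for (int i = 0; i < levelByteEnds.size(); i++) {
            Pair<Long, Long> end = levelByteEnds.get(i);
            System.out.println("levelByteEnd[" + i + "][0] = " + end.getLeft());
            System.out.println("levelByteEnd[" + i + "][1] = " + end.getRight());
        }
    }
}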