Use of com.jopdesign.common.code.ExecutionContext in project jop (jop-devel).
From class WCETAnalysis, method exploreCacheAnalysis:
/**
 * Experimental exploration of the segment-based method cache analysis: builds a
 * {@link Segment} for every scope in the call graph, counts the distinct cache
 * blocks it uses/accesses, and prints per-scope block-count statistics plus the
 * accumulated LP solver time.
 *
 * NOTE(review): this method is marked unused and contains "|| true" overrides;
 * it appears to be kept around as an experiment driver.
 *
 * @throws InvalidFlowFactException presumably propagated from the block-counting
 *         analysis calls below -- TODO confirm which callee actually throws it
 */
@SuppressWarnings("unused")
private void exploreCacheAnalysis() throws InvalidFlowFactException {
// Segment Cache Analysis: Experiments
MethodCacheAnalysis mca = new MethodCacheAnalysis(wcetTool);
/* iterate the scope graph (currently: the call graph); the comment used to say
 * "top down" but this requests the REVERSE topological order -- NOTE(review):
 * confirm the intended direction */
TopologicalOrderIterator<ExecutionContext, ContextEdge> iter = wcetTool.getCallGraph().reverseTopologicalOrder();
// Reset solver-time accounting so the report below covers only this exploration.
LpSolveWrapper.resetSolverTime();
long blocks = 0;
long start = System.nanoTime();
while (iter.hasNext()) {
ExecutionContext scope = iter.next();
Segment segment = Segment.methodSegment(scope.getMethodInfo(), scope.getCallString(), wcetTool, wcetTool.getCallstringLength(), wcetTool);
int availBlocks = wcetTool.getWCETProcessorModel().getMethodCache().getNumBlocks();
// total: cheap used-blocks count; distinctApprox/distinct: LP-based counts, -1 = not computed
long total, distinctApprox = -1, distinct = -1;
blocks = total = mca.countDistinctBlocksUsed(segment);
// NOTE(review): "|| true" forces this branch unconditionally -- looks like a
// debugging/experiment override; confirm before removing.
if (total > availBlocks || true) {
try {
// Relaxed (approximate) LP first; it is cheaper than the exact ILP below.
blocks = distinctApprox = mca.countDistinctBlocksAccessed(segment, false);
// NOTE(review): second "|| true" override -- the exact count is always computed here.
if (blocks > availBlocks && blocks < availBlocks * 2 || true) {
blocks = distinct = mca.countDistinctBlocksAccessed(segment, true);
}
} catch (LpSolveException e) {
// distinctApprox >= 0 means the relaxed LP succeeded and the exact ILP failed.
System.err.println((distinctApprox >= 0 ? "I" : "Relaxed ") + "LP Problem too difficult, giving up: " + e);
}
}
System.out.println(String.format("block-count < %2d [%2d,%2d,%2d] for %-30s @ %s", blocks, total, distinctApprox, distinct, scope.getMethodInfo().getFQMethodName(), scope.getCallString().toStringVerbose(false)));
}
long stop = System.nanoTime();
// NOTE(review): 'blocks' here is only the value from the LAST scope iterated,
// not an aggregate -- confirm that is what the report is meant to show.
reportSpecial("block-count", WcetCost.totalCost(blocks), start, stop, LpSolveWrapper.getSolverTime());
System.out.println("solver-time: " + LpSolveWrapper.getSolverTime());
}
Use of com.jopdesign.common.code.ExecutionContext in project jop (jop-devel).
From class PhaseExecutor, method dumpCallgraph:
/////////////////////////////////////////////////////////////////////////////////////
// Dump Callgraph
/////////////////////////////////////////////////////////////////////////////////////

/**
 * Export the call graph to .dot files, split into three separate graphs
 * (application roots, class initializers, JVM-internal roots) to keep each
 * dump readable. Does nothing if both dump options are turned off.
 *
 * @param graphName base name used for the generated .dot files
 */
public void dumpCallgraph(String graphName) {
    if (getDebugConfig().getOption(DUMP_CALLGRAPH) == CallGraph.DUMPTYPE.off
            && getDebugConfig().getOption(DUMP_JVM_CALLGRAPH) == CallGraph.DUMPTYPE.off) {
        return;
    }
    try {
        // Dumping everything in one graph is too much; partition the roots instead.
        Set<ExecutionContext> applicationRoots = new LinkedHashSet<ExecutionContext>();
        Set<ExecutionContext> jvmRoots = new LinkedHashSet<ExecutionContext>();
        Set<ExecutionContext> classInitRoots = new LinkedHashSet<ExecutionContext>();

        // Collect the names of all classes belonging to the JVM / native layer.
        Set<String> jvmClassNames = new LinkedHashSet<String>();
        if (appInfo.getProcessorModel() != null) {
            jvmClassNames.addAll(appInfo.getProcessorModel().getJVMClasses());
            jvmClassNames.addAll(appInfo.getProcessorModel().getNativeClasses());
        }

        CallGraph graph = appInfo.getCallGraph();
        for (ExecutionContext root : graph.getRootNodes()) {
            MethodInfo rootMethod = root.getMethodInfo();
            if (rootMethod.getMethodSignature().equals(ClinitOrder.clinitSig)) {
                classInitRoots.add(root);
                continue;
            }
            // Second test also catches Runnables like Scheduler and RtThread,
            // which belong to the JVM classes.
            if (jvmClassNames.contains(rootMethod.getClassName())
                    || appInfo.isJVMThread(rootMethod.getClassInfo())) {
                jvmRoots.add(root);
                continue;
            }
            applicationRoots.add(root);
        }

        OptionGroup debug = getDebugConfig();
        // TODO to keep the CG size down, we could add options to exclude methods (like '<init>') or packages
        // from dumping and skip dumping methods reachable only over excluded methods
        graph.dumpCallgraph(getConfig(), graphName, "app", applicationRoots, debug.getOption(DUMP_CALLGRAPH), false);
        graph.dumpCallgraph(getConfig(), graphName, "clinit", classInitRoots, debug.getOption(DUMP_CALLGRAPH), false);
        graph.dumpCallgraph(getConfig(), graphName, "jvm", jvmRoots, debug.getOption(DUMP_JVM_CALLGRAPH), !debug.getOption(DUMP_NOIM_CALLS));
    } catch (IOException e) {
        throw new AppInfoError("Unable to export to .dot file", e);
    }
}
Use of com.jopdesign.common.code.ExecutionContext in project jop (jop-devel).
From class ExecFrequencyAnalysis, method inline:
/**
 * Update the execution frequencies after inlining.
 * This must be called after the underlying callgraph has been updated!
 *
 * @param invokeSite the inlined invokesite.
 * @param invokee the inlined method.
 * @param newInvokeSites the set of new invokesites in the invoker
 */
public void inline(InvokeSite invokeSite, MethodInfo invokee, Set<InvokeSite> newInvokeSites) {
// Roots for the frequency-update pass below: all callgraph nodes that are new
// due to the inlining (their callstring ends in one of the new invoke sites).
List<ExecutionContext> queue = new ArrayList<ExecutionContext>();
for (ExecutionContext context : callGraph.getNodes(invokeSite.getInvoker())) {
for (ExecutionContext child : callGraph.getChildren(context)) {
if (child.getCallString().isEmpty() && child.getMethodInfo().equals(invokee)) {
// there can be at most one such node in the graph.. remove the total exec count of the
// inlined invokesite
nodeCount.put(child, nodeCount.get(child) - getExecCount(invokeSite, invokee));
} else if (!child.getCallString().isEmpty() && newInvokeSites.contains(child.getCallString().top())) {
// This is a new node, sum up the execution counts of all invokesite instances
addExecCount(child, getExecCount(context, child.getCallString().top().getInstructionHandle()));
queue.add(child);
}
}
}
// update exec frequencies for all new nodes. A node is new if it contains one of the new invoke sites
// We do not need to remove exec frequencies, since all nodes containing the old invokesite are now no
// longer in the callgraph. We could however remove those nodes from our data structures in a
// separate step before the callgraph is updated.
// To do this, we create a temporary subgraph containing all new nodes and no back edges, and then traverse
// it in topological order
// TODO if the callgraph is compressed, we need to look down up to callstringLength for new nodes!
DirectedGraph<ExecutionContext, ContextEdge> dag = GraphUtils.copyGraph(new InlineEdgeProvider(newInvokeSites), callGraph.getEdgeFactory(), queue, false);
updateExecCounts(dag);
// Despite all that is going on, the only *method* for which something changes in total is the inlined invokee
changeSet.add(invokee);
}
Use of com.jopdesign.common.code.ExecutionContext in project jop (jop-devel).
From class ExecFrequencyAnalysis, method updateChilds:
/**
 * Propagate the execution count of a callgraph node down to all of its children:
 * every child reached via an invoke site is credited with the parent's node count
 * multiplied by the execution frequency of that invoke site's instruction.
 *
 * @param context the parent callgraph node whose children are updated
 */
private void updateChilds(ExecutionContext context) {
    long parentCount = nodeCount.get(context);
    for (Map.Entry<InvokeSite, Set<ExecutionContext>> bySite : callGraph.getChildsPerInvokeSite(context).entrySet()) {
        // parent count scaled by how often this invoke site executes per parent run
        long childCount = parentCount * getExecFrequency(context, bySite.getKey().getInstructionHandle());
        for (ExecutionContext child : bySite.getValue()) {
            addExecCount(child, childCount);
        }
    }
}
Use of com.jopdesign.common.code.ExecutionContext in project jop (jop-devel).
From class MethodCacheAnalysis, method getAllFitChangeCosts:
/**
 * Estimate the cache-cost delta caused by a code modification for the analyses
 * that depend on the "all-fit" classification (ALWAYS_MISS_HIT, MOST_ONCE,
 * ALL_FIT_REGIONS). Returns 0 for ALWAYS_HIT / ALWAYS_MISS, where a codesize
 * change does not affect the classification.
 *
 * Fixes in this revision:
 * - removed the unused local {@code Map<MethodInfo, Integer> deltaExec}
 * - widened the {@code deltaCount} and {@code regionCosts} accumulators to
 *   {@code long}: both accumulate long-valued quantities
 *   ({@code ecp.getExecCount(..)}, {@code cache.getMissPenalty(..)}) and the
 *   compound assignment {@code int += long} silently truncates (JLS 15.26.2).
 *
 * @param ecp          provider of execution counts for invoke sites and contexts
 * @param modification the proposed code change (method, codesize delta, removed invokees)
 * @param deltaBlocks  change of the method's size in cache blocks
 * @return the estimated change in cache miss costs (may be negative if code shrinks)
 */
private long getAllFitChangeCosts(ExecFrequencyProvider ecp, CodeModification modification, int deltaBlocks) {
if (analysisType == AnalysisType.ALWAYS_HIT || analysisType == AnalysisType.ALWAYS_MISS) {
return 0;
}
int deltaBytes = modification.getDeltaLocalCodesize();
MethodInfo method = modification.getMethod();
// for ALWAYS_MISS_HIT or MOST_ONCE we need to find out what has changed for all-fit
Set<MethodInfo> changes = findClassificationChanges(method, deltaBlocks, modification.getRemovedInvokees(), false);
AppInfo appInfo = AppInfo.getSingleton();
// In all nodes where we have changes, we need to sum up the new costs
long deltaCosts = 0;
for (MethodInfo node : changes) {
// but all invokes in the method are now no longer always-hit/-miss
for (InvokeSite invokeSite : node.getCode().getInvokeSites()) {
// Note: this is very similar to getInvokeReturnCacheCosts(invokeSite), but we cannot use
// this here, because that method uses allFit and does not honor our 'virtual' codesize change
int size = 0;
for (MethodInfo impl : appInfo.findImplementations(invokeSite)) {
size = Math.max(size, getMethodSize(impl));
}
size = MiscUtils.bytesToWords(size);
int sizeInvoker = getMethodSize(invokeSite.getInvoker());
sizeInvoker = MiscUtils.bytesToWords(sizeInvoker);
long invokeCosts = cache.getMissPenaltyOnInvoke(size, invokeSite.getInvokeInstruction());
long returnCosts = cache.getMissPenaltyOnReturn(sizeInvoker, invokeSite.getInvokeeRef().getDescriptor().getType());
long count = ecp.getExecCount(invokeSite);
if (analysisType == AnalysisType.ALL_FIT_REGIONS) {
// for this analysis we already have one miss in the original cost estimation
count--;
}
deltaCosts += count * (invokeCosts + returnCosts);
}
}
// if the code increased, the classification changed from always-hit to always-miss ..
long costs = deltaBytes > 0 ? deltaCosts : -deltaCosts;
if (analysisType == AnalysisType.ALL_FIT_REGIONS) {
// find out how many additional persistent cache misses we have
// find out border of new all-fit region
// long, not int: getExecCount returns long and 'int += long' would truncate silently
long deltaCount = 0;
Set<ExecutionContext> border = new LinkedHashSet<ExecutionContext>();
if (deltaBlocks < 0) {
throw new AppInfoError("Not implemented");
} else {
for (MethodInfo miss : changes) {
for (ExecutionContext context : callGraph.getNodes(miss)) {
for (ExecutionContext invokee : callGraph.getChildren(context)) {
// not all-fit if in changeset
if (changes.contains(invokee.getMethodInfo()))
continue;
// we ignore native stuff
if (invokee.getMethodInfo().isNative())
continue;
// invokee is all-fit
if (border.add(invokee)) {
deltaCount += ecp.getExecCount(invokee);
}
}
}
}
// remove old miss count
deltaCount -= getPersistentMisses(ecp, border);
}
// TODO this is not quite correct: instead of joining the reachable sets and multiplying
// with the delta count for the whole region, we should:
// - for every node in the reachable sets of the new border, sum up exec-counts of border nodes
// which contain that node in the reachable set
// - for every node in the reachable sets of the old border, subtract the exec counts of those border nodes
// - sum up invoke miss costs times calculates delta counts per node
// find out cache miss costs of new all-fit region
// long, not int: getMissPenalty returns long (cf. getMissPenaltyOnInvoke/-Return above)
long regionCosts = 0;
Set<MethodInfo> visited = new LinkedHashSet<MethodInfo>();
for (ExecutionContext context : border) {
for (MethodInfo reachable : reachableMethods.get(context)) {
if (visited.add(reachable)) {
regionCosts += cache.getMissPenalty(reachable.getCode().getNumberOfWords(), cache.isLRU());
}
}
}
costs += deltaCount * regionCosts;
}
return costs;
}
Aggregations