Use of soot.jimple.spark.geom.dataRep.PlainConstraint in project soot by Sable.
The class GeomPointsTo, method finalizeInternalData.
/**
 * Post-processes the analysis results:
 * 1. Update the call graph;
 * 2. Eliminate the pointers, objects, and constraints related to the unreachable code.
 *
 * Side effects: prunes {@code allocations}, {@code pointers}, and {@code constraints},
 * then compacts their ID spaces via {@code reassign()}.
 */
private void finalizeInternalData() {
    // Compute the set of reachable functions after the points-to analysis
    markReachableMethods();

    // Clean the unreachable objects: drop every allocation site whose enclosing
    // method is no longer registered in the reachable-function map (func2int).
    for (Iterator<IVarAbstraction> it = allocations.iterator(); it.hasNext(); ) {
        IVarAbstraction po = it.next();
        AllocNode obj = (AllocNode) po.getWrappedNode();
        SootMethod sm = obj.getMethod();
        // sm == null means the allocation has no enclosing method; keep it.
        if (sm != null && !func2int.containsKey(sm))
            it.remove();
    }

    // Clean the unreachable pointers.
    // NOTE: ArrayList instead of the legacy synchronized Vector — this is a
    // single-threaded scratch buffer reused (cleared) across loop iterations.
    // It must stay final because the anonymous P2SetVisitor below captures it.
    final java.util.List<AllocNode> removeSet = new java.util.ArrayList<AllocNode>();
    for (Iterator<IVarAbstraction> it = pointers.iterator(); it.hasNext(); ) {
        IVarAbstraction pn = it.next();
        // Is this pointer obsoleted?
        Node vn = pn.getWrappedNode();
        SootMethod sm = null;
        if (vn instanceof LocalVarNode) {
            sm = ((LocalVarNode) vn).getMethod();
        } else if (vn instanceof AllocDotField) {
            sm = ((AllocDotField) vn).getBase().getMethod();
        }
        if (sm != null && !func2int.containsKey(sm)) {
            // The declaring method is unreachable: drop the pointer entirely,
            // including its points-to results on both the geometric and SPARK sides.
            pn.deleteAll();
            vn.discardP2Set();
            it.remove();
            continue;
        }
        // Only representatives own points-to results; merged aliases are skipped.
        if (pn.getRepresentative() != pn)
            continue;
        removeSet.clear();
        if (pn.hasPTResult()) {
            // Geometric result available: we remove the useless shapes or objects.
            Set<AllocNode> objSet = pn.get_all_points_to_objects();
            for (AllocNode obj : objSet) {
                IVarAbstraction po = consG.get(obj);
                if (!po.reachable() || pn.isDeadObject(obj)) {
                    removeSet.add(obj);
                }
            }
            // Deferred removal: mutating the set while iterating it is unsafe.
            for (AllocNode obj : removeSet)
                pn.remove_points_to(obj);
            pn.drop_duplicates();
        } else {
            // SPARK-only node: rebuild its points-to set keeping only reachable objects.
            PointsToSetInternal pts = vn.getP2Set();
            pts.forall(new P2SetVisitor() {
                @Override
                public void visit(Node n) {
                    IVarAbstraction pan = findInternalNode(n);
                    // The removeSet is misused as a "keep" set here: it collects
                    // the reachable objects that survive into the rebuilt set.
                    if (pan.reachable())
                        removeSet.add((AllocNode) n);
                }
            });
            pts = vn.makeP2Set();
            for (AllocNode an : removeSet)
                pts.add(an);
        }
    }

    // Clean the useless constraints: both endpoints must be reachable and must be
    // attributable to a known method.
    for (Iterator<PlainConstraint> cIt = constraints.iterator(); cIt.hasNext(); ) {
        PlainConstraint cons = cIt.next();
        IVarAbstraction lhs = cons.getLHS();
        IVarAbstraction rhs = cons.getRHS();
        if (!lhs.reachable() || !rhs.reachable()
            || getMethodIDFromPtr(lhs) == Constants.UNKNOWN_FUNCTION
            || getMethodIDFromPtr(rhs) == Constants.UNKNOWN_FUNCTION) {
            cIt.remove();
        }
    }

    // We reassign the IDs to the pointers, objects and constraints
    pointers.reassign();
    allocations.reassign();
    constraints.reassign();
}
Use of soot.jimple.spark.geom.dataRep.PlainConstraint in project soot by Sable.
The class GeomPointsTo, method mergeLocalVariables.
/**
 * As pointed out by the single entry graph contraction, temporary variables incur high
 * redundancy in points-to relations. Find and eliminate the redundancies as early as possible.
 *
 * Methodology:
 * If q has unique incoming edge p -> q, p and q are both local to the same function, and
 * they have the same type, we merge them.
 */
private void mergeLocalVariables() {
    // In-degree counter for every pointer, indexed by its dense ID.
    int[] inDegree = new int[pointers.size()];

    // Pass 1: count how many ways each local pointer can be assigned.
    for (PlainConstraint cons : constraints) {
        IVarAbstraction src = cons.getLHS();
        IVarAbstraction dst = cons.getRHS();
        if (cons.type == Constants.NEW_CONS || cons.type == Constants.ASSIGN_CONS) {
            inDegree[dst.id]++;
        } else if (cons.type == Constants.LOAD_CONS) {
            // A load contributes one potential incoming edge per object the base may point to.
            inDegree[dst.id] += src.getWrappedNode().getP2Set().size();
        }
    }

    // Pass 2: delete the assignments that only duplicate points-to information.
    Iterator<PlainConstraint> consIt = constraints.iterator();
    while (consIt.hasNext()) {
        PlainConstraint cons = consIt.next();
        if (cons.type != Constants.ASSIGN_CONS)
            continue;
        IVarAbstraction src = cons.getLHS();
        IVarAbstraction dst = cons.getRHS();
        Node srcNode = src.getWrappedNode();
        Node dstNode = dst.getWrappedNode();
        if (!(srcNode instanceof LocalVarNode) || !(dstNode instanceof LocalVarNode))
            continue;
        SootMethod srcMethod = ((LocalVarNode) srcNode).getMethod();
        SootMethod dstMethod = ((LocalVarNode) dstNode).getMethod();
        // Merge only when both locals belong to the same method, the receiver has a
        // unique incoming edge, and — most importantly — the two share the same type.
        if (srcMethod == dstMethod
            && inDegree[dst.id] == 1
            && srcNode.getType() == dstNode.getType()) {
            dst.merge(src);
            consIt.remove();
        }
    }

    // Pass 3: rewrite the surviving constraints with the merge representatives.
    for (PlainConstraint cons : constraints) {
        IVarAbstraction src = cons.getLHS();
        IVarAbstraction dst = cons.getRHS();
        if (cons.type == Constants.NEW_CONS) {
            cons.setRHS(dst.getRepresentative());
        } else if (cons.type == Constants.ASSIGN_CONS
            || cons.type == Constants.LOAD_CONS
            || cons.type == Constants.STORE_CONS) {
            cons.setLHS(src.getRepresentative());
            cons.setRHS(dst.getRepresentative());
        }
    }
}
Use of soot.jimple.spark.geom.dataRep.PlainConstraint in project soot by Sable.
The class GeomPointsTo, method preprocess.
/**
 * Read in the program facts generated by SPARK.
 * We also construct our own call graph and pointer variables.
 *
 * Side effects: populates func2int/int2func, call_graph, edgeMapping, the
 * pointers/allocations/constraints containers, and the per-function scratch arrays.
 */
private void preprocess() {
int id;
int s, t;
// Build the call graph
// The extra slot (+1) reserves index SUPER_MAIN for the artificial root node.
n_func = Scene.v().getReachableMethods().size() + 1;
call_graph = new CgEdge[n_func];
n_calls = 0;
n_reach_spark_user_methods = 0;
id = 1;
QueueReader<MethodOrMethodContext> smList = Scene.v().getReachableMethods().listener();
CallGraph soot_callgraph = Scene.v().getCallGraph();
// Assign every reachable method a dense 1-based integer ID and keep the
// bidirectional method <-> ID mapping.
while (smList.hasNext()) {
final SootMethod func = smList.next().method();
func2int.put(func, id);
int2func.put(id, func);
/*
* We cannot identify all entry methods since some entry methods call themselves.
* In that case, the Soot CallGraph.isEntryMethod() function returns false.
*/
if (soot_callgraph.isEntryMethod(func) || func.isEntryMethod()) {
// Hang every entry method under the artificial SUPER_MAIN root edge list.
CgEdge p = new CgEdge(Constants.SUPER_MAIN, id, null, call_graph[Constants.SUPER_MAIN]);
call_graph[Constants.SUPER_MAIN] = p;
n_calls++;
}
if (!func.isJavaLibraryMethod())
++n_reach_spark_user_methods;
id++;
}
// Next, we scan all the call edges and rebuild the call graph in our own vocabulary
QueueReader<Edge> edgeList = Scene.v().getCallGraph().listener();
while (edgeList.hasNext()) {
Edge edge = edgeList.next();
// Class-initializer (<clinit>) edges are excluded from the rebuilt call graph.
if (edge.isClinit()) {
continue;
}
SootMethod src_func = edge.src();
SootMethod tgt_func = edge.tgt();
// NOTE(review): auto-unboxing assumes both endpoints were registered in
// func2int above (i.e. both are reachable); otherwise this throws NPE — confirm.
s = func2int.get(src_func);
t = func2int.get(tgt_func);
// Create a new call edge in our own format
CgEdge p = new CgEdge(s, t, edge, call_graph[s]);
call_graph[s] = p;
edgeMapping.put(edge, p);
// We collect callsite information
Stmt callsite = edge.srcStmt();
if (edge.isThreadRunCall() || edge.kind().isExecutor() || edge.kind().isAsyncTask()) {
// We don't modify the treatment to the thread run() calls
thread_run_callsites.add(callsite);
} else if (edge.isInstance() && !edge.isSpecial()) {
// We try to refine the virtual callsites (virtual + interface) with multiple call targets
InstanceInvokeExpr expr = (InstanceInvokeExpr) callsite.getInvokeExpr();
if (expr.getMethodRef().getSignature().contains("<java.lang.Thread: void start()>")) {
// It is a thread start function
thread_run_callsites.add(callsite);
} else {
// Remember the receiver variable so the callsite can be refined later.
p.base_var = findLocalVarNode(expr.getBase());
// Only polymorphic callsites (more than one target) are queued for refinement.
if (SootInfo.countCallEdgesForCallsite(callsite, true) > 1 && p.base_var != null) {
multiCallsites.add(callsite);
}
}
}
++n_calls;
}
// We build the wrappers for all the pointers built by SPARK
for (Iterator<VarNode> it = getVarNodeNumberer().iterator(); it.hasNext(); ) {
VarNode vn = it.next();
IVarAbstraction pn = makeInternalNode(vn);
pointers.add(pn);
}
for (Iterator<AllocDotField> it = getAllocDotFieldNodeNumberer().iterator(); it.hasNext(); ) {
AllocDotField adf = it.next();
// Some allocdotfield is invalid, we check and remove them
SparkField field = adf.getField();
if (field instanceof SootField) {
// This is an instance field of a class
Type decType = ((SootField) field).getDeclaringClass().getType();
Type baseType = adf.getBase().getType();
// baseType must be a sub type of decType
if (!castNeverFails(baseType, decType))
continue;
}
IVarAbstraction pn = makeInternalNode(adf);
pointers.add(pn);
}
for (Iterator<AllocNode> it = getAllocNodeNumberer().iterator(); it.hasNext(); ) {
AllocNode obj = it.next();
IVarAbstraction pn = makeInternalNode(obj);
allocations.add(pn);
}
// The address constraints, new obj -> p
for (Object object : allocSources()) {
IVarAbstraction obj = makeInternalNode((AllocNode) object);
Node[] succs = allocLookup((AllocNode) object);
for (Node element0 : succs) {
PlainConstraint cons = new PlainConstraint();
IVarAbstraction p = makeInternalNode(element0);
cons.expr.setPair(obj, p);
cons.type = Constants.NEW_CONS;
constraints.add(cons);
}
}
// The assign constraints, p -> q
// The pair object is reused across iterations purely as a mutable lookup key.
Pair<Node, Node> intercall = new Pair<Node, Node>();
for (Object object : simpleSources()) {
IVarAbstraction p = makeInternalNode((VarNode) object);
Node[] succs = simpleLookup((VarNode) object);
for (Node element0 : succs) {
PlainConstraint cons = new PlainConstraint();
IVarAbstraction q = makeInternalNode(element0);
cons.expr.setPair(p, q);
cons.type = Constants.ASSIGN_CONS;
intercall.setPair((VarNode) object, element0);
// Tag the constraint with the call edges this assignment represents (if any).
cons.interCallEdges = lookupEdgesForAssignment(intercall);
constraints.add(cons);
}
}
// Release helper structures that are no longer needed.
intercall = null;
assign2edges.clear();
// The load constraints, p.f -> q
for (Object object : loadSources()) {
FieldRefNode frn = (FieldRefNode) object;
IVarAbstraction p = makeInternalNode(frn.getBase());
Node[] succs = loadLookup(frn);
for (Node element0 : succs) {
PlainConstraint cons = new PlainConstraint();
IVarAbstraction q = makeInternalNode(element0);
cons.f = frn.getField();
cons.expr.setPair(p, q);
cons.type = Constants.LOAD_CONS;
constraints.add(cons);
}
}
// The store constraints, p -> q.f
for (Object object : storeSources()) {
IVarAbstraction p = makeInternalNode((VarNode) object);
Node[] succs = storeLookup((VarNode) object);
for (Node element0 : succs) {
PlainConstraint cons = new PlainConstraint();
FieldRefNode frn = (FieldRefNode) element0;
IVarAbstraction q = makeInternalNode(frn.getBase());
cons.f = frn.getField();
cons.expr.setPair(p, q);
cons.type = Constants.STORE_CONS;
constraints.add(cons);
}
}
n_init_constraints = constraints.size();
// Initialize other stuff
// Per-function scratch arrays; presumably used by the SCC-based call graph
// contraction and context-size computation — confirm against the callers.
low_cg = new int[n_func];
vis_cg = new int[n_func];
rep_cg = new int[n_func];
indeg_cg = new int[n_func];
scc_size = new int[n_func];
block_num = new int[n_func];
context_size = new long[n_func];
max_context_size_block = new long[n_func];
}
Use of soot.jimple.spark.geom.dataRep.PlainConstraint in project soot by Sable.
The class OfflineProcessor, method buildDependenceGraph.
/**
 * The dependence graph reverses the assignment relations. E.g., p = q => p -> q
 * Note that, the assignments that are eliminated by local variable merging should be used here.
 * Otherwise, the graph would be erroneously disconnected.
 */
protected void buildDependenceGraph() {
    for (PlainConstraint cons : geomPTA.constraints) {
        // In our constraint representation, lhs -> rhs means rhs = lhs.
        // Declared final so the anonymous visitors below may capture them.
        final IVarAbstraction lhs = cons.getLHS();
        final IVarAbstraction rhs = cons.getRHS();
        final SparkField field = cons.f;

        // Now we use this constraint for graph construction
        switch (cons.type) {
            case Constants.ASSIGN_CONS:
                // rhs = lhs: a single direct dependence edge.
                add_graph_edge(rhs.id, lhs.id);
                break;

            case Constants.LOAD_CONS: {
                // rhs = lhs.f: one edge from rhs to o.f for every object o the base may reach.
                IVarAbstraction baseRep = lhs.getRepresentative();
                if (!baseRep.hasPTResult()) {
                    // No geometric result — fall back on the SPARK points-to set.
                    lhs.getWrappedNode().getP2Set().forall(new P2SetVisitor() {
                        @Override
                        public void visit(Node n) {
                            IVarAbstraction padf = geomPTA.findInstanceField((AllocNode) n, field);
                            if (padf == null || !padf.reachable())
                                return;
                            off_graph_edge e = add_graph_edge(rhs.id, padf.id);
                            e.base_var = lhs;
                        }
                    });
                } else {
                    // Use the geometric points-to result.
                    for (AllocNode o : baseRep.get_all_points_to_objects()) {
                        IVarAbstraction padf = geomPTA.findInstanceField(o, field);
                        if (padf == null || !padf.reachable())
                            continue;
                        off_graph_edge e = add_graph_edge(rhs.id, padf.id);
                        e.base_var = lhs;
                    }
                }
                break;
            }

            case Constants.STORE_CONS: {
                // rhs.f = lhs: one edge from o.f to lhs for every object o the base may reach.
                IVarAbstraction baseRep = rhs.getRepresentative();
                if (!baseRep.hasPTResult()) {
                    lhs.getWrappedNode();
                    rhs.getWrappedNode().getP2Set().forall(new P2SetVisitor() {
                        @Override
                        public void visit(Node n) {
                            IVarAbstraction padf = geomPTA.findInstanceField((AllocNode) n, field);
                            if (padf == null || !padf.reachable())
                                return;
                            off_graph_edge e = add_graph_edge(padf.id, lhs.id);
                            e.base_var = rhs;
                        }
                    });
                } else {
                    // Use the geometric points-to result.
                    for (AllocNode o : baseRep.get_all_points_to_objects()) {
                        IVarAbstraction padf = geomPTA.findInstanceField(o, field);
                        if (padf == null || !padf.reachable())
                            continue;
                        off_graph_edge e = add_graph_edge(padf.id, lhs.id);
                        e.base_var = rhs;
                    }
                }
                break;
            }
        }
    }
}
Use of soot.jimple.spark.geom.dataRep.PlainConstraint in project soot by Sable.
The class OfflineProcessor, method distillConstraints.
/**
 * Eliminate the constraints that do not contribute points-to information to the seed pointers.
 * Prerequisite: dependence graph
 *
 * Outcome: sets cons.isActive on every constraint; inactive constraints can
 * be skipped by the subsequent solving phases.
 */
protected void distillConstraints() {
IVarAbstraction pn;
// Mark the pointers
computeReachablePts();
// Mark the constraints
for (PlainConstraint cons : geomPTA.constraints) {
// We only look at the receiver pointers
pn = cons.getRHS();
final SparkField field = cons.f;
// visitedFlag is an instance field: it carries the "keep this constraint"
// verdict and doubles as a mutable cell writable by the anonymous visitor below.
visitedFlag = false;
switch(cons.type) {
case Constants.NEW_CONS:
case Constants.ASSIGN_CONS:
case Constants.LOAD_CONS:
// For these forms the constraint matters iff the receiver itself will be updated.
visitedFlag = pn.willUpdate;
break;
case Constants.STORE_CONS:
/**
 * Interesting point in store constraint p.f = q:
 * For example, pts(p) = { o1, o2 };
 * If any of the o1.f and the o2.f (e.g. o1.f) will be updated, this constraint should be kept.
 * However, in the points-to analysis, we only assign to o1.f.
 */
pn = pn.getRepresentative();
if (pn.hasPTResult() == false) {
// No geometric result yet: walk the SPARK points-to set instead.
pn.getWrappedNode().getP2Set().forall(new P2SetVisitor() {
@Override
public void visit(Node n) {
// Early exit once one updatable instance field has been found.
if (visitedFlag)
return;
IVarAbstraction padf = geomPTA.findInstanceField((AllocNode) n, field);
if (padf == null || padf.reachable() == false)
return;
visitedFlag |= padf.willUpdate;
}
});
} else {
// Use the geometric points-to result
for (AllocNode o : pn.get_all_points_to_objects()) {
IVarAbstraction padf = geomPTA.findInstanceField((AllocNode) o, field);
if (padf == null || padf.reachable() == false)
continue;
visitedFlag |= padf.willUpdate;
// Stop scanning as soon as one updatable field keeps the constraint alive.
if (visitedFlag)
break;
}
}
break;
}
// Record the verdict computed above.
cons.isActive = visitedFlag;
}
}
Aggregations