Use of soot.jimple.spark.pag.AllocNode in the Soot project by Sable.
The class GeomPointsTo, method getMethodIDFromPtr.
/**
 * Looks up the integer ID of the function that encloses the given pointer's
 * wrapped PAG node.
 *
 * @param pn the pointer abstraction whose enclosing function is requested
 * @return the function ID registered in {@code func2int}; {@code Constants.SUPER_MAIN}
 *         when no enclosing method is known, or {@code Constants.UNKNOWN_FUNCTION}
 *         when the enclosing method was never visited in the call graph
 */
public int getMethodIDFromPtr(IVarAbstraction pn) {
  Node node = pn.getWrappedNode();
  // Resolve the enclosing method according to the concrete node kind.
  SootMethod enclosing = null;
  if (node instanceof AllocNode) {
    enclosing = ((AllocNode) node).getMethod();
  } else if (node instanceof LocalVarNode) {
    enclosing = ((LocalVarNode) node).getMethod();
  } else if (node instanceof AllocDotField) {
    enclosing = ((AllocDotField) node).getBase().getMethod();
  }
  if (enclosing == null || !func2int.containsKey(enclosing)) {
    return Constants.SUPER_MAIN;
  }
  int id = func2int.get(enclosing);
  // vis_cg[id] == 0 means the function was not reached during call-graph traversal.
  return vis_cg[id] == 0 ? Constants.UNKNOWN_FUNCTION : id;
}
Use of soot.jimple.spark.pag.AllocNode in the Soot project by Sable.
The class OfflineProcessor, method buildImpactGraph.
/**
 * The dependence graph will be destroyed and the impact graph will be built.
 * p = q means q impacts p. Therefore, we add an edge q -> p in the impact graph.
 */
protected void buildImpactGraph() {
// Reset the adjacency list of every variable before rebuilding the graph.
for (int i = 0; i < n_var; ++i) {
varGraph.set(i, null);
}
queue.clear();
for (PlainConstraint cons : geomPTA.constraints) {
// Constraints deactivated by earlier phases contribute no edges.
if (!cons.isActive)
continue;
final IVarAbstraction lhs = cons.getLHS();
final IVarAbstraction rhs = cons.getRHS();
final SparkField field = cons.f;
IVarAbstraction rep;
switch(cons.type) {
case Constants.NEW_CONS:
// We enqueue the pointers that are allocation result receivers
queue.add(rhs.id);
break;
case Constants.ASSIGN_CONS:
// Simple assignment: lhs impacts rhs, so edge lhs -> rhs.
add_graph_edge(lhs.id, rhs.id);
break;
case Constants.LOAD_CONS:
// Load through a field: each instance field reachable from lhs impacts rhs.
rep = lhs.getRepresentative();
if (rep.hasPTResult() == false) {
// No geometric result available yet; fall back to SPARK's points-to set.
lhs.getWrappedNode().getP2Set().forall(new P2SetVisitor() {
@Override
public void visit(Node n) {
IVarAbstraction padf = geomPTA.findInstanceField((AllocNode) n, field);
// Skip invalid or unreachable instance fields.
if (padf == null || padf.reachable() == false)
return;
add_graph_edge(padf.id, rhs.id);
}
});
} else {
// use geomPA
for (AllocNode o : rep.get_all_points_to_objects()) {
IVarAbstraction padf = geomPTA.findInstanceField((AllocNode) o, field);
if (padf == null || padf.reachable() == false)
continue;
add_graph_edge(padf.id, rhs.id);
}
}
break;
case Constants.STORE_CONS:
// Store through a field: lhs impacts each instance field reachable from rhs.
rep = rhs.getRepresentative();
if (rep.hasPTResult() == false) {
// No geometric result available yet; fall back to SPARK's points-to set.
rhs.getWrappedNode().getP2Set().forall(new P2SetVisitor() {
@Override
public void visit(Node n) {
IVarAbstraction padf = geomPTA.findInstanceField((AllocNode) n, field);
if (padf == null || padf.reachable() == false)
return;
add_graph_edge(lhs.id, padf.id);
}
});
} else {
// use geomPA
for (AllocNode o : rep.get_all_points_to_objects()) {
IVarAbstraction padf = geomPTA.findInstanceField((AllocNode) o, field);
if (padf == null || padf.reachable() == false)
continue;
add_graph_edge(lhs.id, padf.id);
}
}
break;
}
}
}
Use of soot.jimple.spark.pag.AllocNode in the Soot project by Sable.
The class HeapInsNode, method heap_sensitive_intersection.
/**
 * Query if this pointer and qv could point to the same object under any contexts.
 */
@Override
public boolean heap_sensitive_intersection(IVarAbstraction qv) {
  HeapInsNode other = (HeapInsNode) qv;
  // For every object this pointer may reference, test whether any of our
  // interval figures overlaps any of the other pointer's figures for it.
  for (AllocNode an : pt_objs.keySet()) {
    // Constant objects are excluded from the aliasing test.
    if (an instanceof ClassConstantNode || an instanceof StringConstantNode) {
      continue;
    }
    SegmentNode[] qt = other.find_points_to(an);
    if (qt == null) {
      continue;
    }
    SegmentNode[] pt = find_points_to(an);
    for (int i = 0; i < HeapInsIntervalManager.Divisions; ++i) {
      for (SegmentNode p = pt[i]; p != null; p = p.next) {
        for (int j = 0; j < HeapInsIntervalManager.Divisions; ++j) {
          for (SegmentNode q = qt[j]; q != null; q = q.next) {
            // Any single intersecting pair suffices.
            if (quick_intersecting_test(p, q)) {
              return true;
            }
          }
        }
      }
    }
  }
  return false;
}
Use of soot.jimple.spark.pag.AllocNode in the Soot project by Sable.
The class HeapInsNode, method do_before_propagation.
@Override
public void do_before_propagation() {
  // Merge freshly added points-to intervals and flow-edge intervals first.
  do_pts_interval_merge();
  do_flow_edge_interval_merge();
  // "this"-pointer filtering; see the corresponding comment in FullSensitiveNode.java.
  Node wrapped = getWrappedNode();
  if (!(wrapped instanceof LocalVarNode) || !((LocalVarNode) wrapped).isThisPtr()) {
    return;
  }
  SootMethod func = ((LocalVarNode) wrapped).getMethod();
  if (func.isConstructor()) {
    // We don't process the specialinvoke call edge
    return;
  }
  SootClass defClass = func.getDeclaringClass();
  Hierarchy typeHierarchy = Scene.v().getActiveHierarchy();
  // Explicit iterator so offending objects can be removed while iterating.
  Iterator<AllocNode> it = new_pts.keySet().iterator();
  while (it.hasNext()) {
    AllocNode obj = it.next();
    if (!(obj.getType() instanceof RefType)) {
      continue;
    }
    SootClass sc = ((RefType) obj.getType()).getSootClass();
    if (sc == defClass) {
      continue;
    }
    try {
      SootMethod dispatched = typeHierarchy.resolveConcreteDispatch(sc, func);
      if (dispatched != func) {
        it.remove();
        // Also preclude it from propagation again
        pt_objs.put(obj, (HeapInsIntervalManager) deadManager);
      }
    } catch (RuntimeException ignored) {
      // If the input program has a wrong type cast, resolveConcreteDispatch
      // fails and lands here; we simply ignore this error.
    }
  }
}
Use of soot.jimple.spark.pag.AllocNode in the Soot project by Sable.
The class HeapInsNode, method propagate.
/**
 * An efficient implementation of differential propagation: only combinations
 * involving a new points-to tuple or a new flow edge are (re)processed.
 */
@Override
public void propagate(GeomPointsTo ptAnalyzer, IWorklist worklist) {
int i, j;
AllocNode obj;
SegmentNode pts, pe, int_entry1[], int_entry2[];
HeapInsIntervalManager him1, him2;
HeapInsNode qn, objn;
boolean added, has_new_edges;
// We first build the new flow edges via the field dereferences
if (complex_cons != null) {
// Only newly discovered points-to tuples can create new flow edges.
for (Map.Entry<AllocNode, HeapInsIntervalManager> entry : new_pts.entrySet()) {
obj = entry.getKey();
int_entry1 = entry.getValue().getFigures();
for (PlainConstraint pcons : complex_cons) {
// Construct the two variables in assignment
objn = (HeapInsNode) ptAnalyzer.findAndInsertInstanceField(obj, pcons.f);
if (objn == null) {
// This combination of allocdotfield must be invalid
// This expression p.f also renders that p cannot point to obj, so we remove it
// We label this event and sweep the garbage later
pt_objs.put(obj, (HeapInsIntervalManager) deadManager);
entry.setValue((HeapInsIntervalManager) deadManager);
break;
}
if (objn.willUpdate == false) {
// the points-to information of the seed pointers
continue;
}
qn = (HeapInsNode) pcons.otherSide;
for (i = 0; i < HeapInsIntervalManager.Divisions; ++i) {
pts = int_entry1[i];
// Figures list new entries first, so stop at the first non-new figure.
while (pts != null && pts.is_new) {
switch(pcons.type) {
case Constants.STORE_CONS:
// pts.I2 may be zero, pts.L may be less than zero
if (qn.add_simple_constraint_3(objn, pcons.code == GeometricManager.ONE_TO_ONE ? pts.I1 : 0, pts.I2, pts.L < 0 ? -pts.L : pts.L))
worklist.push(qn);
break;
case Constants.LOAD_CONS:
// Load, pv.field -> qv
if (objn.add_simple_constraint_3(qn, pts.I2, pcons.code == GeometricManager.ONE_TO_ONE ? pts.I1 : 0, pts.L < 0 ? -pts.L : pts.L))
worklist.push(objn);
break;
}
pts = pts.next;
}
}
}
}
}
// Second stage: push points-to tuples to each flow-to target.
for (Map.Entry<HeapInsNode, HeapInsIntervalManager> entry1 : flowto.entrySet()) {
// First, we pick one flow-to figure
added = false;
qn = entry1.getKey();
him1 = entry1.getValue();
// Figure collection for the flows-to tuple
int_entry1 = him1.getFigures();
has_new_edges = him1.isThereUnprocessedFigures();
// New edges must be paired with ALL objects; old edges only with new objects.
Map<AllocNode, HeapInsIntervalManager> objs = (has_new_edges ? pt_objs : new_pts);
for (Map.Entry<AllocNode, HeapInsIntervalManager> entry2 : objs.entrySet()) {
// Second, we get the points-to intervals
obj = entry2.getKey();
him2 = entry2.getValue();
// Skip objects already marked dead by the field-dereference stage.
if (him2 == deadManager)
continue;
// Type filter: the object must be assignable to the target pointer's type.
if (!ptAnalyzer.castNeverFails(obj.getType(), qn.getWrappedNode().getType()))
continue;
// Figure collection for the points-to tuple
int_entry2 = him2.getFigures();
// Loop over all points-to figures
for (i = 0; i < HeapInsIntervalManager.Divisions; ++i) {
pts = int_entry2[i];
while (pts != null) {
// Without new edges, old tuples were handled in earlier rounds.
if (!has_new_edges && !pts.is_new)
break;
// Loop over all flows-to figures
for (j = 0; j < HeapInsIntervalManager.Divisions; ++j) {
pe = int_entry1[j];
while (pe != null) {
if (pts.is_new || pe.is_new) {
// Propagate this object
if (add_new_points_to_tuple(pts, pe, obj, qn))
added = true;
} else
break;
pe = pe.next;
}
}
pts = pts.next;
}
}
}
if (added)
worklist.push(qn);
// Now, we clean the new edges if necessary
if (has_new_edges)
him1.flush();
}
}
Aggregations