Use of soot.jimple.spark.geom.dataRep.PlainConstraint in project soot by Sable.
The class PtInsNodeGenerator, method initFlowGraph.
@Override
public void initFlowGraph(GeomPointsTo ptAnalyzer) {
int k;
int n_legal_cons;
int nf1, nf2;
int code;
CgEdge q;
IVarAbstraction my_lhs, my_rhs;
// Visit all the simple constraints
n_legal_cons = 0;
for (PlainConstraint cons : ptAnalyzer.constraints) {
if (!cons.isActive)
continue;
my_lhs = cons.getLHS().getRepresentative();
my_rhs = cons.getRHS().getRepresentative();
nf1 = ptAnalyzer.getMethodIDFromPtr(my_lhs);
nf2 = ptAnalyzer.getMethodIDFromPtr(my_rhs);
// Test how many globals are in this constraint
code = ((nf1 == Constants.SUPER_MAIN ? 1 : 0) << 1) | (nf2 == Constants.SUPER_MAIN ? 1 : 0);
switch(cons.type) {
case Constants.NEW_CONS:
// We directly add the objects to the points-to set
my_rhs.add_points_to_3((AllocNode) my_lhs.getWrappedNode(), nf2 == Constants.SUPER_MAIN ? 0 : 1, nf1 == Constants.SUPER_MAIN ? 0 : 1, nf2 == Constants.SUPER_MAIN ? ptAnalyzer.context_size[nf1] : ptAnalyzer.context_size[nf2]);
// Enqueue to the worklist
ptAnalyzer.getWorklist().push(my_rhs);
break;
case Constants.ASSIGN_CONS:
// The core part of any context sensitive algorithms
if (cons.interCallEdges != null) {
// Inter-procedural assignment
for (Edge sEdge : cons.interCallEdges) {
q = ptAnalyzer.getInternalEdgeFromSootEdge(sEdge);
if (q.is_obsoleted) {
continue;
}
if (nf2 == q.t) {
// The receiver is a local, while the sender is perhaps not
if (nf1 == Constants.SUPER_MAIN) {
my_lhs.add_simple_constraint_3(my_rhs, 0, q.map_offset, ptAnalyzer.max_context_size_block[q.s]);
} else {
// We should treat self-recursive calls specially
if (q.s == q.t) {
my_lhs.add_simple_constraint_3(my_rhs, 1, 1, ptAnalyzer.context_size[nf1]);
} else {
for (k = 0; k < ptAnalyzer.block_num[nf1]; ++k) {
my_lhs.add_simple_constraint_3(my_rhs, k * ptAnalyzer.max_context_size_block[nf1] + 1, q.map_offset, ptAnalyzer.max_context_size_block[nf1]);
}
}
}
} else {
if (q.s == q.t) {
my_lhs.add_simple_constraint_3(my_rhs, 1, 1, ptAnalyzer.context_size[nf2]);
} else {
for (k = 0; k < ptAnalyzer.block_num[nf2]; ++k) {
my_lhs.add_simple_constraint_3(my_rhs, q.map_offset, k * ptAnalyzer.max_context_size_block[nf2] + 1, ptAnalyzer.max_context_size_block[nf2]);
}
}
}
}
} else {
// Intra-procedural assignment.
// Assignments involving global variables also go here; by
// definition, global variables belong to SUPER_MAIN.
// In the Jimple IR, both sides cannot be global variables.
my_lhs.add_simple_constraint_3(my_rhs, nf1 == Constants.SUPER_MAIN ? 0 : 1, nf2 == Constants.SUPER_MAIN ? 0 : 1, nf1 == Constants.SUPER_MAIN ? ptAnalyzer.context_size[nf2] : ptAnalyzer.context_size[nf1]);
}
break;
case Constants.LOAD_CONS:
// lhs is always a local
// rhs = lhs.f
cons.code = full_convertor[code];
cons.otherSide = my_rhs;
my_lhs.put_complex_constraint(cons);
break;
case Constants.STORE_CONS:
// rhs is always a local
// rhs.f = lhs
cons.code = full_convertor[code];
cons.otherSide = my_lhs;
my_rhs.put_complex_constraint(cons);
break;
default:
throw new RuntimeException("Invalid constraint type");
}
++n_legal_cons;
}
ptAnalyzer.ps.printf("Only %d (%.1f%%) constraints are needed for this run.\n", n_legal_cons, ((double) n_legal_cons / ptAnalyzer.n_init_constraints) * 100);
}
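The two-bit code computed above (and stashed on LOAD_CONS/STORE_CONS constraints via full_convertor) only records which side of the constraint is global, i.e. belongs to SUPER_MAIN. A minimal standalone sketch of that encoding; the SUPER_MAIN value here is an arbitrary stand-in for Constants.SUPER_MAIN, not its real value:
public class GlobalSideCode {
    // Hypothetical stand-in for soot.jimple.spark.geom.geomPA.Constants.SUPER_MAIN
    static final int SUPER_MAIN = 0;

    // Bit 1 records whether the LHS is global, bit 0 whether the RHS is.
    static int encode(int nf1, int nf2) {
        return ((nf1 == SUPER_MAIN ? 1 : 0) << 1) | (nf2 == SUPER_MAIN ? 1 : 0);
    }

    public static void main(String[] args) {
        System.out.println(encode(0, 5)); // 2: only the LHS is global
        System.out.println(encode(5, 0)); // 1: only the RHS is global
        System.out.println(encode(5, 7)); // 0: neither side is global
    }
}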
Use of soot.jimple.spark.geom.dataRep.PlainConstraint in project soot by Sable.
The class FullSensitiveNode, method propagate.
/**
* The place where you implement the pointer assignment reasoning.
*/
@Override
public void propagate(GeomPointsTo ptAnalyzer, IWorklist worklist) {
int i, j;
AllocNode obj;
SegmentNode pts, pe, entry_pts[], entry_pe[];
GeometricManager gm1, gm2;
FullSensitiveNode qn, objn;
boolean added, hasNewPointsTo;
if (pt_objs.isEmpty())
return;
// We first build the flow edges that flow in to/out of object fields
if (complex_cons != null) {
for (Map.Entry<AllocNode, GeometricManager> entry : new_pts.entrySet()) {
obj = entry.getKey();
entry_pts = entry.getValue().getFigures();
for (PlainConstraint pcons : complex_cons) {
// For each newly points-to object, construct its instance field
objn = (FullSensitiveNode) ptAnalyzer.findInstanceField(obj, pcons.f);
if (objn == null) {
// This AllocDotField combination must be invalid:
// the expression p.f implies that p cannot point to obj, so we remove it.
// We label this event and sweep the garbage later.
pt_objs.put(obj, (GeometricManager) deadManager);
entry.setValue((GeometricManager) deadManager);
break;
}
if (!objn.willUpdate) {
// This object field is not needed for computing
// the points-to information of the seed pointers
continue;
}
qn = (FullSensitiveNode) pcons.otherSide;
for (i = 0; i < GeometricManager.Divisions; ++i) {
pts = entry_pts[i];
while (pts != null && pts.is_new) {
switch(pcons.type) {
case Constants.STORE_CONS:
// Store, qv -> pv.field
if (instantiateStoreConstraint(qn, objn, pts, (pcons.code << 8) | i))
worklist.push(qn);
break;
case Constants.LOAD_CONS:
// Load, pv.field -> qv
if (instantiateLoadConstraint(objn, qn, pts, (pcons.code << 8) | i))
worklist.push(objn);
break;
}
pts = pts.next;
}
}
}
}
}
if (flowto.isEmpty())
return;
// Next, we process the assignments (e.g. p = q)
for (Map.Entry<FullSensitiveNode, GeometricManager> entry1 : flowto.entrySet()) {
added = false;
qn = entry1.getKey();
gm1 = entry1.getValue();
entry_pe = gm1.getFigures();
// We have new flow-to edges
if (gm1.isThereUnprocessedFigures()) {
// Second, we get the points-to shapes
for (Map.Entry<AllocNode, GeometricManager> entry2 : pt_objs.entrySet()) {
obj = entry2.getKey();
gm2 = entry2.getValue();
// Avoid the garbage
if (gm2 == deadManager)
continue;
// Type filtering and flow-to-this filtering, a simple approach
if (!ptAnalyzer.castNeverFails(obj.getType(), qn.getType()))
continue;
entry_pts = gm2.getFigures();
hasNewPointsTo = gm2.isThereUnprocessedFigures();
// We pair up all the geometric points-to tuples and flow edges
for (j = 0; j < GeometricManager.Divisions; ++j) {
pe = entry_pe[j];
while (pe != null) {
if (!pe.is_new && !hasNewPointsTo)
break;
for (i = 0; i < GeometricManager.Divisions; ++i) {
pts = entry_pts[i];
while (pts != null && (pts.is_new || pe.is_new)) {
// Propagate this object
if (reasonAndPropagate(qn, obj, pts, pe, (i << 8) | j))
added = true;
pts = pts.next;
}
}
pe = pe.next;
}
}
}
gm1.flush();
} else {
for (Map.Entry<AllocNode, GeometricManager> entry2 : new_pts.entrySet()) {
obj = entry2.getKey();
gm2 = entry2.getValue();
// Avoid the garbage
if (gm2 == deadManager)
continue;
// Type filtering and flow-to-this filtering, a simple approach
if (!ptAnalyzer.castNeverFails(obj.getType(), qn.getType()))
continue;
entry_pts = gm2.getFigures();
// We pair up all the geometric points-to tuples and flow edges
for (i = 0; i < GeometricManager.Divisions; ++i) {
pts = entry_pts[i];
while (pts != null && pts.is_new) {
for (j = 0; j < GeometricManager.Divisions; ++j) {
pe = entry_pe[j];
while (pe != null) {
// Propagate this object
if (reasonAndPropagate(qn, obj, pts, pe, (i << 8) | j))
added = true;
pe = pe.next;
}
}
pts = pts.next;
}
}
}
}
if (added)
worklist.push(qn);
}
}
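propagate() repeatedly packs two small indices into one int, e.g. (pcons.code << 8) | i for the complex constraints and (i << 8) | j for the assignment pairing. A sketch of that packing idiom and its inverse; the decode helpers are hypothetical, since the real decoding happens inside reasonAndPropagate and the instantiate* methods, whose bodies are not shown here. No bits collide because GeometricManager.Divisions is a small constant, far below 256:
public class DivisionCode {
    // High byte: first index (e.g. points-to division or pcons.code);
    // low byte: second index (e.g. flow-edge division).
    static int pack(int high, int low) {
        return (high << 8) | low;
    }

    static int high(int packed) {
        return packed >>> 8;
    }

    static int low(int packed) {
        return packed & 0xFF;
    }

    public static void main(String[] args) {
        int packed = pack(3, 5);
        System.out.println(high(packed) + " " + low(packed)); // 3 5
    }
}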
Use of soot.jimple.spark.geom.dataRep.PlainConstraint in project soot by Sable.
The class PtInsNode, method propagate.
/**
* An efficient implementation of differential propagation.
*/
@Override
public void propagate(GeomPointsTo ptAnalyzer, IWorklist worklist) {
int i, j;
AllocNode obj;
SegmentNode pts, pe, int_entry1[], int_entry2[];
PtInsIntervalManager pim1, pim2;
PtInsNode qn, objn;
boolean added, has_new_edges;
// We first build the new flow edges via the field dereferences
if (complex_cons != null) {
for (Map.Entry<AllocNode, PtInsIntervalManager> entry : new_pts.entrySet()) {
obj = entry.getKey();
int_entry1 = entry.getValue().getFigures();
for (PlainConstraint pcons : complex_cons) {
// Construct the two variables in assignment
objn = (PtInsNode) ptAnalyzer.findAndInsertInstanceField(obj, pcons.f);
if (objn == null) {
// This AllocDotField combination must be invalid:
// the expression p.f implies that p cannot point to obj, so we remove it.
// We label this event and sweep the garbage later.
pt_objs.put(obj, (PtInsIntervalManager) deadManager);
entry.setValue((PtInsIntervalManager) deadManager);
break;
}
if (!objn.willUpdate) {
// This object field is not needed for computing
// the points-to information of the seed pointers
continue;
}
qn = (PtInsNode) pcons.otherSide;
for (i = 0; i < HeapInsIntervalManager.Divisions; ++i) {
pts = int_entry1[i];
while (pts != null && pts.is_new) {
switch(pcons.type) {
case Constants.STORE_CONS:
// pts.I2 may be zero, pts.L may be less than zero
if (qn.add_simple_constraint_3(objn, pcons.code == GeometricManager.ONE_TO_ONE ? pts.I1 : 0, pts.I2, pts.L))
worklist.push(qn);
break;
case Constants.LOAD_CONS:
// Load, pv.field -> qv
if (objn.add_simple_constraint_3(qn, pts.I2, pcons.code == GeometricManager.ONE_TO_ONE ? pts.I1 : 0, pts.L))
worklist.push(objn);
break;
}
pts = pts.next;
}
}
}
}
}
for (Map.Entry<PtInsNode, PtInsIntervalManager> entry1 : flowto.entrySet()) {
// First, we get the flow-to intervals
added = false;
qn = entry1.getKey();
pim1 = entry1.getValue();
int_entry1 = pim1.getFigures();
has_new_edges = pim1.isThereUnprocessedFigures();
Map<AllocNode, PtInsIntervalManager> objs = (has_new_edges ? pt_objs : new_pts);
for (Map.Entry<AllocNode, PtInsIntervalManager> entry2 : objs.entrySet()) {
// Second, we get the points-to intervals
obj = entry2.getKey();
pim2 = entry2.getValue();
if (pim2 == deadManager)
continue;
if (!ptAnalyzer.castNeverFails(obj.getType(), qn.getWrappedNode().getType()))
continue;
int_entry2 = pim2.getFigures();
// We pair up all the interval points-to tuples and interval flow edges
for (i = 0; i < PtInsIntervalManager.Divisions; ++i) {
pts = int_entry2[i];
while (pts != null) {
if (!has_new_edges && !pts.is_new)
break;
for (j = 0; j < PtInsIntervalManager.Divisions; ++j) {
pe = int_entry1[j];
while (pe != null) {
if (pts.is_new || pe.is_new) {
// Propagate this object
if (add_new_points_to_tuple(pts, pe, obj, qn))
added = true;
} else
break;
pe = pe.next;
}
}
pts = pts.next;
}
}
}
if (added)
worklist.push(qn);
// Now, we clean the new edges if necessary
if (has_new_edges) {
pim1.flush();
}
}
}
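The nested loops above, together with the switch between pt_objs and new_pts on has_new_edges, implement difference propagation: a (points-to figure, flow-edge figure) pair does work only if at least one member is new, so no pair is processed twice across rounds. A schematic sketch of that rule, detached from the Soot types:
import java.util.List;
import java.util.function.BiConsumer;
import java.util.function.Predicate;

public class DiffPairing {
    // Process a (points-to, flow-edge) pair only when at least one
    // member is new; previously seen pairs are skipped entirely.
    static <A, B> void pairNewOnly(List<A> ptsFigures, List<B> edgeFigures,
                                   Predicate<A> ptsIsNew, Predicate<B> edgeIsNew,
                                   BiConsumer<A, B> process) {
        for (A pts : ptsFigures) {
            for (B pe : edgeFigures) {
                if (ptsIsNew.test(pts) || edgeIsNew.test(pe)) {
                    process.accept(pts, pe);
                }
            }
        }
    }
}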
Use of soot.jimple.spark.geom.dataRep.PlainConstraint in project soot by Sable.
The class OfflineProcessor, method buildImpactGraph.
/**
 * The dependence graph will be destroyed and the impact graph will be built.
 * p = q means q impacts p; therefore, we add an edge q -> p in the impact graph.
 */
protected void buildImpactGraph() {
for (int i = 0; i < n_var; ++i) {
varGraph.set(i, null);
}
queue.clear();
for (PlainConstraint cons : geomPTA.constraints) {
if (!cons.isActive)
continue;
final IVarAbstraction lhs = cons.getLHS();
final IVarAbstraction rhs = cons.getRHS();
final SparkField field = cons.f;
IVarAbstraction rep;
switch(cons.type) {
case Constants.NEW_CONS:
// We enqueue the pointers that are allocation result receivers
queue.add(rhs.id);
break;
case Constants.ASSIGN_CONS:
add_graph_edge(lhs.id, rhs.id);
break;
case Constants.LOAD_CONS:
rep = lhs.getRepresentative();
if (!rep.hasPTResult()) {
lhs.getWrappedNode().getP2Set().forall(new P2SetVisitor() {
@Override
public void visit(Node n) {
IVarAbstraction padf = geomPTA.findInstanceField((AllocNode) n, field);
if (padf == null || !padf.reachable())
return;
add_graph_edge(padf.id, rhs.id);
}
});
} else {
// use geomPA
for (AllocNode o : rep.get_all_points_to_objects()) {
IVarAbstraction padf = geomPTA.findInstanceField(o, field);
if (padf == null || !padf.reachable())
continue;
add_graph_edge(padf.id, rhs.id);
}
}
break;
case Constants.STORE_CONS:
rep = rhs.getRepresentative();
if (!rep.hasPTResult()) {
rhs.getWrappedNode().getP2Set().forall(new P2SetVisitor() {
@Override
public void visit(Node n) {
IVarAbstraction padf = geomPTA.findInstanceField((AllocNode) n, field);
if (padf == null || !padf.reachable())
return;
add_graph_edge(lhs.id, padf.id);
}
});
} else {
// use geomPA
for (AllocNode o : rep.get_all_points_to_objects()) {
IVarAbstraction padf = geomPTA.findInstanceField(o, field);
if (padf == null || !padf.reachable())
continue;
add_graph_edge(lhs.id, padf.id);
}
}
break;
}
}
}
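As the Javadoc above says, for p = q the impact edge runs from q to p. The ASSIGN_CONS case calls add_graph_edge(lhs.id, rhs.id), which matches that rule: in initFlowGraph earlier, my_lhs flows to my_rhs, so a geom assignment constraint's LHS is the value source (q) and its RHS the target (p). A toy adjacency-map illustration of the convention; the real varGraph layout is Soot-internal, and add_graph_edge is assumed here to take (from, to):
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ImpactGraphDemo {
    // edges.get(from) lists every node whose points-to set 'from' impacts.
    static final Map<Integer, List<Integer>> impactEdges = new HashMap<>();

    static void addGraphEdge(int from, int to) {
        impactEdges.computeIfAbsent(from, k -> new ArrayList<>()).add(to);
    }

    public static void main(String[] args) {
        int p = 1, q = 2;
        addGraphEdge(q, p); // p = q: a change to q impacts p
        System.out.println(impactEdges); // {2=[1]}
    }
}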
Use of soot.jimple.spark.geom.dataRep.PlainConstraint in project soot by Sable.
The class HeapInsNode, method propagate.
/**
* An efficient implementation of differential propagation.
*/
@Override
public void propagate(GeomPointsTo ptAnalyzer, IWorklist worklist) {
int i, j;
AllocNode obj;
SegmentNode pts, pe, int_entry1[], int_entry2[];
HeapInsIntervalManager him1, him2;
HeapInsNode qn, objn;
boolean added, has_new_edges;
// We first build the new flow edges via the field dereferences
if (complex_cons != null) {
for (Map.Entry<AllocNode, HeapInsIntervalManager> entry : new_pts.entrySet()) {
obj = entry.getKey();
int_entry1 = entry.getValue().getFigures();
for (PlainConstraint pcons : complex_cons) {
// Construct the two variables in assignment
objn = (HeapInsNode) ptAnalyzer.findAndInsertInstanceField(obj, pcons.f);
if (objn == null) {
// This AllocDotField combination must be invalid:
// the expression p.f implies that p cannot point to obj, so we remove it.
// We label this event and sweep the garbage later.
pt_objs.put(obj, (HeapInsIntervalManager) deadManager);
entry.setValue((HeapInsIntervalManager) deadManager);
break;
}
if (!objn.willUpdate) {
// This object field is not needed for computing
// the points-to information of the seed pointers
continue;
}
qn = (HeapInsNode) pcons.otherSide;
for (i = 0; i < HeapInsIntervalManager.Divisions; ++i) {
pts = int_entry1[i];
while (pts != null && pts.is_new) {
switch(pcons.type) {
case Constants.STORE_CONS:
// pts.I2 may be zero, pts.L may be less than zero
if (qn.add_simple_constraint_3(objn, pcons.code == GeometricManager.ONE_TO_ONE ? pts.I1 : 0, pts.I2, pts.L < 0 ? -pts.L : pts.L))
worklist.push(qn);
break;
case Constants.LOAD_CONS:
// Load, pv.field -> qv
if (objn.add_simple_constraint_3(qn, pts.I2, pcons.code == GeometricManager.ONE_TO_ONE ? pts.I1 : 0, pts.L < 0 ? -pts.L : pts.L))
worklist.push(objn);
break;
}
pts = pts.next;
}
}
}
}
}
for (Map.Entry<HeapInsNode, HeapInsIntervalManager> entry1 : flowto.entrySet()) {
// First, we pick one flow-to figure
added = false;
qn = entry1.getKey();
him1 = entry1.getValue();
// Figure collection for the flows-to tuple
int_entry1 = him1.getFigures();
has_new_edges = him1.isThereUnprocessedFigures();
Map<AllocNode, HeapInsIntervalManager> objs = (has_new_edges ? pt_objs : new_pts);
for (Map.Entry<AllocNode, HeapInsIntervalManager> entry2 : objs.entrySet()) {
// Second, we get the points-to intervals
obj = entry2.getKey();
him2 = entry2.getValue();
if (him2 == deadManager)
continue;
if (!ptAnalyzer.castNeverFails(obj.getType(), qn.getWrappedNode().getType()))
continue;
// Figure collection for the points-to tuple
int_entry2 = him2.getFigures();
// Loop over all points-to figures
for (i = 0; i < HeapInsIntervalManager.Divisions; ++i) {
pts = int_entry2[i];
while (pts != null) {
if (!has_new_edges && !pts.is_new)
break;
// Loop over all flows-to figures
for (j = 0; j < HeapInsIntervalManager.Divisions; ++j) {
pe = int_entry1[j];
while (pe != null) {
if (pts.is_new || pe.is_new) {
// Propagate this object
if (add_new_points_to_tuple(pts, pe, obj, qn))
added = true;
} else
break;
pe = pe.next;
}
}
pts = pts.next;
}
}
}
if (added)
worklist.push(qn);
// Now, we clean the new edges if necessary
if (has_new_edges)
him1.flush();
}
}
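The him1.flush() at the end of each flow-to iteration clears the unprocessed marks so later rounds only pair genuinely fresh figures. A sketch of the invariant it maintains, using a hypothetical Figure node with the same is_new/next fields as SegmentNode; the real HeapInsIntervalManager.flush may do additional bookkeeping. The traversal loops above stop at the first non-new figure, which suggests new figures are kept at the front of each list, so the flush can stop early as well:
class Figure {
    boolean is_new;
    Figure next;
}

class FlushSketch {
    // Clear the 'new' marks after a propagation round so the next round's
    // differential pairing only revisits figures added since this flush.
    static void flush(Figure head) {
        for (Figure f = head; f != null && f.is_new; f = f.next) {
            f.is_new = false;
        }
    }
}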