Use of soot.jimple.spark.pag.Node in project soot by Sable:
the HybridPointsToSet.intersection method.
/**
 * Computes the intersection of two points-to sets, picking the cheapest
 * strategy based on which operands have been promoted to a bit vector.
 *
 * @param set1 first operand
 * @param set2 second operand
 * @param pag  the pointer-assignment graph the result set belongs to
 * @return a fresh set containing exactly the nodes present in both operands
 */
public static HybridPointsToSet intersection(final HybridPointsToSet set1, final HybridPointsToSet set2, PAG pag) {
    final HybridPointsToSet result = new HybridPointsToSet(Scene.v().getObjectType(), pag);
    final BitVector bits1 = set1.bits;
    final BitVector bits2 = set2.bits;
    if (bits1 != null && bits2 != null) {
        // Both sets are large: intersect at the bit level.
        // Note: 'empty' is cleared even if the AND produced no bits;
        // a small result may use inefficient bit-vector operations later.
        result.bits = BitVector.and(bits1, bits2);
        result.empty = false;
    } else if (bits1 != null) {
        // Only set1 is large, so set2 is the smaller one: walk it.
        set2.forall(new P2SetVisitor() {
            @Override
            public void visit(Node n) {
                if (set1.contains(n))
                    result.add(n);
            }
        });
    } else {
        // set1 is small (set2 may be small too): walk set1.
        set1.forall(new P2SetVisitor() {
            @Override
            public void visit(Node n) {
                if (set2.contains(n))
                    result.add(n);
            }
        });
    }
    return result;
}
Use of soot.jimple.spark.pag.Node in project soot by Sable:
the SharedHybridSet.nativeAddAll method.
/**
 * Merges all elements of {@code other} into this set, skipping any element
 * contained in {@code exclude} and filtering through the type-based bit mask.
 *
 * Strategy: if one of the sets has a bit vector but the other doesn't, adopt
 * that bit vector as the base and fold in the other set's overflow list. If
 * both have bit vectors, combine them bitwise. If neither has one, just merge
 * the overflow lists.
 *
 * @param other   the set whose elements are added to this one
 * @param exclude elements to leave out; may be null (meaning exclude nothing)
 * @return true if this set changed as a result of the call
 */
private boolean nativeAddAll(SharedHybridSet other, SharedHybridSet exclude) {
    BitVector mask = getBitMask(other, pag);
    if (exclude != null) {
        if (exclude.overflow.size() > 0) {
            // Normalize exclude to a pure bit-vector representation, for simplicity.
            PointsToBitVector newBitVector;
            if (exclude.bitVector == null) {
                newBitVector = new PointsToBitVector(pag.getAllocNodeNumberer().size());
            } else {
                newBitVector = new PointsToBitVector(exclude.bitVector);
            }
            add(newBitVector, exclude.overflow);
            exclude = new SharedHybridSet(type, pag);
            exclude.bitVector = newBitVector;
        } else // No overflow and no bit vector: exclude holds nothing, drop it.
        if (exclude.bitVector == null)
            exclude = null;
    }
    // originalOnes = elements currently held in this set's bit vector;
    // otherBitVectorSize = elements held in other's bit vector (not its overflow).
    int originalSize = size(), originalOnes = originalSize - overflow.size(), otherBitVectorSize = other.size() - other.overflow.size();
    // Decide on the base bit vector.
    if (bitVector == null) {
        // Adopt other's bit vector (if any) as our shared base.
        bitVector = other.bitVector;
        if (bitVector != null) {
            // Both bit vectors may have been null; no ref-count bump needed then.
            bitVector.incRefCount();
            // Merging in new bits might duplicate elements already in the
            // overflow list, so remove and re-add them all.
            // TODO: Can this be avoided somehow? Perhaps by allowing an
            // element to live in both the overflow list and the bit vector,
            // or by checking the bit vector and removing elements found there.
            OverflowList toReAdd = overflow;
            overflow = new OverflowList();
            // Tracks whether a private (unshared) bit vector was created below,
            // which decides whether toReAdd still needs re-adding at the end.
            boolean newBitVectorCreated = false;
            numElements = otherBitVectorSize;
            if (exclude != null || mask != null) {
                // Apply exclude/mask to a private copy so the shared vector
                // borrowed from 'other' is never mutated.
                PointsToBitVector result = new PointsToBitVector(bitVector);
                if (exclude != null)
                    result.andNot(exclude.bitVector);
                if (mask != null)
                    result.and(mask);
                if (!result.equals(bitVector)) {
                    // Filtering removed bits; fold the saved overflow into the copy.
                    add(result, toReAdd);
                    int newBitVectorSize = result.cardinality();
                    numElements = newBitVectorSize;
                    // NOTE(review): the last argument is otherBitVectorSize here,
                    // whereas the analogous call below passes originalOnes —
                    // verify this duplication is intentional.
                    findAppropriateBitVector(result, other.bitVector, otherBitVectorSize, otherBitVectorSize);
                    newBitVectorCreated = true;
                }
            }
            // If no private vector was created, toReAdd was never folded in;
            // re-add its elements one by one now.
            if (!newBitVectorCreated) {
                for (OverflowList.ListNode i = toReAdd.overflow; i != null; i = i.next) {
                    add(i.elem);
                }
            }
        }
    } else if (other.bitVector != null) {
        // Both bit vectors are non-null; merge them into a fresh vector.
        PointsToBitVector newBitVector = new PointsToBitVector(other.bitVector);
        if (exclude != null)
            newBitVector.andNot(exclude.bitVector);
        if (mask != null)
            newBitVector.and(mask);
        newBitVector.or(bitVector);
        // Proceed only if the merge actually contributed new bits.
        if (!newBitVector.equals(bitVector)) {
            if (other.overflow.size() != 0) {
                // Fold other's overflow elements in as bits too, subject to
                // the same mask/exclude filtering.
                PointsToBitVector toAdd = new PointsToBitVector(newBitVector.size());
                add(toAdd, other.overflow);
                if (mask != null)
                    toAdd.and(mask);
                if (exclude != null)
                    toAdd.andNot(exclude.bitVector);
                newBitVector.or(toAdd);
            }
            // At this point newBitVector is bitVector plus some new bits.
            // numOnes = total bits in the merged vector.
            int numOnes = newBitVector.cardinality();
            // numAdded = overflow elements absorbed into the merged vector.
            int numAdded = add(newBitVector, overflow);
            // Net size change; the delta may be negative because absorbed
            // overflow elements are no longer counted in overflow.size().
            numElements += numOnes - originalOnes + numAdded - overflow.size();
            if (size() > originalSize) {
                findAppropriateBitVector(newBitVector, other.bitVector, otherBitVectorSize, originalOnes);
                // checkSize();
                return true;
            } else {
                // The merged-in bits may all correspond to elements that were
                // already in the overflow list, leaving the set unchanged;
                // return false in that case. We also keep the set as it was by
                // not calling findAppropriateBitVector, which maximizes sharing
                // and is fastest short-term; whether keeping the already
                // computed vector would be faster overall is an open question.
                return false;
            }
        }
    }
    // Add all elements from other's overflow list, unless they're excluded
    // or filtered out by the mask.
    // NOTE(review): this local deliberately shadows the 'overflow' field.
    OverflowList overflow = other.overflow;
    for (OverflowList.ListNode i = overflow.overflow; i != null; i = i.next) {
        Node nodeToMaybeAdd = i.elem;
        if ((exclude == null) || !exclude.contains(nodeToMaybeAdd)) {
            if (mask == null || mask.get(nodeToMaybeAdd.getNumber())) {
                add(nodeToMaybeAdd);
            }
        }
    }
    // checkSize();
    return size() > originalSize;
}
Use of soot.jimple.spark.pag.Node in project soot by Sable:
the SparkTransformer.addTags method.
/**
 * Attaches debugging tags to every definition statement in the scene,
 * recording the points-to nodes computed for the assigned value as well as
 * the sources of its incoming simple, alloc, and load edges.
 *
 * @param pag the pointer-assignment graph whose results are reported
 */
protected void addTags(PAG pag) {
    final Tag unknown = new StringTag("Untagged Spark node");
    final Map<Node, Tag> nodeToTag = pag.getNodeTags();
    for (final SootClass cls : Scene.v().getClasses()) {
        for (final SootMethod method : cls.getMethods()) {
            // Only concrete methods with an active body contain statements to tag.
            if (!method.isConcrete() || !method.hasActiveBody())
                continue;
            for (final Unit unit : method.getActiveBody().getUnits()) {
                final Stmt stmt = (Stmt) unit;
                if (!(stmt instanceof DefinitionStmt))
                    continue;
                // Resolve the left-hand side to a variable node, if one exists.
                Value lhs = ((DefinitionStmt) stmt).getLeftOp();
                VarNode varNode = null;
                if (lhs instanceof Local) {
                    varNode = pag.findLocalVarNode(lhs);
                } else if (lhs instanceof FieldRef) {
                    varNode = pag.findGlobalVarNode(((FieldRef) lhs).getField());
                }
                if (varNode == null)
                    continue;
                // Tag every node the assigned variable may point to...
                varNode.getP2Set().forall(new P2SetVisitor() {
                    public final void visit(Node n) {
                        addTag(stmt, n, nodeToTag, unknown);
                    }
                });
                // ...and the source of every inverse simple/alloc/load edge.
                for (Node[] sources : new Node[][] { pag.simpleInvLookup(varNode),
                        pag.allocInvLookup(varNode), pag.loadInvLookup(varNode) }) {
                    for (Node source : sources) {
                        addTag(stmt, source, nodeToTag, unknown);
                    }
                }
            }
        }
    }
}
Use of soot.jimple.spark.pag.Node in project soot by Sable:
the PropAlias.handleVarNode method.
/**
 * Propagates the new points-to information of node {@code src} to all of its
 * successors in the pointer-assignment graph.
 *
 * @param src the variable node whose newly discovered points-to facts are
 *            pushed out; must be its own replacement (i.e. not merged away)
 * @return true if any points-to set or worklist changed as a result
 */
protected final boolean handleVarNode(final VarNode src) {
    boolean ret = false;
    // Propagation must only run on representative nodes.
    if (src.getReplacement() != src)
        throw new RuntimeException("Got bad node " + src + " with rep " + src.getReplacement());
    // Only the delta (new set) since the last flush needs propagating.
    final PointsToSetInternal newP2Set = src.getP2Set().getNewSet();
    if (newP2Set.isEmpty())
        return false;
    if (ofcg != null) {
        // Let the on-the-fly call graph react to the updated node, then
        // process every edge it added to the PAG. Edges arrive on the queue
        // as consecutive (source, target) pairs.
        QueueReader<Node> addedEdges = pag.edgeReader();
        ofcg.updatedNode(src);
        ofcg.build();
        while (addedEdges.hasNext()) {
            Node addedSrc = (Node) addedEdges.next();
            Node addedTgt = (Node) addedEdges.next();
            ret = true;
            if (addedSrc instanceof VarNode) {
                VarNode edgeSrc = (VarNode) addedSrc;
                if (addedTgt instanceof VarNode) {
                    // var -> var assignment edge: flow the whole set across.
                    VarNode edgeTgt = (VarNode) addedTgt;
                    if (edgeTgt.makeP2Set().addAll(edgeSrc.getP2Set(), null))
                        addToWorklist(edgeTgt);
                } else if (addedTgt instanceof NewInstanceNode) {
                    // var -> reflective new-instance edge: also requeue all
                    // variables assigned from the instance node.
                    NewInstanceNode edgeTgt = (NewInstanceNode) addedTgt.getReplacement();
                    if (edgeTgt.makeP2Set().addAll(edgeSrc.getP2Set(), null)) {
                        for (Node element : pag.assignInstanceLookup(edgeTgt)) {
                            addToWorklist((VarNode) element);
                        }
                    }
                }
            } else if (addedSrc instanceof AllocNode) {
                // alloc -> var edge: seed the target with the allocation site.
                AllocNode edgeSrc = (AllocNode) addedSrc;
                VarNode edgeTgt = (VarNode) addedTgt;
                if (edgeTgt.makeP2Set().add(edgeSrc))
                    addToWorklist(edgeTgt);
            } else if (addedSrc instanceof NewInstanceNode && addedTgt instanceof VarNode) {
                // Reflective instantiation: for each class constant the
                // new-instance node may refer to, materialize an alloc node of
                // that type and flow it to the target variable.
                final NewInstanceNode edgeSrc = (NewInstanceNode) addedSrc.getReplacement();
                final VarNode edgeTgt = (VarNode) addedTgt.getReplacement();
                addedSrc.getP2Set().forall(new P2SetVisitor() {
                    @Override
                    public void visit(Node n) {
                        if (n instanceof ClassConstantNode) {
                            ClassConstantNode ccn = (ClassConstantNode) n;
                            Type ccnType = ccn.getClassConstant().toSootType();
                            // If the referenced class has not been loaded,
                            // we do this now.
                            SootClass targetClass = ((RefType) ccnType).getSootClass();
                            if (targetClass.resolvingLevel() == SootClass.DANGLING)
                                Scene.v().forceResolve(targetClass.getName(), SootClass.SIGNATURES);
                            edgeTgt.makeP2Set().add(pag.makeAllocNode(edgeSrc.getValue(), ccnType, ccn.getMethod()));
                            addToWorklist(edgeTgt);
                        }
                    }
                });
            }
            // Any new edge touching a field reference requires the base
            // variable to be re-examined for aliasing.
            FieldRefNode frn = null;
            if (addedSrc instanceof FieldRefNode)
                frn = (FieldRefNode) addedSrc;
            if (addedTgt instanceof FieldRefNode)
                frn = (FieldRefNode) addedTgt;
            if (frn != null) {
                VarNode base = frn.getBase();
                if (fieldToBase.put(frn.getField(), base)) {
                    aliasWorkList.add(base);
                }
            }
        }
    }
    // Flow the delta along all simple (assignment) edges out of src.
    Node[] simpleTargets = pag.simpleLookup(src);
    for (Node element : simpleTargets) {
        if (element.makeP2Set().addAll(newP2Set, null)) {
            addToWorklist((VarNode) element);
            ret = true;
        }
    }
    // Flow the delta into field references written through src.
    Node[] storeTargets = pag.storeLookup(src);
    for (Node element : storeTargets) {
        final FieldRefNode fr = (FieldRefNode) element;
        if (fr.makeP2Set().addAll(newP2Set, null)) {
            fieldRefWorkList.add(fr);
            ret = true;
        }
    }
    // The delta has been fully propagated; fold it into the old set.
    src.getP2Set().flushNew();
    return ret;
}
Use of soot.jimple.spark.pag.Node in project soot by Sable:
the PropAlias.propagate method.
/**
 * Actually does the propagation: runs the alias-based points-to analysis to
 * a fixed point, alternating variable-node propagation with field-reference
 * alias discovery and load/store flushing until no worklist entries remain.
 */
public final void propagate() {
    ofcg = pag.getOnFlyCallGraph();
    new TopoSorter(pag, false).sort();
    // Record the base variable for every field reference that is loaded
    // from or stored into, so aliasing bases can be found later.
    for (Object object : pag.loadSources()) {
        final FieldRefNode fr = (FieldRefNode) object;
        fieldToBase.put(fr.getField(), fr.getBase());
    }
    for (Object object : pag.storeInvSources()) {
        final FieldRefNode fr = (FieldRefNode) object;
        fieldToBase.put(fr.getField(), fr.getBase());
    }
    // Seed the analysis from every allocation site.
    for (Object object : pag.allocSources()) {
        handleAllocNode((AllocNode) object);
    }
    boolean verbose = pag.getOpts().verbose();
    do {
        if (verbose) {
            logger.debug("Worklist has " + varNodeWorkList.size() + " nodes.");
        }
        // Phase 1: drain the variable worklist, remembering every node
        // processed so its field refs can be checked for aliases below.
        aliasWorkList = new HashSet<VarNode>();
        while (!varNodeWorkList.isEmpty()) {
            VarNode src = varNodeWorkList.iterator().next();
            varNodeWorkList.remove(src);
            aliasWorkList.add(src);
            handleVarNode(src);
        }
        if (verbose) {
            logger.debug("Now handling field references");
        }
        // Phase 2: for each processed variable, find field refs on other
        // bases that may alias it (non-empty points-to intersection) and
        // link their points-to sets in both directions.
        for (VarNode src : aliasWorkList) {
            for (FieldRefNode srcFr : src.getAllFieldRefs()) {
                SparkField field = srcFr.getField();
                for (VarNode dst : fieldToBase.get(field)) {
                    if (src.getP2Set().hasNonEmptyIntersection(dst.getP2Set())) {
                        FieldRefNode dstFr = dst.dot(field);
                        aliasEdges.put(srcFr, dstFr);
                        aliasEdges.put(dstFr, srcFr);
                        fieldRefWorkList.add(srcFr);
                        fieldRefWorkList.add(dstFr);
                        // Exchange the already-flushed (old) sets between the
                        // newly discovered alias pair.
                        if (makeP2Set(dstFr).addAll(srcFr.getP2Set().getOldSet(), null)) {
                            outFieldRefWorkList.add(dstFr);
                        }
                        if (makeP2Set(srcFr).addAll(dstFr.getP2Set().getOldSet(), null)) {
                            outFieldRefWorkList.add(srcFr);
                        }
                    }
                }
            }
        }
        // Phase 3: propagate each field ref's delta to all of its aliases.
        for (FieldRefNode src : fieldRefWorkList) {
            for (FieldRefNode dst : aliasEdges.get(src)) {
                if (makeP2Set(dst).addAll(src.getP2Set().getNewSet(), null)) {
                    outFieldRefWorkList.add(dst);
                }
            }
            src.getP2Set().flushNew();
        }
        fieldRefWorkList = new HashSet<FieldRefNode>();
        // Phase 4: flow changed field-ref sets out along load edges,
        // requeuing the receiving variables for the next iteration.
        for (FieldRefNode src : outFieldRefWorkList) {
            PointsToSetInternal set = getP2Set(src).getNewSet();
            if (set.isEmpty())
                continue;
            Node[] targets = pag.loadLookup(src);
            for (Node element0 : targets) {
                VarNode target = (VarNode) element0;
                if (target.makeP2Set().addAll(set, null)) {
                    addToWorklist(target);
                }
            }
            getP2Set(src).flushNew();
        }
        outFieldRefWorkList = new HashSet<FieldRefNode>();
    } while (!varNodeWorkList.isEmpty());
}
Aggregations