Use of org.graalvm.compiler.phases.common.ConvertDeoptimizeToGuardPhase in project graal by oracle.
Class RuntimeStrengthenStampsPhase, method processMethod.
@SuppressWarnings("try")
private void processMethod(CallTreeNode node, Deque<CallTreeNode> worklist, BigBang bb) {
    AnalysisMethod method = node.implementationMethod;
    assert method.isImplementationInvoked();

    if (node.graph == null) {
        if (method.getAnnotation(Fold.class) != null || method.getAnnotation(NodeIntrinsic.class) != null) {
            VMError.shouldNotReachHere("Parsing method annotated with @Fold or @NodeIntrinsic: " + method.format("%H.%n(%p)"));
        }
        boolean parse = false;

        DebugContext debug = bb.getDebug();
        StructuredGraph graph = method.buildGraph(debug, method, hostedProviders, Purpose.PREPARE_RUNTIME_COMPILATION);
        if (graph == null) {
            if (!method.hasBytecodes()) {
                return;
            }
            parse = true;
            graph = new StructuredGraph.Builder(debug.getOptions(), debug, AllowAssumptions.YES).method(method).build();
        }

        try (DebugContext.Scope scope = debug.scope("RuntimeCompile", graph)) {
            if (parse) {
                RuntimeGraphBuilderPhase builderPhase = new RuntimeGraphBuilderPhase(hostedProviders.getMetaAccess(), hostedProviders.getStampProvider(),
                                hostedProviders.getConstantReflection(), hostedProviders.getConstantFieldProvider(), graphBuilderConfig, optimisticOpts, null,
                                hostedProviders.getWordTypes(), deoptimizeOnExceptionPredicate, node);
                builderPhase.apply(graph);
            }

            if (graph.getNodes(StackValueNode.TYPE).isNotEmpty()) {
                /*
                 * Stack allocated memory is not seen by the deoptimization code, i.e., it is
                 * not copied in case of deoptimization. Also, pointers to it can be used for
                 * arbitrary address arithmetic, so we would not know how to update derived
                 * pointers into stack memory during deoptimization. Therefore, we cannot allow
                 * methods that allocate stack memory for runtime compilation. To remove this
                 * limitation, we would need to change how we handle stack allocated memory in
                 * Graal.
                 */
                return;
            }

            PhaseContext phaseContext = new PhaseContext(hostedProviders);
            new CanonicalizerPhase().apply(graph, phaseContext);
            new ConvertDeoptimizeToGuardPhase().apply(graph, phaseContext);

            graphEncoder.prepare(graph);
            node.graph = graph;

        } catch (Throwable ex) {
            debug.handle(ex);
        }
    }

    assert node.graph != null;
    List<MethodCallTargetNode> callTargets = node.graph.getNodes(MethodCallTargetNode.TYPE).snapshot();
    callTargets.sort((t1, t2) -> Integer.compare(t1.invoke().bci(), t2.invoke().bci()));

    for (MethodCallTargetNode targetNode : callTargets) {
        AnalysisMethod targetMethod = (AnalysisMethod) targetNode.targetMethod();
        AnalysisMethod callerMethod = (AnalysisMethod) targetNode.invoke().stateAfter().getMethod();
        InvokeTypeFlow invokeFlow = callerMethod.getTypeFlow().getOriginalMethodFlows().getInvoke(targetNode.invoke().bci());

        if (invokeFlow == null) {
            continue;
        }
        Collection<AnalysisMethod> allImplementationMethods = invokeFlow.getCallees();

        /*
         * Eventually we want to remove all invokes that are unreachable, i.e., have no
         * implementation. But the analysis is iterative, and we don't know here if we have
         * already reached the fixed point. So we only collect unreachable invokes here, and
         * remove them after the analysis has finished.
         */
        if (allImplementationMethods.size() == 0) {
            node.unreachableInvokes.add(targetNode.invoke());
        } else {
            node.unreachableInvokes.remove(targetNode.invoke());
        }

        List<AnalysisMethod> implementationMethods = new ArrayList<>();
        for (AnalysisMethod implementationMethod : allImplementationMethods) {
            /* Filter out all the implementation methods that have already been processed. */
            if (!methods.containsKey(implementationMethod)) {
                implementationMethods.add(implementationMethod);
            }
        }

        if (implementationMethods.size() > 0) {
            /* Sort to make printing order and method discovery order deterministic. */
            implementationMethods.sort((m1, m2) -> m1.format("%H.%n(%p)").compareTo(m2.format("%H.%n(%p)")));

            String sourceReference = buildSourceReference(targetNode.invoke().stateAfter());
            for (AnalysisMethod implementationMethod : implementationMethods) {
                CallTreeNode calleeNode = new CallTreeNode(implementationMethod, targetMethod, node, node.level + 1, sourceReference);
                if (includeCalleePredicate.includeCallee(calleeNode, implementationMethods)) {
                    assert !methods.containsKey(implementationMethod);
                    methods.put(implementationMethod, calleeNode);
                    worklist.add(calleeNode);
                    node.children.add(calleeNode);
                    objectReplacer.createMethod(implementationMethod);
                }

                /*
                 * We must compile all methods which may be called. It may be the case that a
                 * call target does not reach the compile queue by default, e.g. if it is
                 * inlined at image generation but not at runtime compilation.
                 */
                CompilationInfoSupport.singleton().registerForcedCompilation(implementationMethod);
            }
        }
    }
}
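
For quick reference, the lines of processMethod that actually exercise ConvertDeoptimizeToGuardPhase can be isolated into a small helper. This is a minimal sketch, not project code: the helper name convertDeoptsToGuards and the Providers parameter (standing in for the hostedProviders field used above) are assumptions for illustration.

import org.graalvm.compiler.nodes.StructuredGraph;
import org.graalvm.compiler.phases.common.CanonicalizerPhase;
import org.graalvm.compiler.phases.common.ConvertDeoptimizeToGuardPhase;
import org.graalvm.compiler.phases.tiers.PhaseContext;
import org.graalvm.compiler.phases.util.Providers;

/*
 * Sketch only (not part of the original source): applies the same two phases,
 * in the same order, as processMethod above. Any Providers instance valid for
 * the given graph can be passed in place of hostedProviders.
 */
static void convertDeoptsToGuards(StructuredGraph graph, Providers providers) {
    PhaseContext phaseContext = new PhaseContext(providers);
    /* Canonicalize first, as processMethod does before the conversion. */
    new CanonicalizerPhase().apply(graph, phaseContext);
    /* Convert explicit DeoptimizeNodes into guards where possible. */
    new ConvertDeoptimizeToGuardPhase().apply(graph, phaseContext);
}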