Use of org.jikesrvm.compilers.opt.runtimesupport.OptCompiledMethod in project JikesRVM by JikesRVM.
The class DefaultInlineOracle, method shouldInline.
@Override
public InlineDecision shouldInline(final CompilationState state) {
final OptOptions opts = state.getOptions();
final boolean verbose = opts.PRINT_DETAILED_INLINE_REPORT;
if (!opts.INLINE) {
return NO("inlining not enabled");
}
final RVMMethod staticCallee = state.obtainTarget();
final NormalMethod rootMethod = state.getRootMethod();
final RVMMethod caller = state.getMethod();
final int bcIndex = state.getRealBytecodeIndex();
if (verbose)
VM.sysWriteln("Begin inline decision for " + "<" + caller + "," + bcIndex + "," + staticCallee + ">");
// Stage 1: We definitely don't inline certain methods
if (!state.isInvokeInterface()) {
if (staticCallee.isNative()) {
reportUnguardedDecisionIfVerbose("NO: native method", verbose);
return NO("native method");
}
if (hasNoInlinePragma(staticCallee, state)) {
reportUnguardedDecisionIfVerbose("NO: pragmaNoInline", verbose);
return NO("pragmaNoInline");
}
// Never inline a constructor of a class assignable to Throwable, so that
// StackTrace can recognize those frames when building stack traces (see StackTrace).
if (staticCallee.isObjectInitializer() && staticCallee.getDeclaringClass().isAssignableToThrowable()) {
reportUnguardedDecisionIfVerbose("NO: constructor of class assignable to throwable", verbose);
return NO("constructor of class assignable to throwable");
}
}
// Stage 2: At all optimization levels attempt to inline trivial methods: even if the
// inlined code never executes, the code-size impact is negligible and compile time
// usually improves, since we expect to eliminate the call instruction (or
// at worst replace one call instruction with another one).
if (!state.isInvokeInterface() && !staticCallee.isAbstract()) {
// the above test passes, so staticCallee is a concrete method (neither abstract nor an interface method)
if (state.getHasPreciseTarget() || !needsGuard(staticCallee)) {
// call is guardless
int inlinedSizeEstimate = inlinedSizeEstimate((NormalMethod) staticCallee, state);
if (inlinedSizeEstimate < opts.INLINE_MAX_ALWAYS_INLINE_TARGET_SIZE) {
// inlining is desirable
if (!state.getSequence().containsMethod(staticCallee)) {
// not recursive
reportUnguardedDecisionIfVerbose("YES: trivial guardless inline", verbose);
return YES(staticCallee, "trivial inline");
}
}
if (hasInlinePragma(staticCallee, state)) {
// inlining is desirable
if (!state.getSequence().containsMethod(staticCallee)) {
// not recursive
reportUnguardedDecisionIfVerbose("YES: pragma inline", verbose);
return YES(staticCallee, "pragma inline");
}
}
}
}
if (opts.getOptLevel() == 0) {
// at opt level 0, trivial unguarded inlines are the only kind we consider
reportUnguardedDecisionIfVerbose("NO: only do trivial inlines at O0", verbose);
return NO("Only do trivial inlines at O0");
}
// If the root method is already massive, only do trivial inlines. This limit applies only while
// the VM is running: when writing the boot image we prefer a better-optimized image rather
// than faster boot image compilation.
if (VM.runningVM && rootMethod.inlinedSizeEstimate() > opts.INLINE_MASSIVE_METHOD_SIZE) {
reportUnguardedDecisionIfVerbose("NO: only do trivial inlines into massive methods when the VM is running", verbose);
return NO("Root method is massive; no non-trivial inlines");
}
// Stage 3: Determine based on profile data and static information
// what are the possible targets of this call.
WeightedCallTargets targets = null;
boolean purelyStatic = true;
if (Controller.dcgAvailable() && Controller.options.ADAPTIVE_INLINING) {
targets = Controller.dcg.getCallTargets(caller, bcIndex);
if (targets != null) {
reportProfilingIfVerbose("Found profile data", verbose);
purelyStatic = false;
WeightedCallTargets filteredTargets = targets.filter(staticCallee, state.getHasPreciseTarget());
if (targets != filteredTargets) {
reportProfilingIfVerbose("Profiled callees filtered based on static information", verbose);
targets = filteredTargets;
if (targets == null) {
reportProfilingIfVerbose("After filterting no profile data...", verbose);
// After filtering, no matching profile data, fall back to
// static information to avoid degradations
targets = WeightedCallTargets.create(staticCallee, 0);
purelyStatic = true;
}
}
}
}
// Hold the class-load lock so the class hierarchy cannot change while
// we are inspecting it to determine how/whether to do the inline guard.
synchronized (RVMClass.classLoadListener) {
boolean guardOverrideOnStaticCallee = false;
if (targets == null) {
reportUnguardedDecisionIfVerbose("no profile data", verbose);
// No profile data: wrap the static information in a WeightedCallTargets so the code below is
// able to share all the decision making logic.
if (state.isInvokeInterface()) {
if (opts.INLINE_GUARDED_INTERFACES) {
RVMMethod singleImpl = InterfaceHierarchy.getUniqueImplementation(staticCallee);
if (singleImpl != null && hasBody(singleImpl)) {
if (verbose) {
VM.sysWriteln("\tFound a single implementation " + singleImpl + " of an interface method " + staticCallee);
}
targets = WeightedCallTargets.create(singleImpl, 0);
guardOverrideOnStaticCallee = true;
}
}
} else {
// invokestatic, invokevirtual, invokespecial
if (staticCallee.isAbstract()) {
// look for single non-abstract implementation of the abstract method
RVMClass klass = staticCallee.getDeclaringClass();
while (true) {
RVMClass[] subClasses = klass.getSubClasses();
// multiple subclasses => multiple targets
if (subClasses.length != 1)
break;
RVMMethod singleImpl = subClasses[0].findDeclaredMethod(staticCallee.getName(), staticCallee.getDescriptor());
if (singleImpl != null && !singleImpl.isAbstract()) {
// found something
reportProfilingIfVerbose("single impl of abstract method", verbose);
targets = WeightedCallTargets.create(singleImpl, 0);
guardOverrideOnStaticCallee = true;
break;
}
// keep crawling down the hierarchy
klass = subClasses[0];
}
} else {
targets = WeightedCallTargets.create(staticCallee, 0);
}
}
}
// If there is a precise target, then targets contains exactly that target method.
if (targets == null)
return NO("No potential targets identified");
// Stage 4: We have one or more targets. Determine what if anything should be done with them.
final ArrayList<RVMMethod> methodsToInline = new ArrayList<RVMMethod>();
final ArrayList<Boolean> methodsNeedGuard = new ArrayList<Boolean>();
final double callSiteWeight = targets.totalWeight();
// copy into a final local so the anonymous Visitor below can capture it (real closures anyone?)
final boolean goosc = guardOverrideOnStaticCallee;
// likewise for the purely-static flag
final boolean ps = purelyStatic;
targets.visitTargets(new WeightedCallTargets.Visitor() {
@Override
public void visit(RVMMethod callee, double weight) {
if (hasBody(callee)) {
reportInitialProfileState(verbose, callee, weight);
// Don't inline recursively and respect no inline pragmas
InlineSequence seq = state.getSequence();
if (seq.containsMethod(callee)) {
reportSelectionIfVerbose("Reject: recursive", verbose);
return;
}
if (hasNoInlinePragma(callee, state)) {
reportSelectionIfVerbose("Reject: noinline pragma", verbose);
return;
}
// more or less figure out the guard situation early -- impacts size estimate.
boolean needsGuard = !state.getHasPreciseTarget() && (staticCallee != callee || needsGuard(staticCallee));
if (needsGuard && isForbiddenSpeculation(state.getRootMethod(), callee)) {
reportSelectionIfVerbose("Reject: forbidden speculation", verbose);
return;
}
boolean currentlyFinal = (goosc || (staticCallee == callee)) && isCurrentlyFinal(callee, !opts.guardWithClassTest());
boolean preEx = needsGuard && state.getIsExtant() && opts.INLINE_PREEX && currentlyFinal;
if (needsGuard && !preEx) {
if (!opts.INLINE_GUARDED) {
reportSelectionIfVerbose("Reject: guarded inlining disabled", verbose);
return;
}
if (!currentlyFinal && ps) {
reportSelectionIfVerbose("Reject: multiple targets and no profile data", verbose);
return;
}
}
// Estimate cost of performing this inlining action.
// Includes cost of guard & off-branch call if they are going to be generated.
boolean decideYes = false;
if (hasInlinePragma(callee, state)) {
reportSelectionIfVerbose("Select: pragma inline", verbose);
decideYes = true;
} else {
// Preserve previous inlining decisions
// Not the best thing in the world due to phase shifts, but
// it does buy some degree of stability. So, it is probably the lesser
// of two evils.
CompiledMethod prev = state.getRootMethod().getCurrentCompiledMethod();
if (prev != null && prev.getCompilerType() == CompiledMethod.OPT) {
if (((OptCompiledMethod) prev).getMCMap().hasInlinedEdge(caller, bcIndex, callee)) {
reportSelectionIfVerbose("Select: Previously inlined", verbose);
decideYes = true;
}
}
if (!decideYes) {
int inlinedSizeEstimate = inlinedSizeEstimate((NormalMethod) callee, state);
int cost = inliningActionCost(inlinedSizeEstimate, needsGuard, preEx, opts);
int maxCost = opts.INLINE_MAX_TARGET_SIZE;
if (callSiteWeight > Controller.options.INLINE_AI_SEED_MULTIPLIER) {
// real profile data with enough samples for us to trust it.
// Use weight and shape of call site distribution to compute
// a higher maxCost.
double fractionOfSample = weight / callSiteWeight;
if (needsGuard && fractionOfSample < opts.INLINE_AI_MIN_CALLSITE_FRACTION) {
// This call accounts for less than INLINE_AI_MIN_CALLSITE_FRACTION
// of the profiled targets at this call site.
// It is highly unlikely to be profitable to inline it.
reportSelectionIfVerbose("Reject: less than INLINE_AI_MIN_CALLSITE_FRACTION of distribution", verbose);
maxCost = 0;
} else {
if (cost > maxCost) {
/* We're going to increase the maximum callee size (maxCost) we're willing
* to inline based on how "hot" (what % of the total weight in the
* dynamic call graph) the edge is.
*/
double adjustedWeight = AdaptiveInlining.adjustedWeight(weight);
if (adjustedWeight > Controller.options.INLINE_AI_HOT_CALLSITE_THRESHOLD) {
/* A truly hot edge; use the max allowable callee size */
maxCost = opts.INLINE_AI_MAX_TARGET_SIZE;
} else {
/* A warm edge, we will use a value between the static default and the max allowable.
* The code below simply does a linear interpolation between 2x static default
* and max allowable.
* Other alternatives would be to do a log interpolation or some other step function.
*/
int range = opts.INLINE_AI_MAX_TARGET_SIZE - 2 * opts.INLINE_MAX_TARGET_SIZE;
double slope = (range) / Controller.options.INLINE_AI_HOT_CALLSITE_THRESHOLD;
int scaledAdj = (int) (slope * adjustedWeight);
maxCost += opts.INLINE_MAX_TARGET_SIZE + scaledAdj;
}
}
}
}
// Somewhat bogus, but if we get really deeply inlined we start backing off.
int curDepth = state.getInlineDepth();
if (curDepth > opts.INLINE_MAX_INLINE_DEPTH) {
maxCost /= (curDepth - opts.INLINE_MAX_INLINE_DEPTH + 1);
}
decideYes = cost <= maxCost;
if (decideYes) {
reportSelectionIfVerbose("Accept: cost of " + cost + " was below threshold " + maxCost, verbose);
} else {
reportSelectionIfVerbose("Reject: cost of " + cost + " was above threshold " + maxCost, verbose);
}
}
}
if (decideYes) {
// Ok, we're going to inline it.
// Record that and also whether or not we think it needs a guard.
methodsToInline.add(callee);
if (preEx) {
ClassLoadingDependencyManager cldm = (ClassLoadingDependencyManager) RVMClass.classLoadListener;
if (ClassLoadingDependencyManager.TRACE || ClassLoadingDependencyManager.DEBUG) {
cldm.report("PREEX_INLINE: Inlined " + callee + " into " + caller);
}
cldm.addNotOverriddenDependency(callee, state.getCompiledMethod());
if (goosc) {
cldm.addNotOverriddenDependency(staticCallee, state.getCompiledMethod());
}
methodsNeedGuard.add(Boolean.FALSE);
} else {
methodsNeedGuard.add(needsGuard);
}
}
}
}
private void reportInitialProfileState(final boolean verbose, RVMMethod callee, double weight) {
double adjustedWeight = AdaptiveInlining.adjustedWeight(weight);
String sampleString = " samples (";
if (Double.isNaN(adjustedWeight)) {
sampleString += "no DCG available)";
} else {
sampleString += (100 * adjustedWeight) + "%)";
}
reportProfilingIfVerbose("Evaluating target " + callee + " with " + weight + sampleString, verbose);
}
});
// Stage 5: Choose guards and package up the results in an InlineDecision object
if (methodsToInline.isEmpty()) {
InlineDecision d = NO("No desirable targets");
reportGuardedDecisionIfVerbose(d, verbose);
return d;
} else if (methodsToInline.size() == 1) {
RVMMethod target = methodsToInline.get(0);
boolean needsGuard = methodsNeedGuard.get(0);
if (needsGuard) {
if ((guardOverrideOnStaticCallee || target == staticCallee) && isCurrentlyFinal(target, !opts.guardWithClassTest())) {
InlineDecision d = guardedYES(target, chooseGuard(caller, target, staticCallee, state, true), "Guarded inline of single static target");
/*
* Determine if it is allowable to put an OSR point in the failed case of
* the guarded inline instead of generating a real call instruction.
* There are several conditions that must be met for this to be allowable:
* (1) OSR guarded inlining and recompilation must both be enabled
* (2) The current context must be an interruptible method
* (3) The application must be started. This is a rough proxy for the VM
* being fully booted so we can actually get through the OSR process.
* Note: One implication of this requirement is that we will
* never put an OSR on an off-branch of a guarded inline in bootimage
* code.
*/
if (opts.OSR_GUARDED_INLINING && Controller.options.ENABLE_RECOMPILATION && caller.isInterruptible() && OptimizingCompiler.getAppStarted()) {
if (VM.VerifyAssertions)
VM._assert(VM.runningVM);
d.setOSRTestFailed();
}
if (verbose)
VM.sysWriteln("\tDecide: " + d);
return d;
} else {
InlineDecision d = guardedYES(target, chooseGuard(caller, target, staticCallee, state, false), "Guarded inlining of one potential target");
reportGuardedDecisionIfVerbose(d, verbose);
return d;
}
} else {
InlineDecision d = YES(target, "Unique and desirable target");
reportGuardedDecisionIfVerbose(d, verbose);
return d;
}
} else {
RVMMethod[] methods = new RVMMethod[methodsNeedGuard.size()];
byte[] guards = new byte[methods.length];
int idx = 0;
Iterator<RVMMethod> methodIterator = methodsToInline.iterator();
Iterator<Boolean> guardIterator = methodsNeedGuard.iterator();
while (methodIterator.hasNext()) {
RVMMethod target = methodIterator.next();
boolean needsGuard = guardIterator.next();
if (VM.VerifyAssertions) {
if (!needsGuard) {
VM.sysWriteln("Error, inlining for " + methodsToInline.size() + " targets");
VM.sysWriteln("Inlining into " + rootMethod + " at bytecode index " + bcIndex);
VM.sysWriteln("Method: " + target + " doesn't need a guard");
for (int i = 0; i < methodsToInline.size(); i++) {
VM.sysWriteln(" Method " + i + ": " + methodsToInline.get(i));
VM.sysWriteln(" NeedsGuard: " + methodsNeedGuard.get(i));
}
VM._assert(VM.NOT_REACHED);
}
}
methods[idx] = target;
guards[idx] = chooseGuard(caller, target, staticCallee, state, false);
idx++;
}
InlineDecision d = guardedYES(methods, guards, "Inline multiple targets");
reportGuardedDecisionIfVerbose(d, verbose);
return d;
}
}
}
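The least obvious arithmetic in shouldInline is the warm-edge scaling of maxCost in stage 4. The sketch below reproduces that interpolation as stand-alone Java; the class name and the constant values are invented stand-ins for the INLINE_MAX_TARGET_SIZE, INLINE_AI_MAX_TARGET_SIZE, and INLINE_AI_HOT_CALLSITE_THRESHOLD options read from OptOptions and Controller.options above, not JikesRVM's actual defaults.
public class WarmEdgeCostSketch {
    // Illustrative stand-ins for the option values; not JikesRVM's shipped defaults.
    static final int INLINE_MAX_TARGET_SIZE = 23;
    static final int INLINE_AI_MAX_TARGET_SIZE = 200;
    static final double INLINE_AI_HOT_CALLSITE_THRESHOLD = 2.0;

    // Size budget allowed for an edge of the given adjusted weight, mirroring the
    // "truly hot" and "warm" branches of shouldInline.
    static int maxCostFor(double adjustedWeight) {
        if (adjustedWeight > INLINE_AI_HOT_CALLSITE_THRESHOLD) {
            // truly hot edge: use the max allowable callee size
            return INLINE_AI_MAX_TARGET_SIZE;
        }
        // warm edge: interpolate linearly between 2x the static default and the max allowable
        int range = INLINE_AI_MAX_TARGET_SIZE - 2 * INLINE_MAX_TARGET_SIZE;
        double slope = range / INLINE_AI_HOT_CALLSITE_THRESHOLD;
        int scaledAdj = (int) (slope * adjustedWeight);
        // maxCost started at INLINE_MAX_TARGET_SIZE and the oracle adds INLINE_MAX_TARGET_SIZE + scaledAdj
        return 2 * INLINE_MAX_TARGET_SIZE + scaledAdj;
    }

    public static void main(String[] args) {
        for (double w = 0.0; w <= 2.0; w += 0.5) {
            System.out.println("adjustedWeight=" + w + " -> maxCost=" + maxCostFor(w));
        }
    }
}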
Use of org.jikesrvm.compilers.opt.runtimesupport.OptCompiledMethod in project JikesRVM by JikesRVM.
The class StackTrace, method countFrames.
/**
* Count number of stack frames including those inlined by the opt compiler
* @param first the first compiled method to look from
* @param last the last compiled method to look to
* @return the number of stack frames
*/
private int countFrames(int first, int last) {
int numElements = 0;
if (!VM.BuildForOptCompiler) {
numElements = last - first + 1;
} else {
for (int i = first; i <= last; i++) {
CompiledMethod compiledMethod = getCompiledMethod(i);
if ((compiledMethod == null) || (compiledMethod.getCompilerType() != CompiledMethod.OPT)) {
// Invisible or non-opt compiled method
numElements++;
} else {
Offset instructionOffset = Offset.fromIntSignExtend(instructionOffsets[i]);
OptCompiledMethod optInfo = (OptCompiledMethod) compiledMethod;
OptMachineCodeMap map = optInfo.getMCMap();
int iei = map.getInlineEncodingForMCOffset(instructionOffset);
if (iei < 0) {
numElements++;
} else {
int[] inlineEncoding = map.inlineEncoding;
for (; iei >= 0; iei = OptEncodedCallSiteTree.getParent(iei, inlineEncoding)) {
numElements++;
}
}
}
}
}
return numElements;
}
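For an opt-compiled frame, the loop above counts one logical frame per node on the chain of inlining parents. Below is a minimal stand-alone model of that walk, using a plain parent array in place of OptMachineCodeMap.inlineEncoding and OptEncodedCallSiteTree.getParent; the class and variable names are invented for illustration.
public class InlineChainSketch {
    // parent[i] is the index of i's inlining parent, or -1 for the outermost method.
    static int countInlinedFrames(int[] parent, int iei) {
        int frames = 0;
        for (int i = iei; i >= 0; i = parent[i]) {
            frames++;
        }
        return frames;
    }

    public static void main(String[] args) {
        // method 0 inlines method 1, which inlines method 2: one physical frame, three logical frames
        int[] parent = { -1, 0, 1 };
        System.out.println(countInlinedFrames(parent, 2));  // prints 3
    }
}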
Use of org.jikesrvm.compilers.opt.runtimesupport.OptCompiledMethod in project JikesRVM by JikesRVM.
The class CodeInstaller, method install.
public static boolean install(ExecutionState state, CompiledMethod cm) {
RVMThread thread = state.getThread();
byte[] stack = thread.getStack();
Offset tsfromFPOffset = state.getTSFPOffset();
Offset fooFPOffset = state.getFPOffset();
int foomid = Magic.getIntAtOffset(stack, fooFPOffset.plus(STACKFRAME_METHOD_ID_OFFSET));
CompiledMethod foo = CompiledMethods.getCompiledMethod(foomid);
int cType = foo.getCompilerType();
// this offset is used to adjust SP to FP right after return
// from a call. 1 stack slot for return address and
// 1 stack slot for saved FP of tsfrom.
Offset sp2fpOffset = fooFPOffset.minus(tsfromFPOffset).minus(2 * BYTES_IN_STACKSLOT);
// the Assembler should be given an estimated length; the second argument makes it
// print the generated instructions for debugging
Assembler asm = new Assembler(50, VM.TraceOnStackReplacement);
// 1. generate bridge instructions to recover saved registers
if (cType == CompiledMethod.BASELINE) {
// unwind stack pointer, SP is FP now
if (VM.BuildFor32Addr) {
asm.emitADD_Reg_Imm(SP, sp2fpOffset.toInt());
} else {
asm.emitADD_Reg_Imm_Quad(SP, sp2fpOffset.toInt());
}
asm.generateJTOCloadWord(S0, cm.getOsrJTOCoffset());
// restore saved EDI
if (VM.BuildFor32Addr) {
asm.emitMOV_Reg_RegDisp(EDI, SP, EDI_SAVE_OFFSET);
} else {
asm.emitMOV_Reg_RegDisp_Quad(EDI, SP, EDI_SAVE_OFFSET);
}
// restore saved EBX
if (VM.BuildFor32Addr) {
asm.emitMOV_Reg_RegDisp(EBX, SP, EBX_SAVE_OFFSET);
} else {
asm.emitMOV_Reg_RegDisp_Quad(EBX, SP, EBX_SAVE_OFFSET);
}
// restore frame pointer
asm.emitPOP_RegDisp(TR, ArchEntrypoints.framePointerField.getOffset());
// do not pop the return address and parameters;
// we make a fake call to the newly compiled method
asm.emitJMP_Reg(S0);
} else if (cType == CompiledMethod.OPT) {
// /////////////////////////////////////////////////
// recover saved registers from foo's stack frame
// /////////////////////////////////////////////////
OptCompiledMethod fooOpt = (OptCompiledMethod) foo;
// foo is definitely not a save-volatile method
boolean saveVolatile = fooOpt.isSaveVolatile();
if (VM.VerifyAssertions) {
VM._assert(!saveVolatile);
}
// assuming SP is still within foo's stack frame, restore foo's non-volatile GPRs
int firstNonVolatile = fooOpt.getFirstNonVolatileGPR();
int nonVolatiles = fooOpt.getNumberOfNonvolatileGPRs();
int nonVolatileOffset = fooOpt.getUnsignedNonVolatileOffset();
for (int i = firstNonVolatile; i < firstNonVolatile + nonVolatiles; i++) {
if (VM.BuildFor32Addr) {
asm.emitMOV_Reg_RegDisp(NONVOLATILE_GPRS[i], SP, sp2fpOffset.minus(nonVolatileOffset));
} else {
asm.emitMOV_Reg_RegDisp_Quad(NONVOLATILE_GPRS[i], SP, sp2fpOffset.minus(nonVolatileOffset));
}
nonVolatileOffset += BYTES_IN_STACKSLOT;
}
// adjust SP to frame pointer
if (VM.BuildFor32Addr) {
asm.emitADD_Reg_Imm(SP, sp2fpOffset.toInt());
} else {
asm.emitADD_Reg_Imm_Quad(SP, sp2fpOffset.toInt());
}
// restore frame pointer
asm.emitPOP_RegDisp(TR, ArchEntrypoints.framePointerField.getOffset());
// branch to the newly compiled instructions
asm.generateJTOCjmp(cm.getOsrJTOCoffset());
}
if (VM.TraceOnStackReplacement) {
VM.sysWrite("new CM instr addr ");
VM.sysWriteHex(Statics.getSlotContentsAsAddress(cm.getOsrJTOCoffset()));
VM.sysWriteln();
VM.sysWrite("JTOC register ");
VM.sysWriteHex(Magic.getTocPointer());
VM.sysWriteln();
VM.sysWrite("Thread register ");
VM.sysWriteHex(Magic.objectAsAddress(Magic.getThreadRegister()));
VM.sysWriteln();
VM.sysWriteln("tsfromFPOffset ", tsfromFPOffset);
VM.sysWriteln("fooFPOffset ", fooFPOffset);
VM.sysWriteln("SP + ", sp2fpOffset.plus(BYTES_IN_STACKSLOT));
}
// 3. set thread flags
thread.isWaitingForOsr = true;
thread.bridgeInstructions = asm.getMachineCodes();
thread.fooFPOffset = fooFPOffset;
thread.tsFPOffset = tsfromFPOffset;
Address bridgeaddr = Magic.objectAsAddress(thread.bridgeInstructions);
Memory.sync(bridgeaddr, thread.bridgeInstructions.length() << LG_INSTRUCTION_WIDTH);
AOSLogging.logger.logOsrEvent("OSR code installation succeeded");
return true;
}
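The bridge code above hinges on sp2fpOffset: the distance SP must move so that it lands on foo's frame pointer after the fake call returns, skipping the return-address slot and the saved FP slot of the threadSwitchFrom frame. A stand-alone sketch of that arithmetic; the class name and concrete offsets are invented for illustration.
public class Sp2FpSketch {
    static int sp2fpOffset(int fooFPOffset, int tsfromFPOffset, int bytesInStackslot) {
        // distance from the threadSwitchFrom frame pointer up to foo's frame pointer, minus
        // one slot for the return address and one slot for the saved FP of tsfrom
        return fooFPOffset - tsfromFPOffset - 2 * bytesInStackslot;
    }

    public static void main(String[] args) {
        // e.g. 32-bit slots, foo's FP 0x30 bytes above tsfrom's FP (illustrative values only)
        System.out.println(Integer.toHexString(sp2fpOffset(0x100, 0x0d0, 4)));  // prints 28
    }
}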
Use of org.jikesrvm.compilers.opt.runtimesupport.OptCompiledMethod in project JikesRVM by JikesRVM.
The class OptExecutionStateExtractor, method extractState.
@Override
public ExecutionState extractState(RVMThread thread, Offset osrFPoff, Offset methFPoff, int cmid) {
/* perform machine and compiler dependent operations here
* osrFPoff is the fp offset of
* OptSaveVolatile.threadSwitchFrom<...>
*
*  (stack grows downward)
*          foo
*     |->      <-- methFPoff
*     |
*     |     <tsfrom>
*     |--      <-- osrFPoff
*
*
* The <tsfrom> saves all volatiles, nonvolatiles, and scratch
* registers. All register values for 'foo' can be obtained from
* the register save area of '<tsfrom>' method.
*/
byte[] stack = thread.getStack();
// get registers for the caller (the real method)
TempRegisters registers = new TempRegisters(thread.getContextRegisters());
if (VM.VerifyAssertions) {
int foocmid = Magic.getIntAtOffset(stack, methFPoff.plus(STACKFRAME_METHOD_ID_OFFSET));
if (foocmid != cmid) {
for (Offset o = osrFPoff; o.sGE(methFPoff.minus(2 * BYTES_IN_ADDRESS)); o = o.minus(BYTES_IN_ADDRESS)) {
VM.sysWriteHex(Magic.objectAsAddress(stack).plus(o));
VM.sysWrite(" : ");
VM.sysWriteHex(Magic.getWordAtOffset(stack, o).toAddress());
VM.sysWriteln();
}
CompiledMethod cm = CompiledMethods.getCompiledMethod(cmid);
VM.sysWriteln("unmatch method, it should be " + cm.getMethod());
CompiledMethod foo = CompiledMethods.getCompiledMethod(foocmid);
VM.sysWriteln("but now it is " + foo.getMethod());
walkOnStack(stack, osrFPoff);
}
VM._assert(foocmid == cmid);
}
OptCompiledMethod fooCM = (OptCompiledMethod) CompiledMethods.getCompiledMethod(cmid);
/* The following code gets the machine code offset of the
* next instruction. All operations on the stack frame
* are kept within a GC critical section:
* no code in the section may trigger any GC
* activity or lazy compilation.
*/
// get the next machine code offset of the real method
VM.disableGC();
Address methFP = Magic.objectAsAddress(stack).plus(methFPoff);
Address nextIP = Magic.getNextInstructionAddress(methFP);
Offset ipOffset = fooCM.getInstructionOffset(nextIP);
VM.enableGC();
EncodedOSRMap fooOSRMap = fooCM.getOSRMap();
/* Get the register reference map from the OSR map.
* We use this map to convert addresses to objects,
* so that we can operate on objects outside the GC critical section.
*/
int regmap = fooOSRMap.getRegisterMapForMCOffset(ipOffset);
{
int bufCMID = Magic.getIntAtOffset(stack, osrFPoff.plus(STACKFRAME_METHOD_ID_OFFSET));
CompiledMethod bufCM = CompiledMethods.getCompiledMethod(bufCMID);
// SaveVolatile can only be compiled by OPT compiler
if (VM.VerifyAssertions) {
VM._assert(bufCM instanceof OptCompiledMethod);
}
restoreValuesFromOptSaveVolatile(stack, osrFPoff, registers, regmap, bufCM);
}
// return a list of states, from caller to callee;
// if the OSR happens in an inlined method, the state is
// a chain of recovered methods.
ExecutionState state = getExecStateSequence(thread, stack, ipOffset, methFPoff, cmid, osrFPoff, registers, fooOSRMap);
// reverse the callerState pointers
ExecutionState prevState = null;
ExecutionState nextState = state;
while (nextState != null) {
// take the current node
state = nextState;
// 1. hold the next state first
nextState = nextState.callerState;
// 2. redirect pointer
state.callerState = prevState;
// 3. move prev to current
prevState = state;
}
if (VM.TraceOnStackReplacement) {
VM.sysWriteln("OptExecutionState : recovered states");
ExecutionState temp = state;
while (temp != null) {
VM.sysWriteln(temp.toString());
temp = temp.callerState;
}
}
return state;
}
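The loop that flips callerState pointers is an ordinary in-place reversal of a singly linked chain, turning the callee-to-caller order returned by getExecStateSequence into caller-to-callee order. The same pattern on a plain Java node type (class and field names invented for illustration):
public class ReverseChainSketch {
    static final class Node {
        Node next;        // plays the role of ExecutionState.callerState above
        final String name;
        Node(String name, Node next) { this.name = name; this.next = next; }
    }

    static Node reverse(Node head) {
        Node prev = null;
        Node cur = head;
        while (cur != null) {
            Node next = cur.next;  // hold the next node first
            cur.next = prev;       // redirect the pointer
            prev = cur;            // move prev to current
            cur = next;
        }
        return prev;               // new head (formerly the tail)
    }

    public static void main(String[] args) {
        Node chain = new Node("callee", new Node("caller", new Node("root", null)));
        for (Node n = reverse(chain); n != null; n = n.next) {
            System.out.println(n.name);  // prints root, caller, callee
        }
    }
}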
Use of org.jikesrvm.compilers.opt.runtimesupport.OptCompiledMethod in project JikesRVM by JikesRVM.
The class TraceInterface, method skipOwnFramesAndDump.
@Override
@NoInline
// This can't be uninterruptible --- it is an IO routine
@Interruptible
public Address skipOwnFramesAndDump(ObjectReference typeRef) {
TIB tib = Magic.addressAsTIB(typeRef.toAddress());
RVMMethod m = null;
int bci = -1;
int compiledMethodID = 0;
Offset ipOffset = Offset.zero();
Address fp = Magic.getFramePointer();
Address ip = Magic.getReturnAddressUnchecked(fp);
fp = Magic.getCallerFramePointer(fp);
// This code borrows heavily from RVMThread.dumpStack
final Address STACKFRAME_SENTINEL_FP = StackFrameLayout.getStackFrameSentinelFP();
final int INVISIBLE_METHOD_ID = StackFrameLayout.getInvisibleMethodID();
while (Magic.getCallerFramePointer(fp).NE(STACKFRAME_SENTINEL_FP)) {
compiledMethodID = Magic.getCompiledMethodID(fp);
if (compiledMethodID != INVISIBLE_METHOD_ID) {
// normal java frame(s)
CompiledMethod compiledMethod = CompiledMethods.getCompiledMethod(compiledMethodID);
if (compiledMethod.getCompilerType() != CompiledMethod.TRAP) {
ipOffset = compiledMethod.getInstructionOffset(ip);
m = compiledMethod.getMethod();
if (VM.BuildForOptCompiler && compiledMethod.getCompilerType() == CompiledMethod.OPT) {
OptCompiledMethod optInfo = (OptCompiledMethod) compiledMethod;
/* Opt stack frames may contain multiple inlined methods. */
OptMachineCodeMap map = optInfo.getMCMap();
int iei = map.getInlineEncodingForMCOffset(ipOffset);
if (iei >= 0) {
int[] inlineEncoding = map.inlineEncoding;
boolean allocCall = true;
bci = map.getBytecodeIndexForMCOffset(ipOffset);
for (int j = iei; j >= 0 && allocCall; j = OptEncodedCallSiteTree.getParent(j, inlineEncoding)) {
int mid = OptEncodedCallSiteTree.getMethodID(j, inlineEncoding);
m = MemberReference.getMethodRef(mid).getResolvedMember();
if (!isAllocCall(m.getName().getBytes()))
allocCall = false;
if (j > 0)
bci = OptEncodedCallSiteTree.getByteCodeOffset(j, inlineEncoding);
}
if (!allocCall)
break;
}
} else {
if (!isAllocCall(m.getName().getBytes())) {
BaselineCompiledMethod baseInfo = (BaselineCompiledMethod) compiledMethod;
final int INSTRUCTION_WIDTH = ArchConstants.getInstructionWidth();
bci = baseInfo.findBytecodeIndexForInstruction(ipOffset.toWord().lsh(INSTRUCTION_WIDTH).toOffset());
break;
}
}
}
}
ip = Magic.getReturnAddressUnchecked(fp);
fp = Magic.getCallerFramePointer(fp);
}
if (m != null) {
int allocid = (((compiledMethodID & 0x0000ffff) << 15) ^ ((compiledMethodID & 0xffff0000) >> 16) ^ ipOffset.toInt()) & ~0x80000000;
/* Now print the location string. */
VM.sysWrite('\n');
VM.writeHex(allocid);
VM.sysWrite('-');
VM.sysWrite('>');
VM.sysWrite('[');
VM.writeHex(compiledMethodID);
VM.sysWrite(']');
m.getDeclaringClass().getDescriptor().sysWrite();
VM.sysWrite(':');
m.getName().sysWrite();
m.getDescriptor().sysWrite();
VM.sysWrite(':');
VM.writeHex(bci);
VM.sysWrite('\t');
RVMType type = tib.getType();
type.getDescriptor().sysWrite();
VM.sysWrite('\n');
}
return fp;
}
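The allocid expression near the end folds the compiled method id and the machine-code offset into a single non-negative identifier. A stand-alone version of the same bit manipulation; the class name and input values are invented for illustration.
public class AllocIdSketch {
    static int allocId(int compiledMethodID, int ipOffset) {
        int low = (compiledMethodID & 0x0000ffff) << 15;   // low 16 bits of the cmid, shifted up
        int high = (compiledMethodID & 0xffff0000) >> 16;  // high 16 bits of the cmid, shifted down
        return (low ^ high ^ ipOffset) & ~0x80000000;      // fold in the offset, clear the sign bit
    }

    public static void main(String[] args) {
        System.out.println(Integer.toHexString(allocId(0x00120034, 0x1c)));  // prints 1a000e
    }
}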