use of org.jikesrvm.compilers.common.assembler.ForwardReference in project JikesRVM by JikesRVM.
the class JNICompiler method genSVR4ParameterPassingCode.
/**
* Generates instructions to copy parameters from RVM convention to OS convention.
* @param asm The {@link Assembler} object
* @param types The parameter types
* @param nextVMArgReg The first parameter GPR in RVM convention,
* the last parameter GPR is defined as LAST_VOLATILE_GPR.
* @param nextVMArgFloatReg The first parameter FPR in RVM convention,
* the last parameter FPR is defined as LAST_VOLATILE_FPR.
* @param spillOffsetVM The spill offset (related to FP) in RVM convention
* @param nextOSArgReg the first parameter GPR in OS convention,
* the last parameter GPR is defined as LAST_OS_PARAMETER_GPR.
* @param nextOSArgFloatReg The first parameter FPR in OS convention,
* the last parameter FPR is defined as LAST_OS_PARAMETER_FPR.
* @param spillOffsetOS The spill offset (related to FP) in OS convention
*/
private static void genSVR4ParameterPassingCode(Assembler asm, TypeReference[] types, int nextVMArgReg, int nextVMArgFloatReg, int spillOffsetVM, int nextOSArgReg, int nextOSArgFloatReg, int spillOffsetOS) {
if (VM.BuildForSVR4ABI) {
// create one Assembler object for each argument
// This is needed for the following reason:
// -2 new arguments are added in front for native methods, so the normal arguments
// need to be shifted down in addition to being moved
// -to avoid overwriting each other, the arguments must be copied in reverse order
// -the analysis for mapping however must be done in forward order
// -the moving/mapping for each argument may involve a sequence of 1-3 instructions
// which must be kept in the normal order
// To solve this problem, the instructions for each argument is generated in its
// own Assembler in the forward pass, then in the reverse pass, each Assembler
// emist the instruction sequence and copies it into the main Assembler
int numArguments = types.length;
Assembler[] asmForArgs = new Assembler[numArguments];
for (int arg = 0; arg < numArguments; arg++) {
asmForArgs[arg] = new Assembler(0);
Assembler asmArg = asmForArgs[arg];
//
if (types[arg].isFloatingPointType()) {
boolean is32bits = types[arg].isFloatType();
// 1. check the source, the value will be in srcVMArg
// scratch fpr
FPR srcVMArg;
if (nextVMArgFloatReg <= LAST_VOLATILE_FPR.value()) {
srcVMArg = FPR.lookup(nextVMArgFloatReg);
nextVMArgFloatReg++;
} else {
srcVMArg = FIRST_SCRATCH_FPR;
// VM float reg is in spill area
if (is32bits) {
spillOffsetVM += BYTES_IN_STACKSLOT;
asmArg.emitLFS(srcVMArg, spillOffsetVM - BYTES_IN_FLOAT, FP);
} else {
asmArg.emitLFD(srcVMArg, spillOffsetVM, FP);
spillOffsetVM += BYTES_IN_DOUBLE;
}
}
// 2. check the destination,
if (nextOSArgFloatReg <= LAST_OS_PARAMETER_FPR.value()) {
// leave it there
nextOSArgFloatReg++;
} else {
if (VM.BuildForSVR4ABI) {
if (is32bits) {
asmArg.emitSTFS(srcVMArg, spillOffsetOS, FP);
spillOffsetOS += BYTES_IN_ADDRESS;
} else {
// spill it, round the spill address to 8
// assuming FP is aligned to 8
spillOffsetOS = (spillOffsetOS + 7) & -8;
asmArg.emitSTFD(srcVMArg, spillOffsetOS, FP);
spillOffsetOS += BYTES_IN_DOUBLE;
}
}
}
// for 64-bit long arguments
} else if (types[arg].isLongType() && VM.BuildFor32Addr) {
// handle OS first
boolean dstSpilling;
// it is register number or spilling offset
int regOrSpilling = -1;
// 1. check if Linux register > 9
if (nextOSArgReg > (LAST_OS_PARAMETER_GPR.value() - 1)) {
// goes to spilling area
dstSpilling = true;
if (VM.BuildForSVR4ABI) {
/* NOTE: following adjustment is not stated in SVR4 ABI, but
* was implemented in GCC.
* -- Feng
*/
nextOSArgReg = LAST_OS_PARAMETER_GPR.value() + 1;
// do alignment and compute spilling offset
spillOffsetOS = (spillOffsetOS + 7) & -8;
regOrSpilling = spillOffsetOS;
spillOffsetOS += BYTES_IN_LONG;
}
} else {
// use registers
dstSpilling = false;
if (VM.BuildForSVR4ABI) {
// rounds to odd
// if gpr is even, gpr += 1
nextOSArgReg += (nextOSArgReg + 1) & 0x01;
regOrSpilling = nextOSArgReg;
nextOSArgReg += 2;
}
}
// handle RVM source
if (nextVMArgReg < LAST_VOLATILE_GPR.value()) {
// both parts in registers
if (dstSpilling) {
asmArg.emitSTW(GPR.lookup(nextVMArgReg + 1), regOrSpilling + 4, FP);
if (VM.BuildForSVR4ABI) {
asmArg.emitSTW(GPR.lookup(nextVMArgReg), regOrSpilling, FP);
}
} else {
asmArg.emitMR(GPR.lookup(regOrSpilling + 1), GPR.lookup(nextVMArgReg + 1));
asmArg.emitMR(GPR.lookup(regOrSpilling), GPR.lookup(nextVMArgReg));
}
// advance register counting, Linux register number
// already advanced
nextVMArgReg += 2;
} else if (nextVMArgReg == LAST_VOLATILE_GPR.value()) {
// VM striding
if (dstSpilling) {
asmArg.emitLWZ(REGISTER_ZERO, spillOffsetVM, FP);
asmArg.emitSTW(REGISTER_ZERO, regOrSpilling + 4, FP);
asmArg.emitSTW(GPR.lookup(nextVMArgReg), regOrSpilling, FP);
} else {
asmArg.emitLWZ(GPR.lookup(regOrSpilling + 1), spillOffsetVM, FP);
asmArg.emitMR(GPR.lookup(regOrSpilling), GPR.lookup(nextVMArgReg));
}
// advance spillOffsetVM and nextVMArgReg
nextVMArgReg++;
spillOffsetVM += BYTES_IN_STACKSLOT;
} else if (nextVMArgReg > LAST_VOLATILE_GPR.value()) {
if (dstSpilling) {
asmArg.emitLFD(FIRST_SCRATCH_FPR, spillOffsetVM, FP);
asmArg.emitSTFD(FIRST_SCRATCH_FPR, regOrSpilling, FP);
} else {
// this shouldnot happen, VM spills, OS has registers
asmArg.emitLWZ(GPR.lookup(regOrSpilling + 1), spillOffsetVM + 4, FP);
asmArg.emitLWZ(GPR.lookup(regOrSpilling), spillOffsetVM, FP);
}
spillOffsetVM += BYTES_IN_LONG;
}
} else if (types[arg].isLongType() && VM.BuildFor64Addr) {
// handle OS first
boolean dstSpilling;
// it is register number or spilling offset
int regOrSpilling = -1;
// 1. check if Linux register > 9
if (nextOSArgReg > LAST_OS_PARAMETER_GPR.value()) {
// goes to spilling area
dstSpilling = true;
/* NOTE: following adjustment is not stated in SVR4 ABI, but
* was implemented in GCC.
* -- Feng
*/
nextOSArgReg = LAST_OS_PARAMETER_GPR.value() + 1;
// do alignment and compute spilling offset
spillOffsetOS = (spillOffsetOS + 7) & -8;
regOrSpilling = spillOffsetOS;
spillOffsetOS += BYTES_IN_LONG;
} else {
// use registers
dstSpilling = false;
// rounds to odd
regOrSpilling = nextOSArgReg;
nextOSArgReg += 1;
}
// handle RVM source
if (nextVMArgReg <= LAST_VOLATILE_GPR.value()) {
// both parts in registers
if (dstSpilling) {
asmArg.emitSTD(GPR.lookup(nextVMArgReg), regOrSpilling, FP);
} else {
asmArg.emitMR(GPR.lookup(regOrSpilling), GPR.lookup(nextVMArgReg));
}
// advance register counting, Linux register number
// already advanced
nextVMArgReg += 1;
} else if (nextVMArgReg > LAST_VOLATILE_GPR.value()) {
if (dstSpilling) {
asmArg.emitLFD(FIRST_SCRATCH_FPR, spillOffsetVM, FP);
asmArg.emitSTFD(FIRST_SCRATCH_FPR, regOrSpilling, FP);
} else {
// this shouldnot happen, VM spills, OS has registers;
asmArg.emitLD(GPR.lookup(regOrSpilling), spillOffsetVM, FP);
}
spillOffsetVM += BYTES_IN_LONG;
}
} else if (types[arg].isReferenceType()) {
// For reference type, replace with handles before passing to native
GPR srcreg;
if (nextVMArgReg <= LAST_VOLATILE_GPR.value()) {
srcreg = GPR.lookup(nextVMArgReg++);
} else {
srcreg = REGISTER_ZERO;
asmArg.emitLAddr(srcreg, spillOffsetVM, FP);
spillOffsetVM += BYTES_IN_ADDRESS;
}
// Are we passing NULL?
asmArg.emitCMPI(srcreg, 0);
ForwardReference isNull = asmArg.emitForwardBC(EQ);
// NO: put it in the JNIRefs array and pass offset
asmArg.emitSTAddrU(srcreg, BYTES_IN_ADDRESS, KLUDGE_TI_REG);
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
asmArg.emitSUBFC(GPR.lookup(nextOSArgReg), THREAD_REGISTER, KLUDGE_TI_REG);
} else {
asmArg.emitSUBFC(REGISTER_ZERO, THREAD_REGISTER, KLUDGE_TI_REG);
asmArg.emitSTAddr(REGISTER_ZERO, spillOffsetOS, FP);
}
ForwardReference done = asmArg.emitForwardB();
// YES: pass NULL (0)
isNull.resolve(asmArg);
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
asmArg.emitLVAL(GPR.lookup(nextOSArgReg), 0);
} else {
asmArg.emitSTAddr(srcreg, spillOffsetOS, FP);
}
// JOIN PATHS
done.resolve(asmArg);
if (VM.BuildForSVR4ABI) {
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
nextOSArgReg++;
} else {
spillOffsetOS += BYTES_IN_ADDRESS;
}
}
} else {
// (1a) fit in OS register, move the register
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
if (VM.BuildForSVR4ABI) {
asmArg.emitMR(GPR.lookup(nextOSArgReg++), GPR.lookup(nextVMArgReg++));
} else {
asmArg.emitMR(GPR.lookup(nextOSArgReg), GPR.lookup(nextVMArgReg++));
}
} else if (nextVMArgReg <= LAST_VOLATILE_GPR.value()) {
// (1b) spill OS register, but still fit in VM register
asmArg.emitSTAddr(GPR.lookup(nextVMArgReg++), spillOffsetOS, FP);
if (VM.BuildForSVR4ABI) {
spillOffsetOS += BYTES_IN_ADDRESS;
}
} else {
// (1c) spill VM register
spillOffsetVM += BYTES_IN_STACKSLOT;
// retrieve arg from VM spill area
asmArg.emitLInt(REGISTER_ZERO, spillOffsetVM - BYTES_IN_INT, FP);
asmArg.emitSTAddr(REGISTER_ZERO, spillOffsetOS, FP);
if (VM.BuildForSVR4ABI) {
spillOffsetOS += BYTES_IN_ADDRESS;
}
}
}
}
// so that the move does not overwrite the parameters
for (int arg = asmForArgs.length - 1; arg >= 0; arg--) {
asm.appendInstructions(asmForArgs[arg].getMachineCodes());
}
}
}
use of org.jikesrvm.compilers.common.assembler.ForwardReference in project JikesRVM by JikesRVM.
the class JNICompiler method compile.
/**
* This method creates the stub to link native method. It will be called
* from the lazy linker the first time a native method is invoked. The stub
* generated will be patched by the lazy linker to link to the native method
* for all future calls. <p>
* <pre>
* The stub performs the following tasks in the prologue:
* <ol>
* <li>Allocate the glue frame
* <li>Save the TR and JTOC registers in the JNI Environment for reentering Java later
* <li>Shuffle the parameters in the registers to conform to the OS calling convention
* <li>Save the nonvolatile registers in a known space in the frame to be used
* for the GC stack map
* <li>Push a new JREF frame on the JNIRefs stack
* <li>Supply the first JNI argument: the JNI environment pointer
* <li>Supply the second JNI argument: class object if static, "this" if virtual
* </ol>
* <p>
* The stub performs the following tasks in the epilogue:
* <ol>
* <li>Restore TR and JTOC registers saved in JNI Environment
* <li>Restore the nonvolatile registers if GC has occurred
* <li>Pop the JREF frame off the JNIRefs stack
* <li>Check for pending exception and deliver to Java caller if present
* <li>Process the return value from native: push onto caller's Java stack
* </ol>
* <p>
* Within the stackframe, we have two frames.
* The "main" frame exactly follows the OS native ABI and is therefore
* different for each ABI.
* The "mini-frame" is identical on all platforms and is stores RVM-specific fields.
* The picture below shows the frames for 64-bit PowerPC ELF ABI.
* <pre>
*
* | fp | <- native frame
* | cr |
* | lr |
* | resv |
* | resv |
* + toc +
* | |
* | |
* |----------| <- Java to C glue frame using native calling conventions
* | fp | saved fp of mini-frame
* | cr |
* | lr | native caller saves return address of native method here
* | resv |
* | resv |
* + toc +
* | 0 | spill area (at least 8 words reserved)
* | 1 | (also used for saving volatile regs during calls in prolog)
* | 2 |
* | 3 |
* | 4 |
* | 5 |
* | 6 |
* | 7 |
* | ... |
* |----------| <- mini-frame for use by RVM stackwalkers
* | fp | saved fp of Java caller <- JNI_SAVE_AREA_OFFSET
* | mid | cmid of native method
* | xxx (lr) | lr slot not used in mini frame
* |GC flag | did GC happen while thread in native? <- JNI_GC_FLAG_OFFSET
* |ENV | JNIEnvironment <- JNI_ENV_OFFSET
* |RVM nonvol| save RVM nonvolatile GPRs for updating by GC stack mapper
* | ... |
* |RVM nonvol| <- JNI_RVM_NONVOLATILE_OFFSET
* |----------|
* | fp | <- Java caller frame
* | mid |
* | xxx |
* | |
* | |
* | |
* |----------|
* | |
* </pre>
* <p>
* Runtime.unwindNativeStackFrame will return a pointer to the mini-frame
* because none of our stack walkers need to do anything with the main frame.
*/
public static synchronized CompiledMethod compile(NativeMethod method) {
JNICompiledMethod cm = (JNICompiledMethod) CompiledMethods.createCompiledMethod(method, CompiledMethod.JNI);
int compiledMethodId = cm.getId();
Assembler asm = new Assembler(0);
int frameSize = getFrameSize(method);
RVMClass klass = method.getDeclaringClass();
// need 4 gp temps
if (VM.VerifyAssertions)
VM._assert(T3.value() <= LAST_VOLATILE_GPR.value());
// need 4 fp temps
if (VM.VerifyAssertions)
VM._assert(F3.value() <= LAST_VOLATILE_FPR.value());
if (VM.VerifyAssertions)
VM._assert(S0.value() < S1.value() && // need 2 scratch
S1.value() <= LAST_SCRATCH_GPR.value());
Address nativeIP = method.getNativeIP();
Address nativeTOC = method.getNativeTOC();
// NOTE: this must be done before the condition Thread.hasNativeStackFrame() become true
// so that the first Java to C transition will be allowed to resize the stack
// (currently, this is true when the JNIRefsTop index has been incremented from 0)
// add at least 14 for C frame (header + spill)
asm.emitNativeStackOverflowCheck(frameSize + 14);
// save return address in caller frame
asm.emitMFLR(REGISTER_ZERO);
asm.emitSTAddr(REGISTER_ZERO, STACKFRAME_RETURN_ADDRESS_OFFSET.toInt(), FP);
// buy mini frame
asm.emitSTAddrU(FP, -JNI_SAVE_AREA_SIZE, FP);
// store CMID for native method in mini-frame
asm.emitLVAL(S0, compiledMethodId);
asm.emitSTW(S0, STACKFRAME_METHOD_ID_OFFSET.toInt(), FP);
// buy main frame, the total size equals to frameSize
asm.emitSTAddrU(FP, -frameSize + JNI_SAVE_AREA_SIZE, FP);
// establish S0 -> threads JNIEnv structure
asm.emitLAddrOffset(S0, THREAD_REGISTER, Entrypoints.jniEnvField.getOffset());
// save the TR register in the JNIEnvironment object for possible calls back into Java
asm.emitSTAddrOffset(THREAD_REGISTER, S0, Entrypoints.JNIEnvSavedTRField.getOffset());
// save the JNIEnvironment in the stack frame so we can use it to acquire the TR
// when we return from native code.
// save TR in frame
asm.emitSTAddr(S0, frameSize - JNI_ENV_OFFSET, FP);
// save mini-frame frame pointer in JNIEnv, JNITopJavaFP, which will be the frame
// to start scanning this stack during GC, if top of stack is still executing in C
asm.emitLAddr(THREAD_REGISTER, 0, FP);
asm.emitSTAddrOffset(THREAD_REGISTER, S0, Entrypoints.JNITopJavaFPField.getOffset());
// save the RVM nonvolatile GPRs, to be scanned by GC stack mapper
for (int i = LAST_NONVOLATILE_GPR.value(), offset = JNI_RVM_NONVOLATILE_OFFSET; i >= FIRST_NONVOLATILE_GPR.value(); --i, offset += BYTES_IN_STACKSLOT) {
asm.emitSTAddr(GPR.lookup(i), frameSize - offset, FP);
}
// clear the GC flag on entry to native code
// use TR as scratch
asm.emitLVAL(THREAD_REGISTER, 0);
asm.emitSTW(THREAD_REGISTER, frameSize - JNI_GC_FLAG_OFFSET, FP);
// generate the code to map the parameters to OS convention and add the
// second parameter (either the "this" ptr or class if a static method).
// The JNI Function ptr first parameter is set before making the call
// by the out of line machine code we invoke below.
// Opens a new frame in the JNIRefs table to register the references.
// Assumes S0 set to JNIEnv, kills KLUDGE_TI_REG, S1 & THREAD_REGISTER
// On return, S0 still contains JNIEnv
storeParameters(asm, frameSize, method, klass);
//
// Load required JNI function ptr into first parameter reg (GPR3/T0)
// This pointer is an interior pointer to the JNIEnvironment which is
// currently in S0.
//
asm.emitADDI(T0, Entrypoints.JNIExternalFunctionsField.getOffset(), S0);
//
// change the status of the thread to IN_JNI
//
asm.emitLAddrOffset(THREAD_REGISTER, S0, Entrypoints.JNIEnvSavedTRField.getOffset());
asm.emitLVALAddr(S1, Entrypoints.execStatusField.getOffset());
// get status for thread
asm.emitLWARX(S0, S1, THREAD_REGISTER);
// we should be in java code?
asm.emitCMPI(S0, RVMThread.IN_JAVA + (RVMThread.ALWAYS_LOCK_ON_STATE_TRANSITION ? 100 : 0));
ForwardReference notInJava = asm.emitForwardBC(NE);
// S0 <- new state value
asm.emitLVAL(S0, RVMThread.IN_JNI);
// attempt to change state to IN_JNI
asm.emitSTWCXr(S0, S1, THREAD_REGISTER);
// branch if success over slow path
ForwardReference enteredJNIRef = asm.emitForwardBC(EQ);
notInJava.resolve(asm);
asm.emitLAddrOffset(S0, THREAD_REGISTER, Entrypoints.threadContextRegistersField.getOffset());
asm.emitLAddrOffset(S1, JTOC, ArchEntrypoints.saveVolatilesInstructionsField.getOffset());
asm.emitMTLR(S1);
asm.emitBCLRL();
// NOTE: THREAD_REGISTER should still have the thread
// pointer, since up to this point we would have saved it but not
// overwritten it.
// call into our friendly slow path function. note that this should
// work because:
// 1) we're not calling from C so we don't care what registers are
// considered non-volatile in C
// 2) all Java non-volatiles have been saved
// 3) the only other registers we need - TR and S0 are taken care
// of (see above)
// 4) the prologue and epilogue will take care of the frame pointer
// accordingly (it will just save it on the stack and then restore
// it - so we don't even have to know what its value is here)
// the only thing we have to make sure of is that MMTk ignores the
// framePointer field in RVMThread and uses the one in the JNI
// environment instead (see Collection.prepareMutator)...
// T1 gets address of function
asm.emitLAddrOffset(S1, JTOC, Entrypoints.enterJNIBlockedFromCallIntoNativeMethod.getOffset());
asm.emitMTLR(S1);
// call RVMThread.enterJNIBlocked
asm.emitBCLRL();
asm.emitLAddrOffset(S0, THREAD_REGISTER, Entrypoints.threadContextRegistersField.getOffset());
asm.emitLAddrOffset(S1, JTOC, ArchEntrypoints.restoreVolatilesInstructionsField.getOffset());
asm.emitMTLR(S1);
asm.emitBCLRL();
// come here when we're done
enteredJNIRef.resolve(asm);
// set the TOC and IP for branch to out_of_line code
asm.emitLVALAddr(JTOC, nativeTOC);
asm.emitLVALAddr(S1, nativeIP);
// move native code address to CTR reg;
// do this early so that S1 will be available as a scratch.
asm.emitMTCTR(S1);
//
// CALL NATIVE METHOD
//
asm.emitBCCTRL();
// if we have to call sysVirtualProcessorYield because we are locked in native.
if (VM.BuildFor64Addr) {
asm.emitSTD(T0, NATIVE_FRAME_HEADER_SIZE, FP);
} else {
asm.emitSTW(T0, NATIVE_FRAME_HEADER_SIZE, FP);
asm.emitSTW(T1, NATIVE_FRAME_HEADER_SIZE + BYTES_IN_ADDRESS, FP);
}
//
// try to return thread status to IN_JAVA
//
int label1 = asm.getMachineCodeIndex();
// TODO: we can do this directly from FP because we know framesize at compiletime
// (the same way we stored the JNI Env above)
// get mini-frame
asm.emitLAddr(S0, 0, FP);
// get Java caller FP
asm.emitLAddr(S0, 0, S0);
// load JNIEnvironment into TR
asm.emitLAddr(THREAD_REGISTER, -JNI_ENV_OFFSET, S0);
// Restore JTOC and TR
asm.emitLAddrOffset(JTOC, THREAD_REGISTER, Entrypoints.JNIEnvSavedJTOCField.getOffset());
asm.emitLAddrOffset(THREAD_REGISTER, THREAD_REGISTER, Entrypoints.JNIEnvSavedTRField.getOffset());
asm.emitLVALAddr(S1, Entrypoints.execStatusField.getOffset());
// get status for processor
asm.emitLWARX(S0, S1, THREAD_REGISTER);
// are we IN_JNI code?
asm.emitCMPI(S0, RVMThread.IN_JNI + (RVMThread.ALWAYS_LOCK_ON_STATE_TRANSITION ? 100 : 0));
ForwardReference blocked = asm.emitForwardBC(NE);
// S0 <- new state value
asm.emitLVAL(S0, RVMThread.IN_JAVA);
// attempt to change state to java
asm.emitSTWCXr(S0, S1, THREAD_REGISTER);
// branch over blocked call if state change successful
ForwardReference fr = asm.emitForwardBC(EQ);
blocked.resolve(asm);
// if not IN_JNI call RVMThread.leaveJNIBlockedFromCallIntoNative
// T1 gets address of function
asm.emitLAddrOffset(T1, JTOC, Entrypoints.leaveJNIBlockedFromCallIntoNativeMethod.getOffset());
asm.emitMTLR(T1);
// call RVMThread.leaveJNIBlockedFromCallIntoNative
asm.emitBCLRL();
fr.resolve(asm);
// check if GC has occurred, If GC did not occur, then
// VM NON_VOLATILE regs were restored by OS and are valid. If GC did occur
// objects referenced by these restored regs may have moved, in this case we
// restore the nonvolatile registers from our save area,
// where any object references would have been relocated during GC.
// use T2 as scratch (not needed any more on return from call)
//
asm.emitLWZ(T2, frameSize - JNI_GC_FLAG_OFFSET, FP);
asm.emitCMPI(T2, 0);
ForwardReference fr1 = asm.emitForwardBC(EQ);
for (int i = LAST_NONVOLATILE_GPR.value(), offset = JNI_RVM_NONVOLATILE_OFFSET; i >= FIRST_NONVOLATILE_GPR.value(); --i, offset += BYTES_IN_STACKSLOT) {
asm.emitLAddr(GPR.lookup(i), frameSize - offset, FP);
}
fr1.resolve(asm);
// reestablish S0 to hold pointer to JNIEnvironment
asm.emitLAddrOffset(S0, THREAD_REGISTER, Entrypoints.jniEnvField.getOffset());
// pop jrefs frame off the JNIRefs stack, "reopen" the previous top jref frame
// use S1 as scratch, also use T2, T3 for scratch which are no longer needed
// load base of JNIRefs array
asm.emitLAddrOffset(S1, S0, Entrypoints.JNIRefsField.getOffset());
asm.emitLIntOffset(T2, S0, // get saved offset for JNIRefs frame ptr previously pushed onto JNIRefs array
Entrypoints.JNIRefsSavedFPField.getOffset());
// compute offset for new TOP
asm.emitADDI(T3, -BYTES_IN_STACKSLOT, T2);
// store new offset for TOP into JNIEnv
asm.emitSTWoffset(T3, S0, Entrypoints.JNIRefsTopField.getOffset());
// retrieve the previous frame ptr
asm.emitLIntX(T2, S1, T2);
asm.emitSTWoffset(T2, S0, // store new offset for JNIRefs frame ptr into JNIEnv
Entrypoints.JNIRefsSavedFPField.getOffset());
// Restore the return value R3-R4 saved in the glue frame spill area before the migration
if (VM.BuildFor64Addr) {
asm.emitLD(T0, NATIVE_FRAME_HEADER_SIZE, FP);
} else {
asm.emitLWZ(T0, NATIVE_FRAME_HEADER_SIZE, FP);
asm.emitLWZ(T1, NATIVE_FRAME_HEADER_SIZE + BYTES_IN_STACKSLOT, FP);
}
// if the the return type is a reference, the native C is returning a jref
// which is a byte offset from the beginning of the threads JNIRefs stack/array
// of the corresponding ref. In this case, emit code to replace the returned
// offset (in R3) with the ref from the JNIRefs array
TypeReference returnType = method.getReturnType();
if (returnType.isReferenceType()) {
asm.emitCMPI(T0, 0);
ForwardReference globalRef = asm.emitForwardBC(LT);
// Local ref - load from JNIRefs
// S1 is still the base of the JNIRefs array
asm.emitLAddrX(T0, S1, T0);
ForwardReference afterGlobalRef = asm.emitForwardB();
// Deal with global references
globalRef.resolve(asm);
asm.emitLVAL(T3, JNIGlobalRefTable.STRONG_REF_BIT);
asm.emitAND(T1, T0, T3);
asm.emitLAddrOffset(T2, JTOC, Entrypoints.JNIGlobalRefsField.getOffset());
asm.emitCMPI(T1, 0);
ForwardReference weakGlobalRef = asm.emitForwardBC(EQ);
// Strong global references
asm.emitNEG(T0, T0);
// convert index to offset
asm.emitSLWI(T0, T0, LOG_BYTES_IN_ADDRESS);
asm.emitLAddrX(T0, T2, T0);
ForwardReference afterWeakGlobalRef = asm.emitForwardB();
// Weak global references
weakGlobalRef.resolve(asm);
// STRONG_REF_BIT
asm.emitOR(T0, T0, T3);
asm.emitNEG(T0, T0);
// convert index to offset
asm.emitSLWI(T0, T0, LOG_BYTES_IN_ADDRESS);
asm.emitLAddrX(T0, T2, T0);
asm.emitLAddrOffset(T0, T0, Entrypoints.referenceReferentField.getOffset());
afterWeakGlobalRef.resolve(asm);
afterGlobalRef.resolve(asm);
}
// pop the whole stack frame (main & mini), restore the Java caller frame
asm.emitADDI(FP, +frameSize, FP);
// C return value is already where caller expected it (T0/T1 or F0)
// So, just restore the return address to the link register.
asm.emitLAddr(REGISTER_ZERO, STACKFRAME_RETURN_ADDRESS_OFFSET.toInt(), FP);
// restore return address
asm.emitMTLR(REGISTER_ZERO);
// CHECK EXCEPTION AND BRANCH TO ATHROW CODE OR RETURN NORMALLY
asm.emitLIntOffset(T2, S0, Entrypoints.JNIHasPendingExceptionField.getOffset());
// get a zero value to compare
asm.emitLVAL(T3, 0);
asm.emitCMP(T2, T3);
ForwardReference fr3 = asm.emitForwardBC(NE);
// if no pending exception, proceed to return to caller
asm.emitBCLR();
fr3.resolve(asm);
// T1 gets address of function
asm.emitLAddrToc(T1, Entrypoints.jniThrowPendingException.getOffset());
// point LR to the exception delivery code
asm.emitMTCTR(T1);
// then branch to the exception delivery code, does not return
asm.emitBCCTR();
cm.compileComplete(asm.getMachineCodes());
return cm;
}
use of org.jikesrvm.compilers.common.assembler.ForwardReference in project JikesRVM by JikesRVM.
the class JNICompiler method generateGlueCodeForJNIMethod.
/**
* Emit code to do the C to Java transition: JNI methods in JNIFunctions.java
*/
public static void generateGlueCodeForJNIMethod(Assembler asm, RVMMethod mth) {
int offset;
int varargAmount = 0;
String mthName = mth.getName().toString();
final boolean usesVarargs = (mthName.startsWith("Call") && mthName.endsWith("Method")) || mthName.equals("NewObject");
int glueFrameSize = JNI_GLUE_FRAME_SIZE + varargAmount;
// buy the glue frame
asm.emitSTAddrU(FP, -glueFrameSize, FP);
if (usesVarargs) {
if (VM.BuildForPower64ELF_ABI) {
// skip over slots for GPR 3-5
offset = STACKFRAME_HEADER_SIZE + 3 * BYTES_IN_STACKSLOT;
for (int i = 6; i <= 10; i++) {
asm.emitSTAddr(GPR.lookup(i), offset, FP);
offset += BYTES_IN_ADDRESS;
}
// store FPRs 1-3 in first 3 slots of volatile FPR save area
for (int i = 1; i <= 3; i++) {
asm.emitSTFD(FPR.lookup(i), offset, FP);
offset += BYTES_IN_DOUBLE;
}
} else if (VM.BuildForSVR4ABI) {
// save all parameter registers
offset = STACKFRAME_HEADER_SIZE + 0;
for (int i = FIRST_OS_PARAMETER_GPR.value(); i <= LAST_OS_PARAMETER_GPR.value(); i++) {
asm.emitSTAddr(GPR.lookup(i), offset, FP);
offset += BYTES_IN_ADDRESS;
}
for (int i = FIRST_OS_PARAMETER_FPR.value(); i <= LAST_OS_PARAMETER_FPR.value(); i++) {
asm.emitSTFD(FPR.lookup(i), offset, FP);
offset += BYTES_IN_DOUBLE;
}
}
} else {
if (VM.BuildForSVR4ABI) {
// adjust register contents (following SVR4 ABI) for normal JNI functions
// especially dealing with long, spills
// number of parameters of normal JNI functions should fix in
// r3 - r12, f1 - f15, + 24 words,
convertParametersFromSVR4ToJava(asm, mth);
}
}
// Save non-volatile GPRs that will not be saved and restored by RVM.
//
// skip 20 word volatile reg save area
offset = STACKFRAME_HEADER_SIZE + JNI_GLUE_SAVED_VOL_SIZE;
for (int i = FIRST_RVM_RESERVED_NV_GPR.value(); i <= LAST_RVM_RESERVED_NV_GPR.value(); i++) {
asm.emitSTAddr(GPR.lookup(i), offset, FP);
offset += BYTES_IN_ADDRESS;
}
// set the method ID for the glue frame
// and save the return address in the previous frame
//
asm.emitLVAL(S0, INVISIBLE_METHOD_ID);
asm.emitMFLR(REGISTER_ZERO);
asm.emitSTW(S0, STACKFRAME_METHOD_ID_OFFSET.toInt(), FP);
asm.emitSTAddr(REGISTER_ZERO, glueFrameSize + STACKFRAME_RETURN_ADDRESS_OFFSET.toInt(), FP);
// Attempt to change the vpStatus of the current Processor to IN_JAVA
//
// on entry T0 = JNIEnv* which is an interior pointer to this thread's JNIEnvironment.
// We first adjust this in place to be a pointer to a JNIEnvironment and then use
// it to acquire THREAD_REGISTER (and JTOC on Linux).
//
// TODO update for AIX removal
// AIX non volatile gprs 13-16 have been saved & are available (also gprs 11-13 can be used).
// S0=13, S1=14, TI=15, THREAD_REGISTER=16 are available (&have labels) for changing state.
// we leave the passed arguments untouched, unless we are blocked and have to call sysVirtualProcessorYield
// Map from JNIEnv* to JNIEnvironment.
// Must do this outside the loop as we need to do it exactly once.
asm.emitADDI(T0, Offset.zero().minus(Entrypoints.JNIExternalFunctionsField.getOffset()), T0);
int retryLoop = asm.getMachineCodeIndex();
// acquire Jikes RVM THREAD_REGISTER (and JTOC Linux only).
asm.emitLAddrOffset(THREAD_REGISTER, T0, Entrypoints.JNIEnvSavedTRField.getOffset());
if (VM.BuildForSVR4ABI) {
// When using the 64-bit PowerPC ELF ABI (e.g. on PPC64 Linux), the JTOC is part of
// the Linkage triplet and this already set by our caller.
// Thus, we only need this load when not on PPC64 Linux.
asm.emitLAddrOffset(JTOC, T0, Entrypoints.JNIEnvSavedJTOCField.getOffset());
}
asm.emitLVALAddr(S1, Entrypoints.execStatusField.getOffset());
// get status for processor
asm.emitLWARX(S0, S1, THREAD_REGISTER);
// check if GC in progress, blocked in native mode
asm.emitCMPI(S0, RVMThread.IN_JNI + (RVMThread.ALWAYS_LOCK_ON_STATE_TRANSITION ? 100 : 0));
ForwardReference frBlocked = asm.emitForwardBC(NE);
// S0 <- new state value
asm.emitLVAL(S0, RVMThread.IN_JAVA);
// attempt to change state to IN_JAVA
asm.emitSTWCXr(S0, S1, THREAD_REGISTER);
// br if failure -retry lwarx by jumping to label0
asm.emitBC(NE, retryLoop);
// branch around code to call sysYield
ForwardReference frInJava = asm.emitForwardB();
// branch to here if blocked in native, call leaveJNIBlocked
// must save volatile gprs & fprs before the call and restore after
//
frBlocked.resolve(asm);
offset = STACKFRAME_HEADER_SIZE;
// save volatile GPRS 3-10
for (int i = FIRST_OS_PARAMETER_GPR.value(); i <= LAST_OS_PARAMETER_GPR.value(); i++) {
asm.emitSTAddr(GPR.lookup(i), offset, FP);
offset += BYTES_IN_ADDRESS;
}
// save volatile FPRS 1-6
for (int i = FIRST_OS_PARAMETER_FPR.value(); i <= LAST_OS_VARARG_PARAMETER_FPR.value(); i++) {
asm.emitSTFD(FPR.lookup(i), offset, FP);
offset += BYTES_IN_DOUBLE;
}
asm.emitLAddrOffset(KLUDGE_TI_REG, JTOC, // load addr of function
Entrypoints.leaveJNIBlockedFromJNIFunctionCallMethod.getOffset());
asm.emitMTLR(KLUDGE_TI_REG);
// call RVMThread.leaveJNIBlockFromJNIFunction
asm.emitBCLRL();
// restore the saved volatile GPRs 3-10 and FPRs 1-6
offset = STACKFRAME_HEADER_SIZE;
// restore volatile GPRS 3-10
for (int i = FIRST_OS_PARAMETER_GPR.value(); i <= LAST_OS_PARAMETER_GPR.value(); i++) {
asm.emitLAddr(GPR.lookup(i), offset, FP);
offset += BYTES_IN_ADDRESS;
}
// restore volatile FPRS 1-6
for (int i = FIRST_OS_PARAMETER_FPR.value(); i <= LAST_OS_VARARG_PARAMETER_FPR.value(); i++) {
asm.emitLFD(FPR.lookup(i), offset, FP);
offset += BYTES_IN_DOUBLE;
}
// NOW_IN_JAVA:
// JTOC, and TR are all as Jikes RVM expects them;
// params are where the Jikes RVM calling conventions expects them.
//
frInJava.resolve(asm);
// get pointer to top java frame from JNIEnv, compute offset from current
// frame pointer (offset to avoid more interior pointers) and save offset
// in this glue frame
//
asm.emitLAddrOffset(S0, T0, // get addr of top java frame from JNIEnv
Entrypoints.JNITopJavaFPField.getOffset());
// S0 <- offset from current FP
asm.emitSUBFC(S0, FP, S0);
// LINUX - 8
// store offset at end of glue frame
asm.emitSTW(S0, glueFrameSize + JNI_GLUE_OFFSET_TO_PREV_JFRAME, FP);
// BRANCH TO THE PROLOG FOR THE JNI FUNCTION
ForwardReference frNormalPrologue = asm.emitForwardBL();
// relative branch and link past the following epilog, to the normal prolog of the method
// the normal epilog of the method will return to the epilog here to pop the glue stack frame
// RETURN TO HERE FROM EPILOG OF JNI FUNCTION
// CAUTION: START OF EPILOG OF GLUE CODE
// The section of code from here to "END OF EPILOG OF GLUE CODE" is nestled between
// the glue code prolog and the real body of the JNI method.
// T0 & T1 (R3 & R4) or F1 contain the return value from the function - DO NOT USE
// assume: JTOC and THREAD_REG are valid, and all RVM non-volatile
// GPRs and FPRs have been restored. Our processor state will be IN_JAVA.
// establish T2 -> current thread's JNIEnvironment, from activeThread field
// of current processor
// T2 <- JNIEnvironment
asm.emitLAddrOffset(T2, THREAD_REGISTER, Entrypoints.jniEnvField.getOffset());
// before returning to C, set pointer to top java frame in JNIEnv, using offset
// saved in this glue frame during transition from C to Java. GC will use this saved
// frame pointer if it is necessary to do GC with a processors active thread
// stuck (and blocked) in native C, ie. GC starts scanning the threads stack at that frame.
// LINUX -8
// load offset from FP to top java frame
asm.emitLInt(T3, glueFrameSize + JNI_GLUE_OFFSET_TO_PREV_JFRAME, FP);
// T3 <- address of top java frame
asm.emitADD(T3, FP, T3);
// store TopJavaFP back into JNIEnv
asm.emitSTAddrOffset(T3, T2, Entrypoints.JNITopJavaFPField.getOffset());
// check to see if this frame address is the sentinel since there
// may be no further Java frame below
asm.emitCMPAddrI(T3, STACKFRAME_SENTINEL_FP.toInt());
ForwardReference fr4 = asm.emitForwardBC(EQ);
// get fp for caller of prev J to C transition frame
asm.emitLAddr(S0, 0, T3);
fr4.resolve(asm);
// store current TR into JNIEnvironment; we may have switched TRs while in Java mode.
asm.emitSTAddrOffset(THREAD_REGISTER, T2, Entrypoints.JNIEnvSavedTRField.getOffset());
// change the state of the TR to IN_JNI
//
asm.emitLVALAddr(S1, Entrypoints.execStatusField.getOffset());
asm.emitLWARX(S0, S1, THREAD_REGISTER);
asm.emitCMPI(S0, RVMThread.IN_JAVA + (RVMThread.ALWAYS_LOCK_ON_STATE_TRANSITION ? 100 : 0));
ForwardReference notInJava = asm.emitForwardBC(NE);
asm.emitLVAL(S0, RVMThread.IN_JNI);
asm.emitSTWCXr(S0, S1, THREAD_REGISTER);
ForwardReference enteredJNIRef = asm.emitForwardBC(EQ);
notInJava.resolve(asm);
// NOTE: we save and restore volatiles here. that's overkill. we really
// only need to save/restore the return registers (see above). oh well.
// if it works then I can't bring myself to care.
asm.emitLAddrOffset(S0, THREAD_REGISTER, Entrypoints.threadContextRegistersField.getOffset());
asm.emitLAddrOffset(S1, JTOC, ArchEntrypoints.saveVolatilesInstructionsField.getOffset());
asm.emitMTLR(S1);
asm.emitBCLRL();
asm.emitLAddrOffset(S0, JTOC, Entrypoints.enterJNIBlockedFromJNIFunctionCallMethod.getOffset());
asm.emitMTLR(S0);
asm.emitBCLRL();
asm.emitLAddrOffset(S0, THREAD_REGISTER, Entrypoints.threadContextRegistersField.getOffset());
asm.emitLAddrOffset(S1, JTOC, ArchEntrypoints.restoreVolatilesInstructionsField.getOffset());
asm.emitMTLR(S1);
asm.emitBCLRL();
enteredJNIRef.resolve(asm);
// Restore the nonvolatile registers saved in the prolog above
// Here we only save & restore ONLY those registers not restored by RVM
//
// skip 20 word volatile reg save area
offset = STACKFRAME_HEADER_SIZE + JNI_GLUE_SAVED_VOL_SIZE;
for (int i = FIRST_RVM_RESERVED_NV_GPR.value(); i <= LAST_RVM_RESERVED_NV_GPR.value(); i++) {
// 4 instructions
asm.emitLAddr(GPR.lookup(i), offset, FP);
offset += BYTES_IN_ADDRESS;
}
// pop frame
asm.emitADDI(FP, glueFrameSize, FP);
// load return address & return to caller
// T0 & T1 (or F1) should still contain the return value
//
asm.emitLAddr(T2, STACKFRAME_RETURN_ADDRESS_OFFSET.toInt(), FP);
asm.emitMTLR(T2);
// branch always, through link register
asm.emitBCLR();
// END OF EPILOG OF GLUE CODE; rest of method generated by Compiler from bytecodes of method in JNIFunctions
frNormalPrologue.resolve(asm);
}
use of org.jikesrvm.compilers.common.assembler.ForwardReference in project JikesRVM by JikesRVM.
the class JNICompiler method compile.
/**
* Compiles a method to handle the Java to C transition and back
* Transitioning from Java to C then back:
* <ol>
* <li>Set up stack frame and save non-volatile registers<li>
* <li>Set up jniEnv - set up a register to hold JNIEnv and store
* the Processor in the JNIEnv for easy access</li>
* <li>Move all native method arguments on to stack (NB at this point all
* non-volatile state is saved)</li>
* <li>Record the frame pointer of the last Java frame (this) in the jniEnv</li>
* <li>Call out to convert reference arguments to IDs</li>
* <li>Set processor as being "in native"</li>
* <li>Set up stack frame and registers for transition to C</li>
* <li>Call out to C</li>
* <li>Save result to stack</li>
* <li>Transition back from "in native" to "in Java", take care that the
* Processor isn't "blocked in native", ie other processors have decided to
* start a GC and we're not permitted to execute Java code whilst this
* occurs</li>
* <li>Convert a reference result (currently a JNI ref) into a true reference</li>
* <li>Release JNI refs</li>
* <li>Restore stack and place result in register</li>
* </ol>
*
* @param method the method to compile
* @return the compiled method (always a {@link JNICompiledMethod})
*/
public static synchronized CompiledMethod compile(NativeMethod method) {
// Meaning of constant offset into frame (assuming 4byte word size):
// Stack frame:
// on entry after prolog
//
// high address high address
// | | | | Caller frame
// | | | |
// + |arg 0 | |arg 0 | <- firstParameterOffset
// + |arg 1 | |arg 1 |
// + |... | |... |
// +8 |arg n-1 | |arg n-1 | <- lastParameterOffset
// +4 |returnAddr| |returnAddr|
// 0 + + +saved FP + <- EBP/FP value in glue frame
// -4 | | |methodID |
// -8 | | |saved EDI |
// -C | | |saved EBX |
// -10 | | |saved EBP |
// -14 | | |saved ENV | (JNIEnvironment)
// -18 | | |arg n-1 | reordered args to native method
// -1C | | | ... | ...
// -20 | | |arg 1 | ...
// -24 | | |arg 0 | ...
// -28 | | |class/obj | required second arg to native method
// -2C | | |jni funcs | required first arg to native method
// -30 | | | |
// | | | |
// | | | |
// low address low address
// Register values:
// EBP - after step 1 EBP holds a frame pointer allowing easy
// access to both this and the proceeding frame
// ESP - gradually floats down as the stack frame is initialized
// S0/ECX - reference to the JNI environment after step 3
JNICompiledMethod cm = (JNICompiledMethod) CompiledMethods.createCompiledMethod(method, CompiledMethod.JNI);
// some size for the instruction array
Assembler asm = new Assembler(100);
Address nativeIP = method.getNativeIP();
final Offset lastParameterOffset = Offset.fromIntSignExtend(2 * WORDSIZE);
// final Offset firstParameterOffset = Offset.fromIntSignExtend(WORDSIZE+(method.getParameterWords() << LG_WORDSIZE));
final TypeReference[] args = method.getParameterTypes();
// (1) Set up stack frame and save non-volatile registers
// TODO: check and resize stack once on the lowest Java to C transition
// on the stack. Not needed if we use the thread original stack
// set 2nd word of header = return address already pushed by CALL
asm.emitPUSH_RegDisp(THREAD_REGISTER, ArchEntrypoints.framePointerField.getOffset());
// establish new frame
if (VM.BuildFor32Addr) {
asm.emitMOV_RegDisp_Reg(THREAD_REGISTER, ArchEntrypoints.framePointerField.getOffset(), SP);
} else {
asm.emitMOV_RegDisp_Reg_Quad(THREAD_REGISTER, ArchEntrypoints.framePointerField.getOffset(), SP);
}
// set first word of header: method ID
if (VM.VerifyAssertions)
VM._assert(STACKFRAME_METHOD_ID_OFFSET.toInt() == -WORDSIZE);
asm.emitPUSH_Imm(cm.getId());
// save nonvolatile registrs: EDI, EBX, EBP
if (VM.VerifyAssertions)
VM._assert(EDI_SAVE_OFFSET.toInt() == -2 * WORDSIZE);
// save nonvolatile EDI register
asm.emitPUSH_Reg(EDI);
if (VM.VerifyAssertions)
VM._assert(EBX_SAVE_OFFSET.toInt() == -3 * WORDSIZE);
// save nonvolatile EBX register
asm.emitPUSH_Reg(EBX);
if (VM.VerifyAssertions)
VM._assert(EBP_SAVE_OFFSET.toInt() == -4 * WORDSIZE);
// save nonvolatile EBP register
asm.emitPUSH_Reg(EBP);
// Establish EBP as the framepointer for use in the rest of the glue frame
if (VM.BuildFor32Addr) {
asm.emitLEA_Reg_RegDisp(EBP, SP, Offset.fromIntSignExtend(4 * WORDSIZE));
} else {
asm.emitLEA_Reg_RegDisp_Quad(EBP, SP, Offset.fromIntSignExtend(4 * WORDSIZE));
}
// S0 = RVMThread.jniEnv
if (VM.BuildFor32Addr) {
asm.emitMOV_Reg_RegDisp(S0, THREAD_REGISTER, Entrypoints.jniEnvField.getOffset());
} else {
asm.emitMOV_Reg_RegDisp_Quad(S0, THREAD_REGISTER, Entrypoints.jniEnvField.getOffset());
}
if (VM.VerifyAssertions)
VM._assert(JNI_ENV_OFFSET.toInt() == -5 * WORDSIZE);
// save JNI Env for after call
asm.emitPUSH_Reg(S0);
if (VM.VerifyAssertions)
VM._assert(BP_ON_ENTRY_OFFSET.toInt() == -6 * WORDSIZE);
asm.emitPUSH_RegDisp(S0, Entrypoints.JNIEnvBasePointerOnEntryToNative.getOffset());
// save BP into JNIEnv
if (VM.BuildFor32Addr) {
asm.emitMOV_RegDisp_Reg(S0, Entrypoints.JNIEnvBasePointerOnEntryToNative.getOffset(), EBP);
} else {
asm.emitMOV_RegDisp_Reg_Quad(S0, Entrypoints.JNIEnvBasePointerOnEntryToNative.getOffset(), EBP);
}
// (3) Move all native method arguments on to stack (NB at this
// point all non-volatile state is saved)
// (3.1) Count how many arguments could be passed in either FPRs or GPRs
int numFprArgs = 0;
int numGprArgs = 0;
for (TypeReference arg : args) {
if (arg.isFloatingPointType()) {
numFprArgs++;
} else if (VM.BuildFor32Addr && arg.isLongType()) {
numGprArgs += 2;
} else {
numGprArgs++;
}
}
// (3.2) add stack aligning padding
if (VM.BuildFor64Addr) {
int argsInRegisters = Math.min(numFprArgs, NATIVE_PARAMETER_FPRS.length) + Math.min(numGprArgs + 2, NATIVE_PARAMETER_GPRS.length);
int argsOnStack = numGprArgs + numFprArgs + 2 - argsInRegisters;
if (VM.VerifyAssertions)
VM._assert(argsOnStack >= 0);
if ((argsOnStack & 1) != 0) {
// need odd alignment prior to pushes
asm.emitAND_Reg_Imm_Quad(SP, -16);
asm.emitPUSH_Reg(T0);
} else {
// need even alignment prior to pushes
asm.emitAND_Reg_Imm_Quad(SP, -16);
}
}
// (we always pass a this or a class but we only pop this)
if (!method.isStatic()) {
numGprArgs++;
}
// (3.3) Walk over arguments backwards pushing either from memory or registers
Offset currentArg = lastParameterOffset;
int argFpr = numFprArgs - 1;
int argGpr = numGprArgs - 1;
for (int i = args.length - 1; i >= 0; i--) {
TypeReference arg = args[i];
if (arg.isFloatType()) {
if (argFpr < PARAMETER_FPRS.length) {
// make space
asm.emitPUSH_Reg(T0);
if (SSE2_FULL) {
asm.emitMOVSS_RegInd_Reg(SP, (XMM) PARAMETER_FPRS[argFpr]);
} else {
asm.emitFSTP_RegInd_Reg(SP, FP0);
}
} else {
asm.emitPUSH_RegDisp(EBP, currentArg);
}
argFpr--;
} else if (arg.isDoubleType()) {
if (VM.BuildFor32Addr) {
if (argFpr < PARAMETER_FPRS.length) {
// make space
asm.emitPUSH_Reg(T0);
// need 2 slots with 32bit addresses
asm.emitPUSH_Reg(T0);
if (SSE2_FULL) {
asm.emitMOVSD_RegInd_Reg(SP, (XMM) PARAMETER_FPRS[argFpr]);
} else {
asm.emitFSTP_RegInd_Reg_Quad(SP, FP0);
}
} else {
asm.emitPUSH_RegDisp(EBP, currentArg.plus(WORDSIZE));
// need 2 slots with 32bit addresses
asm.emitPUSH_RegDisp(EBP, currentArg);
}
} else {
if (argFpr < PARAMETER_FPRS.length) {
// make space
asm.emitPUSH_Reg(T0);
if (SSE2_FULL) {
asm.emitMOVSD_RegInd_Reg(SP, (XMM) PARAMETER_FPRS[argFpr]);
} else {
asm.emitFSTP_RegInd_Reg_Quad(SP, FP0);
}
} else {
asm.emitPUSH_RegDisp(EBP, currentArg);
}
}
argFpr--;
currentArg = currentArg.plus(WORDSIZE);
} else if (VM.BuildFor32Addr && arg.isLongType()) {
if (argGpr < PARAMETER_GPRS.length) {
asm.emitPUSH_Reg(PARAMETER_GPRS[argGpr - 1]);
asm.emitPUSH_Reg(PARAMETER_GPRS[argGpr]);
} else if (argGpr - 1 < PARAMETER_GPRS.length) {
asm.emitPUSH_Reg(PARAMETER_GPRS[argGpr - 1]);
asm.emitPUSH_RegDisp(EBP, currentArg);
} else {
asm.emitPUSH_RegDisp(EBP, currentArg.plus(WORDSIZE));
asm.emitPUSH_RegDisp(EBP, currentArg);
}
argGpr -= 2;
currentArg = currentArg.plus(WORDSIZE);
} else {
if (argGpr < PARAMETER_GPRS.length) {
asm.emitPUSH_Reg(PARAMETER_GPRS[argGpr]);
} else {
asm.emitPUSH_RegDisp(EBP, currentArg);
}
argGpr--;
if (VM.BuildFor64Addr && arg.isLongType()) {
currentArg = currentArg.plus(WORDSIZE);
}
}
currentArg = currentArg.plus(WORDSIZE);
}
// (3.4) push class or object argument
if (method.isStatic()) {
// push java.lang.Class object for klass
Offset klassOffset = Offset.fromIntSignExtend(Statics.findOrCreateObjectLiteral(method.getDeclaringClass().getClassForType()));
asm.generateJTOCpush(klassOffset);
} else {
if (VM.VerifyAssertions)
VM._assert(argGpr == 0);
asm.emitPUSH_Reg(PARAMETER_GPRS[0]);
}
// (3.5) push a pointer to the JNI functions that will be
// dereferenced in native code
asm.emitPUSH_Reg(S0);
if (jniExternalFunctionsFieldOffset != 0) {
if (VM.BuildFor32Addr) {
asm.emitADD_RegInd_Imm(ESP, jniExternalFunctionsFieldOffset);
} else {
asm.emitADD_RegInd_Imm_Quad(ESP, jniExternalFunctionsFieldOffset);
}
}
// (4) Call out to convert reference arguments to IDs, set thread as
// being "in native" and record the frame pointer of the last Java frame
// (this) in the jniEnv
// Encode reference arguments into a long
int encodedReferenceOffsets = 0;
for (int i = 0, pos = 0; i < args.length; i++, pos++) {
TypeReference arg = args[i];
if (arg.isReferenceType()) {
if (VM.VerifyAssertions)
VM._assert(pos < 32);
encodedReferenceOffsets |= 1 << pos;
} else if (VM.BuildFor32Addr && (arg.isLongType() || arg.isDoubleType())) {
pos++;
}
}
// Call out to JNI environment JNI entry
if (VM.BuildFor32Addr) {
asm.emitMOV_Reg_RegDisp(PARAMETER_GPRS[0], EBP, JNI_ENV_OFFSET);
} else {
asm.emitMOV_Reg_RegDisp_Quad(PARAMETER_GPRS[0], EBP, JNI_ENV_OFFSET);
}
asm.emitPUSH_Reg(PARAMETER_GPRS[0]);
asm.emitMOV_Reg_Imm(PARAMETER_GPRS[1], encodedReferenceOffsets);
asm.emitPUSH_Reg(PARAMETER_GPRS[1]);
asm.baselineEmitLoadTIB(S0, PARAMETER_GPRS[0]);
asm.emitCALL_RegDisp(S0, Entrypoints.jniEntry.getOffset());
// (5) Set up stack frame and registers for transition to C
int stackholes = 0;
int position = 0;
int argsPassedInRegister = 0;
if (VM.BuildFor64Addr) {
int gpRegistersInUse = 2;
int fpRegistersInUse = 0;
boolean dataOnStack = false;
// JNI env
asm.emitPOP_Reg(NATIVE_PARAMETER_GPRS[0]);
// Object/Class
asm.emitPOP_Reg(NATIVE_PARAMETER_GPRS[1]);
argsPassedInRegister += 2;
for (TypeReference arg : method.getParameterTypes()) {
if (arg.isFloatType()) {
if (fpRegistersInUse < NATIVE_PARAMETER_FPRS.length) {
asm.emitMOVSS_Reg_RegDisp((XMM) NATIVE_PARAMETER_FPRS[fpRegistersInUse], SP, Offset.fromIntZeroExtend(position << LG_WORDSIZE));
if (dataOnStack) {
stackholes |= 1 << position;
} else {
asm.emitPOP_Reg(T0);
}
fpRegistersInUse++;
argsPassedInRegister++;
} else {
// no register available so we have data on the stack
dataOnStack = true;
}
} else if (arg.isDoubleType()) {
if (fpRegistersInUse < NATIVE_PARAMETER_FPRS.length) {
asm.emitMOVSD_Reg_RegDisp((XMM) NATIVE_PARAMETER_FPRS[fpRegistersInUse], SP, Offset.fromIntZeroExtend(position << LG_WORDSIZE));
if (dataOnStack) {
stackholes |= 1 << position;
} else {
asm.emitPOP_Reg(T0);
}
if (VM.BuildFor32Addr)
asm.emitPOP_Reg(T0);
fpRegistersInUse++;
argsPassedInRegister += VM.BuildFor32Addr ? 2 : 1;
} else {
// no register available so we have data on the stack
dataOnStack = true;
}
} else {
if (gpRegistersInUse < NATIVE_PARAMETER_GPRS.length) {
// TODO: we can't have holes in the data that is on the stack, we need to shuffle it up
asm.emitMOV_Reg_RegDisp_Quad(NATIVE_PARAMETER_GPRS[gpRegistersInUse], SP, Offset.fromIntZeroExtend(position << LG_WORDSIZE));
if (dataOnStack) {
stackholes |= 1 << position;
} else {
asm.emitPOP_Reg(T0);
}
gpRegistersInUse++;
argsPassedInRegister++;
} else {
// no register available so we have data on the stack
dataOnStack = true;
}
}
if (dataOnStack) {
position++;
}
}
position--;
int onStackOffset = position;
int mask = 0;
for (int i = position; i >= 0; i--) {
mask = 1 << i;
if ((stackholes & mask) != 0) {
continue;
}
if (i < onStackOffset) {
asm.emitMOV_Reg_RegDisp_Quad(T0, SP, Offset.fromIntZeroExtend(i << LOG_BYTES_IN_WORD));
asm.emitMOV_RegDisp_Reg_Quad(SP, Offset.fromIntZeroExtend(onStackOffset << LOG_BYTES_IN_WORD), T0);
}
onStackOffset--;
}
while (onStackOffset >= 0) {
asm.emitPOP_Reg(T0);
onStackOffset--;
}
}
// move address of native code to invoke into T0
if (VM.BuildFor32Addr) {
asm.emitMOV_Reg_Imm(T0, nativeIP.toInt());
} else {
asm.emitMOV_Reg_Imm_Quad(T0, nativeIP.toLong());
}
// Trap if stack alignment fails
if (VM.ExtremeAssertions && VM.BuildFor64Addr) {
asm.emitBT_Reg_Imm(ESP, 3);
ForwardReference fr = asm.forwardJcc(LGE);
asm.emitINT_Imm(3);
fr.resolve(asm);
}
// make the call to native code
asm.emitCALL_Reg(T0);
// (7) Discard parameters on stack
if (VM.BuildFor32Addr) {
// throw away args, class/this ptr and env
int argsToThrowAway = method.getParameterWords() + 2 - argsPassedInRegister;
if (argsToThrowAway != 0) {
asm.emitLEA_Reg_RegDisp(SP, EBP, BP_ON_ENTRY_OFFSET);
}
} else {
// throw away args, class/this ptr and env (and padding)
asm.emitLEA_Reg_RegDisp_Quad(SP, EBP, BP_ON_ENTRY_OFFSET);
}
// (8) Save result to stack
final TypeReference returnType = method.getReturnType();
if (returnType.isVoidType()) {
// Nothing to save
} else if (returnType.isFloatType()) {
// adjust stack
asm.emitPUSH_Reg(T0);
if (VM.BuildFor32Addr) {
asm.emitFSTP_RegInd_Reg(ESP, FP0);
} else {
asm.emitMOVSS_RegInd_Reg(ESP, XMM0);
}
} else if (returnType.isDoubleType()) {
// adjust stack
asm.emitPUSH_Reg(T0);
// adjust stack
asm.emitPUSH_Reg(T0);
if (VM.BuildFor32Addr) {
asm.emitFSTP_RegInd_Reg_Quad(ESP, FP0);
} else {
asm.emitMOVSD_RegInd_Reg(ESP, XMM0);
}
} else if (VM.BuildFor32Addr && returnType.isLongType()) {
asm.emitPUSH_Reg(T0);
asm.emitPUSH_Reg(T1);
} else {
// Ensure sign-extension is correct
if (returnType.isBooleanType()) {
asm.emitMOVZX_Reg_Reg_Byte(T0, T0);
} else if (returnType.isByteType()) {
asm.emitMOVSX_Reg_Reg_Byte(T0, T0);
} else if (returnType.isCharType()) {
asm.emitMOVZX_Reg_Reg_Word(T0, T0);
} else if (returnType.isShortType()) {
asm.emitMOVSX_Reg_Reg_Word(T0, T0);
}
asm.emitPUSH_Reg(T0);
}
// (9.1) reload JNIEnvironment from glue frame
if (VM.BuildFor32Addr) {
asm.emitMOV_Reg_RegDisp(S0, EBP, JNICompiler.JNI_ENV_OFFSET);
} else {
asm.emitMOV_Reg_RegDisp_Quad(S0, EBP, JNICompiler.JNI_ENV_OFFSET);
}
// (9.2) Reload thread register from JNIEnvironment
if (VM.BuildFor32Addr) {
asm.emitMOV_Reg_RegDisp(THREAD_REGISTER, S0, Entrypoints.JNIEnvSavedTRField.getOffset());
} else {
asm.emitMOV_Reg_RegDisp_Quad(THREAD_REGISTER, S0, Entrypoints.JNIEnvSavedTRField.getOffset());
}
// (9.3) Establish frame pointer to this glue method
if (VM.BuildFor32Addr) {
asm.emitMOV_RegDisp_Reg(THREAD_REGISTER, ArchEntrypoints.framePointerField.getOffset(), EBP);
} else {
asm.emitMOV_RegDisp_Reg_Quad(THREAD_REGISTER, ArchEntrypoints.framePointerField.getOffset(), EBP);
}
// result (currently a JNI ref) into a true reference, release JNI refs
if (VM.BuildFor32Addr) {
// 1st arg is JNI Env
asm.emitMOV_Reg_Reg(PARAMETER_GPRS[0], S0);
} else {
// 1st arg is JNI Env
asm.emitMOV_Reg_Reg_Quad(PARAMETER_GPRS[0], S0);
}
if (returnType.isReferenceType()) {
// 2nd arg is ref result
asm.emitPOP_Reg(PARAMETER_GPRS[1]);
} else {
// place dummy (null) operand on stack
asm.emitXOR_Reg_Reg(PARAMETER_GPRS[1], PARAMETER_GPRS[1]);
}
// save JNIEnv
asm.emitPUSH_Reg(S0);
// push arg 1
asm.emitPUSH_Reg(S0);
// push arg 2
asm.emitPUSH_Reg(PARAMETER_GPRS[1]);
// Do the call
asm.baselineEmitLoadTIB(S0, S0);
asm.emitCALL_RegDisp(S0, Entrypoints.jniExit.getOffset());
// restore JNIEnv
asm.emitPOP_Reg(S0);
// place result in register
if (returnType.isVoidType()) {
// Nothing to save
} else if (returnType.isReferenceType()) {
// value already in register
} else if (returnType.isFloatType()) {
if (SSE2_FULL) {
asm.emitMOVSS_Reg_RegInd(XMM0, ESP);
} else {
asm.emitFLD_Reg_RegInd(FP0, ESP);
}
// adjust stack
asm.emitPOP_Reg(T0);
} else if (returnType.isDoubleType()) {
if (SSE2_FULL) {
asm.emitMOVSD_Reg_RegInd(XMM0, ESP);
} else {
asm.emitFLD_Reg_RegInd_Quad(FP0, ESP);
}
// adjust stack
asm.emitPOP_Reg(T0);
// adjust stack
asm.emitPOP_Reg(T0);
} else if (VM.BuildFor32Addr && returnType.isLongType()) {
asm.emitPOP_Reg(T0);
asm.emitPOP_Reg(T1);
} else {
asm.emitPOP_Reg(T0);
}
// saved previous native BP
asm.emitPOP_Reg(EBX);
if (VM.BuildFor32Addr) {
asm.emitMOV_RegDisp_Reg(S0, Entrypoints.JNIEnvBasePointerOnEntryToNative.getOffset(), EBX);
} else {
asm.emitMOV_RegDisp_Reg_Quad(S0, Entrypoints.JNIEnvBasePointerOnEntryToNative.getOffset(), EBX);
}
// throw away JNI env
asm.emitPOP_Reg(EBX);
// restore non-volatile EBP
asm.emitPOP_Reg(EBP);
// restore non-volatile EBX
asm.emitPOP_Reg(EBX);
// restore non-volatile EDI
asm.emitPOP_Reg(EDI);
// throw away cmid
asm.emitPOP_Reg(S0);
asm.emitPOP_RegDisp(THREAD_REGISTER, ArchEntrypoints.framePointerField.getOffset());
// pop parameters from stack (Note that parameterWords does not include "this")
if (method.isStatic()) {
asm.emitRET_Imm(method.getParameterWords() << LG_WORDSIZE);
} else {
asm.emitRET_Imm((method.getParameterWords() + 1) << LG_WORDSIZE);
}
CodeArray code = asm.getMachineCodes();
cm.compileComplete(code);
return cm;
}
use of org.jikesrvm.compilers.common.assembler.ForwardReference in project JikesRVM by JikesRVM.
the class TemplateCompilerFramework method genCode.
/**
* Main code generation loop.
*
* @return generated machine code
*/
protected final MachineCode genCode() {
AbstractAssembler asm = getAssembler();
AbstractLister lister = getLister();
emit_prologue();
while (bcodes.hasMoreBytecodes()) {
biStart = bcodes.index();
bytecodeMap[biStart] = asm.getMachineCodeIndex();
asm.resolveForwardReferences(biStart);
starting_bytecode();
int code = bcodes.nextInstruction();
switch(code) {
case JBC_nop:
{
if (shouldPrint)
lister.noteBytecode(biStart, "nop");
break;
}
case JBC_aconst_null:
{
if (shouldPrint)
lister.noteBytecode(biStart, "aconst_null");
emit_aconst_null();
break;
}
case JBC_iconst_m1:
{
if (shouldPrint)
lister.noteBytecode(biStart, "iconst_m1");
emit_iconst(-1);
break;
}
case JBC_iconst_0:
{
if (shouldPrint)
lister.noteBytecode(biStart, "iconst_0");
emit_iconst(0);
break;
}
case JBC_iconst_1:
{
if (shouldPrint)
lister.noteBytecode(biStart, "iconst_1");
emit_iconst(1);
break;
}
case JBC_iconst_2:
{
if (shouldPrint)
lister.noteBytecode(biStart, "iconst_2");
emit_iconst(2);
break;
}
case JBC_iconst_3:
{
if (shouldPrint)
lister.noteBytecode(biStart, "iconst_3");
emit_iconst(3);
break;
}
case JBC_iconst_4:
{
if (shouldPrint)
lister.noteBytecode(biStart, "iconst_4");
emit_iconst(4);
break;
}
case JBC_iconst_5:
{
if (shouldPrint)
lister.noteBytecode(biStart, "iconst_5");
emit_iconst(5);
break;
}
case JBC_lconst_0:
{
// floating-point 0 is long 0
if (shouldPrint)
lister.noteBytecode(biStart, "lconst_0");
emit_lconst(0);
break;
}
case JBC_lconst_1:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lconst_1");
emit_lconst(1);
break;
}
case JBC_fconst_0:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fconst_0");
emit_fconst_0();
break;
}
case JBC_fconst_1:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fconst_1");
emit_fconst_1();
break;
}
case JBC_fconst_2:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fconst_2");
emit_fconst_2();
break;
}
case JBC_dconst_0:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dconst_0");
emit_dconst_0();
break;
}
case JBC_dconst_1:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dconst_1");
emit_dconst_1();
break;
}
case JBC_bipush:
{
int val = bcodes.getByteValue();
if (shouldPrint)
lister.noteBytecode(biStart, "bipush", val);
emit_iconst(val);
break;
}
case JBC_sipush:
{
int val = bcodes.getShortValue();
if (shouldPrint)
lister.noteBytecode(biStart, "sipush", val);
emit_iconst(val);
break;
}
case JBC_ldc:
{
int index = bcodes.getConstantIndex();
if (shouldPrint)
lister.noteBytecode(biStart, "ldc", index);
Offset offset = klass.getLiteralOffset(index);
byte type = klass.getLiteralDescription(index);
emit_ldc(offset, type);
break;
}
case JBC_ldc_w:
{
int index = bcodes.getWideConstantIndex();
if (shouldPrint)
lister.noteBytecode(biStart, "ldc_w", index);
Offset offset = klass.getLiteralOffset(index);
byte type = klass.getLiteralDescription(index);
emit_ldc(offset, type);
break;
}
case JBC_ldc2_w:
{
int index = bcodes.getWideConstantIndex();
if (shouldPrint)
lister.noteBytecode(biStart, "ldc2_w", index);
Offset offset = klass.getLiteralOffset(index);
byte type = klass.getLiteralDescription(index);
emit_ldc2(offset, type);
break;
}
case JBC_iload:
{
int index = bcodes.getLocalNumber();
if (shouldPrint)
lister.noteBytecode(biStart, "iload", index);
emit_iload(index);
break;
}
case JBC_lload:
{
int index = bcodes.getLocalNumber();
if (shouldPrint)
lister.noteBytecode(biStart, "lload", index);
emit_lload(index);
break;
}
case JBC_fload:
{
int index = bcodes.getLocalNumber();
if (shouldPrint)
lister.noteBytecode(biStart, "fload", index);
emit_fload(index);
break;
}
case JBC_dload:
{
int index = bcodes.getLocalNumber();
if (shouldPrint)
lister.noteBytecode(biStart, "dload", index);
emit_dload(index);
break;
}
case JBC_aload:
{
int index = bcodes.getLocalNumber();
if (shouldPrint)
lister.noteBytecode(biStart, "aload", index);
emit_aload(index);
break;
}
case JBC_iload_0:
{
if (shouldPrint)
lister.noteBytecode(biStart, "iload_0");
emit_iload(0);
break;
}
case JBC_iload_1:
{
if (shouldPrint)
lister.noteBytecode(biStart, "iload_1");
emit_iload(1);
break;
}
case JBC_iload_2:
{
if (shouldPrint)
lister.noteBytecode(biStart, "iload_2");
emit_iload(2);
break;
}
case JBC_iload_3:
{
if (shouldPrint)
lister.noteBytecode(biStart, "iload_3");
emit_iload(3);
break;
}
case JBC_lload_0:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lload_0");
emit_lload(0);
break;
}
case JBC_lload_1:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lload_1");
emit_lload(1);
break;
}
case JBC_lload_2:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lload_2");
emit_lload(2);
break;
}
case JBC_lload_3:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lload_3");
emit_lload(3);
break;
}
case JBC_fload_0:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fload_0");
emit_fload(0);
break;
}
case JBC_fload_1:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fload_1");
emit_fload(1);
break;
}
case JBC_fload_2:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fload_2");
emit_fload(2);
break;
}
case JBC_fload_3:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fload_3");
emit_fload(3);
break;
}
case JBC_dload_0:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dload_0");
emit_dload(0);
break;
}
case JBC_dload_1:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dload_1");
emit_dload(1);
break;
}
case JBC_dload_2:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dload_2");
emit_dload(2);
break;
}
case JBC_dload_3:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dload_3");
emit_dload(3);
break;
}
case JBC_aload_0:
{
if (shouldPrint)
lister.noteBytecode(biStart, "aload_0");
emit_aload(0);
break;
}
case JBC_aload_1:
{
if (shouldPrint)
lister.noteBytecode(biStart, "aload_1");
emit_aload(1);
break;
}
case JBC_aload_2:
{
if (shouldPrint)
lister.noteBytecode(biStart, "aload_2");
emit_aload(2);
break;
}
case JBC_aload_3:
{
if (shouldPrint)
lister.noteBytecode(biStart, "aload_3");
emit_aload(3);
break;
}
case JBC_iaload:
{
if (shouldPrint)
lister.noteBytecode(biStart, "iaload");
emit_iaload();
break;
}
case JBC_laload:
{
if (shouldPrint)
lister.noteBytecode(biStart, "laload");
emit_laload();
break;
}
case JBC_faload:
{
if (shouldPrint)
lister.noteBytecode(biStart, "faload");
emit_faload();
break;
}
case JBC_daload:
{
if (shouldPrint)
lister.noteBytecode(biStart, "daload");
emit_daload();
break;
}
case JBC_aaload:
{
if (shouldPrint)
lister.noteBytecode(biStart, "aaload");
emit_aaload();
break;
}
case JBC_baload:
{
if (shouldPrint)
lister.noteBytecode(biStart, "baload");
emit_baload();
break;
}
case JBC_caload:
{
if (shouldPrint)
lister.noteBytecode(biStart, "caload");
emit_caload();
break;
}
case JBC_saload:
{
if (shouldPrint)
lister.noteBytecode(biStart, "saload");
emit_saload();
break;
}
case JBC_istore:
{
int index = bcodes.getLocalNumber();
if (shouldPrint)
lister.noteBytecode(biStart, "istore", index);
emit_istore(index);
break;
}
case JBC_lstore:
{
int index = bcodes.getLocalNumber();
if (shouldPrint)
lister.noteBytecode(biStart, "lstore", index);
emit_lstore(index);
break;
}
case JBC_fstore:
{
int index = bcodes.getLocalNumber();
if (shouldPrint)
lister.noteBytecode(biStart, "fstore", index);
emit_fstore(index);
break;
}
case JBC_dstore:
{
int index = bcodes.getLocalNumber();
if (shouldPrint)
lister.noteBytecode(biStart, "dstore", index);
emit_dstore(index);
break;
}
case JBC_astore:
{
int index = bcodes.getLocalNumber();
if (shouldPrint)
lister.noteBytecode(biStart, "astore", index);
emit_astore(index);
break;
}
case JBC_istore_0:
{
if (shouldPrint)
lister.noteBytecode(biStart, "istore_0");
emit_istore(0);
break;
}
case JBC_istore_1:
{
if (shouldPrint)
lister.noteBytecode(biStart, "istore_1");
emit_istore(1);
break;
}
case JBC_istore_2:
{
if (shouldPrint)
lister.noteBytecode(biStart, "istore_2");
emit_istore(2);
break;
}
case JBC_istore_3:
{
if (shouldPrint)
lister.noteBytecode(biStart, "istore_3");
emit_istore(3);
break;
}
case JBC_lstore_0:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lstore_0");
emit_lstore(0);
break;
}
case JBC_lstore_1:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lstore_1");
emit_lstore(1);
break;
}
case JBC_lstore_2:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lstore_2");
emit_lstore(2);
break;
}
case JBC_lstore_3:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lstore_3");
emit_lstore(3);
break;
}
case JBC_fstore_0:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fstore_0");
emit_fstore(0);
break;
}
case JBC_fstore_1:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fstore_1");
emit_fstore(1);
break;
}
case JBC_fstore_2:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fstore_2");
emit_fstore(2);
break;
}
case JBC_fstore_3:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fstore_3");
emit_fstore(3);
break;
}
case JBC_dstore_0:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dstore_0");
emit_dstore(0);
break;
}
case JBC_dstore_1:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dstore_1");
emit_dstore(1);
break;
}
case JBC_dstore_2:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dstore_2");
emit_dstore(2);
break;
}
case JBC_dstore_3:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dstore_3");
emit_dstore(3);
break;
}
case JBC_astore_0:
{
if (shouldPrint)
lister.noteBytecode(biStart, "astore_0");
emit_astore(0);
break;
}
case JBC_astore_1:
{
if (shouldPrint)
lister.noteBytecode(biStart, "astore_1");
emit_astore(1);
break;
}
case JBC_astore_2:
{
if (shouldPrint)
lister.noteBytecode(biStart, "astore_2");
emit_astore(2);
break;
}
case JBC_astore_3:
{
if (shouldPrint)
lister.noteBytecode(biStart, "astore_3");
emit_astore(3);
break;
}
case JBC_iastore:
{
if (shouldPrint)
lister.noteBytecode(biStart, "iastore");
emit_iastore();
break;
}
case JBC_lastore:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lastore");
emit_lastore();
break;
}
case JBC_fastore:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fastore");
emit_fastore();
break;
}
case JBC_dastore:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dastore");
emit_dastore();
break;
}
case JBC_aastore:
{
if (shouldPrint)
lister.noteBytecode(biStart, "aastore");
// ArrayStoreException}
if (VM.VerifyUnint && isUninterruptible && doesCheckStore)
forbiddenBytecode("aastore", bcodes.index());
emit_aastore();
break;
}
case JBC_bastore:
{
if (shouldPrint)
lister.noteBytecode(biStart, "bastore");
emit_bastore();
break;
}
case JBC_castore:
{
if (shouldPrint)
lister.noteBytecode(biStart, "castore");
emit_castore();
break;
}
case JBC_sastore:
{
if (shouldPrint)
lister.noteBytecode(biStart, "sastore");
emit_sastore();
break;
}
case JBC_pop:
{
if (shouldPrint)
lister.noteBytecode(biStart, "pop");
emit_pop();
break;
}
case JBC_pop2:
{
if (shouldPrint)
lister.noteBytecode(biStart, "pop2");
emit_pop2();
break;
}
case JBC_dup:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dup");
emit_dup();
break;
}
case JBC_dup_x1:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dup_x1");
emit_dup_x1();
break;
}
case JBC_dup_x2:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dup_x2");
emit_dup_x2();
break;
}
case JBC_dup2:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dup2");
emit_dup2();
break;
}
case JBC_dup2_x1:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dup2_x1");
emit_dup2_x1();
break;
}
case JBC_dup2_x2:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dup2_x2");
emit_dup2_x2();
break;
}
case JBC_swap:
{
if (shouldPrint)
lister.noteBytecode(biStart, "swap");
emit_swap();
break;
}
case JBC_iadd:
{
if (shouldPrint)
lister.noteBytecode(biStart, "iadd");
emit_iadd();
break;
}
case JBC_ladd:
{
if (shouldPrint)
lister.noteBytecode(biStart, "ladd");
emit_ladd();
break;
}
case JBC_fadd:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fadd");
emit_fadd();
break;
}
case JBC_dadd:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dadd");
emit_dadd();
break;
}
case JBC_isub:
{
if (shouldPrint)
lister.noteBytecode(biStart, "isub");
emit_isub();
break;
}
case JBC_lsub:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lsub");
emit_lsub();
break;
}
case JBC_fsub:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fsub");
emit_fsub();
break;
}
case JBC_dsub:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dsub");
emit_dsub();
break;
}
case JBC_imul:
{
if (shouldPrint)
lister.noteBytecode(biStart, "imul");
emit_imul();
break;
}
case JBC_lmul:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lmul");
emit_lmul();
break;
}
case JBC_fmul:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fmul");
emit_fmul();
break;
}
case JBC_dmul:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dmul");
emit_dmul();
break;
}
case JBC_idiv:
{
if (shouldPrint)
lister.noteBytecode(biStart, "idiv");
emit_idiv();
break;
}
case JBC_ldiv:
{
if (shouldPrint)
lister.noteBytecode(biStart, "ldiv");
emit_ldiv();
break;
}
case JBC_fdiv:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fdiv");
emit_fdiv();
break;
}
case JBC_ddiv:
{
if (shouldPrint)
lister.noteBytecode(biStart, "ddiv");
emit_ddiv();
break;
}
case JBC_irem:
{
if (shouldPrint)
lister.noteBytecode(biStart, "irem");
emit_irem();
break;
}
case JBC_lrem:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lrem");
emit_lrem();
break;
}
case JBC_frem:
{
if (shouldPrint)
lister.noteBytecode(biStart, "frem");
emit_frem();
break;
}
case JBC_drem:
{
if (shouldPrint)
lister.noteBytecode(biStart, "drem");
emit_drem();
break;
}
case JBC_ineg:
{
if (shouldPrint)
lister.noteBytecode(biStart, "ineg");
emit_ineg();
break;
}
case JBC_lneg:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lneg");
emit_lneg();
break;
}
case JBC_fneg:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fneg");
emit_fneg();
break;
}
case JBC_dneg:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dneg");
emit_dneg();
break;
}
case JBC_ishl:
{
if (shouldPrint)
lister.noteBytecode(biStart, "ishl");
emit_ishl();
break;
}
case JBC_lshl:
{
// l >> n
if (shouldPrint)
lister.noteBytecode(biStart, "lshl");
emit_lshl();
break;
}
case JBC_ishr:
{
if (shouldPrint)
lister.noteBytecode(biStart, "ishr");
emit_ishr();
break;
}
case JBC_lshr:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lshr");
emit_lshr();
break;
}
case JBC_iushr:
{
if (shouldPrint)
lister.noteBytecode(biStart, "iushr");
emit_iushr();
break;
}
case JBC_lushr:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lushr");
emit_lushr();
break;
}
case JBC_iand:
{
if (shouldPrint)
lister.noteBytecode(biStart, "iand");
emit_iand();
break;
}
case JBC_land:
{
if (shouldPrint)
lister.noteBytecode(biStart, "land");
emit_land();
break;
}
case JBC_ior:
{
if (shouldPrint)
lister.noteBytecode(biStart, "ior");
emit_ior();
break;
}
case JBC_lor:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lor");
emit_lor();
break;
}
case JBC_ixor:
{
if (shouldPrint)
lister.noteBytecode(biStart, "ixor");
emit_ixor();
break;
}
case JBC_lxor:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lxor");
emit_lxor();
break;
}
case JBC_iinc:
{
int index = bcodes.getLocalNumber();
int val = bcodes.getIncrement();
if (shouldPrint)
lister.noteBytecode(biStart, "iinc", index, val);
emit_iinc(index, val);
break;
}
case JBC_i2l:
{
if (shouldPrint)
lister.noteBytecode(biStart, "i2l");
emit_i2l();
break;
}
case JBC_i2f:
{
if (shouldPrint)
lister.noteBytecode(biStart, "i2f");
emit_i2f();
break;
}
case JBC_i2d:
{
if (shouldPrint)
lister.noteBytecode(biStart, "i2d");
emit_i2d();
break;
}
case JBC_l2i:
{
if (shouldPrint)
lister.noteBytecode(biStart, "l2i");
emit_l2i();
break;
}
case JBC_l2f:
{
if (shouldPrint)
lister.noteBytecode(biStart, "l2f");
emit_l2f();
break;
}
case JBC_l2d:
{
if (shouldPrint)
lister.noteBytecode(biStart, "l2d");
emit_l2d();
break;
}
case JBC_f2i:
{
if (shouldPrint)
lister.noteBytecode(biStart, "f2i");
emit_f2i();
break;
}
case JBC_f2l:
{
if (shouldPrint)
lister.noteBytecode(biStart, "f2l");
emit_f2l();
break;
}
case JBC_f2d:
{
if (shouldPrint)
lister.noteBytecode(biStart, "f2d");
emit_f2d();
break;
}
case JBC_d2i:
{
if (shouldPrint)
lister.noteBytecode(biStart, "d2i");
emit_d2i();
break;
}
case JBC_d2l:
{
if (shouldPrint)
lister.noteBytecode(biStart, "d2l");
emit_d2l();
break;
}
case JBC_d2f:
{
if (shouldPrint)
lister.noteBytecode(biStart, "d2f");
emit_d2f();
break;
}
case JBC_int2byte:
{
if (shouldPrint)
lister.noteBytecode(biStart, "i2b");
emit_i2b();
break;
}
case JBC_int2char:
{
if (shouldPrint)
lister.noteBytecode(biStart, "i2c");
emit_i2c();
break;
}
case JBC_int2short:
{
if (shouldPrint)
lister.noteBytecode(biStart, "i2s");
emit_i2s();
break;
}
case JBC_lcmp:
{
// a ? b
if (shouldPrint)
lister.noteBytecode(biStart, "lcmp");
emit_lcmp();
break;
}
case JBC_fcmpl:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fcmpl");
emit_DFcmpGL(true, false);
break;
}
case JBC_fcmpg:
{
if (shouldPrint)
lister.noteBytecode(biStart, "fcmpg");
emit_DFcmpGL(true, true);
break;
}
case JBC_dcmpl:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dcmpl");
emit_DFcmpGL(false, false);
break;
}
case JBC_dcmpg:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dcmpg");
emit_DFcmpGL(false, true);
break;
}
case JBC_ifeq:
{
do_if(biStart, BranchCondition.EQ);
break;
}
case JBC_ifne:
{
do_if(biStart, BranchCondition.NE);
break;
}
case JBC_iflt:
{
do_if(biStart, BranchCondition.LT);
break;
}
case JBC_ifge:
{
do_if(biStart, BranchCondition.GE);
break;
}
case JBC_ifgt:
{
do_if(biStart, BranchCondition.GT);
break;
}
case JBC_ifle:
{
do_if(biStart, BranchCondition.LE);
break;
}
case JBC_if_icmpeq:
{
do_if_icmp(biStart, BranchCondition.EQ);
break;
}
case JBC_if_icmpne:
{
do_if_icmp(biStart, BranchCondition.NE);
break;
}
case JBC_if_icmplt:
{
do_if_icmp(biStart, BranchCondition.LT);
break;
}
case JBC_if_icmpge:
{
do_if_icmp(biStart, BranchCondition.GE);
break;
}
case JBC_if_icmpgt:
{
do_if_icmp(biStart, BranchCondition.GT);
break;
}
case JBC_if_icmple:
{
do_if_icmp(biStart, BranchCondition.LE);
break;
}
case JBC_if_acmpeq:
{
int offset = bcodes.getBranchOffset();
int bTarget = biStart + offset;
if (shouldPrint)
lister.noteBranchBytecode(biStart, "if_acmpeq", offset, bTarget);
if (offset <= 0)
emit_threadSwitchTest(RVMThread.BACKEDGE);
emit_if_acmpeq(bTarget);
break;
}
case JBC_if_acmpne:
{
int offset = bcodes.getBranchOffset();
int bTarget = biStart + offset;
if (shouldPrint)
lister.noteBranchBytecode(biStart, "if_acmpne", offset, bTarget);
if (offset <= 0)
emit_threadSwitchTest(RVMThread.BACKEDGE);
emit_if_acmpne(bTarget);
break;
}
case JBC_goto:
{
int offset = bcodes.getBranchOffset();
// bi has been bumped by 3 already
int bTarget = biStart + offset;
if (shouldPrint)
lister.noteBranchBytecode(biStart, "goto", offset, bTarget);
if (offset <= 0)
emit_threadSwitchTest(RVMThread.BACKEDGE);
emit_goto(bTarget);
break;
}
case JBC_jsr:
{
int offset = bcodes.getBranchOffset();
int bTarget = biStart + offset;
if (shouldPrint)
lister.noteBranchBytecode(biStart, "jsr", offset, bTarget);
emit_jsr(bTarget);
break;
}
case JBC_ret:
{
int index = bcodes.getLocalNumber();
if (shouldPrint)
lister.noteBytecode(biStart, "ret ", index);
emit_ret(index);
break;
}
case JBC_tableswitch:
{
bcodes.alignSwitch();
int defaultval = bcodes.getDefaultSwitchOffset();
int low = bcodes.getLowSwitchValue();
int high = bcodes.getHighSwitchValue();
if (shouldPrint)
lister.noteTableswitchBytecode(biStart, low, high, defaultval);
emit_tableswitch(defaultval, low, high);
break;
}
case JBC_lookupswitch:
{
bcodes.alignSwitch();
int defaultval = bcodes.getDefaultSwitchOffset();
int npairs = bcodes.getSwitchLength();
if (shouldPrint)
lister.noteLookupswitchBytecode(biStart, npairs, defaultval);
emit_lookupswitch(defaultval, npairs);
break;
}
case JBC_ireturn:
{
if (shouldPrint)
lister.noteBytecode(biStart, "ireturn");
if (VM.UseEpilogueYieldPoints)
emit_threadSwitchTest(RVMThread.EPILOGUE);
emit_ireturn();
break;
}
case JBC_lreturn:
{
if (shouldPrint)
lister.noteBytecode(biStart, "lreturn");
if (VM.UseEpilogueYieldPoints)
emit_threadSwitchTest(RVMThread.EPILOGUE);
emit_lreturn();
break;
}
case JBC_freturn:
{
if (shouldPrint)
lister.noteBytecode(biStart, "freturn");
if (VM.UseEpilogueYieldPoints)
emit_threadSwitchTest(RVMThread.EPILOGUE);
emit_freturn();
break;
}
case JBC_dreturn:
{
if (shouldPrint)
lister.noteBytecode(biStart, "dreturn");
if (VM.UseEpilogueYieldPoints)
emit_threadSwitchTest(RVMThread.EPILOGUE);
emit_dreturn();
break;
}
case JBC_areturn:
{
if (shouldPrint)
lister.noteBytecode(biStart, "areturn");
if (VM.UseEpilogueYieldPoints)
emit_threadSwitchTest(RVMThread.EPILOGUE);
emit_areturn();
break;
}
case JBC_return:
{
if (shouldPrint)
lister.noteBytecode(biStart, "return");
if (VM.UseEpilogueYieldPoints)
emit_threadSwitchTest(RVMThread.EPILOGUE);
emit_return();
break;
}
case JBC_getstatic:
{
FieldReference fieldRef = bcodes.getFieldReference();
if (shouldPrint)
lister.noteBytecode(biStart, "getstatic", fieldRef);
if (fieldRef.needsDynamicLink(method)) {
// interruptions
if (VM.VerifyUnint && !isInterruptible)
forbiddenBytecode("unresolved getstatic ", fieldRef, bcodes.index());
emit_unresolved_getstatic(fieldRef);
} else {
emit_resolved_getstatic(fieldRef);
}
break;
}
case JBC_putstatic:
{
FieldReference fieldRef = bcodes.getFieldReference();
if (shouldPrint)
lister.noteBytecode(biStart, "putstatic", fieldRef);
if (fieldRef.needsDynamicLink(method)) {
// interruptions
if (VM.VerifyUnint && !isInterruptible)
forbiddenBytecode("unresolved putstatic ", fieldRef, bcodes.index());
emit_unresolved_putstatic(fieldRef);
} else {
emit_resolved_putstatic(fieldRef);
}
break;
}
case JBC_getfield:
{
FieldReference fieldRef = bcodes.getFieldReference();
if (shouldPrint)
lister.noteBytecode(biStart, "getfield", fieldRef);
if (fieldRef.needsDynamicLink(method)) {
// interruptions
if (VM.VerifyUnint && !isInterruptible)
forbiddenBytecode("unresolved getfield ", fieldRef, bcodes.index());
emit_unresolved_getfield(fieldRef);
} else {
emit_resolved_getfield(fieldRef);
}
break;
}
case JBC_putfield:
{
FieldReference fieldRef = bcodes.getFieldReference();
if (shouldPrint)
lister.noteBytecode(biStart, "putfield", fieldRef);
if (fieldRef.needsDynamicLink(method)) {
// interruptions
if (VM.VerifyUnint && !isInterruptible)
forbiddenBytecode("unresolved putfield ", fieldRef, bcodes.index());
emit_unresolved_putfield(fieldRef);
} else {
emit_resolved_putfield(fieldRef);
}
break;
}
case JBC_invokevirtual:
{
ForwardReference xx = null;
if (biStart == this.pendingIdx) {
// goto X
ForwardReference x = emit_pending_goto(0);
// pendingIdx: (target of pending goto in prologue)
this.pendingRef.resolve(asm);
CompiledMethod cm = CompiledMethods.getCompiledMethod(this.pendingCMID);
if (VM.VerifyAssertions)
VM._assert(cm.isSpecialForOSR());
// invoke_cmid
emit_invoke_compiledmethod(cm);
// goto XX
xx = emit_pending_goto(0);
// X:
x.resolve(asm);
}
MethodReference methodRef = bcodes.getMethodReference();
if (shouldPrint)
lister.noteBytecode(biStart, "invokevirtual", methodRef);
if (methodRef.getType().isMagicType()) {
if (emit_Magic(methodRef)) {
break;
}
}
if (methodRef.isMiranda()) {
/* Special case of abstract interface method should generate
* an invokeinterface, despite the compiler claiming it should
* be invokevirtual.
*/
if (shouldPrint)
lister.noteBytecode(biStart, "invokeinterface", methodRef);
// causes runtime checks that can be interrupted
if (VM.VerifyUnint && !isInterruptible)
forbiddenBytecode("invokeinterface ", methodRef, bcodes.index());
emit_invokeinterface(methodRef);
} else {
if (methodRef.needsDynamicLink(method)) {
// cause interruptions
if (VM.VerifyUnint && !isInterruptible)
forbiddenBytecode("unresolved invokevirtual ", methodRef, bcodes.index());
emit_unresolved_invokevirtual(methodRef);
} else {
if (VM.VerifyUnint && !isInterruptible)
checkTarget(methodRef.peekResolvedMethod(), bcodes.index());
emit_resolved_invokevirtual(methodRef);
}
}
if (xx != null) {
// XX:
xx.resolve(asm);
}
break;
}
case JBC_invokespecial:
{
ForwardReference xx = null;
if (biStart == this.pendingIdx) {
// goto X
ForwardReference x = emit_pending_goto(0);
// pendingIdx: (target of pending goto in prologue)
this.pendingRef.resolve(asm);
CompiledMethod cm = CompiledMethods.getCompiledMethod(this.pendingCMID);
if (VM.VerifyAssertions)
VM._assert(cm.isSpecialForOSR());
// invoke_cmid
emit_invoke_compiledmethod(cm);
// goto XX
xx = emit_pending_goto(0);
// X:
x.resolve(asm);
}
MethodReference methodRef = bcodes.getMethodReference();
if (shouldPrint)
lister.noteBytecode(biStart, "invokespecial", methodRef);
RVMMethod target = methodRef.resolveInvokeSpecial();
if (target != null) {
if (VM.VerifyUnint && !isInterruptible)
checkTarget(target, bcodes.index());
emit_resolved_invokespecial(methodRef, target);
} else {
emit_unresolved_invokespecial(methodRef);
}
if (xx != null) {
// XX:
xx.resolve(asm);
}
break;
}
case JBC_invokestatic:
{
ForwardReference xx = null;
if (biStart == this.pendingIdx) {
// goto X
ForwardReference x = emit_pending_goto(0);
// pendingIdx: (target of pending goto in prologue)
this.pendingRef.resolve(asm);
CompiledMethod cm = CompiledMethods.getCompiledMethod(this.pendingCMID);
if (VM.VerifyAssertions)
VM._assert(cm.isSpecialForOSR());
// invoke_cmid
emit_invoke_compiledmethod(cm);
// goto XX
xx = emit_pending_goto(0);
// X:
x.resolve(asm);
}
MethodReference methodRef = bcodes.getMethodReference();
if (shouldPrint)
lister.noteBytecode(biStart, "invokestatic", methodRef);
if (methodRef.isMagic()) {
if (emit_Magic(methodRef)) {
break;
}
}
if (methodRef.needsDynamicLink(method)) {
// cause interruptions
if (VM.VerifyUnint && !isInterruptible)
forbiddenBytecode("unresolved invokestatic ", methodRef, bcodes.index());
emit_unresolved_invokestatic(methodRef);
} else {
if (VM.VerifyUnint && !isInterruptible)
checkTarget(methodRef.peekResolvedMethod(), bcodes.index());
emit_resolved_invokestatic(methodRef);
}
if (xx != null) {
// XX:
xx.resolve(asm);
}
break;
}
case JBC_invokeinterface:
{
ForwardReference xx = null;
if (biStart == this.pendingIdx) {
// goto X
ForwardReference x = emit_pending_goto(0);
// pendingIdx: (target of pending goto in prologue)
this.pendingRef.resolve(asm);
CompiledMethod cm = CompiledMethods.getCompiledMethod(this.pendingCMID);
if (VM.VerifyAssertions)
VM._assert(cm.isSpecialForOSR());
// invoke_cmid
emit_invoke_compiledmethod(cm);
// goto XX
xx = emit_pending_goto(0);
// X:
x.resolve(asm);
}
MethodReference methodRef = bcodes.getMethodReference();
bcodes.alignInvokeInterface();
if (shouldPrint)
lister.noteBytecode(biStart, "invokeinterface", methodRef);
// causes runtime checks that can be interrupted
if (VM.VerifyUnint && !isInterruptible)
forbiddenBytecode("invokeinterface ", methodRef, bcodes.index());
emit_invokeinterface(methodRef);
if (xx != null) {
// XX:
xx.resolve(asm);
}
break;
}
case JBC_invokedynamic:
{
if (shouldPrint)
lister.noteBytecode(biStart, "unused");
if (VM.VerifyAssertions)
VM._assert(VM.NOT_REACHED);
break;
}
case JBC_new:
{
TypeReference typeRef = bcodes.getTypeReference();
if (shouldPrint)
lister.noteBytecode(biStart, "new", typeRef);
// that are interruptible
if (VM.VerifyUnint && isUninterruptible)
forbiddenBytecode("new ", typeRef, bcodes.index());
RVMType type = typeRef.peekType();
if (type != null && (type.isInitialized() || type.isInBootImage())) {
emit_resolved_new(type.asClass());
} else {
if (VM.VerifyUnint && isUnpreemptible)
forbiddenBytecode("unresolved new ", typeRef, bcodes.index());
emit_unresolved_new(typeRef);
}
break;
}
case JBC_newarray:
{
int atype = bcodes.getArrayElementType();
RVMArray array = RVMArray.getPrimitiveArrayType(atype);
if (VM.VerifyAssertions) {
boolean resolved = array.isResolved();
if (!resolved) {
String msg = "Found reference to unresolved array type " + array + " while compiling newarray bytecode in method " + method;
VM._assert(VM.NOT_REACHED, msg);
}
}
// that are interruptible
if (shouldPrint)
lister.noteBytecode(biStart, "newarray", array.getTypeRef());
if (VM.VerifyUnint && isUninterruptible)
forbiddenBytecode("newarray ", array, bcodes.index());
emit_resolved_newarray(array);
break;
}
case JBC_anewarray:
{
TypeReference elementTypeRef = bcodes.getTypeReference();
TypeReference arrayRef = elementTypeRef.getArrayTypeForElementType();
if (shouldPrint)
lister.noteBytecode(biStart, "anewarray new", arrayRef);
// that are interruptible
if (VM.VerifyUnint && !isInterruptible)
forbiddenBytecode("anewarray ", arrayRef, bcodes.index());
if (VM.VerifyAssertions && elementTypeRef.isUnboxedType()) {
String msg = "During compilation of " + method + " found an anewarray of " + elementTypeRef + "\n" + "You must use the 'create' function to create an array of this type";
VM._assert(VM.NOT_REACHED, msg);
}
RVMArray array = (RVMArray) arrayRef.peekType();
if (RVMType.JavaLangObjectType.isInstantiated()) {
// We need Object to be instantiated because we are going to mine it's TIB to get entries for array methods...
if (array == null || !(array.isInitialized() || array.isInBootImage())) {
RVMType elementType = elementTypeRef.peekType();
if (elementType != null && (elementType.isInitialized() || elementType.isInBootImage())) {
if (array == null) {
array = (RVMArray) arrayRef.resolve();
}
array.resolve();
array.instantiate();
}
}
}
if (array != null && (array.isInitialized() || array.isInBootImage())) {
emit_resolved_newarray(array);
} else {
emit_unresolved_newarray(arrayRef);
}
break;
}
case JBC_arraylength:
{
if (shouldPrint)
lister.noteBytecode(biStart, "arraylength");
emit_arraylength();
break;
}
case JBC_athrow:
{
if (shouldPrint)
lister.noteBytecode(biStart, "athrow");
if (VM.UseEpilogueYieldPoints)
emit_threadSwitchTest(RVMThread.EPILOGUE);
// that are interruptible
if (VM.VerifyUnint && isUninterruptible)
forbiddenBytecode("athrow", bcodes.index());
emit_athrow();
break;
}
case JBC_checkcast:
{
TypeReference typeRef = bcodes.getTypeReference();
if (shouldPrint)
lister.noteBytecode(biStart, "checkcast", typeRef);
RVMType type = typeRef.peekType();
if (type != null) {
if (type.isClassType()) {
RVMClass cType = type.asClass();
if (cType.isFinal()) {
emit_checkcast_final(cType);
break;
} else if (cType.isResolved()) {
if (cType.isInterface()) {
emit_checkcast_resolvedInterface(cType);
} else {
emit_checkcast_resolvedClass(cType);
}
break;
}
// else fall through to emit_checkcast
} else if (type.isArrayType()) {
RVMType elemType = type.asArray().getElementType();
if (elemType.isPrimitiveType() || elemType.isUnboxedType() || (elemType.isClassType() && elemType.asClass().isFinal())) {
emit_checkcast_final(type);
break;
}
// else fall through to emit_checkcast
} else {
// checkcast to a primitive. Must be a word type.
if (VM.VerifyAssertions)
VM._assert(type.getTypeRef().isUnboxedType());
break;
}
}
// that executes via interruptible code
if (VM.VerifyUnint && !isInterruptible)
forbiddenBytecode("checkcast ", typeRef, bcodes.index());
emit_checkcast(typeRef);
break;
}
case JBC_instanceof:
{
TypeReference typeRef = bcodes.getTypeReference();
if (shouldPrint)
lister.noteBytecode(biStart, "instanceof", typeRef);
RVMType type = typeRef.peekType();
if (type != null) {
if (type.isClassType()) {
RVMClass cType = type.asClass();
if (cType.isFinal()) {
emit_instanceof_final(type);
break;
} else if (cType.isResolved()) {
if (cType.isInterface()) {
emit_instanceof_resolvedInterface(cType);
} else {
emit_instanceof_resolvedClass(cType);
}
break;
}
} else if (type.isArrayType()) {
RVMType elemType = type.asArray().getElementType();
if (elemType.isPrimitiveType() || elemType.isUnboxedType() || (elemType.isClassType() && elemType.asClass().isFinal())) {
emit_instanceof_final(type);
break;
}
}
}
// for its implementation
if (VM.VerifyUnint && !isInterruptible)
forbiddenBytecode("instanceof ", typeRef, bcodes.index());
emit_instanceof(typeRef);
break;
}
case JBC_monitorenter:
{
if (shouldPrint)
lister.noteBytecode(biStart, "monitorenter");
// for its implementation
if (VM.VerifyUnint && isUninterruptible)
forbiddenBytecode("monitorenter", bcodes.index());
emit_monitorenter();
break;
}
case JBC_monitorexit:
{
if (shouldPrint)
lister.noteBytecode(biStart, "monitorexit");
// for its implementation
if (VM.VerifyUnint && isUninterruptible)
forbiddenBytecode("monitorexit", bcodes.index());
emit_monitorexit();
break;
}
case JBC_wide:
{
int widecode = bcodes.getWideOpcode();
int index = bcodes.getWideLocalNumber();
switch(widecode) {
case JBC_iload:
{
if (shouldPrint)
lister.noteBytecode(biStart, "wide iload", index);
emit_iload(index);
break;
}
case JBC_lload:
{
if (shouldPrint)
lister.noteBytecode(biStart, "wide lload", index);
emit_lload(index);
break;
}
case JBC_fload:
{
if (shouldPrint)
lister.noteBytecode(biStart, "wide fload", index);
emit_fload(index);
break;
}
case JBC_dload:
{
if (shouldPrint)
lister.noteBytecode(biStart, "wide dload", index);
emit_dload(index);
break;
}
case JBC_aload:
{
if (shouldPrint)
lister.noteBytecode(biStart, "wide aload", index);
emit_aload(index);
break;
}
case JBC_istore:
{
if (shouldPrint)
lister.noteBytecode(biStart, "wide istore", index);
emit_istore(index);
break;
}
case JBC_lstore:
{
if (shouldPrint)
lister.noteBytecode(biStart, "wide lstore", index);
emit_lstore(index);
break;
}
case JBC_fstore:
{
if (shouldPrint)
lister.noteBytecode(biStart, "wide fstore", index);
emit_fstore(index);
break;
}
case JBC_dstore:
{
if (shouldPrint)
lister.noteBytecode(biStart, "wide dstore", index);
emit_dstore(index);
break;
}
case JBC_astore:
{
if (shouldPrint)
lister.noteBytecode(biStart, "wide astore", index);
emit_astore(index);
break;
}
case JBC_iinc:
{
int val = bcodes.getWideIncrement();
if (shouldPrint)
lister.noteBytecode(biStart, "wide inc", index, val);
emit_iinc(index, val);
break;
}
case JBC_ret:
{
if (shouldPrint)
lister.noteBytecode(biStart, "wide ret", index);
emit_ret(index);
break;
}
default:
if (VM.VerifyAssertions)
VM._assert(VM.NOT_REACHED);
}
break;
}
case JBC_multianewarray:
{
TypeReference typeRef = bcodes.getTypeReference();
int dimensions = bcodes.getArrayDimension();
if (shouldPrint)
lister.noteBytecode(biStart, "multianewarray", typeRef);
// that are interruptible
if (VM.VerifyUnint && !isInterruptible)
forbiddenBytecode("multianewarray", bcodes.index());
emit_multianewarray(typeRef, dimensions);
break;
}
case JBC_ifnull:
{
int offset = bcodes.getBranchOffset();
int bTarget = biStart + offset;
if (shouldPrint)
lister.noteBranchBytecode(biStart, "ifnull", offset, bTarget);
if (offset <= 0)
emit_threadSwitchTest(RVMThread.BACKEDGE);
emit_ifnull(bTarget);
break;
}
case JBC_ifnonnull:
{
int offset = bcodes.getBranchOffset();
int bTarget = biStart + offset;
if (shouldPrint)
lister.noteBranchBytecode(biStart, "ifnonnull", offset, bTarget);
if (offset <= 0)
emit_threadSwitchTest(RVMThread.BACKEDGE);
emit_ifnonnull(bTarget);
break;
}
case JBC_goto_w:
{
int offset = bcodes.getWideBranchOffset();
int bTarget = biStart + offset;
if (shouldPrint)
lister.noteBranchBytecode(biStart, "goto_w", offset, bTarget);
if (offset <= 0)
emit_threadSwitchTest(RVMThread.BACKEDGE);
emit_goto(bTarget);
break;
}
case JBC_jsr_w:
{
int offset = bcodes.getWideBranchOffset();
int bTarget = biStart + offset;
if (shouldPrint)
lister.noteBranchBytecode(biStart, "jsr_w", offset, bTarget);
emit_jsr(bTarget);
break;
}
/* CAUTION: cannot use JBC_impdep1, which is 0xfffffffe (signed),
* this is not consistent with OPT compiler.
*/
case JBC_impdep1:
/* --- pseudo bytecode --- */
{
if (VM.BuildForAdaptiveSystem) {
int pseudo_opcode = bcodes.nextPseudoInstruction();
// pseudo instruction
switch(pseudo_opcode) {
case org.jikesrvm.osr.OSRConstants.PSEUDO_LoadIntConst:
{
int value = bcodes.readIntConst();
if (shouldPrint)
lister.noteBytecode(biStart, "pseudo_load_int", value);
Offset offset = Offset.fromIntSignExtend(Statics.findOrCreateIntSizeLiteral(value));
emit_ldc(offset, CP_INT);
break;
}
case org.jikesrvm.osr.OSRConstants.PSEUDO_LoadLongConst:
{
// fetch8BytesUnsigned();
long value = bcodes.readLongConst();
if (shouldPrint)
lister.noteBytecode(biStart, "pseudo_load_long", value);
Offset offset = Offset.fromIntSignExtend(Statics.findOrCreateLongSizeLiteral(value));
emit_ldc2(offset, CP_LONG);
break;
}
case org.jikesrvm.osr.OSRConstants.PSEUDO_LoadWordConst:
{
if (VM.BuildFor32Addr) {
int value = bcodes.readIntConst();
if (shouldPrint)
lister.noteBytecode(biStart, "pseudo_load_word " + Integer.toHexString(value));
Offset offset = Offset.fromIntSignExtend(Statics.findOrCreateIntSizeLiteral(value));
emit_ldc(offset, CP_INT);
} else {
long value = bcodes.readLongConst();
if (shouldPrint)
lister.noteBytecode(biStart, "pseudo_load_word " + Long.toHexString(value));
Offset offset = Offset.fromIntSignExtend(Statics.findOrCreateLongSizeLiteral(value));
emit_ldc2(offset, CP_LONG);
// dirty hack
emit_l2i();
}
break;
}
case org.jikesrvm.osr.OSRConstants.PSEUDO_LoadFloatConst:
{
// fetch4BytesSigned();
int ibits = bcodes.readIntConst();
if (shouldPrint)
lister.noteBytecode(biStart, "pseudo_load_float", ibits);
Offset offset = Offset.fromIntSignExtend(Statics.findOrCreateIntSizeLiteral(ibits));
emit_ldc(offset, CP_FLOAT);
break;
}
case org.jikesrvm.osr.OSRConstants.PSEUDO_LoadDoubleConst:
{
// fetch8BytesUnsigned();
long lbits = bcodes.readLongConst();
if (shouldPrint)
lister.noteBytecode(biStart, "pseudo_load_double", lbits);
Offset offset = Offset.fromIntSignExtend(Statics.findOrCreateLongSizeLiteral(lbits));
emit_ldc2(offset, CP_DOUBLE);
break;
}
case org.jikesrvm.osr.OSRConstants.PSEUDO_LoadRetAddrConst:
{
// fetch4BytesSigned();
int bcIndex = bcodes.readIntConst();
if (shouldPrint)
lister.noteBytecode(biStart, "pseudo_load_retaddr", bcIndex);
// for bytecode to get future bytecode's address
// we register it and patch it later.
emit_loadretaddrconst(bcIndex);
break;
}
case org.jikesrvm.osr.OSRConstants.PSEUDO_InvokeStatic:
{
// fetch4BytesSigned();
int targetidx = bcodes.readIntConst();
RVMMethod methodRef = InvokeStatic.targetMethod(targetidx);
if (shouldPrint)
lister.noteBytecode(biStart, "pseudo_invokestatic", methodRef);
emit_resolved_invokestatic(methodRef.getMemberRef().asMethodReference());
break;
}
/*
case org.jikesrvm.osr.OSRConstants.PSEUDO_CheckCast: {
if (shouldPrint) lister.noteBytecode(biStart, "pseudo_checkcast");
// fetch 4 byte type id
int tid = bcodes.readIntConst(); // fetch4BytesSigned();
// do nothing now
break;
}
*/
case org.jikesrvm.osr.OSRConstants.PSEUDO_InvokeCompiledMethod:
{
// fetch4BytesSigned(); // callee's cmid
int cmid = bcodes.readIntConst();
int origIdx = // fetch4BytesSigned(); // orginal bytecode index of this call (for build gc map)
bcodes.readIntConst();
if (shouldPrint)
lister.noteBytecode(biStart, "pseudo_invoke_cmid", cmid);
this.pendingCMID = cmid;
this.pendingIdx = origIdx + this.method.getOsrPrologueLength();
this.pendingRef = emit_pending_goto(this.pendingIdx);
/*
CompiledMethod cm = CompiledMethods.getCompiledMethod(cmid);
if (VM.VerifyAssertions) VM._assert(cm.isSpecialForOSR());
emit_invoke_compiledmethod(cm);
*/
break;
}
case org.jikesrvm.osr.OSRConstants.PSEUDO_ParamInitEnd:
{
if (shouldPrint)
lister.noteBytecode(biStart, "pseudo_paraminitend");
// now we can inserted stack overflow check,
emit_deferred_prologue();
break;
}
default:
if (VM.TraceOnStackReplacement) {
VM.sysWriteln("Unexpected PSEUDO code " + Services.intAsHexString(pseudo_opcode));
}
if (VM.VerifyAssertions)
VM._assert(VM.NOT_REACHED);
break;
}
} else {
if (VM.VerifyAssertions)
VM._assert(VM.NOT_REACHED);
}
break;
}
default:
VM.sysWriteln("BaselineCompilerImpl: unexpected bytecode: " + Services.getHexString(code, false));
if (VM.VerifyAssertions)
VM._assert(VM.NOT_REACHED);
}
ending_bytecode();
}
bytecodeMap[bcodes.length()] = asm.getMachineCodeIndex();
ending_method();
return new MachineCode(getAssembler().getMachineCodes(), bytecodeMap);
}
Aggregations