Example 1 with Assembler

use of org.jikesrvm.compilers.common.assembler.ppc.Assembler in project JikesRVM by JikesRVM.

the class JNICompiler method genSVR4ParameterPassingCode.

/**
 * Generates instructions to copy parameters from RVM convention to OS convention.
 * @param asm         The {@link Assembler} object
 * @param types       The parameter types
 * @param nextVMArgReg   The first parameter GPR in RVM convention,
 *                      the last parameter GPR is defined as LAST_VOLATILE_GPR.
 * @param nextVMArgFloatReg The first parameter FPR in RVM convention,
 *                           the last parameter FPR is defined as LAST_VOLATILE_FPR.
 * @param spillOffsetVM  The spill offset (relative to FP) in RVM convention
 * @param nextOSArgReg  The first parameter GPR in OS convention,
 *                      the last parameter GPR is defined as LAST_OS_PARAMETER_GPR.
 * @param nextOSArgFloatReg  The first parameter FPR in OS convention,
 *                           the last parameter FPR is defined as LAST_OS_PARAMETER_FPR.
 * @param spillOffsetOS  The spill offset (relative to FP) in OS convention
 */
private static void genSVR4ParameterPassingCode(Assembler asm, TypeReference[] types, int nextVMArgReg, int nextVMArgFloatReg, int spillOffsetVM, int nextOSArgReg, int nextOSArgFloatReg, int spillOffsetOS) {
    if (VM.BuildForSVR4ABI) {
        // create one Assembler object for each argument
        // This is needed for the following reasons:
        //  - 2 new arguments are added in front for native methods, so the normal arguments
        //    need to be shifted down in addition to being moved
        //  - to avoid overwriting each other, the arguments must be copied in reverse order
        //  - the analysis for the mapping, however, must be done in forward order
        //  - the moving/mapping for each argument may involve a sequence of 1-3 instructions,
        //    which must be kept in the normal order
        // To solve this problem, the instructions for each argument are generated in its
        // own Assembler in the forward pass; then, in the reverse pass, each Assembler
        // emits its instruction sequence and copies it into the main Assembler
        int numArguments = types.length;
        Assembler[] asmForArgs = new Assembler[numArguments];
        for (int arg = 0; arg < numArguments; arg++) {
            asmForArgs[arg] = new Assembler(0);
            Assembler asmArg = asmForArgs[arg];
            // 
            if (types[arg].isFloatingPointType()) {
                boolean is32bits = types[arg].isFloatType();
                // 1. check the source, the value will be in srcVMArg
                // scratch fpr
                FPR srcVMArg;
                if (nextVMArgFloatReg <= LAST_VOLATILE_FPR.value()) {
                    srcVMArg = FPR.lookup(nextVMArgFloatReg);
                    nextVMArgFloatReg++;
                } else {
                    srcVMArg = FIRST_SCRATCH_FPR;
                    // VM float reg is in spill area
                    if (is32bits) {
                        spillOffsetVM += BYTES_IN_STACKSLOT;
                        asmArg.emitLFS(srcVMArg, spillOffsetVM - BYTES_IN_FLOAT, FP);
                    } else {
                        asmArg.emitLFD(srcVMArg, spillOffsetVM, FP);
                        spillOffsetVM += BYTES_IN_DOUBLE;
                    }
                }
                // 2. check the destination,
                if (nextOSArgFloatReg <= LAST_OS_PARAMETER_FPR.value()) {
                    // leave it there
                    nextOSArgFloatReg++;
                } else {
                    if (VM.BuildForSVR4ABI) {
                        if (is32bits) {
                            asmArg.emitSTFS(srcVMArg, spillOffsetOS, FP);
                            spillOffsetOS += BYTES_IN_ADDRESS;
                        } else {
                            // spill it, round the spill address to 8
                            // assuming FP is aligned to 8
                            spillOffsetOS = (spillOffsetOS + 7) & -8;
                            asmArg.emitSTFD(srcVMArg, spillOffsetOS, FP);
                            spillOffsetOS += BYTES_IN_DOUBLE;
                        }
                    }
                }
            // for 64-bit long arguments
            } else if (types[arg].isLongType() && VM.BuildFor32Addr) {
                // handle OS first
                boolean dstSpilling;
                // it is register number or spilling offset
                int regOrSpilling = -1;
                // 1. check if Linux register > 9
                if (nextOSArgReg > (LAST_OS_PARAMETER_GPR.value() - 1)) {
                    // goes to spilling area
                    dstSpilling = true;
                    if (VM.BuildForSVR4ABI) {
                        /* NOTE: following adjustment is not stated in SVR4 ABI, but
                         * was implemented in GCC.
                         * -- Feng
                         */
                        nextOSArgReg = LAST_OS_PARAMETER_GPR.value() + 1;
                        // do alignment and compute spilling offset
                        spillOffsetOS = (spillOffsetOS + 7) & -8;
                        regOrSpilling = spillOffsetOS;
                        spillOffsetOS += BYTES_IN_LONG;
                    }
                } else {
                    // use registers
                    dstSpilling = false;
                    if (VM.BuildForSVR4ABI) {
                        // rounds to odd
                        // if gpr is even, gpr += 1
                        nextOSArgReg += (nextOSArgReg + 1) & 0x01;
                        regOrSpilling = nextOSArgReg;
                        nextOSArgReg += 2;
                    }
                }
                // handle RVM source
                if (nextVMArgReg < LAST_VOLATILE_GPR.value()) {
                    // both parts in registers
                    if (dstSpilling) {
                        asmArg.emitSTW(GPR.lookup(nextVMArgReg + 1), regOrSpilling + 4, FP);
                        if (VM.BuildForSVR4ABI) {
                            asmArg.emitSTW(GPR.lookup(nextVMArgReg), regOrSpilling, FP);
                        }
                    } else {
                        asmArg.emitMR(GPR.lookup(regOrSpilling + 1), GPR.lookup(nextVMArgReg + 1));
                        asmArg.emitMR(GPR.lookup(regOrSpilling), GPR.lookup(nextVMArgReg));
                    }
                    // advance register counting, Linux register number
                    // already advanced
                    nextVMArgReg += 2;
                } else if (nextVMArgReg == LAST_VOLATILE_GPR.value()) {
                    // VM striding
                    if (dstSpilling) {
                        asmArg.emitLWZ(REGISTER_ZERO, spillOffsetVM, FP);
                        asmArg.emitSTW(REGISTER_ZERO, regOrSpilling + 4, FP);
                        asmArg.emitSTW(GPR.lookup(nextVMArgReg), regOrSpilling, FP);
                    } else {
                        asmArg.emitLWZ(GPR.lookup(regOrSpilling + 1), spillOffsetVM, FP);
                        asmArg.emitMR(GPR.lookup(regOrSpilling), GPR.lookup(nextVMArgReg));
                    }
                    // advance spillOffsetVM and nextVMArgReg
                    nextVMArgReg++;
                    spillOffsetVM += BYTES_IN_STACKSLOT;
                } else if (nextVMArgReg > LAST_VOLATILE_GPR.value()) {
                    if (dstSpilling) {
                        asmArg.emitLFD(FIRST_SCRATCH_FPR, spillOffsetVM, FP);
                        asmArg.emitSTFD(FIRST_SCRATCH_FPR, regOrSpilling, FP);
                    } else {
                        // this should not happen: the VM spills while the OS still has registers
                        asmArg.emitLWZ(GPR.lookup(regOrSpilling + 1), spillOffsetVM + 4, FP);
                        asmArg.emitLWZ(GPR.lookup(regOrSpilling), spillOffsetVM, FP);
                    }
                    spillOffsetVM += BYTES_IN_LONG;
                }
            } else if (types[arg].isLongType() && VM.BuildFor64Addr) {
                // handle OS first
                boolean dstSpilling;
                // it is register number or spilling offset
                int regOrSpilling = -1;
                // 1. check if Linux register > 9
                if (nextOSArgReg > LAST_OS_PARAMETER_GPR.value()) {
                    // goes to spilling area
                    dstSpilling = true;
                    /* NOTE: following adjustment is not stated in SVR4 ABI, but
                     * was implemented in GCC.
                     * -- Feng
                     */
                    nextOSArgReg = LAST_OS_PARAMETER_GPR.value() + 1;
                    // do alignment and compute spilling offset
                    spillOffsetOS = (spillOffsetOS + 7) & -8;
                    regOrSpilling = spillOffsetOS;
                    spillOffsetOS += BYTES_IN_LONG;
                } else {
                    // use registers
                    dstSpilling = false;
                    // no pair alignment needed here: on 64-bit a long fits in a single GPR
                    regOrSpilling = nextOSArgReg;
                    nextOSArgReg += 1;
                }
                // handle RVM source
                if (nextVMArgReg <= LAST_VOLATILE_GPR.value()) {
                    // both parts in registers
                    if (dstSpilling) {
                        asmArg.emitSTD(GPR.lookup(nextVMArgReg), regOrSpilling, FP);
                    } else {
                        asmArg.emitMR(GPR.lookup(regOrSpilling), GPR.lookup(nextVMArgReg));
                    }
                    // advance register counting, Linux register number
                    // already advanced
                    nextVMArgReg += 1;
                } else if (nextVMArgReg > LAST_VOLATILE_GPR.value()) {
                    if (dstSpilling) {
                        asmArg.emitLFD(FIRST_SCRATCH_FPR, spillOffsetVM, FP);
                        asmArg.emitSTFD(FIRST_SCRATCH_FPR, regOrSpilling, FP);
                    } else {
                        // this should not happen: the VM spills while the OS still has registers
                        asmArg.emitLD(GPR.lookup(regOrSpilling), spillOffsetVM, FP);
                    }
                    spillOffsetVM += BYTES_IN_LONG;
                }
            } else if (types[arg].isReferenceType()) {
                // For reference type, replace with handles before passing to native
                GPR srcreg;
                if (nextVMArgReg <= LAST_VOLATILE_GPR.value()) {
                    srcreg = GPR.lookup(nextVMArgReg++);
                } else {
                    srcreg = REGISTER_ZERO;
                    asmArg.emitLAddr(srcreg, spillOffsetVM, FP);
                    spillOffsetVM += BYTES_IN_ADDRESS;
                }
                // Are we passing NULL?
                asmArg.emitCMPI(srcreg, 0);
                ForwardReference isNull = asmArg.emitForwardBC(EQ);
                // NO: put it in the JNIRefs array and pass offset
                asmArg.emitSTAddrU(srcreg, BYTES_IN_ADDRESS, KLUDGE_TI_REG);
                if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
                    asmArg.emitSUBFC(GPR.lookup(nextOSArgReg), THREAD_REGISTER, KLUDGE_TI_REG);
                } else {
                    asmArg.emitSUBFC(REGISTER_ZERO, THREAD_REGISTER, KLUDGE_TI_REG);
                    asmArg.emitSTAddr(REGISTER_ZERO, spillOffsetOS, FP);
                }
                ForwardReference done = asmArg.emitForwardB();
                // YES: pass NULL (0)
                isNull.resolve(asmArg);
                if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
                    asmArg.emitLVAL(GPR.lookup(nextOSArgReg), 0);
                } else {
                    asmArg.emitSTAddr(srcreg, spillOffsetOS, FP);
                }
                // JOIN PATHS
                done.resolve(asmArg);
                if (VM.BuildForSVR4ABI) {
                    if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
                        nextOSArgReg++;
                    } else {
                        spillOffsetOS += BYTES_IN_ADDRESS;
                    }
                }
            } else {
                // (1a) fit in OS register, move the register
                if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
                    if (VM.BuildForSVR4ABI) {
                        asmArg.emitMR(GPR.lookup(nextOSArgReg++), GPR.lookup(nextVMArgReg++));
                    } else {
                        asmArg.emitMR(GPR.lookup(nextOSArgReg), GPR.lookup(nextVMArgReg++));
                    }
                } else if (nextVMArgReg <= LAST_VOLATILE_GPR.value()) {
                    // (1b) spill OS register, but still fit in VM register
                    asmArg.emitSTAddr(GPR.lookup(nextVMArgReg++), spillOffsetOS, FP);
                    if (VM.BuildForSVR4ABI) {
                        spillOffsetOS += BYTES_IN_ADDRESS;
                    }
                } else {
                    // (1c) spill VM register
                    spillOffsetVM += BYTES_IN_STACKSLOT;
                    // retrieve arg from VM spill area
                    asmArg.emitLInt(REGISTER_ZERO, spillOffsetVM - BYTES_IN_INT, FP);
                    asmArg.emitSTAddr(REGISTER_ZERO, spillOffsetOS, FP);
                    if (VM.BuildForSVR4ABI) {
                        spillOffsetOS += BYTES_IN_ADDRESS;
                    }
                }
            }
        }
        // append the per-argument code sequences to the main Assembler in reverse order
        // so that the moves do not overwrite parameters that have not been copied yet
        for (int arg = asmForArgs.length - 1; arg >= 0; arg--) {
            asm.appendInstructions(asmForArgs[arg].getMachineCodes());
        }
    }
}
Also used : ForwardReference(org.jikesrvm.compilers.common.assembler.ForwardReference) LAST_SCRATCH_GPR(org.jikesrvm.ppc.RegisterConstants.LAST_SCRATCH_GPR) LAST_NONVOLATILE_GPR(org.jikesrvm.ppc.RegisterConstants.LAST_NONVOLATILE_GPR) LAST_OS_PARAMETER_GPR(org.jikesrvm.ppc.RegisterConstants.LAST_OS_PARAMETER_GPR) LAST_RVM_RESERVED_NV_GPR(org.jikesrvm.ppc.RegisterConstants.LAST_RVM_RESERVED_NV_GPR) GPR(org.jikesrvm.ppc.RegisterConstants.GPR) FIRST_NONVOLATILE_GPR(org.jikesrvm.ppc.RegisterConstants.FIRST_NONVOLATILE_GPR) FIRST_OS_PARAMETER_GPR(org.jikesrvm.ppc.RegisterConstants.FIRST_OS_PARAMETER_GPR) FIRST_RVM_RESERVED_NV_GPR(org.jikesrvm.ppc.RegisterConstants.FIRST_RVM_RESERVED_NV_GPR) LAST_VOLATILE_GPR(org.jikesrvm.ppc.RegisterConstants.LAST_VOLATILE_GPR) FIRST_VOLATILE_GPR(org.jikesrvm.ppc.RegisterConstants.FIRST_VOLATILE_GPR) FIRST_SCRATCH_FPR(org.jikesrvm.ppc.RegisterConstants.FIRST_SCRATCH_FPR) FIRST_OS_PARAMETER_FPR(org.jikesrvm.ppc.RegisterConstants.FIRST_OS_PARAMETER_FPR) LAST_VOLATILE_FPR(org.jikesrvm.ppc.RegisterConstants.LAST_VOLATILE_FPR) FIRST_VOLATILE_FPR(org.jikesrvm.ppc.RegisterConstants.FIRST_VOLATILE_FPR) LAST_OS_PARAMETER_FPR(org.jikesrvm.ppc.RegisterConstants.LAST_OS_PARAMETER_FPR) FPR(org.jikesrvm.ppc.RegisterConstants.FPR) LAST_OS_VARARG_PARAMETER_FPR(org.jikesrvm.ppc.RegisterConstants.LAST_OS_VARARG_PARAMETER_FPR) Assembler(org.jikesrvm.compilers.common.assembler.ppc.Assembler)
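
The forward/reverse two-pass scheme described in the comments of genSVR4ParameterPassingCode can be reduced to the skeleton below. This is a minimal sketch, not the full method: it reuses the locals from the method above (asm, types, nextVMArgReg, nextOSArgReg), uses only Assembler calls that already appear in the example, and elides the float, long, and reference cases.

// Sketch: per-argument Assemblers are filled in forward order (the register/spill
// analysis must run in argument order), then emitted in reverse order so that
// copying one argument never clobbers a register a later argument still needs.
Assembler[] asmForArgs = new Assembler[types.length];
for (int arg = 0; arg < types.length; arg++) {
    asmForArgs[arg] = new Assembler(0);
    Assembler asmArg = asmForArgs[arg];
    // representative case: a word-sized, non-FP, non-reference argument that still
    // fits in both the RVM and OS parameter registers
    if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value() &&
            nextVMArgReg <= LAST_VOLATILE_GPR.value()) {
        asmArg.emitMR(GPR.lookup(nextOSArgReg++), GPR.lookup(nextVMArgReg++));
    }
    // ... float, long, and reference arguments each need 1-3 instructions, as shown above ...
}
// reverse pass: copy each per-argument sequence into the main Assembler
for (int arg = asmForArgs.length - 1; arg >= 0; arg--) {
    asm.appendInstructions(asmForArgs[arg].getMachineCodes());
}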

Example 2 with Assembler

use of org.jikesrvm.compilers.common.assembler.ppc.Assembler in project JikesRVM by JikesRVM.

the class JNICompiler method compile.

/**
 * This method creates the stub to link native method.  It will be called
 * from the lazy linker the first time a native method is invoked.  The stub
 * generated will be patched by the lazy linker to link to the native method
 * for all future calls. <p>
 * <pre>
 * The stub performs the following tasks in the prologue:
 * <ol>
 *  <li>Allocate the glue frame
 *  <li>Save the TR and JTOC registers in the JNI Environment for reentering Java later
 *  <li>Shuffle the parameters in the registers to conform to the OS calling convention
 *  <li>Save the nonvolatile registers in a known space in the frame to be used
 *    for the GC stack map
 *  <li>Push a new JREF frame on the JNIRefs stack
 *  <li>Supply the first JNI argument:  the JNI environment pointer
 *  <li>Supply the second JNI argument:  class object if static, "this" if virtual
 * </ol>
 * <p>
 * The stub performs the following tasks in the epilogue:
 * <ol>
 *  <li>Restore TR and JTOC registers saved in JNI Environment
 *  <li>Restore the nonvolatile registers if GC has occurred
 *  <li>Pop the JREF frame off the JNIRefs stack
 *  <li>Check for pending exception and deliver to Java caller if present
 *  <li>Process the return value from native:  push onto caller's Java stack
 * </ol>
 * <p>
 * Within the stackframe, we have two frames.
 * The "main" frame exactly follows the OS native ABI and is therefore
 * different for each ABI.
 * The "mini-frame" is identical on all platforms and stores RVM-specific fields.
 * The picture below shows the frames for 64-bit PowerPC ELF ABI.
 * <pre>
 *
 *   | fp       | <- native frame
 *   | cr       |
 *   | lr       |
 *   | resv     |
 *   | resv     |
 *   + toc      +
 *   |          |
 *   |          |
 *   |----------| <- Java to C glue frame using native calling conventions
 *   | fp       | saved fp of mini-frame
 *   | cr       |
 *   | lr       | native caller saves return address of native method here
 *   | resv     |
 *   | resv     |
 *   + toc      +
 *   |   0      | spill area (at least 8 words reserved)
 *   |   1      | (also used for saving volatile regs during calls in prolog)
 *   |   2      |
 *   |   3      |
 *   |   4      |
 *   |   5      |
 *   |   6      |
 *   |   7      |
 *   |  ...     |
 *   |----------| <- mini-frame for use by RVM stackwalkers
 *   |  fp      | saved fp of Java caller                 <- JNI_SAVE_AREA_OFFSET
 *   | mid      | cmid of native method
 *   | xxx (lr) | lr slot not used in mini frame
 *   |GC flag   | did GC happen while thread in native?   <- JNI_GC_FLAG_OFFSET
 *   |ENV       | JNIEnvironment                       <- JNI_ENV_OFFSET
 *   |RVM nonvol| save RVM nonvolatile GPRs for updating by GC stack mapper
 *   | ...      |
 *   |RVM nonvol|                                         <- JNI_RVM_NONVOLATILE_OFFSET
 *   |----------|
 *   |  fp      | <- Java caller frame
 *   | mid      |
 *   | xxx      |
 *   |          |
 *   |          |
 *   |          |
 *   |----------|
 *   |          |
 * </pre>
 * <p>
 * Runtime.unwindNativeStackFrame will return a pointer to the mini-frame
 * because none of our stack walkers need to do anything with the main frame.
 */
public static synchronized CompiledMethod compile(NativeMethod method) {
    JNICompiledMethod cm = (JNICompiledMethod) CompiledMethods.createCompiledMethod(method, CompiledMethod.JNI);
    int compiledMethodId = cm.getId();
    Assembler asm = new Assembler(0);
    int frameSize = getFrameSize(method);
    RVMClass klass = method.getDeclaringClass();
    // need 4 gp temps
    if (VM.VerifyAssertions)
        VM._assert(T3.value() <= LAST_VOLATILE_GPR.value());
    // need 4 fp temps
    if (VM.VerifyAssertions)
        VM._assert(F3.value() <= LAST_VOLATILE_FPR.value());
    if (VM.VerifyAssertions)
        VM._assert(S0.value() < S1.value() && // need 2 scratch
        S1.value() <= LAST_SCRATCH_GPR.value());
    Address nativeIP = method.getNativeIP();
    Address nativeTOC = method.getNativeTOC();
    // NOTE: this must be done before the condition Thread.hasNativeStackFrame() becomes true
    // so that the first Java to C transition will be allowed to resize the stack
    // (currently, this is true when the JNIRefsTop index has been incremented from 0)
    // add at least 14 for C frame (header + spill)
    asm.emitNativeStackOverflowCheck(frameSize + 14);
    // save return address in caller frame
    asm.emitMFLR(REGISTER_ZERO);
    asm.emitSTAddr(REGISTER_ZERO, STACKFRAME_RETURN_ADDRESS_OFFSET.toInt(), FP);
    // buy mini frame
    asm.emitSTAddrU(FP, -JNI_SAVE_AREA_SIZE, FP);
    // store CMID for native method in mini-frame
    asm.emitLVAL(S0, compiledMethodId);
    asm.emitSTW(S0, STACKFRAME_METHOD_ID_OFFSET.toInt(), FP);
    // buy main frame; the total size equals frameSize
    asm.emitSTAddrU(FP, -frameSize + JNI_SAVE_AREA_SIZE, FP);
    // establish S0 -> thread's JNIEnv structure
    asm.emitLAddrOffset(S0, THREAD_REGISTER, Entrypoints.jniEnvField.getOffset());
    // save the TR register in the JNIEnvironment object for possible calls back into Java
    asm.emitSTAddrOffset(THREAD_REGISTER, S0, Entrypoints.JNIEnvSavedTRField.getOffset());
    // save the JNIEnvironment in the stack frame so we can use it to acquire the TR
    // when we return from native code.
    // save TR in frame
    asm.emitSTAddr(S0, frameSize - JNI_ENV_OFFSET, FP);
    // save mini-frame frame pointer in JNIEnv, JNITopJavaFP, which will be the frame
    // to start scanning this stack during GC, if top of stack is still executing in C
    asm.emitLAddr(THREAD_REGISTER, 0, FP);
    asm.emitSTAddrOffset(THREAD_REGISTER, S0, Entrypoints.JNITopJavaFPField.getOffset());
    // save the RVM nonvolatile GPRs, to be scanned by GC stack mapper
    for (int i = LAST_NONVOLATILE_GPR.value(), offset = JNI_RVM_NONVOLATILE_OFFSET; i >= FIRST_NONVOLATILE_GPR.value(); --i, offset += BYTES_IN_STACKSLOT) {
        asm.emitSTAddr(GPR.lookup(i), frameSize - offset, FP);
    }
    // clear the GC flag on entry to native code
    // use TR as scratch
    asm.emitLVAL(THREAD_REGISTER, 0);
    asm.emitSTW(THREAD_REGISTER, frameSize - JNI_GC_FLAG_OFFSET, FP);
    // generate the code to map the parameters to OS convention and add the
    // second parameter (either the "this" ptr or class if a static method).
    // The JNI Function ptr first parameter is set before making the call
    // by the out of line machine code we invoke below.
    // Opens a new frame in the JNIRefs table to register the references.
    // Assumes S0 set to JNIEnv, kills KLUDGE_TI_REG, S1 & THREAD_REGISTER
    // On return, S0 still contains JNIEnv
    storeParameters(asm, frameSize, method, klass);
    // 
    // Load required JNI function ptr into first parameter reg (GPR3/T0)
    // This pointer is an interior pointer to the JNIEnvironment which is
    // currently in S0.
    // 
    asm.emitADDI(T0, Entrypoints.JNIExternalFunctionsField.getOffset(), S0);
    // 
    // change the status of the thread to IN_JNI
    // 
    asm.emitLAddrOffset(THREAD_REGISTER, S0, Entrypoints.JNIEnvSavedTRField.getOffset());
    asm.emitLVALAddr(S1, Entrypoints.execStatusField.getOffset());
    // get status for thread
    asm.emitLWARX(S0, S1, THREAD_REGISTER);
    // we should be in java code?
    asm.emitCMPI(S0, RVMThread.IN_JAVA + (RVMThread.ALWAYS_LOCK_ON_STATE_TRANSITION ? 100 : 0));
    ForwardReference notInJava = asm.emitForwardBC(NE);
    // S0  <- new state value
    asm.emitLVAL(S0, RVMThread.IN_JNI);
    // attempt to change state to IN_JNI
    asm.emitSTWCXr(S0, S1, THREAD_REGISTER);
    // branch if success over slow path
    ForwardReference enteredJNIRef = asm.emitForwardBC(EQ);
    notInJava.resolve(asm);
    asm.emitLAddrOffset(S0, THREAD_REGISTER, Entrypoints.threadContextRegistersField.getOffset());
    asm.emitLAddrOffset(S1, JTOC, ArchEntrypoints.saveVolatilesInstructionsField.getOffset());
    asm.emitMTLR(S1);
    asm.emitBCLRL();
    // NOTE: THREAD_REGISTER should still have the thread
    // pointer, since up to this point we would have saved it but not
    // overwritten it.
    // call into our friendly slow path function.  note that this should
    // work because:
    // 1) we're not calling from C so we don't care what registers are
    // considered non-volatile in C
    // 2) all Java non-volatiles have been saved
    // 3) the only other registers we need - TR and S0 are taken care
    // of (see above)
    // 4) the prologue and epilogue will take care of the frame pointer
    // accordingly (it will just save it on the stack and then restore
    // it - so we don't even have to know what its value is here)
    // the only thing we have to make sure of is that MMTk ignores the
    // framePointer field in RVMThread and uses the one in the JNI
    // environment instead (see Collection.prepareMutator)...
    // S1 gets address of function
    asm.emitLAddrOffset(S1, JTOC, Entrypoints.enterJNIBlockedFromCallIntoNativeMethod.getOffset());
    asm.emitMTLR(S1);
    // call RVMThread.enterJNIBlocked
    asm.emitBCLRL();
    asm.emitLAddrOffset(S0, THREAD_REGISTER, Entrypoints.threadContextRegistersField.getOffset());
    asm.emitLAddrOffset(S1, JTOC, ArchEntrypoints.restoreVolatilesInstructionsField.getOffset());
    asm.emitMTLR(S1);
    asm.emitBCLRL();
    // come here when we're done
    enteredJNIRef.resolve(asm);
    // set the TOC and IP for branch to out_of_line code
    asm.emitLVALAddr(JTOC, nativeTOC);
    asm.emitLVALAddr(S1, nativeIP);
    // move native code address to CTR reg;
    // do this early so that S1 will be available as a scratch.
    asm.emitMTCTR(S1);
    // 
    // CALL NATIVE METHOD
    // 
    asm.emitBCCTRL();
    // save the return value (in R3/R4) in the glue frame spill area, since it may be clobbered
    // if we have to call sysVirtualProcessorYield because we are locked in native.
    if (VM.BuildFor64Addr) {
        asm.emitSTD(T0, NATIVE_FRAME_HEADER_SIZE, FP);
    } else {
        asm.emitSTW(T0, NATIVE_FRAME_HEADER_SIZE, FP);
        asm.emitSTW(T1, NATIVE_FRAME_HEADER_SIZE + BYTES_IN_ADDRESS, FP);
    }
    // 
    // try to return thread status to IN_JAVA
    // 
    int label1 = asm.getMachineCodeIndex();
    // TODO: we can do this directly from FP because we know framesize at compiletime
    // (the same way we stored the JNI Env above)
    // get mini-frame
    asm.emitLAddr(S0, 0, FP);
    // get Java caller FP
    asm.emitLAddr(S0, 0, S0);
    // load JNIEnvironment into TR
    asm.emitLAddr(THREAD_REGISTER, -JNI_ENV_OFFSET, S0);
    // Restore JTOC and TR
    asm.emitLAddrOffset(JTOC, THREAD_REGISTER, Entrypoints.JNIEnvSavedJTOCField.getOffset());
    asm.emitLAddrOffset(THREAD_REGISTER, THREAD_REGISTER, Entrypoints.JNIEnvSavedTRField.getOffset());
    asm.emitLVALAddr(S1, Entrypoints.execStatusField.getOffset());
    // get status for processor
    asm.emitLWARX(S0, S1, THREAD_REGISTER);
    // are we IN_JNI code?
    asm.emitCMPI(S0, RVMThread.IN_JNI + (RVMThread.ALWAYS_LOCK_ON_STATE_TRANSITION ? 100 : 0));
    ForwardReference blocked = asm.emitForwardBC(NE);
    // S0  <- new state value
    asm.emitLVAL(S0, RVMThread.IN_JAVA);
    // attempt to change state to java
    asm.emitSTWCXr(S0, S1, THREAD_REGISTER);
    // branch over blocked call if state change successful
    ForwardReference fr = asm.emitForwardBC(EQ);
    blocked.resolve(asm);
    // if not IN_JNI call RVMThread.leaveJNIBlockedFromCallIntoNative
    // T1 gets address of function
    asm.emitLAddrOffset(T1, JTOC, Entrypoints.leaveJNIBlockedFromCallIntoNativeMethod.getOffset());
    asm.emitMTLR(T1);
    // call RVMThread.leaveJNIBlockedFromCallIntoNative
    asm.emitBCLRL();
    fr.resolve(asm);
    // check if GC has occurred. If GC did not occur, then the
    // VM nonvolatile regs were restored by the OS and are valid. If GC did occur,
    // objects referenced by these restored regs may have moved; in this case we
    // restore the nonvolatile registers from our save area,
    // where any object references would have been relocated during GC.
    // use T2 as scratch (not needed any more on return from call)
    // 
    asm.emitLWZ(T2, frameSize - JNI_GC_FLAG_OFFSET, FP);
    asm.emitCMPI(T2, 0);
    ForwardReference fr1 = asm.emitForwardBC(EQ);
    for (int i = LAST_NONVOLATILE_GPR.value(), offset = JNI_RVM_NONVOLATILE_OFFSET; i >= FIRST_NONVOLATILE_GPR.value(); --i, offset += BYTES_IN_STACKSLOT) {
        asm.emitLAddr(GPR.lookup(i), frameSize - offset, FP);
    }
    fr1.resolve(asm);
    // reestablish S0 to hold pointer to JNIEnvironment
    asm.emitLAddrOffset(S0, THREAD_REGISTER, Entrypoints.jniEnvField.getOffset());
    // pop jrefs frame off the JNIRefs stack, "reopen" the previous top jref frame
    // use S1 as scratch, also use T2, T3 for scratch which are no longer needed
    // load base of JNIRefs array
    asm.emitLAddrOffset(S1, S0, Entrypoints.JNIRefsField.getOffset());
    // get saved offset for JNIRefs frame ptr previously pushed onto JNIRefs array
    asm.emitLIntOffset(T2, S0, Entrypoints.JNIRefsSavedFPField.getOffset());
    // compute offset for new TOP
    asm.emitADDI(T3, -BYTES_IN_STACKSLOT, T2);
    // store new offset for TOP into JNIEnv
    asm.emitSTWoffset(T3, S0, Entrypoints.JNIRefsTopField.getOffset());
    // retrieve the previous frame ptr
    asm.emitLIntX(T2, S1, T2);
    // store new offset for JNIRefs frame ptr into JNIEnv
    asm.emitSTWoffset(T2, S0, Entrypoints.JNIRefsSavedFPField.getOffset());
    // Restore the return value R3-R4 saved in the glue frame spill area before the migration
    if (VM.BuildFor64Addr) {
        asm.emitLD(T0, NATIVE_FRAME_HEADER_SIZE, FP);
    } else {
        asm.emitLWZ(T0, NATIVE_FRAME_HEADER_SIZE, FP);
        asm.emitLWZ(T1, NATIVE_FRAME_HEADER_SIZE + BYTES_IN_STACKSLOT, FP);
    }
    // if the return type is a reference, the native C code is returning a jref,
    // which is a byte offset from the beginning of the thread's JNIRefs stack/array
    // to the corresponding ref.  In this case, emit code to replace the returned
    // offset (in R3) with the ref from the JNIRefs array
    TypeReference returnType = method.getReturnType();
    if (returnType.isReferenceType()) {
        asm.emitCMPI(T0, 0);
        ForwardReference globalRef = asm.emitForwardBC(LT);
        // Local ref - load from JNIRefs
        // S1 is still the base of the JNIRefs array
        asm.emitLAddrX(T0, S1, T0);
        ForwardReference afterGlobalRef = asm.emitForwardB();
        // Deal with global references
        globalRef.resolve(asm);
        asm.emitLVAL(T3, JNIGlobalRefTable.STRONG_REF_BIT);
        asm.emitAND(T1, T0, T3);
        asm.emitLAddrOffset(T2, JTOC, Entrypoints.JNIGlobalRefsField.getOffset());
        asm.emitCMPI(T1, 0);
        ForwardReference weakGlobalRef = asm.emitForwardBC(EQ);
        // Strong global references
        asm.emitNEG(T0, T0);
        // convert index to offset
        asm.emitSLWI(T0, T0, LOG_BYTES_IN_ADDRESS);
        asm.emitLAddrX(T0, T2, T0);
        ForwardReference afterWeakGlobalRef = asm.emitForwardB();
        // Weak global references
        weakGlobalRef.resolve(asm);
        // STRONG_REF_BIT
        asm.emitOR(T0, T0, T3);
        asm.emitNEG(T0, T0);
        // convert index to offset
        asm.emitSLWI(T0, T0, LOG_BYTES_IN_ADDRESS);
        asm.emitLAddrX(T0, T2, T0);
        asm.emitLAddrOffset(T0, T0, Entrypoints.referenceReferentField.getOffset());
        afterWeakGlobalRef.resolve(asm);
        afterGlobalRef.resolve(asm);
    }
    // pop the whole stack frame (main & mini), restore the Java caller frame
    asm.emitADDI(FP, +frameSize, FP);
    // C return value is already where caller expected it (T0/T1 or F0)
    // So, just restore the return address to the link register.
    asm.emitLAddr(REGISTER_ZERO, STACKFRAME_RETURN_ADDRESS_OFFSET.toInt(), FP);
    // restore return address
    asm.emitMTLR(REGISTER_ZERO);
    // CHECK EXCEPTION AND BRANCH TO ATHROW CODE OR RETURN NORMALLY
    asm.emitLIntOffset(T2, S0, Entrypoints.JNIHasPendingExceptionField.getOffset());
    // get a zero value to compare
    asm.emitLVAL(T3, 0);
    asm.emitCMP(T2, T3);
    ForwardReference fr3 = asm.emitForwardBC(NE);
    // if no pending exception, proceed to return to caller
    asm.emitBCLR();
    fr3.resolve(asm);
    // T1 gets address of function
    asm.emitLAddrToc(T1, Entrypoints.jniThrowPendingException.getOffset());
    // point CTR to the exception delivery code
    asm.emitMTCTR(T1);
    // then branch to the exception delivery code, does not return
    asm.emitBCCTR();
    cm.compileComplete(asm.getMachineCodes());
    return cm;
}
Also used : ForwardReference(org.jikesrvm.compilers.common.assembler.ForwardReference) Address(org.vmmagic.unboxed.Address) Assembler(org.jikesrvm.compilers.common.assembler.ppc.Assembler) TypeReference(org.jikesrvm.classloader.TypeReference) JNICompiledMethod(org.jikesrvm.jni.JNICompiledMethod) RVMClass(org.jikesrvm.classloader.RVMClass)
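
The IN_JAVA to IN_JNI transition in compile() (and the reverse transition after the native call returns) follows the standard PowerPC load-reserve/store-conditional pattern. Below is a condensed sketch of just that pattern, using only Assembler calls that appear in the example; it ignores the ALWAYS_LOCK_ON_STATE_TRANSITION debugging offset and assumes S1 and THREAD_REGISTER already address the thread's execStatus field, as set up above.

// Sketch: atomically swing execStatus from IN_JAVA to IN_JNI, falling into the
// blocked-entry slow path if the old state is unexpected or the store-conditional fails.
asm.emitLWARX(S0, S1, THREAD_REGISTER);              // load-reserve the current status word
asm.emitCMPI(S0, RVMThread.IN_JAVA);                 // is the thread still running Java code?
ForwardReference notInJava = asm.emitForwardBC(NE);  // no: take the slow path
asm.emitLVAL(S0, RVMThread.IN_JNI);                  // desired new state
asm.emitSTWCXr(S0, S1, THREAD_REGISTER);             // store-conditional; CR0.EQ set on success
ForwardReference enteredJNI = asm.emitForwardBC(EQ); // success: skip the slow path
notInJava.resolve(asm);
// slow path: save volatiles and call RVMThread.enterJNIBlockedFromCallIntoNative, as in compile()
enteredJNI.resolve(asm);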

Example 3 with Assembler

use of org.jikesrvm.compilers.common.assembler.ppc.Assembler in project JikesRVM by JikesRVM.

the class Barriers method compilePutfieldBarrierImm.

// on entry java stack contains ...|target_ref|ref_to_store|
static void compilePutfieldBarrierImm(BaselineCompilerImpl comp, Offset fieldOffset, int locationMetadata) {
    Assembler asm = comp.asm;
    asm.emitLAddrToc(S0, Entrypoints.objectFieldWriteBarrierMethod.getOffset());
    asm.emitMTCTR(S0);
    // object base
    comp.peekAddr(T0, 1);
    asm.emitNullCheck(T0);
    // offset
    asm.emitLVALAddr(T2, fieldOffset);
    // value to store
    comp.peekAddr(T1, 0);
    asm.emitLVAL(T3, locationMetadata);
    // MemoryManager.putfieldWriteBarrier(T0,T1,T2,T3)
    asm.emitBCCTRL();
}
Also used : Assembler(org.jikesrvm.compilers.common.assembler.ppc.Assembler)

Example 4 with Assembler

use of org.jikesrvm.compilers.common.assembler.ppc.Assembler in project JikesRVM by JikesRVM.

the class Barriers method compilePutfieldBarrierDoubleImm.

// on entry java stack contains ...|target_ref|value_to_store|
static void compilePutfieldBarrierDoubleImm(BaselineCompilerImpl comp, Offset fieldOffset, int locationMetadata) {
    Assembler asm = comp.asm;
    asm.emitLAddrToc(S0, Entrypoints.doubleFieldWriteBarrierMethod.getOffset());
    asm.emitMTCTR(S0);
    // store target_ref in T0
    comp.peekAddr(T0, 2);
    asm.emitNullCheck(T0);
    // store value_to_store in F0
    comp.peekDouble(F0, 0);
    // store offset in T1
    asm.emitLVALAddr(T1, fieldOffset);
    // store locationMetaData in T2
    asm.emitLVAL(T2, locationMetadata);
    // call barrier with parameters in (T0,F0,T1,T2)
    asm.emitBCCTRL();
    // clean up stack
    comp.discardSlots(3);
}
Also used : Assembler(org.jikesrvm.compilers.common.assembler.ppc.Assembler)

Example 5 with Assembler

use of org.jikesrvm.compilers.common.assembler.ppc.Assembler in project JikesRVM by JikesRVM.

the class Barriers method compileGetstaticBarrier.

// on entry java stack contains ...|
// T0 already contains the offset of the field on entry
static void compileGetstaticBarrier(BaselineCompilerImpl comp, int locationMetadata) {
    Assembler asm = comp.asm;
    asm.emitLAddrToc(S0, Entrypoints.objectStaticReadBarrierMethod.getOffset());
    asm.emitMTCTR(S0);
    asm.emitLVAL(T1, locationMetadata);
    // MemoryManager.getstaticReadBarrier(T0,T1)
    asm.emitBCCTRL();
}
Also used : Assembler(org.jikesrvm.compilers.common.assembler.ppc.Assembler)
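
Examples 3 through 5 share one calling skeleton: load the barrier's entrypoint address from the TOC, move it into the count register, marshal the arguments into the volatile registers, and branch-and-link through CTR. The reduced sketch below shows that shared pattern; barrierEntrypoint is a placeholder for whichever Entrypoints field a particular barrier uses (for example objectFieldWriteBarrierMethod), and the argument registers vary per barrier as the examples show.

// Sketch of the common barrier-call sequence used by the Barriers helpers above.
// 'barrierEntrypoint' stands in for the concrete Entrypoints field.
asm.emitLAddrToc(S0, barrierEntrypoint.getOffset()); // load the barrier code address via the TOC
asm.emitMTCTR(S0);                                   // branch target goes into CTR
// ... marshal arguments into T0, T1, ... (and F0 for double values), as in the examples ...
asm.emitBCCTRL();                                    // branch-and-link through CTR into the barrier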

Aggregations

Assembler (org.jikesrvm.compilers.common.assembler.ppc.Assembler): 30
Entrypoint (org.vmmagic.pragma.Entrypoint): 6
ForwardReference (org.jikesrvm.compilers.common.assembler.ForwardReference): 4
Address (org.vmmagic.unboxed.Address): 2
Offset (org.vmmagic.unboxed.Offset): 2
RVMClass (org.jikesrvm.classloader.RVMClass): 1
TypeReference (org.jikesrvm.classloader.TypeReference): 1
ArchBaselineCompiledMethod (org.jikesrvm.compilers.baseline.ppc.ArchBaselineCompiledMethod): 1
CodeArray (org.jikesrvm.compilers.common.CodeArray): 1
CompiledMethod (org.jikesrvm.compilers.common.CompiledMethod): 1
OptCompiledMethod (org.jikesrvm.compilers.opt.runtimesupport.OptCompiledMethod): 1
JNICompiledMethod (org.jikesrvm.jni.JNICompiledMethod): 1
FIRST_NONVOLATILE_GPR (org.jikesrvm.ppc.RegisterConstants.FIRST_NONVOLATILE_GPR): 1
FIRST_OS_PARAMETER_FPR (org.jikesrvm.ppc.RegisterConstants.FIRST_OS_PARAMETER_FPR): 1
FIRST_OS_PARAMETER_GPR (org.jikesrvm.ppc.RegisterConstants.FIRST_OS_PARAMETER_GPR): 1
FIRST_RVM_RESERVED_NV_GPR (org.jikesrvm.ppc.RegisterConstants.FIRST_RVM_RESERVED_NV_GPR): 1
FIRST_SCRATCH_FPR (org.jikesrvm.ppc.RegisterConstants.FIRST_SCRATCH_FPR): 1
FIRST_VOLATILE_FPR (org.jikesrvm.ppc.RegisterConstants.FIRST_VOLATILE_FPR): 1
FIRST_VOLATILE_GPR (org.jikesrvm.ppc.RegisterConstants.FIRST_VOLATILE_GPR): 1
FPR (org.jikesrvm.ppc.RegisterConstants.FPR): 1