Example usage of org.bytedeco.cuda.cudart.CUctx_st from the bytedeco/javacpp-presets project:
the main method of the SampleEncodeDecode class.
/**
 * Probes the NVENC/NVDEC capabilities of one GPU: prints the maximum encoder
 * API version the driver supports, then whether 8-bit 4:2:0 H.264 decoding is
 * available. A CUDA context is created up front and always destroyed on exit.
 */
public static void main(String[] args) {
    // Change this ordinal if the target NVIDIA GPU is not device 0.
    final int gpuOrdinal = 0;
    final CUctx_st context = new CUctx_st();
    checkCudaApiCall("cuInit", cuInit(0));
    checkCudaApiCall("cuCtxCreate", cuCtxCreate(context, 0, gpuOrdinal));
    try {
        // Maximum NVENC API version supported by the installed driver.
        try (IntPointer pVersion = new IntPointer(1)) {
            checkEncodeApiCall("NvEncodeAPIGetMaxSupportedVersion", NvEncodeAPIGetMaxSupportedVersion(pVersion));
            System.out.printf("Encoder Max Supported Version\t : %d \r\n", pVersion.get());
        }
        // Query NVDEC for H.264 / 4:2:0 chroma / 8-bit (bit depth minus 8 == 0).
        try (CUVIDDECODECAPS caps = new CUVIDDECODECAPS()) {
            caps.eCodecType(cudaVideoCodec_H264);
            caps.eChromaFormat(cudaVideoChromaFormat_420);
            caps.nBitDepthMinus8(0);
            checkCudaApiCall("cuvidGetDecoderCaps", cuvidGetDecoderCaps(caps));
            System.out.printf("Decoder Capability H.264 Codec\t : %s \r\n", (caps.bIsSupported() != 0));
        }
    } finally {
        // Tear the context down even when a capability query throws.
        checkCudaApiCall("cuCtxDestroy", cuCtxDestroy(context));
    }
}
Example usage of org.bytedeco.cuda.cudart.CUctx_st from the bytedeco/javacpp-presets project:
the showEncoderCapability method of the AppEncCuda class.
/**
 * Enumerates every CUDA device and prints a summary of its NVENC capabilities
 * (H.264 and HEVC feature support plus maximum encode dimensions). Errors from
 * the CUDA driver are reported via the stack trace and abort the enumeration.
 */
public static void showEncoderCapability() {
    StringBuilder sb = new StringBuilder();
    try {
        checkCudaApiCall(cuInit(0));
        // FIX: allocate one int of native memory; the no-arg IntPointer() is a
        // null pointer and cannot receive the device count (cf. the main method,
        // which already uses new IntPointer(1)).
        IntPointer gpuNum = new IntPointer(1);
        checkCudaApiCall(cuDeviceGetCount(gpuNum));
        sb.append("Encoder Capability \n\n");
        for (int iGpu = 0; iGpu < gpuNum.get(); iGpu++) {
            IntPointer cuDevice = new IntPointer(1);
            checkCudaApiCall(cuDeviceGet(cuDevice, iGpu));
            BytePointer szDeviceName = new BytePointer(80);
            // FIX: sizeof() is the element size (1 for BytePointer), not the
            // buffer length; pass the 80-byte capacity so names aren't truncated.
            checkCudaApiCall(cuDeviceGetName(szDeviceName, (int) szDeviceName.capacity(), cuDevice.get()));
            CUctx_st cuContext = new CUctx_st();
            checkCudaApiCall(cuCtxCreate(cuContext, 0, cuDevice.get()));
            // A throwaway 1280x720 NV12 encoder session, used only to query caps.
            NvEncoderCuda enc = new NvEncoderCuda(cuContext, 1280, 720, NV_ENC_BUFFER_FORMAT_NV12);
            sb.append("GPU ").append(iGpu).append(" - ").append(szDeviceName.getString()).append("\n");
            sb.append("\tH264:\t\t ").append(enc.getCapabilityValue(NV_ENC_CODEC_H264_GUID(), NV_ENC_CAPS_SUPPORTED_RATECONTROL_MODES) != 0 ? "yes" : "no").append("\n");
            sb.append("\tH264_444:\t ").append(enc.getCapabilityValue(NV_ENC_CODEC_H264_GUID(), NV_ENC_CAPS_SUPPORT_YUV444_ENCODE) != 0 ? "yes" : "no").append("\n");
            sb.append("\tH264_ME:\t").append(" ").append(enc.getCapabilityValue(NV_ENC_CODEC_H264_GUID(), NV_ENC_CAPS_SUPPORT_MEONLY_MODE) != 0 ? "yes" : "no").append("\n");
            sb.append("\tH264_WxH:\t").append(" ").append(enc.getCapabilityValue(NV_ENC_CODEC_H264_GUID(), NV_ENC_CAPS_WIDTH_MAX)).append("*").append(enc.getCapabilityValue(NV_ENC_CODEC_H264_GUID(), NV_ENC_CAPS_HEIGHT_MAX)).append("\n");
            sb.append("\tHEVC:\t\t").append(" ").append(enc.getCapabilityValue(NV_ENC_CODEC_HEVC_GUID(), NV_ENC_CAPS_SUPPORTED_RATECONTROL_MODES) != 0 ? "yes" : "no").append("\n");
            sb.append("\tHEVC_Main10:\t").append(" ").append(enc.getCapabilityValue(NV_ENC_CODEC_HEVC_GUID(), NV_ENC_CAPS_SUPPORT_10BIT_ENCODE) != 0 ? "yes" : "no").append("\n");
            sb.append("\tHEVC_Lossless:\t").append(" ").append(enc.getCapabilityValue(NV_ENC_CODEC_HEVC_GUID(), NV_ENC_CAPS_SUPPORT_LOSSLESS_ENCODE) != 0 ? "yes" : "no").append("\n");
            sb.append("\tHEVC_SAO:\t").append(" ").append(enc.getCapabilityValue(NV_ENC_CODEC_HEVC_GUID(), NV_ENC_CAPS_SUPPORT_SAO) != 0 ? "yes" : "no").append("\n");
            sb.append("\tHEVC_444:\t").append(" ").append(enc.getCapabilityValue(NV_ENC_CODEC_HEVC_GUID(), NV_ENC_CAPS_SUPPORT_YUV444_ENCODE) != 0 ? "yes" : "no").append("\n");
            sb.append("\tHEVC_ME:\t").append(" ").append(enc.getCapabilityValue(NV_ENC_CODEC_HEVC_GUID(), NV_ENC_CAPS_SUPPORT_MEONLY_MODE) != 0 ? "yes" : "no").append("\n");
            sb.append("\tHEVC_WxH:\t").append(" ").append(enc.getCapabilityValue(NV_ENC_CODEC_HEVC_GUID(), NV_ENC_CAPS_WIDTH_MAX)).append("*").append(enc.getCapabilityValue(NV_ENC_CODEC_HEVC_GUID(), NV_ENC_CAPS_HEIGHT_MAX)).append("\n\n");
            enc.destroyEncoder();
            checkCudaApiCall(cuCtxDestroy(cuContext));
        }
        // FIX: print once after the loop; the old per-iteration println re-emitted
        // every previously appended GPU's section on multi-GPU systems.
        System.out.println(sb.toString());
    } catch (CudaException e) {
        e.printStackTrace();
    }
}
Example usage of org.bytedeco.cuda.cudart.CUctx_st from the bytedeco/javacpp-presets project:
the main method of the AppDec class.
/**
 * Entry point: parses command-line options, validates the input file, creates a
 * CUDA context on the selected GPU, and decodes the media file (with optional
 * crop/resize and planar output). Any failure is reported via the stack trace.
 */
public static void main(String[] args) {
    inputFilePath = "";
    outputFilePath = "";
    gpu = 0;
    cropRectangle = new Rectangle();
    resizeDimension = new Dimension();
    outPlanar = false;
    try {
        parseCommandLine(args.length, args);
        checkInputFile(inputFilePath);
        if (outputFilePath == null || outputFilePath.isEmpty()) {
            outputFilePath = outPlanar ? "out.planar" : "out.native";
        }
        checkCudaApiCall(cuInit(0));
        IntPointer nGpu = new IntPointer(1);
        checkCudaApiCall(cuDeviceGetCount(nGpu));
        // FIX: valid ordinals are [0, nGpu-1]; the old test used `gpu > nGpu.get()`
        // and accepted the out-of-range ordinal nGpu (cf. AppEncCuda.main).
        if (gpu < 0 || gpu >= nGpu.get()) {
            System.err.printf("GPU ordinal out of range. Should be within [%d,%d]\n", 0, nGpu.get() - 1);
            return;
        }
        CUctx_st cuContext = new CUctx_st();
        // FIX: check the create result like every other driver call here, and
        // release the context when decoding finishes or throws.
        checkCudaApiCall(cuCtxCreate(cuContext, 0, gpu));
        try {
            System.out.println("Decode with demuxing.");
            decodeMediaFile(cuContext, inputFilePath, outputFilePath, outPlanar, cropRectangle, resizeDimension);
        } finally {
            cuCtxDestroy(cuContext);
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example usage of org.bytedeco.cuda.cudart.CUctx_st from the bytedeco/javacpp-presets project:
the main method of the AppEncCuda class.
/**
 * Entry point: parses command-line options, validates input/resolution, selects
 * the requested GPU, creates a CUDA context, and encodes the raw input file to
 * H.264 or HEVC (either via system memory or video-memory output buffers).
 */
public static void main(String[] args) {
    try {
        parseCommandLine(args.length, args);
        NvCodecUtil.checkInputFile(szInputFilePath);
        NvCodecUtil.validateResolution(width, height);
        if (szOutputFilePath == null) {
            // Default name follows the selected codec.
            szOutputFilePath = initParam.isCodecH264() ? "out.h264" : "out.hevc";
        }
        try {
            checkCudaApiCall(cuInit(0));
            IntPointer nGpu = new IntPointer(1);
            checkCudaApiCall(cuDeviceGetCount(nGpu));
            if (iGpu < 0 || iGpu >= nGpu.get()) {
                System.out.println("GPU ordinal out of range. Should be within [0 ," + (nGpu.get() - 1) + "]");
                return;
            }
            IntPointer cuDevice = new IntPointer(1);
            checkCudaApiCall(cuDeviceGet(cuDevice, iGpu));
            BytePointer szDeviceName = new BytePointer(80);
            checkCudaApiCall(cuDeviceGetName(szDeviceName, (int) szDeviceName.limit(), cuDevice.get()));
            System.out.println("GPU in use: " + szDeviceName.getString());
            CUctx_st cuContext = new CUctx_st();
            checkCudaApiCall(cuCtxCreate(cuContext, 0, cuDevice.get()));
            try {
                // FIX: try-with-resources guarantees both streams are closed even
                // when encoding throws; the old explicit close() calls were skipped
                // on any exception and leaked the file handles.
                try (FileInputStream input = new FileInputStream(szInputFilePath);
                     FileOutputStream output = new FileOutputStream(szOutputFilePath)) {
                    if (bOutputInVidMem) {
                        // Encoded output lands in video memory (optionally with CUDA streams).
                        encodeCudaOpInVidMem(width, height, eFormat, initParam, cuContext, input, output, cuStreamType);
                    } else {
                        encodeCuda(width, height, eFormat, initParam, cuContext, input, output);
                    }
                }
                System.out.println("Bitstream saved in file " + szOutputFilePath);
            } finally {
                // FIX: release the CUDA context; the old code never destroyed it.
                cuCtxDestroy(cuContext);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    } catch (CudaException | InvalidArgument e) {
        e.printStackTrace();
    }
}
Example usage of org.bytedeco.cuda.cudart.CUctx_st from the bytedeco/javacpp-presets project:
the encodeCudaOpInVidMem method of the AppEncCuda class.
/**
 * Encodes raw frames read from {@code input}, keeping the encoded output in video
 * memory, and dumps each encoded frame (plus a CRC) to {@code output}.
 *
 * @param nWidth           frame width in pixels
 * @param nHeight          frame height in pixels
 * @param eFormat          NVENC input buffer format of the raw frames
 * @param encodeCLIOptions encoder settings parsed from the command line
 * @param cuContext        CUDA context the encoder and copies run in
 * @param input            source of raw frames, read nFrameSize bytes at a time
 * @param output           destination file for encoded frames and CRCs
 * @param cuStreamType     CUDA stream mode; -1 disables CUDA streams entirely
 * @throws IOException if reading a frame or writing the output fails
 */
public static void encodeCudaOpInVidMem(int nWidth, int nHeight, int eFormat, NvEncoderInitParam encodeCLIOptions, CUctx_st cuContext, FileInputStream input, FileOutputStream output, int cuStreamType) throws IOException {
NvEncoderOutputInVidMemCuda encoder = new NvEncoderOutputInVidMemCuda(cuContext, nWidth, nHeight, eFormat);
initializeEncoder(encoder, encodeCLIOptions, eFormat);
// Size in bytes of one raw input frame at this resolution/format.
int nFrameSize = encoder.getFrameSize();
boolean useCUStream = cuStreamType != -1;
CRC crc = null;
NvCUStream cuStream = null;
if (useCUStream) {
// Allocate CUDA streams
cuStream = new NvCUStream((CUctx_st) encoder.getDevice(), cuStreamType, encoder);
// When CUDA streams are used, the encoded frame's CRC is computed using cuda kernel
crc = new CRC((CUctx_st) encoder.getDevice(), encoder.getOutputBufferSize());
}
// For dumping output - encoded frame and CRC, to a file
DumpVidMemOutput dumpVidMemOutput = new DumpVidMemOutput((CUctx_st) encoder.getDevice(), encoder.getOutputBufferSize(), szOutputFilePath, useCUStream);
byte[] pHostFrame = new byte[nFrameSize];
int nFrame = 0;
// Encoding loop
while (true) {
// Load the next frame from disk
int nRead = input.read(pHostFrame);
// For receiving encoded packets
Vector<NV_ENC_OUTPUT_PTR> pVideoMemBfr = new Vector<>();
if (nRead == nFrameSize) {
// Full frame read: upload it from host memory into the encoder's input
// buffer (on the CUDA stream when streams are enabled) and submit it.
final NvEncoderInputFrame encoderInputFrame = encoder.getNextInputFrame();
NvEncoderCuda.copyToDeviceFrame(cuContext, new BytePointer(pHostFrame), 0, encoderInputFrame.getInputPointer().getPointer(LongPointer.class), encoderInputFrame.getPitch(), encoder.getEncodeWidth(), encoder.getEncodeHeight(), CU_MEMORYTYPE_HOST, encoderInputFrame.getBufferFormat(), encoderInputFrame.getChromaOffsets(), encoderInputFrame.getNumChromaPlanes(), false, useCUStream ? cuStream.getInputStream() : null);
encoder.encodeFrame(pVideoMemBfr);
} else {
// Short or EOF read: flush the encoder to drain any pending packets.
encoder.endEncode(pVideoMemBfr);
}
for (int i = 0; i < pVideoMemBfr.size(); ++i) {
if (useCUStream) {
// Compute CRC of encoded stream
crc.getCRC(pVideoMemBfr.get(i), cuStream.getOutputStream());
}
// Without CUDA streams, a freshly zeroed LongPointer stands in for the CRC.
dumpVidMemOutput.dumpOutputToFile(pVideoMemBfr.get(i).getPointer(LongPointer.class), useCUStream ? crc.getCrcVidMem() : new LongPointer(1) {
{
put(0);
}
}, output, nFrame);
nFrame++;
}
// The short read that triggered endEncode also terminates the loop,
// after its drained packets have been written above.
if (nRead != nFrameSize)
break;
}
// Release native resources in reverse order of allocation.
dumpVidMemOutput.dispose();
if (useCUStream) {
crc.dispose();
cuStream.dispose();
}
encoder.dispose();
encoder.destroyEncoder();
System.out.println("Total frames encoded: " + nFrame);
}
Aggregations