Use of org.bytedeco.javacpp.BytePointer in project djl by deepjavalibrary.
The class JavacppUtils, method getDevice.
@SuppressWarnings({"unchecked", "try"})
public static Device getDevice(TFE_TensorHandle handle) {
    try (PointerScope ignored = new PointerScope()) {
        TF_Status status = TF_Status.newStatus();
        BytePointer pointer = tensorflow.TFE_TensorHandleDeviceName(handle, status);
        String device = new String(pointer.getStringBytes(), StandardCharsets.UTF_8);
        return fromTfDevice(device);
    }
}
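TFE_TensorHandleDeviceName returns a fully qualified TensorFlow device string such as /job:localhost/replica:0/task:0/device:GPU:0. A minimal parser in the spirit of fromTfDevice (a hypothetical sketch, not the actual DJL implementation) could look like:

import ai.djl.Device;

// Hypothetical sketch of a TF device-string parser; the real fromTfDevice in DJL may differ.
static Device parseTfDevice(String device) {
    // example input: "/job:localhost/replica:0/task:0/device:GPU:0"
    int pos = device.indexOf("device:");
    if (pos >= 0) {
        String[] parts = device.substring(pos + "device:".length()).split(":");
        if (parts.length == 2 && "GPU".equals(parts[0])) {
            return Device.gpu(Integer.parseInt(parts[1]));
        }
    }
    return Device.cpu(); // assumption: default to CPU for anything else
}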
Use of org.bytedeco.javacpp.BytePointer in project djl by deepjavalibrary.
The class JavacppUtils, method getString.
@SuppressWarnings({"unchecked", "try"})
public static String[] getString(TFE_TensorHandle handle, int count, Charset charset) {
    try (PointerScope ignored = new PointerScope()) {
        // convert to TF_Tensor
        TF_Status status = TF_Status.newStatus();
        // should not add .withDeallocator() here, otherwise the string data will be destroyed
        TF_Tensor tensor = tensorflow.TFE_TensorHandleResolve(handle, status);
        status.throwExceptionIfNotOK();
        long tensorSize = tensorflow.TF_TensorByteSize(tensor);
        Pointer pointer = tensorflow.TF_TensorData(tensor).capacity(tensorSize);
        TF_TString data = new TF_TString(pointer).capacity(pointer.position() + count);
        String[] ret = new String[count];
        for (int i = 0; i < count; ++i) {
            TF_TString tstring = data.getPointer(i);
            long size = tensorflow.TF_TString_GetSize(tstring);
            BytePointer bp = tensorflow.TF_TString_GetDataPointer(tstring).capacity(size);
            ret[i] = bp.getString(charset);
        }
        // manually delete the tensor
        tensorflow.TF_DeleteTensor(tensor);
        return ret;
    }
}
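The data.getPointer(i) call above is plain JavaCPP pointer arithmetic: it returns a view of the same native memory shifted by i elements of sizeof(TF_TString). A minimal, TensorFlow-free illustration of the same idiom with IntPointer:

import org.bytedeco.javacpp.IntPointer;

// Illustration of JavaCPP's getPointer(i) element indexing, independent of TensorFlow.
IntPointer arr = new IntPointer(4); // native array of 4 ints
for (int i = 0; i < 4; i++) {
    arr.getPointer(i).put(i * 10); // a view positioned at element i
}
System.out.println(arr.get(2)); // prints 20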
Use of org.bytedeco.javacpp.BytePointer in project djl by deepjavalibrary.
The class JavacppUtils, method createStringTensor.
@SuppressWarnings({"unchecked", "try"})
public static Pair<TF_Tensor, TFE_TensorHandle> createStringTensor(long[] dims, ByteBuffer[] src) {
    int dType = TfDataType.toTf(DataType.STRING);
    long numBytes = (long) Loader.sizeof(TF_TString.class) * src.length;
    try (PointerScope ignored = new PointerScope()) {
        /*
         * A string tensor allocates separate TF_TString memory. The TF_TString will
         * be deleted when the string tensor is closed. We have to track the TF_TString
         * memory ourselves and make sure the TF_TString lifecycle aligns with the
         * TFE_TensorHandle. TF_Tensor already handles TF_TString automatically, so we
         * can just keep a TF_Tensor reference in TfNDArray.
         */
        TF_Tensor tensor = AbstractTF_Tensor.allocateTensor(dType, dims, numBytes);
        Pointer pointer = tensorflow.TF_TensorData(tensor).capacity(numBytes);
        TF_TString data = new TF_TString(pointer).capacity(pointer.position() + src.length);
        for (int i = 0; i < src.length; ++i) {
            TF_TString tstring = data.getPointer(i);
            tensorflow.TF_TString_Copy(tstring, new BytePointer(src[i]), src[i].remaining());
        }
        TF_Status status = TF_Status.newStatus();
        TFE_TensorHandle handle = AbstractTFE_TensorHandle.newTensor(tensor, status);
        status.throwExceptionIfNotOK();
        handle.retainReference();
        tensor.retainReference();
        return new Pair<>(tensor, handle);
    }
}
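A hedged roundtrip sketch combining the two helpers above (names as defined in this class; any surrounding engine setup is omitted):

// Hedged roundtrip sketch: build a string tensor, then read it back.
ByteBuffer[] src = {
    ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8)),
    ByteBuffer.wrap("world".getBytes(StandardCharsets.UTF_8))
};
Pair<TF_Tensor, TFE_TensorHandle> pair = JavacppUtils.createStringTensor(new long[] { 2 }, src);
String[] decoded = JavacppUtils.getString(pair.getValue(), 2, StandardCharsets.UTF_8);
// decoded now holds ["hello", "world"]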
Use of org.bytedeco.javacpp.BytePointer in project sample-projects by bytedeco.
The class VectorAddDrv, method main.
public static void main(String[] args) {
    // arrays in host memory
    int[] a = new int[N], b = new int[N], c = new int[N];
    // "pointers" to device memory
    long[] dev_a = { 0 }, dev_b = { 0 }, dev_c = { 0 };
    // init the device
    cuda.cuInit(0);
    int[] cudaDevice = { 0 };
    cuda.cuDeviceGet(cudaDevice, 0);
    // obtain a context
    cuda.CUctx_st context = new cuda.CUctx_st();
    cuda.cuCtxCreate(context, 0, cudaDevice[0]);
    // allocate device memory
    cuda.cuMemAlloc(dev_a, N * Integer.BYTES);
    cuda.cuMemAlloc(dev_b, N * Integer.BYTES);
    cuda.cuMemAlloc(dev_c, N * Integer.BYTES);
    // prepare host arrays
    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i * i;
    }
    // introduce an error to be found, to show that the check works
    a[N - 3] = 1;
    // copy input host (heap) arrays via native memory to device memory
    IntPointer nat_a = new IntPointer(a); // host -> native
    cuda.cuMemcpyHtoD(dev_a[0], nat_a, a.length * Integer.BYTES); // native -> device
    IntPointer nat_b = new IntPointer(b); // host -> native
    cuda.cuMemcpyHtoD(dev_b[0], nat_b, b.length * Integer.BYTES); // native -> device
    // prepare the kernel - module and function
    cuda.CUmod_st module = new cuda.CUmod_st();
    cuda.cuModuleLoadData(module, new BytePointer(VECTOR_ADD_PTX));
    cuda.CUfunc_st vector_add = new cuda.CUfunc_st();
    cuda.cuModuleGetFunction(vector_add, module, "vector_add");
    // prepare kernel parameters
    PointerPointer kernelParameters = new PointerPointer(
            new IntPointer(new int[] { N }),
            new LongPointer(dev_a), new LongPointer(dev_b), new LongPointer(dev_c));
    // run the kernel
    cuda.cuLaunchKernel(vector_add,
            (N + 255) / 256, 1, 1, // grid dimensions (x, y, z)
            256, 1, 1,             // block dimensions (x, y, z) - all GPUs support at least 256 threads per block
            0, null,               // shared memory size and stream
            kernelParameters, null); // kernel parameters and extra parameters
    cuda.cuCtxSynchronize();
    // copy the output device array via native memory to host (heap) memory
    IntPointer nat_c = new IntPointer(c.length);
    cuda.cuMemcpyDtoH(nat_c, dev_c[0], c.length * Integer.BYTES); // device -> native
    nat_c.get(c); // native -> host
    // check results
    boolean ok = true;
    for (int i = 0; i < N; i++) {
        if (c[i] != (i * i) - i) {
            ok = false;
            System.out.println("result incorrect at position " + i + ": expected " + ((i * i) - i) + ", but got " + c[i]);
        }
    }
    if (ok)
        System.out.println("calculation succeeded");
}
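The sample never releases the device allocations, the module, or the context. A short cleanup sketch using the same driver-API bindings (these calls exist in the CUDA driver API; placement at the end of main is an assumption) would be:

// Hedged cleanup sketch: free device memory, unload the module, destroy the context.
cuda.cuMemFree(dev_a[0]);
cuda.cuMemFree(dev_b[0]);
cuda.cuMemFree(dev_c[0]);
cuda.cuModuleUnload(module);
cuda.cuCtxDestroy(context);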
Use of org.bytedeco.javacpp.BytePointer in project bigbluebutton by bigbluebutton.
The class FFmpegFrameGrabber, method processImage.
private void processImage() throws Exception {
    frame.imageWidth = imageWidth > 0 ? imageWidth : video_c.width();
    frame.imageHeight = imageHeight > 0 ? imageHeight : video_c.height();
    frame.imageDepth = Frame.DEPTH_UBYTE;
    switch (imageMode) {
        case COLOR:
        case GRAY:
            // deinterlace picture
            if (deinterlace) {
                throw new Exception("Cannot deinterlace: Functionality moved to FFmpegFrameFilter.");
            }
            // convert the image into the BGR or GRAY format that OpenCV uses
            img_convert_ctx = sws_getCachedContext(img_convert_ctx,
                    video_c.width(), video_c.height(), video_c.pix_fmt(),
                    frame.imageWidth, frame.imageHeight, getPixelFormat(),
                    SWS_BILINEAR, null, null, (DoublePointer) null);
            if (img_convert_ctx == null) {
                throw new Exception("sws_getCachedContext() error: Cannot initialize the conversion context.");
            }
            // convert the image from its native format to RGB or GRAY
            sws_scale(img_convert_ctx, new PointerPointer(picture), picture.linesize(),
                    0, video_c.height(), new PointerPointer(picture_rgb), picture_rgb.linesize());
            frame.imageStride = picture_rgb.linesize(0);
            frame.image = image_buf;
            break;
        case RAW:
            frame.imageStride = picture.linesize(0);
            BytePointer ptr = picture.data(0);
            if (ptr != null && !ptr.equals(image_ptr[0])) {
                image_ptr[0] = ptr.capacity(frame.imageHeight * frame.imageStride);
                image_buf[0] = ptr.asBuffer();
            }
            frame.image = image_buf;
            break;
        default:
            assert false;
    }
    frame.image[0].limit(frame.imageHeight * frame.imageStride);
    frame.imageChannels = frame.imageStride / frame.imageWidth;
}
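processImage runs internally as part of grabbing; a typical caller-side sketch ("input.mp4" is a placeholder path) looks like:

// Hedged usage sketch of FFmpegFrameGrabber from JavaCV.
FFmpegFrameGrabber grabber = new FFmpegFrameGrabber("input.mp4"); // placeholder path
grabber.start();
Frame frame;
while ((frame = grabber.grabImage()) != null) {
    // frame.image holds the pixel buffers populated by processImage()
}
grabber.stop();
grabber.release();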