Use of com.simiacryptus.mindseye.lang.ReferenceCounting in project MindsEye by SimiaCryptus — class AvgReducerLayer, method evalAndFree.
/**
 * Computes the average over all elements of the input tensor on the GPU via a
 * cuDNN AVG tensor reduction, yielding a [1, 1, 1] tensor per batch element.
 * Falls back to the compatibility (CPU) layer when CUDA is disabled.
 *
 * @param inObj the single input result; its data is consumed (freed) here
 * @return the reduced result, whose backward pass distributes each delta
 *         uniformly across the corresponding input elements
 */
@Nullable
@Override
public Result evalAndFree(final Result... inObj) {
  if (!CudaSystem.isEnabled())
    return getCompatibilityLayer().evalAndFree(inObj);
  final Result input = inObj[0];
  final TensorList inputData = input.getData();
  @Nonnull final int[] inputSize = inputData.getDimensions();
  int length = inputData.length();
  CudaTensorList result = CudaSystem.run(gpu -> {
    CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
    // Ownership handed to inputTensor; release our reference to the input data.
    inputData.freeRef();
    CudaMemory inputMemory = inputTensor.getMemory(gpu);
    // One scalar output per batch element: descriptor is (length, 1, 1, 1).
    @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, 1, 1, 1);
    long size = (long) precision.size * outputDescriptor.nStride * length;
    @Nonnull final CudaMemory outputMemory = gpu.allocate(size, MemoryType.Managed, true);
    CudaResource<cudnnReduceTensorDescriptor> reduceTensorDescriptor = gpu.cudnnCreateReduceTensorDescriptor(cudnnReduceTensorOp.CUDNN_REDUCE_TENSOR_AVG, precision.code, cudnnNanPropagation.CUDNN_NOT_PROPAGATE_NAN, cudnnReduceTensorIndices.CUDNN_REDUCE_TENSOR_NO_INDICES, cudnnIndicesType.CUDNN_32BIT_INDICES);
    // cudnnReduceTensor requires scratch space; sizing it to the input is sufficient.
    @Nonnull final CudaMemory workspacePtr = gpu.allocate(inputMemory.size, MemoryType.Device, true);
    @Nonnull final CudaMemory indexPtr = gpu.allocate(12 * length, MemoryType.Device, false);
    gpu.cudnnReduceTensor(reduceTensorDescriptor.getPtr(), indexPtr.getPtr(), indexPtr.size, workspacePtr.getPtr(), workspacePtr.size, precision.getPointer(1.0), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputMemory.getPtr());
    // Mark device buffers as modified so cached host copies are invalidated.
    outputMemory.dirty();
    inputMemory.dirty();
    Stream.of(inputTensor, inputMemory, reduceTensorDescriptor, workspacePtr, indexPtr).forEach(ReferenceCounting::freeRef);
    return CudaTensorList.wrap(CudaTensor.wrap(outputMemory, outputDescriptor, precision), length, new int[] { 1, 1, 1 }, precision);
  });
  return new Result(result, (DeltaSet<Layer> ctx, TensorList delta) -> {
    // The broadcast required by this backward pass is apparently not supported
    // by cuDNN, so the gradient is computed on the heap: every input element
    // receives delta / elementCount.
    TensorList passback = TensorArray.wrap(IntStream.range(0, length).mapToObj(i -> {
      Tensor tensor = delta.get(i);
      Tensor tensor1 = new Tensor(inputSize).setAll((double) tensor.get(0) / Tensor.length(inputSize));
      tensor.freeRef();
      return tensor1;
    }).toArray(i -> new Tensor[i]));
    input.accumulate(ctx, passback);
  }) {
    @Override
    protected void _free() {
      super._free();
      input.freeRef();
    }
  };
}
Use of com.simiacryptus.mindseye.lang.ReferenceCounting in project MindsEye by SimiaCryptus — class CudaLayerTester, method testNonstandardBoundsBackprop.
/**
 * Verifies that backpropagation tolerates non-dense (irregularly laid out)
 * delta tensors by comparing an irregular-feedback evaluation against a
 * dense-control evaluation of the same layer.
 *
 * @param log            the notebook log
 * @param layer          the layer under test
 * @param inputPrototype prototype tensors defining the input dimensions
 * @return tolerance statistics combining output and derivative comparisons
 */
@Nonnull
public ToleranceStatistics testNonstandardBoundsBackprop(final NotebookOutput log, @Nullable final Layer layer, @Nonnull final Tensor[] inputPrototype) {
  log.h2("Irregular Backprop");
  log.p("This layer should accept non-dense tensors as delta input.");
  return log.code(() -> {
    Tensor[] randomInputs = Arrays.stream(inputPrototype).map(x -> x.map(v -> getRandom())).toArray(i -> new Tensor[i]);
    logger.info("Input: " + Arrays.stream(randomInputs).map(Tensor::prettyPrint).collect(Collectors.toList()));
    Precision precision = Precision.Double;
    TensorList[] referenceInput = Arrays.stream(randomInputs).map(TensorArray::wrap).toArray(i -> new TensorList[i]);
    @Nonnull final SimpleResult irregularResult = CudaSystem.run(gpu -> {
      TensorList[] inputCopy = copy(referenceInput);
      SimpleResult evaluated = new SimpleGpuEval(layer, gpu, inputCopy) {
        @Nonnull
        @Override
        public TensorList getFeedback(@Nonnull final TensorList original) {
          // Feed back an all-ones delta packaged with a non-standard memory layout.
          Tensor ones = original.get(0).mapAndFree(x -> 1);
          CudaTensorList irregular = buildIrregularCudaTensor(gpu, precision, ones);
          ones.freeRef();
          return irregular;
        }
      }.call();
      Arrays.stream(inputCopy).forEach(ReferenceCounting::freeRef);
      return evaluated;
    });
    @Nonnull final SimpleResult denseResult = CudaSystem.run(gpu -> {
      TensorList[] inputCopy = copy(referenceInput);
      SimpleResult evaluated = SimpleGpuEval.run(layer, gpu, inputCopy);
      Arrays.stream(inputCopy).forEach(ReferenceCounting::freeRef);
      return evaluated;
    }, 1);
    try {
      ToleranceStatistics outputAgreement = compareOutput(denseResult, irregularResult);
      ToleranceStatistics derivativeAgreement = compareDerivatives(denseResult, irregularResult);
      return derivativeAgreement.combine(outputAgreement);
    } finally {
      Arrays.stream(referenceInput).forEach(ReferenceCounting::freeRef);
      denseResult.freeRef();
      irregularResult.freeRef();
    }
  });
}
Use of com.simiacryptus.mindseye.lang.ReferenceCounting in project MindsEye by SimiaCryptus — class CudaLayerTester, method testInterGpu.
/**
 * Checks that a layer evaluates consistently when its inputs were created on
 * a different GPU context (device 0) than the one performing the evaluation
 * (device 1), comparing against the same evaluation fed from heap tensors.
 *
 * @param log            the notebook log
 * @param reference      the layer under test
 * @param inputPrototype prototype tensors defining the input dimensions
 * @return tolerance statistics combining output and derivative comparisons
 */
@Nonnull
public ToleranceStatistics testInterGpu(final NotebookOutput log, @Nullable final Layer reference, @Nonnull final Tensor[] inputPrototype) {
  log.h2("Multi-GPU Compatibility");
  log.p("This layer should be able to apply using a GPU context other than the one used to create the inputs.");
  return log.code(() -> {
    final TensorList[] cpuInputs = Arrays.stream(inputPrototype).map(prototype -> TensorArray.wrap(IntStream.range(0, getBatchSize()).mapToObj(i -> prototype.map(v -> getRandom())).toArray(i -> new Tensor[i]))).toArray(i -> new TensorList[i]);
    logger.info("Input: " + Arrays.stream(cpuInputs).flatMap(list -> list.stream()).map(tensor -> {
      String rendering = tensor.prettyPrint();
      tensor.freeRef();
      return rendering;
    }).collect(Collectors.toList()));
    // Materialize the same inputs as device-resident tensors on GPU 0.
    TensorList[] deviceInputs = CudaSystem.run(gpu -> {
      return Arrays.stream(cpuInputs).map(source -> {
        return CudaTensorList.wrap(gpu.getTensor(source, Precision.Double, MemoryType.Managed, false), source.length(), source.getDimensions(), Precision.Double);
      }).toArray(i -> new TensorList[i]);
    }, 0);
    // Both evaluations run on GPU 1; one consumes heap inputs, one GPU-0 inputs.
    @Nonnull final SimpleResult heapEval = CudaSystem.run(gpu -> SimpleGpuEval.run(reference, gpu, cpuInputs), 1);
    @Nonnull final SimpleResult gpuEval = CudaSystem.run(gpu -> SimpleGpuEval.run(reference, gpu, deviceInputs), 1);
    try {
      ToleranceStatistics outputAgreement = compareOutput(heapEval, gpuEval);
      ToleranceStatistics derivativeAgreement = compareDerivatives(heapEval, gpuEval);
      return derivativeAgreement.combine(outputAgreement);
    } finally {
      Arrays.stream(deviceInputs).forEach(ReferenceCounting::freeRef);
      Arrays.stream(cpuInputs).forEach(list -> list.freeRef());
      gpuEval.freeRef();
      heapEval.freeRef();
    }
  });
}
Use of com.simiacryptus.mindseye.lang.ReferenceCounting in project MindsEye by SimiaCryptus — class StandardLayerTests, method getInvocations.
/**
 * Collects the distinct (layer, input-dimensions) invocations that occur when
 * evaluating a copy of the given network, by wrapping every node's layer in a
 * recording proxy before a single trial evaluation.
 *
 * @param smallLayer the network to probe (must be a DAGNetwork)
 * @param smallDims  the dimensions of each input tensor for the trial evaluation
 * @return the set of recorded invocations
 */
@Nonnull
public Collection<Invocation> getInvocations(@Nonnull Layer smallLayer, @Nonnull int[][] smallDims) {
  @Nonnull DAGNetwork smallCopy = (DAGNetwork) smallLayer.copy();
  @Nonnull HashSet<Invocation> invocations = new HashSet<>();
  smallCopy.visitNodes(node -> {
    @Nullable Layer inner = node.getLayer();
    // Guard before addRef: getLayer() is @Nullable, and the original code
    // dereferenced it unconditionally (NPE) despite null-checking it later.
    if (null == inner)
      return;
    inner.addRef();
    @Nullable Layer wrapper = new LayerBase() {
      @Nullable
      @Override
      public Result eval(@Nonnull Result... array) {
        // Delegate to the wrapped layer, recording what it was called with.
        @Nullable Result result = inner.eval(array);
        invocations.add(new Invocation(inner, Arrays.stream(array).map(x -> x.getData().getDimensions()).toArray(i -> new int[i][])));
        return result;
      }
      @Override
      public JsonObject getJson(Map<CharSequence, byte[]> resources, DataSerializer dataSerializer) {
        return inner.getJson(resources, dataSerializer);
      }
      @Nullable
      @Override
      public List<double[]> state() {
        return inner.state();
      }
      @Override
      protected void _free() {
        inner.freeRef();
      }
    };
    node.setLayer(wrapper);
    wrapper.freeRef();
  });
  Tensor[] input = Arrays.stream(smallDims).map(i -> new Tensor(i)).toArray(i -> new Tensor[i]);
  try {
    Result eval = smallCopy.eval(input);
    // Release the result's data BEFORE the result itself: the original order
    // (freeRef, then getData().freeRef()) dereferenced a freed object.
    eval.getData().freeRef();
    eval.freeRef();
    return invocations;
  } finally {
    Arrays.stream(input).forEach(ReferenceCounting::freeRef);
    smallCopy.freeRef();
  }
}
Use of com.simiacryptus.mindseye.lang.ReferenceCounting in project MindsEye by SimiaCryptus — class TrainingTester, method testCompleteLearning.
/**
 * Test complete learning apply result.
 * <p>
 * Trains a fresh shuffled copy of the component to emulate a frozen, shuffled
 * copy of the same component, given one example input/output pair; both the
 * network weights and the inputs are regressed simultaneously.
 *
 * @param log the log
 * @param component the component
 * @param random the random
 * @param inputPrototype the input prototype
 * @return the apply result
 */
@Nonnull
public TestResult testCompleteLearning(@Nonnull final NotebookOutput log, @Nonnull final Layer component, final Random random, @Nonnull final Tensor[] inputPrototype) {
// The frozen target network whose behavior the trainee must reproduce.
@Nonnull final Layer network_target = shuffle(random, component.copy()).freeze();
final Tensor[][] input_target = shuffleCopy(random, inputPrototype);
log.p("In this apply, attempt to train a network to emulate a randomized network given an example input/output. The target state is:");
log.code(() -> {
return network_target.state().stream().map(Arrays::toString).reduce((a, b) -> a + "\n" + b).orElse("");
});
log.p("We simultaneously regress this target input:");
log.code(() -> {
return Arrays.stream(input_target).flatMap(x -> Arrays.stream(x)).map(x -> x.prettyPrint()).reduce((a, b) -> a + "\n" + b).orElse("");
});
log.p("Which produces the following output:");
// Evaluate the target network once to produce the training labels.
Result[] inputs = ConstantResult.batchResultArray(input_target);
Result eval = network_target.eval(inputs);
network_target.freeRef();
Arrays.stream(inputs).forEach(ReferenceCounting::freeRef);
TensorList result = eval.getData();
eval.freeRef();
final Tensor[] output_target = result.stream().toArray(i -> new Tensor[i]);
// NOTE(review): `result` (and the extracted output_target tensors) are never
// freeRef'd in this method — possible reference leak; confirm ownership
// conventions (does trainAll/append take ownership?) before changing.
log.code(() -> {
return Stream.of(output_target).map(x -> x.prettyPrint()).reduce((a, b) -> a + "\n" + b).orElse("");
});
// if (output_target.length != inputPrototype.length) return null;
// Train a second shuffled copy on (fresh input copies + target outputs);
// buildMask marks which array entries are trainable inputs vs. fixed labels.
return trainAll("Integrated Convergence", log, append(shuffleCopy(random, inputPrototype), output_target), shuffle(random, component.copy()), buildMask(inputPrototype.length));
}
Aggregations