Use of com.simiacryptus.mindseye.lang.cudnn.Precision in project MindsEye by SimiaCryptus.
The class SquareActivationLayer, method evalAndFree:
@Nullable
@Override
public Result evalAndFree(@Nonnull final Result... inObj) {
  if (!CudaSystem.isEnabled())
    return getCompatibilityLayer().evalAndFree(inObj);
  if (inObj.length != 1) {
    throw new IllegalArgumentException("inObj.length=" + inObj.length);
  }
  Result input = inObj[0];
  final TensorList inputData = input.getData();
  @Nonnull final int[] dimensions = inputData.getDimensions();
  final int length = inputData.length();
  if (3 != dimensions.length) {
    throw new IllegalArgumentException("dimensions=" + Arrays.toString(dimensions));
  }
  return new Result(CudaSystem.run(gpu -> {
    // Forward pass: multiply the input tensor by itself (CUDNN_OP_TENSOR_MUL), scaled by alpha.
    @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
    @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
    @Nullable final CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
    // assert inputTensor.size == rPtr.size;
    @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * outputDescriptor.nStride * length, MemoryType.Device, true);
    CudaMemory lPtrMemory = inputTensor.getMemory(gpu);
    CudaSystem.handle(gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(alpha), inputTensor.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(1.0), inputTensor.descriptor.getPtr(), lPtrMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr()));
    assert CudaDevice.isThreadDeviceId(gpu.getDeviceId());
    outputPtr.dirty();
    lPtrMemory.dirty();
    lPtrMemory.freeRef();
    inputTensor.freeRef();
    opDescriptor.freeRef();
    CudaTensor cudaTensor = CudaTensor.wrap(outputPtr, outputDescriptor, precision);
    return CudaTensorList.wrap(cudaTensor, length, dimensions, precision);
  }, inputData), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
    if (input.isAlive()) {
      // Backward pass: d(alpha * x^2)/dx = 2 * alpha * x, so the incoming delta is multiplied by the input.
      @Nonnull TensorList data = CudaSystem.run(gpu -> {
        @Nonnull final CudaResource<cudnnOpTensorDescriptor> opDescriptor = gpu.newOpDescriptor(cudnnOpTensorOp.CUDNN_OP_TENSOR_MUL, precision);
        @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, dimensions[2], dimensions[1], dimensions[0], dimensions[2] * dimensions[1] * dimensions[0], dimensions[1] * dimensions[0], dimensions[0], 1);
        @Nullable final CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, true);
        delta.freeRef();
        @Nullable final CudaTensor inputTensor = gpu.getTensor(input.getData(), precision, MemoryType.Device, false);
        // assert deltaTensor.size == inputTensor.size;
        @Nonnull final CudaMemory outputPtr = gpu.allocate((long) precision.size * outputDescriptor.nStride * length, MemoryType.Device, true);
        CudaMemory deltaTensorMemory = deltaTensor.getMemory(gpu);
        CudaMemory rightTensorMemory = inputTensor.getMemory(gpu);
        CudaSystem.handle(gpu.cudnnOpTensor(opDescriptor.getPtr(), precision.getPointer(2), deltaTensor.descriptor.getPtr(), deltaTensorMemory.getPtr(), precision.getPointer(alpha), inputTensor.descriptor.getPtr(), rightTensorMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputPtr.getPtr()));
        deltaTensorMemory.dirty();
        rightTensorMemory.dirty();
        outputPtr.dirty();
        deltaTensorMemory.freeRef();
        rightTensorMemory.freeRef();
        CudaTensor cudaTensor = new CudaTensor(outputPtr, outputDescriptor, precision);
        Arrays.stream(new ReferenceCounting[] { deltaTensor, inputTensor, opDescriptor, outputDescriptor }).forEach(ReferenceCounting::freeRef);
        outputPtr.freeRef();
        return CudaTensorList.wrap(cudaTensor, length, dimensions, precision);
      }, delta);
      input.accumulate(buffer, data);
    } else {
      delta.freeRef();
    }
  }) {

    @Override
    public void accumulate(final DeltaSet<Layer> buffer, final TensorList delta) {
      getAccumulator().accept(buffer, delta);
    }

    @Override
    protected void _free() {
      inputData.freeRef();
      input.freeRef();
    }

    @Override
    public boolean isAlive() {
      for (@Nonnull final Result element : inObj) {
        if (element.isAlive()) {
          return true;
        }
      }
      return false;
    }
  };
}
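
For reference, the two cudnnOpTensor calls above amount to an elementwise square and its derivative: the forward pass computes y = alpha * x * x, and the backward pass scales the incoming delta by 2 * alpha * x. The following plain-Java sketch is a standalone illustration of that arithmetic on flat arrays; it is written for this note and is not part of MindsEye.

// Standalone sketch of the arithmetic performed by the GPU kernels above.
public final class SquareActivationSketch {

  // Forward: y[i] = alpha * x[i] * x[i]
  public static double[] forward(double[] x, double alpha) {
    double[] y = new double[x.length];
    for (int i = 0; i < x.length; i++) {
      y[i] = alpha * x[i] * x[i];
    }
    return y;
  }

  // Backward: dx[i] = 2 * alpha * x[i] * dy[i]
  public static double[] backward(double[] x, double[] dy, double alpha) {
    double[] dx = new double[x.length];
    for (int i = 0; i < x.length; i++) {
      dx[i] = 2 * alpha * x[i] * dy[i];
    }
    return dx;
  }
}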
Use of com.simiacryptus.mindseye.lang.cudnn.Precision in project MindsEye by SimiaCryptus.
The class DeepDream, method train:
/**
 * Trains the canvas image against the given network and returns the resulting buffered image.
 *
 * @param server          the server
 * @param log             the log
 * @param canvasImage     the canvas image
 * @param network         the network
 * @param precision       the precision
 * @param trainingMinutes the training minutes
 * @return the buffered image
 */
@Nonnull
public BufferedImage train(final StreamNanoHTTPD server, @Nonnull final NotebookOutput log, final BufferedImage canvasImage, final PipelineNetwork network, final Precision precision, final int trainingMinutes) {
  System.gc();
  Tensor canvas = Tensor.fromRGB(canvasImage);
  TestUtil.monitorImage(canvas, false, false);
  network.setFrozen(true);
  ArtistryUtil.setPrecision(network, precision);
  // The network weights are frozen; setMask(true) makes the canvas tensor itself the trainable variable.
  @Nonnull Trainable trainable = new ArrayTrainable(network, 1).setVerbose(true).setMask(true).setData(Arrays.asList(new Tensor[][] { { canvas } }));
  TestUtil.instrumentPerformance(network);
  if (null != server)
    ArtistryUtil.addLayersHandler(network, server);
  log.code(() -> {
    @Nonnull ArrayList<StepRecord> history = new ArrayList<>();
    new IterativeTrainer(trainable).setMonitor(TestUtil.getMonitor(history)).setIterationsPerSample(100).setOrientation(new TrustRegionStrategy() {

      @Override
      public TrustRegion getRegionPolicy(final Layer layer) {
        // Constrain each optimization step with a range-based trust region.
        return new RangeConstraint();
      }
    }).setLineSearchFactory(name -> new BisectionSearch().setSpanTol(1e-1).setCurrentRate(1e3)).setTimeout(trainingMinutes, TimeUnit.MINUTES).setTerminateThreshold(Double.NEGATIVE_INFINITY).runAndFree();
    return TestUtil.plot(history);
  });
  return canvas.toImage();
}
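
The trust region above applies a RangeConstraint to each step, which keeps the optimized canvas values inside a fixed interval. The snippet below is a minimal conceptual sketch of that kind of projection, assuming a [0, 255] pixel range; the excerpt does not state which bounds the RangeConstraint instance actually uses, so treat the numbers as an illustration only.

// Conceptual sketch: clamp a proposed update back into a fixed range.
// The [0, 255] bounds are an assumption for illustration, not taken from the code above.
public final class RangeProjectionSketch {

  public static double[] project(double[] proposed, double min, double max) {
    double[] projected = new double[proposed.length];
    for (int i = 0; i < proposed.length; i++) {
      projected[i] = Math.max(min, Math.min(max, proposed[i]));
    }
    return projected;
  }

  public static void main(String[] args) {
    double[] step = { -12.0, 48.5, 300.0 };
    // With [0, 255] bounds this prints [0.0, 48.5, 255.0].
    System.out.println(java.util.Arrays.toString(project(step, 0, 255)));
  }
}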
Use of com.simiacryptus.mindseye.lang.cudnn.Precision in project MindsEye by SimiaCryptus.
The class AvgReducerLayer, method evalAndFree:
@Nullable
@Override
public Result evalAndFree(final Result... inObj) {
  if (!CudaSystem.isEnabled())
    return getCompatibilityLayer().evalAndFree(inObj);
  final Result input = inObj[0];
  final TensorList inputData = input.getData();
  @Nonnull final int[] inputSize = inputData.getDimensions();
  int length = inputData.length();
  CudaTensorList result = CudaSystem.run(gpu -> {
    // Forward pass: reduce each input tensor to its mean with CUDNN_REDUCE_TENSOR_AVG.
    CudaTensor inputTensor = gpu.getTensor(inputData, precision, MemoryType.Device, false);
    inputData.freeRef();
    CudaMemory inputMemory = inputTensor.getMemory(gpu);
    @Nonnull final CudaDevice.CudaTensorDescriptor outputDescriptor = gpu.newTensorDescriptor(precision, length, 1, 1, 1);
    long size = (long) precision.size * outputDescriptor.nStride * length;
    @Nonnull final CudaMemory outputMemory = gpu.allocate(size, MemoryType.Managed, true);
    CudaResource<cudnnReduceTensorDescriptor> reduceTensorDescriptor = gpu.cudnnCreateReduceTensorDescriptor(cudnnReduceTensorOp.CUDNN_REDUCE_TENSOR_AVG, precision.code, cudnnNanPropagation.CUDNN_NOT_PROPAGATE_NAN, cudnnReduceTensorIndices.CUDNN_REDUCE_TENSOR_NO_INDICES, cudnnIndicesType.CUDNN_32BIT_INDICES);
    @Nonnull final CudaMemory workspacePtr = gpu.allocate(inputMemory.size, MemoryType.Device, true);
    @Nonnull final CudaMemory indexPtr = gpu.allocate(12 * length, MemoryType.Device, false);
    // outputPtr.synchronize();
    gpu.cudnnReduceTensor(reduceTensorDescriptor.getPtr(), indexPtr.getPtr(), indexPtr.size, workspacePtr.getPtr(), workspacePtr.size, precision.getPointer(1.0), inputTensor.descriptor.getPtr(), inputMemory.getPtr(), precision.getPointer(0.0), outputDescriptor.getPtr(), outputMemory.getPtr());
    outputMemory.dirty();
    inputMemory.dirty();
    Stream.of(inputTensor, inputMemory, reduceTensorDescriptor, workspacePtr, indexPtr).forEach(ReferenceCounting::freeRef);
    return CudaTensorList.wrap(CudaTensor.wrap(outputMemory, outputDescriptor, precision), length, new int[] { 1, 1, 1 }, precision);
  });
  return new Result(result, (DeltaSet<Layer> ctx, TensorList delta) -> {
    // Not supported by CuDNN?
    // CudaTensorList passback = CudaSystem.run(gpu -> {
    //   CudaTensor deltaTensor = gpu.getTensor(delta, precision, MemoryType.Device, false);
    //   CudaMemory deltaMemory = deltaTensor.getMemory(gpu);
    //
    //   @Nonnull final CudaDevice.CudaTensorDescriptor passbackDescriptor1 = gpu.newTensorDescriptor(
    //     precision, length, inputSize[2], inputSize[1], inputSize[0]
    //   );
    //   @Nonnull final CudaMemory passbackPtr1 = gpu.allocate((long) precision.size * passbackDescriptor1.nStride * length, MemoryType.Device, false);
    //   gpu.cudnnAddTensor(precision.getPointer(1.0), deltaTensor.descriptor.getPtr(), deltaMemory.getPtr(),
    //     precision.getPointer(1.0), passbackDescriptor1.getPtr(), passbackPtr1.getPtr());
    //   passbackPtr1.dirty();
    //
    //   Stream.of(deltaTensor, deltaMemory, passbackDescriptor1, passbackPtr1).forEach(ReferenceCounting::freeRef);
    //   return CudaTensorList.wrap(CudaTensor.wrap(passbackPtr1, passbackDescriptor1, precision), length, inputSize, precision);
    // });
    // Backward pass on the CPU: every element of the input receives delta / N, where N is the input element count.
    TensorList passback = TensorArray.wrap(IntStream.range(0, length).mapToObj(i -> {
      Tensor tensor = delta.get(i);
      Tensor tensor1 = new Tensor(inputSize).setAll((double) tensor.get(0) / Tensor.length(inputSize));
      tensor.freeRef();
      return tensor1;
    }).toArray(i -> new Tensor[i]));
    input.accumulate(ctx, passback);
  }) {

    @Override
    protected void _free() {
      super._free();
      input.freeRef();
    }
  };
}
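
The CPU passback above spreads each scalar delta evenly over the input: an average over N elements has gradient dy / N with respect to every element. Below is a standalone sketch of the reduction and its gradient on plain arrays, written for this note as an illustration rather than taken from MindsEye.

// Standalone sketch mirroring the passback loop above.
public final class AvgReduceSketch {

  // Forward: y = (1/N) * sum(x)
  public static double forward(double[] x) {
    double sum = 0;
    for (double v : x) {
      sum += v;
    }
    return sum / x.length;
  }

  // Backward: every input element receives dy / N.
  public static double[] backward(int n, double dy) {
    double[] dx = new double[n];
    java.util.Arrays.fill(dx, dy / n);
    return dx;
  }
}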
Use of com.simiacryptus.mindseye.lang.cudnn.Precision in project MindsEye by SimiaCryptus.
The class CudaLayerTester, method testNonstandardBoundsBackprop:
/**
 * Tests backprop with non-dense (irregularly bounded) delta tensors and returns the tolerance statistics.
 *
 * @param log            the log
 * @param layer          the layer
 * @param inputPrototype the input prototype
 * @return the tolerance statistics
 */
@Nonnull
public ToleranceStatistics testNonstandardBoundsBackprop(final NotebookOutput log, @Nullable final Layer layer, @Nonnull final Tensor[] inputPrototype) {
  log.h2("Irregular Backprop");
  log.p("This layer should accept non-dense tensors as delta input.");
  return log.code(() -> {
    Tensor[] randomized = Arrays.stream(inputPrototype).map(x -> x.map(v -> getRandom())).toArray(i -> new Tensor[i]);
    logger.info("Input: " + Arrays.stream(randomized).map(Tensor::prettyPrint).collect(Collectors.toList()));
    Precision precision = Precision.Double;
    TensorList[] controlInput = Arrays.stream(randomized).map(original -> {
      return TensorArray.wrap(original);
    }).toArray(i -> new TensorList[i]);
    // Test run: feed back an irregular (non-dense) CudaTensorList as the delta.
    @Nonnull final SimpleResult testResult = CudaSystem.run(gpu -> {
      TensorList[] copy = copy(controlInput);
      SimpleResult result = new SimpleGpuEval(layer, gpu, copy) {

        @Nonnull
        @Override
        public TensorList getFeedback(@Nonnull final TensorList original) {
          Tensor originalTensor = original.get(0).mapAndFree(x -> 1);
          CudaTensorList cudaTensorList = buildIrregularCudaTensor(gpu, precision, originalTensor);
          originalTensor.freeRef();
          return cudaTensorList;
        }
      }.call();
      Arrays.stream(copy).forEach(ReferenceCounting::freeRef);
      return result;
    });
    // Control run: the same evaluation with a standard dense delta.
    @Nonnull final SimpleResult controlResult = CudaSystem.run(gpu -> {
      TensorList[] copy = copy(controlInput);
      SimpleResult result = SimpleGpuEval.run(layer, gpu, copy);
      Arrays.stream(copy).forEach(ReferenceCounting::freeRef);
      return result;
    }, 1);
    try {
      ToleranceStatistics compareOutput = compareOutput(controlResult, testResult);
      ToleranceStatistics compareDerivatives = compareDerivatives(controlResult, testResult);
      return compareDerivatives.combine(compareOutput);
    } finally {
      Arrays.stream(controlInput).forEach(ReferenceCounting::freeRef);
      controlResult.freeRef();
      testResult.freeRef();
    }
  });
}
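
The test combines tolerance statistics over the outputs and derivatives of the irregular and control runs. As a rough idea of what such a comparison measures, the sketch below computes a symmetric relative error between two arrays; this is a generic illustration and an assumption about the general approach, not MindsEye's actual ToleranceStatistics implementation.

// Generic sketch of an output/derivative comparison; not the library's ToleranceStatistics.
public final class ToleranceSketch {

  // Largest symmetric relative error |a - b| / (|a| + |b|) over the two arrays.
  public static double maxRelativeError(double[] expected, double[] actual) {
    double worst = 0;
    for (int i = 0; i < expected.length; i++) {
      double denom = Math.abs(expected[i]) + Math.abs(actual[i]);
      double err = denom == 0 ? 0 : Math.abs(expected[i] - actual[i]) / denom;
      worst = Math.max(worst, err);
    }
    return worst;
  }
}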
Use of com.simiacryptus.mindseye.lang.cudnn.Precision in project MindsEye by SimiaCryptus.
The class StyleTransfer_VGG19, method run:
/**
 * Runs the multi-phase style transfer demo.
 *
 * @param log the log
 */
public void run(@Nonnull NotebookOutput log) {
  StyleTransfer.VGG19 styleTransfer = new StyleTransfer.VGG19();
  init(log);
  Precision precision = Precision.Float;
  int imageSize = 400;
  styleTransfer.parallelLossFunctions = true;
  double growthFactor = Math.sqrt(1.5);
  CharSequence lakeAndForest = "H:\\SimiaCryptus\\Artistry\\Owned\\IMG_20170624_153541213-EFFECTS.jpg";
  String monkey = "H:\\SimiaCryptus\\Artistry\\capuchin-monkey-2759768_960_720.jpg";
  CharSequence vanGogh1 = "H:\\SimiaCryptus\\Artistry\\portraits\\vangogh\\Van_Gogh_-_Portrait_of_Pere_Tanguy_1887-8.jpg";
  CharSequence vanGogh2 = "H:\\SimiaCryptus\\Artistry\\portraits\\vangogh\\800px-Vincent_van_Gogh_-_Dr_Paul_Gachet_-_Google_Art_Project.jpg";
  CharSequence threeMusicians = "H:\\SimiaCryptus\\Artistry\\portraits\\picasso\\800px-Pablo_Picasso,_1921,_Nous_autres_musiciens_(Three_Musicians),_oil_on_canvas,_204.5_x_188.3_cm,_Philadelphia_Museum_of_Art.jpg";
  CharSequence maJolie = "H:\\SimiaCryptus\\Artistry\\portraits\\picasso\\Ma_Jolie_Pablo_Picasso.jpg";
  Map<List<CharSequence>, StyleTransfer.StyleCoefficients> styles = new HashMap<>();
  double coeff_mean = 1e1;
  double coeff_cov = 1e0;
  styles.put(Arrays.asList(// threeMusicians, maJolie
    vanGogh1, vanGogh2), new StyleTransfer.StyleCoefficients(StyleTransfer.CenteringMode.Origin).set(MultiLayerVGG19.LayerType.Layer_1b, coeff_mean, coeff_cov).set(MultiLayerVGG19.LayerType.Layer_1d, coeff_mean, coeff_cov));
  StyleTransfer.ContentCoefficients contentCoefficients = new StyleTransfer.ContentCoefficients().set(MultiLayerVGG19.LayerType.Layer_1c, 1e0);
  int trainingMinutes = 90;
  log.h1("Phase 0");
  BufferedImage canvasImage = ArtistryUtil.load(monkey, imageSize);
  canvasImage = TestUtil.resize(canvasImage, imageSize, true);
  // Downsample to 25 px and back up, blurring the content image before noise is added.
  canvasImage = TestUtil.resize(TestUtil.resize(canvasImage, 25, true), imageSize, true);
  // canvasImage = randomize(canvasImage, x -> 10 * (FastRandom.INSTANCE.random()) * (FastRandom.INSTANCE.random() < 0.9 ? 1 : 0));
  canvasImage = ArtistryUtil.randomize(canvasImage, x -> x + 2 * 1 * (FastRandom.INSTANCE.random() - 0.5));
  // canvasImage = randomize(canvasImage, x -> 10 * (FastRandom.INSTANCE.random() - 0.5));
  // canvasImage = randomize(canvasImage, x -> x * (FastRandom.INSTANCE.random()));
  Map<CharSequence, BufferedImage> styleImages = new HashMap<>();
  styleImages.clear();
  styleImages.putAll(styles.keySet().stream().flatMap(x -> x.stream()).collect(Collectors.toMap(x -> x, file -> ArtistryUtil.load(file))));
  StyleTransfer.StyleSetup styleSetup = new StyleTransfer.StyleSetup(precision, ArtistryUtil.load(monkey, canvasImage.getWidth(), canvasImage.getHeight()), contentCoefficients, styleImages, styles);
  StyleTransfer.NeuralSetup measureStyle = styleTransfer.measureStyle(styleSetup);
  canvasImage = styleTransfer.styleTransfer(server, log, canvasImage, styleSetup, trainingMinutes, measureStyle);
  // Grow the canvas by sqrt(1.5) per phase and re-run the style transfer at each scale.
  for (int i = 1; i < 10; i++) {
    log.h1("Phase " + i);
    imageSize = (int) (imageSize * growthFactor);
    canvasImage = TestUtil.resize(canvasImage, imageSize, true);
    canvasImage = styleTransfer.styleTransfer(server, log, canvasImage, styleSetup, trainingMinutes, measureStyle);
  }
  log.setFrontMatterProperty("status", "OK");
}
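
The phase loop above enlarges the canvas by growthFactor = sqrt(1.5), roughly 1.22, before each style-transfer pass. The following small standalone sketch (illustration only, not project code) prints the resulting size schedule:

// Standalone sketch: prints the image-size schedule produced by the growth loop above,
// starting from the 400-pixel phase-0 size.
public final class GrowthScheduleSketch {

  public static void main(String[] args) {
    int imageSize = 400;
    double growthFactor = Math.sqrt(1.5);
    for (int i = 1; i < 10; i++) {
      imageSize = (int) (imageSize * growthFactor);
      System.out.println("Phase " + i + ": " + imageSize + " px");
    }
    // Yields 489, 598, 732, 896, 1097, 1343, 1644, 2013, 2465 px for phases 1 through 9.
  }
}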