Use of com.simiacryptus.mindseye.layers.cudnn.ActivationLayer in project MindsEye by SimiaCryptus.
The class VGG16_HDF5, method phase3a:
/**
 * Phase 3a: appends the classifier head of VGG16, expressing the two
 * fully-connected layers as 1x1 convolutions and loading their weights
 * from the bundled HDF5 file.
 */
protected void phase3a() {
  // Fully-connected 4096 -> 4096, stored in the HDF5 file as layer_34.
  add(new ConvolutionLayer(1, 1, 4096, 4096).setPaddingXY(0, 0)
      .setAndFree(hdf5.readDataSet("param_0", "layer_34")
          .permuteDimensionsAndFree(fullyconnectedOrder)));
  add(new ImgBandBiasLayer(4096).setAndFree(hdf5.readDataSet("param_1", "layer_34")));
  add(new ActivationLayer(ActivationLayer.Mode.RELU));
  // Fully-connected 4096 -> 1000 class scores, stored as layer_36.
  add(new ConvolutionLayer(1, 1, 4096, 1000).setPaddingXY(0, 0)
      .setAndFree(hdf5.readDataSet("param_0", "layer_36")
          .permuteDimensionsAndFree(fullyconnectedOrder)));
  add(new ImgBandBiasLayer(1000).setAndFree(hdf5.readDataSet("param_1", "layer_36")));
}
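The two ConvolutionLayer instances above stand in for VGG16's fully-connected layers: once the feature map is 1x1 spatially, a 1x1 convolution with N input and M output bands is exactly a dense matrix-vector product (permuteDimensionsAndFree reorders the HDF5 dense-weight axes into the convolution layout). A minimal plain-Java sketch of that equivalence; the array layout and sample numbers are illustrative, not MindsEye's internal format:

// A 1x1 convolution over a single pixel: each output band is a
// weighted sum over all input bands, i.e. a matrix-vector product.
public class OneByOneConvAsDense {
  public static void main(String[] args) {
    double[] input = {0.5, -1.0, 2.0};       // 3 input bands at the one pixel
    double[][] weights = {                   // [outputBand][inputBand]
        {0.1, 0.2, 0.3},
        {0.4, 0.5, 0.6}
    };
    double[] output = new double[weights.length];
    for (int o = 0; o < weights.length; o++) {
      for (int i = 0; i < input.length; i++) {
        output[o] += weights[o][i] * input[i];
      }
    }
    System.out.println(java.util.Arrays.toString(output));
  }
}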
Use of com.simiacryptus.mindseye.layers.cudnn.ActivationLayer in project MindsEye by SimiaCryptus.
The class RecursiveSubspaceTest, method buildModel:
@Override
public DAGNetwork buildModel(@Nonnull NotebookOutput log) {
  log.h3("Model");
  log.p("We use a multi-level convolution network");
  return log.code(() -> {
    @Nonnull final PipelineNetwork network = new PipelineNetwork();
    double weight = 1e-3;
    // Small random initialization, centered on zero.
    @Nonnull DoubleSupplier init = () -> weight * (Math.random() - 0.5);
    // First convolution block: 3x3 conv, bias, 2x2 max-pool, ReLU, normalization.
    network.add(new ConvolutionLayer(3, 3, 1, 5).set(init));
    network.add(new ImgBandBiasLayer(5));
    network.add(new PoolingLayer().setMode(PoolingLayer.PoolingMode.Max));
    network.add(new ActivationLayer(ActivationLayer.Mode.RELU));
    network.add(newNormalizationLayer());
    // Second convolution block.
    network.add(new ConvolutionLayer(3, 3, 5, 5).set(init));
    network.add(new ImgBandBiasLayer(5));
    network.add(new PoolingLayer().setMode(PoolingLayer.PoolingMode.Max));
    network.add(new ActivationLayer(ActivationLayer.Mode.RELU));
    network.add(newNormalizationLayer());
    // Classifier: bias, dense 7*7*5 -> 10 logits, softmax.
    network.add(new BiasLayer(7, 7, 5));
    network.add(new FullyConnectedLayer(new int[] {7, 7, 5}, new int[] {10}).set(init));
    network.add(new SoftmaxActivationLayer());
    return network;
  });
}
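A smoke test for the resulting model might look like the sketch below. SimpleEval is assumed here from MindsEye's test utilities, and the 28x28x1 input size (e.g. MNIST) is implied by the BiasLayer(7, 7, 5): two 2x2 max-pools reduce 28x28 to 7x7 over 5 bands.

// Hypothetical smoke test; SimpleEval and the input dimensions are assumptions.
DAGNetwork network = buildModel(log);
Tensor image = new Tensor(28, 28, 1).set(() -> Math.random());
SimpleEval eval = SimpleEval.run(network, image);
Tensor prediction = eval.getOutput();   // ten softmax class probabilities
System.out.println(prediction.prettyPrint());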
Use of com.simiacryptus.mindseye.layers.cudnn.ActivationLayer in project MindsEye by SimiaCryptus.
The class ImageClassifier, method deepDream:
/**
 * Deep dream: optimizes the input image to maximize the network's response,
 * keeping pixel values clamped to the displayable 0..255 range.
 *
 * @param log   the notebook output log
 * @param image the image to optimize
 */
public void deepDream(@Nonnull final NotebookOutput log, final Tensor image) {
  log.code(() -> {
    @Nonnull ArrayList<StepRecord> history = new ArrayList<>();
    // Clamp pixel values to [0, 255] via two ReLU / negated-linear pairs.
    @Nonnull PipelineNetwork clamp = new PipelineNetwork(1);
    clamp.add(new ActivationLayer(ActivationLayer.Mode.RELU));
    clamp.add(new LinearActivationLayer().setBias(255).setScale(-1).freeze());
    clamp.add(new ActivationLayer(ActivationLayer.Mode.RELU));
    clamp.add(new LinearActivationLayer().setBias(255).setScale(-1).freeze());
    @Nonnull PipelineNetwork supervised = new PipelineNetwork(1);
    supervised.add(getNetwork().freeze(), supervised.wrap(clamp, supervised.getInput(0)));
    // CudaTensorList gpuInput = CudnnHandle.apply(gpu -> {
    //   Precision precision = Precision.Float;
    //   return CudaTensorList.wrap(gpu.getPtr(TensorArray.wrap(image), precision, MemoryType.Managed), 1, image.getDimensions(), precision);
    // });
    // @Nonnull Trainable trainable = new TensorListTrainable(supervised, gpuInput).setVerbosity(1).setMask(true);
    // Mask only the image as trainable; the network weights are frozen.
    @Nonnull Trainable trainable = new ArrayTrainable(supervised, 1)
        .setVerbose(true)
        .setMask(true, false)
        .setData(Arrays.<Tensor[]>asList(new Tensor[] {image}));
    new IterativeTrainer(trainable)
        .setMonitor(getTrainingMonitor(history, supervised))
        .setOrientation(new QQN())
        .setLineSearchFactory(name -> new ArmijoWolfeSearch())
        .setTimeout(60, TimeUnit.MINUTES)
        .runAndFree();
    return TestUtil.plot(history);
  });
}
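Assuming LinearActivationLayer computes scale * x + bias, the four clamp layers compose to clamp(x) = 255 - relu(255 - relu(x)), which is min(max(x, 0), 255). A quick plain-Java check of the composed function:

// Verify the double ReLU / negation trick clamps to [0, 255].
java.util.function.DoubleUnaryOperator relu = x -> Math.max(x, 0);
java.util.function.DoubleUnaryOperator clamp =
    x -> 255 - relu.applyAsDouble(255 - relu.applyAsDouble(x));
for (double x : new double[] {-10, 100, 300}) {
  System.out.println(x + " -> " + clamp.applyAsDouble(x));  // 0.0, 100.0, 255.0
}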
Use of com.simiacryptus.mindseye.layers.cudnn.ActivationLayer in project MindsEye by SimiaCryptus.
The class ImageClassifier, method deepDream (targeted overload):
/**
 * Deep dream toward a target category: optimizes the input image so that the
 * network's output for the given category approaches targetValue.
 *
 * @param log                 the notebook output log
 * @param image               the image to optimize
 * @param targetCategoryIndex the index of the target category
 * @param totalCategories     the total number of categories
 * @param config              a hook to configure the trainer
 * @param network             the network to dream against
 * @param lossLayer           the loss layer
 * @param targetValue         the target output value for the category
 */
public void deepDream(@Nonnull final NotebookOutput log, final Tensor image, final int targetCategoryIndex,
    final int totalCategories, Function<IterativeTrainer, IterativeTrainer> config, final Layer network,
    final Layer lossLayer, final double targetValue) {
  // Pair the image with a one-hot style target tensor over all categories.
  @Nonnull List<Tensor[]> data = Arrays.<Tensor[]>asList(
      new Tensor[] {image, new Tensor(1, 1, totalCategories).set(targetCategoryIndex, targetValue)});
  log.code(() -> {
    for (Tensor[] tensors : data) {
      ImageClassifier.log.info(log.image(tensors[0].toImage(), "") + tensors[1]);
    }
  });
  log.code(() -> {
    @Nonnull ArrayList<StepRecord> history = new ArrayList<>();
    // Clamp pixel values to [0, 255], as in the single-argument overload.
    @Nonnull PipelineNetwork clamp = new PipelineNetwork(1);
    clamp.add(new ActivationLayer(ActivationLayer.Mode.RELU));
    clamp.add(new LinearActivationLayer().setBias(255).setScale(-1).freeze());
    clamp.add(new ActivationLayer(ActivationLayer.Mode.RELU));
    clamp.add(new LinearActivationLayer().setBias(255).setScale(-1).freeze());
    // Two inputs: the clamped image feeds the frozen network, whose output
    // meets the target tensor at the loss layer.
    @Nonnull PipelineNetwork supervised = new PipelineNetwork(2);
    supervised.wrap(lossLayer,
        supervised.add(network.freeze(), supervised.wrap(clamp, supervised.getInput(0))),
        supervised.getInput(1));
    // TensorList[] gpuInput = data.stream().map(data1 -> {
    //   return CudnnHandle.apply(gpu -> {
    //     Precision precision = Precision.Float;
    //     return CudaTensorList.wrap(gpu.getPtr(TensorArray.wrap(data1), precision, MemoryType.Managed), 1, image.getDimensions(), precision);
    //   });
    // }).toArray(i -> new TensorList[i]);
    // @Nonnull Trainable trainable = new TensorListTrainable(supervised, gpuInput).setVerbosity(1).setMask(true);
    // Mask only the image column as trainable; the target tensor stays fixed.
    @Nonnull Trainable trainable = new ArrayTrainable(supervised, 1)
        .setVerbose(true)
        .setMask(true, false)
        .setData(data);
    config.apply(new IterativeTrainer(trainable)
            .setMonitor(getTrainingMonitor(history, supervised))
            .setOrientation(new QQN())
            .setLineSearchFactory(name -> new ArmijoWolfeSearch())
            .setTimeout(60, TimeUnit.MINUTES))
        .setTerminateThreshold(Double.NEGATIVE_INFINITY)
        .runAndFree();
    return TestUtil.plot(history);
  });
}
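A hypothetical invocation of this overload, steering category 42 of a 1000-class model toward an output of 0.9; the classifier variable, the category index, the shortened timeout, and EntropyLossLayer as the loss are illustrative choices, not fixed by the method:

// Illustrative call; all concrete values here are assumptions.
classifier.deepDream(log, image, 42, 1000,
    trainer -> trainer.setTimeout(10, TimeUnit.MINUTES),  // override the default timeout
    classifier.getNetwork(),
    new EntropyLossLayer(),
    0.9);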
Use of com.simiacryptus.mindseye.layers.cudnn.ActivationLayer in project MindsEye by SimiaCryptus.
The class ArtistryUtil, method getClamp:
/**
 * Builds a network that clamps each value to the range [0, max].
 *
 * @param max the maximum permitted value
 * @return the clamp network
 */
@Nonnull
public static PipelineNetwork getClamp(final int max) {
  @Nonnull PipelineNetwork clamp = new PipelineNetwork(1);
  // relu(x), then max - x, applied twice: composes to min(max(x, 0), max).
  clamp.add(new ActivationLayer(ActivationLayer.Mode.RELU));
  clamp.add(new LinearActivationLayer().setBias(max).setScale(-1).freeze());
  clamp.add(new ActivationLayer(ActivationLayer.Mode.RELU));
  clamp.add(new LinearActivationLayer().setBias(max).setScale(-1).freeze());
  return clamp;
}
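This is the same double ReLU trick used inline by ImageClassifier.deepDream above, factored out with a configurable upper bound. A usage sketch; the upstream layer is hypothetical:

// Constrain an image-producing pipeline to displayable pixel values.
PipelineNetwork pipeline = new PipelineNetwork(1);
pipeline.add(someImageLayer);              // hypothetical upstream layer
pipeline.add(ArtistryUtil.getClamp(255));  // clamp every band to [0, 255]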