Use of com.simiacryptus.util.io.NotebookOutput in project MindsEye by SimiaCryptus:
the class StyleTransfer, method styleTransfer.
/**
 * Style transfer buffered image: iteratively optimizes the pixels of {@code canvasImage}
 * so the result matches the configured content/style targets, logging all inputs,
 * parameters, and training progress to the notebook.
 *
 * @param server the server used to expose live layer handlers (layer handlers are skipped when null)
 * @param log the log receiving images, parameters, and training output
 * @param canvasImage the canvas image used as the optimization starting point
 * @param styleParameters the style parameters (content image, style images, precision)
 * @param trainingMinutes the training minutes (wall-clock timeout for the optimizer)
 * @param measureStyle the pre-measured style/content statistics used to build the fitness network
 * @return the buffered image (the optimized canvas; {@code canvasImage} itself if an exception occurred)
 */
public BufferedImage styleTransfer(final StreamNanoHTTPD server, @Nonnull final NotebookOutput log, final BufferedImage canvasImage, final StyleSetup<T> styleParameters, final int trainingMinutes, final NeuralSetup measureStyle) {
// NOTE(review): logExceptionWithDefault presumably logs any exception thrown by the lambda and
// returns the fallback (canvasImage) instead — confirm against ArtistryUtil.
BufferedImage result = ArtistryUtil.logExceptionWithDefault(log, () -> {
// Echo all inputs into the notebook before training, for reproducibility.
log.p("Input Content:");
log.p(log.image(styleParameters.contentImage, "Content Image"));
log.p("Style Content:");
styleParameters.styleImages.forEach((file, styleImage) -> {
log.p(log.image(styleImage, file));
});
log.p("Input Canvas:");
log.p(log.image(canvasImage, "Input Canvas"));
// Encourage reclamation of image buffers before allocating the training tensor.
System.gc();
Tensor canvas = Tensor.fromRGB(canvasImage);
TestUtil.monitorImage(canvas, false, false);
log.p("Input Parameters:");
log.code(() -> {
return ArtistryUtil.toJson(styleParameters);
});
// Build the fitness network from the measured style; the network's own weights are frozen —
// only the canvas tensor (exposed as masked trainable data below) is optimized.
Trainable trainable = log.code(() -> {
PipelineNetwork network = fitnessNetwork(measureStyle);
network.setFrozen(true);
ArtistryUtil.setPrecision(network, styleParameters.precision);
TestUtil.instrumentPerformance(network);
if (null != server)
ArtistryUtil.addLayersHandler(network, server);
// setMask(true) makes the input data (the canvas) the variable being optimized.
return new ArrayTrainable(network, 1).setVerbose(true).setMask(true).setData(Arrays.asList(new Tensor[][] { { canvas } }));
});
log.code(() -> {
@Nonnull ArrayList<StepRecord> history = new ArrayList<>();
// Trust region constrains optimized values to [1e-2, 256] (a pixel-like range) for every layer;
// NEGATIVE_INFINITY threshold means only the timeout terminates training.
new IterativeTrainer(trainable).setMonitor(TestUtil.getMonitor(history)).setOrientation(new TrustRegionStrategy() {
@Override
public TrustRegion getRegionPolicy(final Layer layer) {
return new RangeConstraint().setMin(1e-2).setMax(256);
}
}).setIterationsPerSample(100).setLineSearchFactory(name -> new BisectionSearch().setSpanTol(1e-1).setCurrentRate(1e6)).setTimeout(trainingMinutes, TimeUnit.MINUTES).setTerminateThreshold(Double.NEGATIVE_INFINITY).runAndFree();
return TestUtil.plot(history);
});
return canvas.toImage();
}, canvasImage);
log.p("Output Canvas:");
log.p(log.image(result, "Output Canvas"));
return result;
}
Use of com.simiacryptus.util.io.NotebookOutput in project MindsEye by SimiaCryptus:
the class CudaLayerTester, method testNonstandardBoundsBackprop.
/**
 * Test nonstandard bounds backprop tolerance statistics: evaluates the layer once with a
 * standard dense backprop delta (control) and once with an irregularly-laid-out CUDA delta
 * tensor (test), then compares outputs and derivatives between the two runs.
 *
 * @param log the log
 * @param layer the layer under test
 * @param inputPrototype the input prototype tensors, randomized before evaluation
 * @return the tolerance statistics combining the output and derivative comparisons
 */
@Nonnull
public ToleranceStatistics testNonstandardBoundsBackprop(final NotebookOutput log, @Nullable final Layer layer, @Nonnull final Tensor[] inputPrototype) {
log.h2("Irregular Backprop");
log.p("This layer should accept non-dense tensors as delta input.");
return log.code(() -> {
// Replace every prototype value with a fresh random value; both runs share this same input data.
Tensor[] randomized = Arrays.stream(inputPrototype).map(x -> x.map(v -> getRandom())).toArray(i -> new Tensor[i]);
logger.info("Input: " + Arrays.stream(randomized).map(Tensor::prettyPrint).collect(Collectors.toList()));
Precision precision = Precision.Double;
TensorList[] controlInput = Arrays.stream(randomized).map(original -> {
return TensorArray.wrap(original);
}).toArray(i -> new TensorList[i]);
// Test run: override getFeedback so the backprop delta arrives as an irregular CUDA tensor.
@Nonnull final SimpleResult testResult = CudaSystem.run(gpu -> {
TensorList[] copy = copy(controlInput);
SimpleResult result = new SimpleGpuEval(layer, gpu, copy) {
@Nonnull
@Override
public TensorList getFeedback(@Nonnull final TensorList original) {
// Build an all-ones delta with the same dimensions as the original feedback tensor.
Tensor originalTensor = original.get(0).mapAndFree(x -> 1);
CudaTensorList cudaTensorList = buildIrregularCudaTensor(gpu, precision, originalTensor);
originalTensor.freeRef();
return cudaTensorList;
}
}.call();
// Release the per-run input copies once evaluation has completed.
Arrays.stream(copy).forEach(ReferenceCounting::freeRef);
return result;
});
// Control run: standard dense evaluation. The trailing 1 presumably selects a GPU/context index — confirm.
@Nonnull final SimpleResult controlResult = CudaSystem.run(gpu -> {
TensorList[] copy = copy(controlInput);
SimpleResult result = SimpleGpuEval.run(layer, gpu, copy);
Arrays.stream(copy).forEach(ReferenceCounting::freeRef);
return result;
}, 1);
try {
ToleranceStatistics compareOutput = compareOutput(controlResult, testResult);
ToleranceStatistics compareDerivatives = compareDerivatives(controlResult, testResult);
return compareDerivatives.combine(compareOutput);
} finally {
// Release reference-counted inputs and results regardless of comparison outcome.
Arrays.stream(controlInput).forEach(ReferenceCounting::freeRef);
controlResult.freeRef();
testResult.freeRef();
}
});
}
Use of com.simiacryptus.util.io.NotebookOutput in project MindsEye by SimiaCryptus:
the class CudaLayerTester, method testInterGpu.
/**
 * Test inter gpu tolerance statistics: creates CUDA inputs in one GPU context and then
 * evaluates the layer in a different context, comparing the result against an evaluation
 * fed directly from heap-resident inputs.
 *
 * @param log the log
 * @param reference the reference layer under test
 * @param inputPrototype the input prototype tensors, randomized per batch element
 * @return the tolerance statistics combining the output and derivative comparisons
 */
@Nonnull
public ToleranceStatistics testInterGpu(final NotebookOutput log, @Nullable final Layer reference, @Nonnull final Tensor[] inputPrototype) {
log.h2("Multi-GPU Compatibility");
log.p("This layer should be able to apply using a GPU context other than the one used to create the inputs.");
return log.code(() -> {
// One randomized batch (size getBatchSize()) per prototype tensor, held on the heap.
final TensorList[] heapInput = Arrays.stream(inputPrototype).map(t -> TensorArray.wrap(IntStream.range(0, getBatchSize()).mapToObj(i -> t.map(v -> getRandom())).toArray(i -> new Tensor[i]))).toArray(i -> new TensorList[i]);
// Log inputs; each streamed tensor is released after pretty-printing (stream() presumably
// hands out counted references — confirm against TensorList).
logger.info("Input: " + Arrays.stream(heapInput).flatMap(x -> x.stream()).map(tensor -> {
String prettyPrint = tensor.prettyPrint();
tensor.freeRef();
return prettyPrint;
}).collect(Collectors.toList()));
// Materialize the same data as CUDA tensors in context 0 (the index argument below).
TensorList[] gpuInput = CudaSystem.run(gpu -> {
return Arrays.stream(heapInput).map(original -> {
return CudaTensorList.wrap(gpu.getTensor(original, Precision.Double, MemoryType.Managed, false), original.length(), original.getDimensions(), Precision.Double);
}).toArray(i -> new TensorList[i]);
}, 0);
// Both evaluations run in context 1 — i.e. NOT the context that created gpuInput.
@Nonnull final SimpleResult fromHeap = CudaSystem.run(gpu -> {
return SimpleGpuEval.run(reference, gpu, heapInput);
}, 1);
@Nonnull final SimpleResult fromGPU = CudaSystem.run(gpu -> {
return SimpleGpuEval.run(reference, gpu, gpuInput);
}, 1);
try {
ToleranceStatistics compareOutput = compareOutput(fromHeap, fromGPU);
ToleranceStatistics compareDerivatives = compareDerivatives(fromHeap, fromGPU);
return compareDerivatives.combine(compareOutput);
} finally {
// Release reference-counted inputs and results regardless of comparison outcome.
Arrays.stream(gpuInput).forEach(ReferenceCounting::freeRef);
Arrays.stream(heapInput).forEach(x -> x.freeRef());
fromGPU.freeRef();
fromHeap.freeRef();
}
});
}
Use of com.simiacryptus.util.io.NotebookOutput in project MindsEye by SimiaCryptus:
the class SerializationTest, method test.
/**
 * Demonstrates and verifies the layer's JSON serialization round-trip, then — when the raw
 * JSON is unavailable or larger than 64 KiB — writes zip-compressed models at every
 * {@code SerialPrecision} in parallel and verifies each of those round-trips as well.
 *
 * @param log the log receiving the JSON dump and zipfile links
 * @param layer the layer under test
 * @param inputPrototype unused by this test
 * @return always null; failures surface as AssertionErrors or logged stack traces
 */
@Nullable
@Override
public ToleranceStatistics test(@Nonnull final NotebookOutput log, @Nonnull final Layer layer, final Tensor... inputPrototype) {
log.h1("Serialization");
log.p("This apply will demonstrate the layer's JSON serialization, and verify deserialization integrity.");
String prettyPrint = "";
log.h2("Raw Json");
try {
prettyPrint = log.code(() -> {
final JsonObject json = layer.getJson();
@Nonnull final Layer echo = Layer.fromJson(json);
if (echo == null)
throw new AssertionError("Failed to deserialize");
// Round-trip must produce a distinct-but-equal instance.
if (layer == echo)
throw new AssertionError("Serialization did not copy");
if (!layer.equals(echo))
throw new AssertionError("Serialization not equal");
echo.freeRef();
return new GsonBuilder().setPrettyPrinting().create().toJson(json);
});
@Nonnull String filename = layer.getClass().getSimpleName() + "_" + log.getName() + ".json";
log.p(log.file(prettyPrint, filename, String.format("Wrote Model to %s; %s characters", filename, prettyPrint.length())));
} catch (RuntimeException | OutOfMemoryError e) {
// Serialization problems are reported but do not abort the test; the brief pause
// gives the system a moment to recover before continuing.
e.printStackTrace();
Util.sleep(1000);
}
log.p("");
@Nonnull Object outSync = new Object();
// Only exercise the zip path when the raw JSON was unavailable or too large (> 64 KiB).
if (prettyPrint.isEmpty() || prettyPrint.length() > 1024 * 64)
Arrays.stream(SerialPrecision.values()).parallel().forEach(precision -> {
try {
@Nonnull File file = new File(log.getResourceDir(), log.getName() + "_" + precision.name() + ".zip");
layer.writeZip(file, precision);
@Nonnull final Layer echo = Layer.fromZip(new ZipFile(file));
getModels().put(precision, echo);
// Serialize notebook writes across the parallel per-precision workers.
synchronized (outSync) {
log.h2(String.format("Zipfile %s", precision.name()));
log.p(log.link(file, String.format("Wrote Model apply %s precision to %s; %.3fMiB bytes", precision, file.getName(), file.length() * 1.0 / (0x100000))));
}
if (!isPersist())
file.delete();
if (echo == null)
throw new AssertionError("Failed to deserialize");
if (layer == echo)
throw new AssertionError("Serialization did not copy");
if (!layer.equals(echo))
throw new AssertionError("Serialization not equal");
} catch (RuntimeException | OutOfMemoryError | IOException e) {
// Per-precision failures (including ZipException, an IOException subtype) are
// isolated so the remaining precisions still run.
e.printStackTrace();
}
});
return null;
}
Use of com.simiacryptus.util.io.NotebookOutput in project MindsEye by SimiaCryptus:
the class StandardLayerTests, method run.
/**
 * Test: runs the full standard battery against the layer. Renders a network diagram when
 * the small layer is (or explodes into) a DAG network, runs the standard tests, drills into
 * per-node subtests when failures were collected, rethrows the collected failures inside a
 * logged code block, and finally runs the final tests against a fresh copy of the
 * large-dimension layer.
 *
 * @param log the log
 */
public void run(@Nonnull final NotebookOutput log) {
// A single shared seed keeps the small/large layer instances deterministic and comparable.
long seed = (long) (Math.random() * Long.MAX_VALUE);
int[][] smallDims = getSmallDims(new Random(seed));
final Layer smallLayer = getLayer(smallDims, new Random(seed));
int[][] largeDims = getLargeDims(new Random(seed));
final Layer largeLayer = getLayer(largeDims, new Random(seed));
try {
if (smallLayer instanceof DAGNetwork) {
try {
log.h1("Network Diagram");
log.p("This is a network apply the following layout:");
log.code(() -> {
return Graphviz.fromGraph(TestUtil.toGraph((DAGNetwork) smallLayer)).height(400).width(600).render(Format.PNG).toImage();
});
} catch (Throwable e) {
// Diagram rendering is best-effort; Graphviz failures must not fail the run.
logger.info("Error plotting graph", e);
}
} else if (smallLayer instanceof Explodable) {
try {
Layer explode = ((Explodable) smallLayer).explode();
if (explode instanceof DAGNetwork) {
log.h1("Exploded Network Diagram");
log.p("This is a network apply the following layout:");
@Nonnull DAGNetwork network = (DAGNetwork) explode;
log.code(() -> {
@Nonnull Graphviz graphviz = Graphviz.fromGraph(TestUtil.toGraph(network)).height(400).width(600);
@Nonnull File file = new File(log.getResourceDir(), log.getName() + "_network.svg");
graphviz.render(Format.SVG_STANDALONE).toFile(file);
log.link(file, "Saved to File");
return graphviz.render(Format.SVG).toString();
});
}
} catch (Throwable e) {
logger.info("Error plotting graph", e);
}
}
@Nonnull ArrayList<TestError> exceptions = standardTests(log, seed);
if (!exceptions.isEmpty()) {
// On failure, re-run the batteries per inner invocation to localize the problem node.
if (smallLayer instanceof DAGNetwork) {
for (@Nonnull Invocation invocation : getInvocations(smallLayer, smallDims)) {
log.h1("Small SubTests: " + invocation.getLayer().getClass().getSimpleName());
log.p(Arrays.deepToString(invocation.getDims()));
tests(log, getLittleTests(), invocation, exceptions);
invocation.freeRef();
}
}
if (largeLayer instanceof DAGNetwork) {
// NOTE(review): disables equivalency checking for the large sub-test pass — confirm intent.
testEquivalency = false;
for (@Nonnull Invocation invocation : getInvocations(largeLayer, largeDims)) {
log.h1("Large SubTests: " + invocation.getLayer().getClass().getSimpleName());
log.p(Arrays.deepToString(invocation.getDims()));
tests(log, getBigTests(), invocation, exceptions);
invocation.freeRef();
}
}
}
// Surface every collected failure; throwException presumably aborts here if any exist.
log.code(() -> {
throwException(exceptions);
});
} finally {
smallLayer.freeRef();
largeLayer.freeRef();
}
// Final tests run against a fresh layer instance (same seed) and a working copy of it.
getFinalTests().stream().filter(x -> null != x).forEach(test -> {
final Layer perfLayer;
perfLayer = getLayer(largeDims, new Random(seed));
perfLayer.assertAlive();
@Nonnull Layer copy;
copy = perfLayer.copy();
Tensor[] randomize = randomize(largeDims);
try {
test.test(log, copy, randomize);
} finally {
// Release the test, the input tensors, and both layer references even if the test throws.
test.freeRef();
for (@Nonnull Tensor tensor : randomize) {
tensor.freeRef();
}
perfLayer.freeRef();
copy.freeRef();
}
});
}
Aggregations