Use of com.simiacryptus.mindseye.lang.TensorList in project MindsEye by SimiaCryptus.
Class CudaLayerTester, method compareOutput.
/**
* Compare the expected and actual output, returning tolerance statistics.
*
* @param expectedOutput the expected output
* @param actualOutput the actual output
* @return the tolerance statistics
*/
@Nonnull
public ToleranceStatistics compareOutput(final TensorList expectedOutput, final TensorList actualOutput) {
  @Nonnull final ToleranceStatistics outputAgreement = IntStream.range(0, getBatchSize()).mapToObj(batch -> {
    Tensor a = expectedOutput.get(batch);
    Tensor b = actualOutput.get(batch);
    ToleranceStatistics statistics = new ToleranceStatistics().accumulate(a.getData(), b.getData());
    a.freeRef();
    b.freeRef();
    return statistics;
  }).reduce((a, b) -> a.combine(b)).get();
  if (!(outputAgreement.absoluteTol.getMax() < tolerance)) {
    logger.info("Expected Output: " + expectedOutput.stream().map(x -> {
      String str = x.prettyPrint();
      x.freeRef();
      return str;
    }).collect(Collectors.toList()));
    logger.info("Actual Output: " + actualOutput.stream().map(x -> {
      String str = x.prettyPrint();
      x.freeRef();
      return str;
    }).collect(Collectors.toList()));
    throw new AssertionError("Output Corrupt: " + outputAgreement);
  }
  return outputAgreement;
}
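For orientation, a minimal usage sketch (not from the MindsEye sources): it assumes a CudaLayerTester instance named tester whose batch size is 2, and uses only calls shown above (the Tensor varargs constructor, TensorArray.wrap, compareOutput, freeRef).

  // Hypothetical sketch: compare two identical two-element batches.
  // 'tester' is an assumed CudaLayerTester with getBatchSize() == 2.
  Tensor[] expected = {new Tensor(1.0, 2.0), new Tensor(3.0, 4.0)};
  Tensor[] actual = {new Tensor(1.0, 2.0), new Tensor(3.0, 4.0)};
  TensorList expectedList = TensorArray.wrap(expected);
  TensorList actualList = TensorArray.wrap(actual);
  ToleranceStatistics stats = tester.compareOutput(expectedList, actualList);
  // Identical data should give a max absolute error of 0, well under any tolerance.
  logger.info("Max absolute error: " + stats.absoluteTol.getMax());
  expectedList.freeRef();
  actualList.freeRef();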
Use of com.simiacryptus.mindseye.lang.TensorList in project MindsEye by SimiaCryptus.
Class CudaLayerTester, method testInterGpu.
/**
* Test inter-GPU compatibility, returning tolerance statistics.
*
* @param log the log
* @param reference the reference
* @param inputPrototype the input prototype
* @return the tolerance statistics
*/
@Nonnull
public ToleranceStatistics testInterGpu(final NotebookOutput log, @Nullable final Layer reference, @Nonnull final Tensor[] inputPrototype) {
  log.h2("Multi-GPU Compatibility");
  log.p("This layer should be able to evaluate using a GPU context other than the one used to create the inputs.");
  return log.code(() -> {
    final TensorList[] heapInput = Arrays.stream(inputPrototype)
      .map(t -> TensorArray.wrap(IntStream.range(0, getBatchSize())
        .mapToObj(i -> t.map(v -> getRandom()))
        .toArray(i -> new Tensor[i])))
      .toArray(i -> new TensorList[i]);
    logger.info("Input: " + Arrays.stream(heapInput).flatMap(x -> x.stream()).map(tensor -> {
      String prettyPrint = tensor.prettyPrint();
      tensor.freeRef();
      return prettyPrint;
    }).collect(Collectors.toList()));
    TensorList[] gpuInput = CudaSystem.run(gpu -> {
      return Arrays.stream(heapInput).map(original -> {
        return CudaTensorList.wrap(gpu.getTensor(original, Precision.Double, MemoryType.Managed, false), original.length(), original.getDimensions(), Precision.Double);
      }).toArray(i -> new TensorList[i]);
    }, 0);
    @Nonnull final SimpleResult fromHeap = CudaSystem.run(gpu -> {
      return SimpleGpuEval.run(reference, gpu, heapInput);
    }, 1);
    @Nonnull final SimpleResult fromGPU = CudaSystem.run(gpu -> {
      return SimpleGpuEval.run(reference, gpu, gpuInput);
    }, 1);
    try {
      ToleranceStatistics compareOutput = compareOutput(fromHeap, fromGPU);
      ToleranceStatistics compareDerivatives = compareDerivatives(fromHeap, fromGPU);
      return compareDerivatives.combine(compareOutput);
    } finally {
      Arrays.stream(gpuInput).forEach(ReferenceCounting::freeRef);
      Arrays.stream(heapInput).forEach(x -> x.freeRef());
      fromGPU.freeRef();
      fromHeap.freeRef();
    }
  });
}
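A hedged sketch of driving this check from a test harness; log, layer, and tester are assumed to be in scope (a NotebookOutput, a Layer, and a CudaLayerTester respectively), and the input dimensions are arbitrary illustrations.

  // Hypothetical harness for the multi-GPU check above.
  Tensor[] inputPrototype = {new Tensor(3, 3, 1)};  // illustrative dimensions
  ToleranceStatistics stats = tester.testInterGpu(log, layer, inputPrototype);
  // If heap-backed and GPU-backed inputs disagreed beyond the tolerance,
  // compareOutput/compareDerivatives would already have thrown an AssertionError.
  logger.info("Inter-GPU agreement: " + stats);
  Arrays.stream(inputPrototype).forEach(ReferenceCounting::freeRef);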
Use of com.simiacryptus.mindseye.lang.TensorList in project MindsEye by SimiaCryptus.
Class CudaLayerTester, method compareInputDerivatives.
/**
* Compare the expected and actual input derivatives, returning tolerance statistics.
*
* @param expected the expected
* @param actual the actual
* @return the tolerance statistics
*/
@Nonnull
public ToleranceStatistics compareInputDerivatives(final SimpleResult expected, final SimpleResult actual) {
  @Nonnull final ToleranceStatistics derivativeAgreement = IntStream.range(0, getBatchSize()).mapToObj(batch -> {
    @Nonnull IntFunction<ToleranceStatistics> compareInputDerivative = input -> {
      Tensor b = actual.getInputDerivative()[input].get(batch);
      Tensor a = expected.getInputDerivative()[input].get(batch);
      ToleranceStatistics statistics = new ToleranceStatistics().accumulate(a.getData(), b.getData());
      a.freeRef();
      b.freeRef();
      return statistics;
    };
    return IntStream.range(0, expected.getOutput().length()).mapToObj(compareInputDerivative).reduce((a, b) -> a.combine(b)).get();
  }).reduce((a, b) -> a.combine(b)).get();
  if (!(derivativeAgreement.absoluteTol.getMax() < tolerance)) {
    logger.info("Expected Derivative: " + Arrays.stream(expected.getInputDerivative()).flatMap(TensorList::stream).map(x -> {
      String str = x.prettyPrint();
      x.freeRef();
      return str;
    }).collect(Collectors.toList()));
    logger.info("Actual Derivative: " + Arrays.stream(actual.getInputDerivative()).flatMap(TensorList::stream).map(x -> {
      String str = x.prettyPrint();
      x.freeRef();
      return str;
    }).collect(Collectors.toList()));
    throw new AssertionError("Input Derivatives Corrupt: " + derivativeAgreement);
  }
  return derivativeAgreement;
}
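As a self-consistency sketch, the same layer can be evaluated twice on one heap input and the resulting input derivatives compared with themselves; this reuses only calls shown in testInterGpu above, with layer, heapInput, and tester assumed to be in scope.

  // Hypothetical: two evaluations of the same layer on the same input
  // should produce identical input derivatives.
  SimpleResult expected = CudaSystem.run(gpu -> SimpleGpuEval.run(layer, gpu, heapInput), 1);
  SimpleResult actual = CudaSystem.run(gpu -> SimpleGpuEval.run(layer, gpu, heapInput), 1);
  try {
    ToleranceStatistics stats = tester.compareInputDerivatives(expected, actual);
    logger.info("Derivative agreement: " + stats);
  } finally {
    expected.freeRef();
    actual.freeRef();
  }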
Use of com.simiacryptus.mindseye.lang.TensorList in project MindsEye by SimiaCryptus.
Class TrainingTester, method testInputLearning.
/**
* Test input learning.
*
* @param log the log
* @param component the component
* @param random the random
* @param inputPrototype the input prototype
* @return the test result
*/
public TestResult testInputLearning(@Nonnull final NotebookOutput log, @Nonnull final Layer component, final Random random, @Nonnull final Tensor[] inputPrototype) {
  @Nonnull final Layer network = shuffle(random, component.copy()).freeze();
  final Tensor[][] input_target = shuffleCopy(random, inputPrototype);
  log.p("In this test, we use a network to learn this target input, given its pre-evaluated output:");
  log.code(() -> {
    return Arrays.stream(input_target).flatMap(x -> Arrays.stream(x)).map(x -> x.prettyPrint()).reduce((a, b) -> a + "\n" + b).orElse("");
  });
  Result[] array = ConstantResult.batchResultArray(input_target);
  @Nullable Result eval = network.eval(array);
  TensorList result = eval.getData();
  final Tensor[] output_target = result.stream().toArray(i -> new Tensor[i]);
  result.freeRef();
  eval.freeRef();
  if (output_target.length != getBatches()) {
    logger.info(String.format("Meta layers not supported. %d != %d", output_target.length, getBatches()));
    return null;
  }
  for (@Nonnull Result nnResult : array) {
    nnResult.getData().freeRef();
    nnResult.freeRef();
  }
  for (@Nonnull Tensor[] tensors : input_target) {
    for (@Nonnull Tensor tensor : tensors) {
      tensor.freeRef();
    }
  }
  // if (output_target.length != inputPrototype.length) return null;
  Tensor[][] trainingInput = append(shuffleCopy(random, inputPrototype), output_target);
  @Nonnull TestResult testResult = trainAll("Input Convergence", log, trainingInput, network, buildMask(inputPrototype.length));
  Arrays.stream(trainingInput).flatMap(x -> Arrays.stream(x)).forEach(x -> x.freeRef());
  return testResult;
}
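A sketch of invoking this test; the TrainingTester default constructor, the layer, and the input dimensions are assumptions for illustration (log is a NotebookOutput already in scope).

  // Hypothetical: exercise input learning against some trainable 'layer'.
  TrainingTester trainer = new TrainingTester();  // assumed default constructor
  Tensor[] inputPrototype = {new Tensor(5)};      // illustrative dimensions
  TestResult result = trainer.testInputLearning(log, layer, new Random(42), inputPrototype);
  if (result == null) {
    logger.info("Output batch count did not match getBatches(); meta layers are not supported.");
  }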
Use of com.simiacryptus.mindseye.lang.TensorList in project MindsEye by SimiaCryptus.
Class TrainingTester, method testCompleteLearning.
/**
* Test complete learning, returning the test result.
*
* @param log the log
* @param component the component
* @param random the random
* @param inputPrototype the input prototype
* @return the test result
*/
@Nonnull
public TestResult testCompleteLearning(@Nonnull final NotebookOutput log, @Nonnull final Layer component, final Random random, @Nonnull final Tensor[] inputPrototype) {
  @Nonnull final Layer network_target = shuffle(random, component.copy()).freeze();
  final Tensor[][] input_target = shuffleCopy(random, inputPrototype);
  log.p("In this test, we attempt to train a network to emulate a randomized network, given an example input/output pair. The target state is:");
  log.code(() -> {
    return network_target.state().stream().map(Arrays::toString).reduce((a, b) -> a + "\n" + b).orElse("");
  });
  log.p("We simultaneously regress this target input:");
  log.code(() -> {
    return Arrays.stream(input_target).flatMap(x -> Arrays.stream(x)).map(x -> x.prettyPrint()).reduce((a, b) -> a + "\n" + b).orElse("");
  });
  log.p("Which produces the following output:");
  Result[] inputs = ConstantResult.batchResultArray(input_target);
  Result eval = network_target.eval(inputs);
  network_target.freeRef();
  Arrays.stream(inputs).forEach(ReferenceCounting::freeRef);
  TensorList result = eval.getData();
  eval.freeRef();
  final Tensor[] output_target = result.stream().toArray(i -> new Tensor[i]);
  log.code(() -> {
    return Stream.of(output_target).map(x -> x.prettyPrint()).reduce((a, b) -> a + "\n" + b).orElse("");
  });
  // if (output_target.length != inputPrototype.length) return null;
  return trainAll("Integrated Convergence", log, append(shuffleCopy(random, inputPrototype), output_target), shuffle(random, component.copy()), buildMask(inputPrototype.length));
}
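The same harness extends naturally to the integrated test. Note the difference visible in the code: testInputLearning freezes the network copy and regresses only the inputs, while testCompleteLearning also trains an unfrozen copy of the component alongside the inputs. A parallel sketch, under the same assumptions as above:

  // Hypothetical: integrated convergence, regressing weights and inputs together.
  TestResult complete = trainer.testCompleteLearning(log, layer, new Random(42), inputPrototype);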