Use of com.simiacryptus.mindseye.lang.Layer in project MindsEye by SimiaCryptus: class ImgTileAssemblyLayer, method eval.
@Nonnull
@Override
public Result eval(@Nonnull final Result... inObj) {
// Pin every input Result for the lifetime of this output; released in _free() below.
Arrays.stream(inObj).forEach(nnResult -> nnResult.addRef());
// Each tile is expected to be a 3-dimensional image tensor.
assert 3 == inObj[0].getData().getDimensions().length;
int[] outputDims = getOutputDims(inObj);
// Forward pass: for each item in the batch, paste the rows x columns grid of input
// tiles into a single assembled output tensor at their accumulated (x, y) offsets.
return new Result(TensorArray.wrap(IntStream.range(0, inObj[0].getData().length()).parallel().mapToObj(dataIndex -> {
@Nonnull final Tensor outputData = new Tensor(outputDims);
int totalWidth = 0;
int totalHeight = 0;
int inputIndex = 0;
for (int row = 0; row < rows; row++) {
int positionX = 0;
int rowHeight = 0;
for (int col = 0; col < columns; col++) {
TensorList tileTensor = inObj[inputIndex].getData();
int[] tileDimensions = tileTensor.getDimensions();
// A row is as tall as its tallest tile (dimension [1] is the y-extent here).
rowHeight = Math.max(rowHeight, tileDimensions[1]);
Tensor inputData = tileTensor.get(dataIndex);
// Copy this tile into the output at the current column/row offset.
ImgTileAssemblyLayer.copy(inputData, outputData, positionX, totalHeight);
inputData.freeRef();
positionX += tileDimensions[0];
inputIndex += 1;
}
totalHeight += rowHeight;
// totalWidth is tracked but not used after the loop; getOutputDims(inObj)
// already fixed the output size above.
totalWidth = Math.max(totalWidth, positionX);
}
return outputData;
}).toArray(i -> new Tensor[i])), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList delta) -> {
// Backward pass: replay the same grid layout and carve each tile's region of the
// output delta back into a per-input passback tensor.
int totalHeight = 0;
int inputIndex = 0;
for (int row = 0; row < rows; row++) {
int positionX = 0;
int rowHeight = 0;
for (int col = 0; col < columns; col++) {
Result in = inObj[inputIndex];
int[] inputDataDimensions = in.getData().getDimensions();
rowHeight = Math.max(rowHeight, inputDataDimensions[1]);
if (in.isAlive()) {
// Effectively-final copies of the loop offsets for capture by the lambda below.
int _positionX = positionX;
int _totalHeight = totalHeight;
@Nonnull TensorArray tensorArray = TensorArray.wrap(IntStream.range(0, delta.length()).parallel().mapToObj(dataIndex -> {
@Nullable final Tensor deltaTensor = delta.get(dataIndex);
@Nonnull final Tensor passbackTensor = new Tensor(inputDataDimensions);
// Negative offsets shift the copy window so this tile's region of the
// delta lands at the origin of the passback tensor.
ImgTileAssemblyLayer.copy(deltaTensor, passbackTensor, -_positionX, -_totalHeight);
deltaTensor.freeRef();
return passbackTensor;
}).toArray(i -> new Tensor[i]));
in.accumulate(buffer, tensorArray);
}
positionX += inputDataDimensions[0];
inputIndex += 1;
}
totalHeight += rowHeight;
}
}) {
@Override
protected void _free() {
// Release the references taken at the top of eval().
Arrays.stream(inObj).forEach(nnResult -> nnResult.freeRef());
}
@Override
public boolean isAlive() {
// Gradient can flow if the first input is alive or this layer itself is trainable.
return inObj[0].isAlive() || !isFrozen();
}
};
}
Use of com.simiacryptus.mindseye.lang.Layer in project MindsEye by SimiaCryptus: class L1NormalizationLayer, method eval.
@Nonnull
@Override
public Result eval(@Nonnull final Result... input) {
  // Pin every input Result for the lifetime of this output; released in _free() below.
  Arrays.stream(input).forEach(nnResult -> nnResult.addRef());
  final Result in = input[0];
  final TensorList inData = in.getData();
  inData.addRef();
  // Forward pass: scale each tensor by the reciprocal of its element sum (L1
  // normalization). Degenerate sums (zero or non-finite) pass the input through
  // unchanged rather than producing NaN/Inf output.
  return new Result(TensorArray.wrap(IntStream.range(0, inData.length()).mapToObj(dataIndex -> {
    @Nullable final Tensor value = inData.get(dataIndex);
    try {
      final double sum = value.sum();
      if (!Double.isFinite(sum) || 0 == sum) {
        value.addRef();
        return value;
      } else {
        return value.scale(1.0 / sum);
      }
    } finally {
      value.freeRef();
    }
  }).toArray(i -> new Tensor[i])), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList outDelta) -> {
    if (in.isAlive()) {
      // Backward pass: gradient of y_i = x_i / S is (delta_i - dot(x, delta) / S) / S.
      final Tensor[] passbackArray = IntStream.range(0, outDelta.length()).mapToObj(dataIndex -> {
        Tensor inputTensor = inData.get(dataIndex);
        @Nullable final double[] value = inputTensor.getData();
        Tensor outputTensor = outDelta.get(dataIndex);
        @Nullable final double[] delta = outputTensor.getData();
        final double dot = ArrayUtil.dot(value, delta);
        final double sum = Arrays.stream(value).sum();
        @Nonnull final Tensor passback = new Tensor(outputTensor.getDimensions());
        @Nullable final double[] passbackData = passback.getData();
        // BUG FIX: the guard was `0 != sum || Double.isFinite(sum)`, which is true for
        // sum == 0 (finite, so the right operand holds) and for NaN (NaN != 0 holds),
        // so it never prevented division by zero or NaN propagation — violating the
        // finiteness assertion below. The gradient is only defined for a finite,
        // non-zero sum; otherwise the passback stays all-zero, mirroring the
        // pass-through branch of the forward computation.
        if (0 != sum && Double.isFinite(sum)) {
          for (int i = 0; i < value.length; i++) {
            passbackData[i] = (delta[i] - dot / sum) / sum;
          }
        }
        outputTensor.freeRef();
        inputTensor.freeRef();
        return passback;
      }).toArray(i -> new Tensor[i]);
      assert Arrays.stream(passbackArray).flatMapToDouble(x -> Arrays.stream(x.getData())).allMatch(v -> Double.isFinite(v));
      @Nonnull TensorArray tensorArray = TensorArray.wrap(passbackArray);
      in.accumulate(buffer, tensorArray);
    }
  }) {
    @Override
    protected void _free() {
      // Release the references taken at the top of eval().
      inData.freeRef();
      Arrays.stream(input).forEach(nnResult -> nnResult.freeRef());
    }

    @Override
    public boolean isAlive() {
      return in.isAlive();
    }
  };
}
Use of com.simiacryptus.mindseye.lang.Layer in project MindsEye by SimiaCryptus: class DAGNetwork, method visitLayers.
/**
 * Applies the given visitor to every layer reachable from this network: each
 * registered layer, each layer wrapped inside it (unwrapped level by level), and —
 * when a wrapper chain bottoms out in a nested {@link DAGNetwork} — that network's
 * layers, recursively.
 *
 * @param visitor the callback invoked once per discovered layer
 */
public void visitLayers(@Nonnull final Consumer<Layer> visitor) {
  layersById.values().forEach(layer -> {
    // First, look through any wrapper chain; if a nested network is at the core,
    // recurse into it before visiting this chain's own layers.
    Layer core = layer;
    while (core instanceof WrapperLayer) {
      core = ((WrapperLayer) core).getInner();
    }
    if (core instanceof DAGNetwork) {
      ((DAGNetwork) core).visitLayers(visitor);
    }
    // Then visit the outermost layer followed by each successive inner layer.
    Layer current = layer;
    visitor.accept(current);
    while (current instanceof WrapperLayer) {
      current = ((WrapperLayer) current).getInner();
      visitor.accept(current);
    }
  });
}
Use of com.simiacryptus.mindseye.lang.Layer in project MindsEye by SimiaCryptus: class DAGNetwork, method add.
/**
 * Adds a layer to this network as a new inner node wired to the given head nodes.
 *
 * @param label optional label under which the node is registered in {@code labels}; may be null
 * @param layer the layer to wrap in the new node
 * @param head  the upstream nodes feeding the new node's inputs
 * @return the newly created inner node
 */
public InnerNode add(@Nullable final CharSequence label, @Nonnull final Layer layer, final DAGNode... head) {
assertAlive();
assertConsistent();
assert null != getInput();
@Nonnull final InnerNode node = new InnerNode(this, layer, head);
// Register the layer (taking a reference) unless one with this id is already present.
synchronized (layersById) {
if (!layersById.containsKey(layer.getId())) {
// NOTE(review): given the containsKey guard above, `replaced` should always be
// null inside this synchronized block; the freeRef appears defensive. Confirm.
Layer replaced = layersById.put(layer.getId(), layer);
layer.addRef();
if (null != replaced)
replaced.freeRef();
}
}
// NOTE(review): nodesById and labels are mutated without the lock used for
// layersById — presumably callers serialize network construction; verify.
DAGNode replaced = nodesById.put(node.getId(), node);
if (null != replaced)
replaced.freeRef();
if (null != label) {
labels.put(label, node.getId());
}
assertConsistent();
return node;
}
Use of com.simiacryptus.mindseye.lang.Layer in project MindsEye by SimiaCryptus: class LinearSumConstraintTest, method train.
@Override
public void train(@Nonnull final NotebookOutput log, @Nonnull final Layer network, @Nonnull final Tensor[][] trainingData, final TrainingMonitor monitor) {
  log.code(() -> {
    // Wrap the network under test with an entropy loss head and a sampled trainable.
    @Nonnull final SimpleLossNetwork supervisedNetwork = new SimpleLossNetwork(network, new EntropyLossLayer());
    @Nonnull final Trainable trainable = new SampledArrayTrainable(trainingData, supervisedNetwork, 10000);
    // Every layer's step is constrained by the same linear-sum trust region.
    @Nonnull final TrustRegionStrategy orientation = new TrustRegionStrategy() {
      @Override
      public TrustRegion getRegionPolicy(final Layer layer) {
        return new LinearSumConstraint();
      }
    };
    final IterativeTrainer trainer = new IterativeTrainer(trainable);
    trainer.setIterationsPerSample(100);
    trainer.setMonitor(monitor);
    trainer.setOrientation(orientation);
    trainer.setTimeout(3, TimeUnit.MINUTES);
    trainer.setMaxIterations(500);
    return trainer.runAndFree();
  });
}
Aggregations