Use of com.simiacryptus.util.lang.Tuple2 in project MindsEye by SimiaCryptus:
class MaxPoolingLayer, method eval().
@Nonnull
@Override
public Result eval(@Nonnull final Result... inObj) {
// Retain every input for the lifetime of the returned Result; released in _free() below.
Arrays.stream(inObj).forEach(nnResult -> nnResult.addRef());
final Result in = inObj[0];
// NOTE(review): return value is discarded — presumably forces/validates the backing data; confirm whether this call is needed.
in.getData().length();
@Nonnull final int[] inputDims = in.getData().getDimensions();
// Cached map from each output cell index to the array of input indices in its pooling window.
final List<Tuple2<Integer, int[]>> regions = MaxPoolingLayer.calcRegionsCache.apply(new MaxPoolingLayer.CalcRegionsParameter(inputDims, kernelDims));
// Allocate one output tensor per batch item; each output dimension is ceil(inputDim / kernelDim).
final Tensor[] outputA = IntStream.range(0, in.getData().length()).mapToObj(dataIndex -> {
final int[] newDims = IntStream.range(0, inputDims.length).map(i -> {
return (int) Math.ceil(inputDims[i] * 1.0 / kernelDims[i]);
}).toArray();
@Nonnull final Tensor output = new Tensor(newDims);
return output;
}).toArray(i -> new Tensor[i]);
// NOTE(review): the sum is discarded — this statement appears to have no effect; confirm whether it can be removed.
Arrays.stream(outputA).mapToInt(x -> x.length()).sum();
// gradientMapA[dataIndex][outputIndex] records which input index won the max, reused by the backward pass.
@Nonnull final int[][] gradientMapA = new int[in.getData().length()][];
IntStream.range(0, in.getData().length()).forEach(dataIndex -> {
@Nullable final Tensor input = in.getData().get(dataIndex);
final Tensor output = outputA[dataIndex];
@Nonnull final IntToDoubleFunction keyExtractor = inputCoords -> input.get(inputCoords);
@Nonnull final int[] gradientMap = new int[input.length()];
// For each output cell, select the maximum-valued input index within its window.
// Each region writes to a distinct 'from' index, so the parallel forEach does not race on gradientMap/output.
regions.parallelStream().forEach(tuple -> {
final Integer from = tuple.getFirst();
final int[] toList = tuple.getSecond();
int toMax = -1;
double bestValue = Double.NEGATIVE_INFINITY;
for (final int c : toList) {
final double value = keyExtractor.applyAsDouble(c);
// The -1 check guarantees a winner is chosen even if every candidate equals -Infinity.
if (-1 == toMax || bestValue < value) {
bestValue = value;
toMax = c;
}
}
gradientMap[from] = toMax;
output.set(from, input.get(toMax));
});
input.freeRef();
gradientMapA[dataIndex] = gradientMap;
});
// Backward pass: route each output delta to the input index that won the max in the forward pass.
return new Result(TensorArray.wrap(outputA), (@Nonnull final DeltaSet<Layer> buffer, @Nonnull final TensorList data) -> {
if (in.isAlive()) {
@Nonnull TensorArray tensorArray = TensorArray.wrap(IntStream.range(0, in.getData().length()).parallel().mapToObj(dataIndex -> {
@Nonnull final Tensor backSignal = new Tensor(inputDims);
final int[] ints = gradientMapA[dataIndex];
@Nullable final Tensor datum = data.get(dataIndex);
for (int i = 0; i < datum.length(); i++) {
backSignal.add(ints[i], datum.get(i));
}
datum.freeRef();
return backSignal;
}).toArray(i -> new Tensor[i]));
in.accumulate(buffer, tensorArray);
}
}) {
@Override
protected void _free() {
// Release the references taken at the top of eval().
Arrays.stream(inObj).forEach(nnResult -> nnResult.freeRef());
}
@Override
public boolean isAlive() {
return in.isAlive();
}
};
}
Use of com.simiacryptus.util.lang.Tuple2 in project MindsEye by SimiaCryptus:
class DeepDream, method getContentComponents().
/**
 * Builds the weighted loss terms for each configured layer: a content-matching
 * term weighted by the layer's rms coefficient, and a negated gain term over the
 * average of squared activations.
 *
 * @param setup the setup
 * @param nodeMap the node map
 * @return the content components
 */
@Nonnull
public ArrayList<Tuple2<Double, DAGNode>> getContentComponents(NeuralSetup<T> setup, final Map<T, DAGNode> nodeMap) {
ArrayList<Tuple2<Double, DAGNode>> components = new ArrayList<>();
for (final T layerType : getLayerTypes()) {
if (!setup.style.coefficients.containsKey(layerType)) continue;
final DAGNode node = nodeMap.get(layerType);
DAGNetwork network = node.getNetwork();
// Mean-squared distance between this layer's activations and the captured content target.
final double coeff_content = setup.style.coefficients.get(layerType).rms;
components.add(new Tuple2<>(coeff_content, network.wrap(new MeanSqLossLayer(), node, network.wrap(new ValueLayer(setup.contentTarget.content.get(layerType))))));
// Gain term: negative weight on the average of squared activations.
final double coeff_gain = setup.style.coefficients.get(layerType).gain;
components.add(new Tuple2<>(-coeff_gain, network.wrap(new AvgReducerLayer(), network.wrap(new SquareActivationLayer(), node))));
}
return components;
}
Use of com.simiacryptus.util.lang.Tuple2 in project MindsEye by SimiaCryptus:
class StyleTransfer, method getStyleComponents().
/**
 * Builds the weighted style-loss terms (covariance and mean matching) for one layer.
 * Both loss scales are normalized by the target tensor's RMS, falling back to 1 when
 * the RMS is zero.
 *
 * @param node the node
 * @param network the network
 * @param styleParams the style params
 * @param mean the mean
 * @param covariance the covariance
 * @param centeringMode the centering mode
 * @return the style components
 */
@Nonnull
public ArrayList<Tuple2<Double, DAGNode>> getStyleComponents(final DAGNode node, final PipelineNetwork network, final LayerStyleParams styleParams, final Tensor mean, final Tensor covariance, final CenteringMode centeringMode) {
ArrayList<Tuple2<Double, DAGNode>> terms = new ArrayList<>();
if (null != styleParams && (styleParams.cov != 0 || styleParams.mean != 0)) {
double rmsOfMean = mean.rms();
double meanAlpha = 0 == rmsOfMean ? 1 : (1.0 / rmsOfMean);
// Both nodes are created up front (in this order) regardless of which terms end up using them.
InnerNode negativeTarget = network.wrap(new ValueLayer(mean.scale(-1)), new DAGNode[] {});
InnerNode negativeAverage = network.wrap(new BandAvgReducerLayer().setAlpha(-1), node);
if (styleParams.cov != 0) {
// Re-center the activations according to the requested centering mode before the Gramian.
DAGNode recentered;
if (CenteringMode.Origin == centeringMode) {
recentered = node;
} else if (CenteringMode.Dynamic == centeringMode) {
recentered = network.wrap(new GateBiasLayer(), node, negativeAverage);
} else if (CenteringMode.Static == centeringMode) {
recentered = network.wrap(new GateBiasLayer(), node, negativeTarget);
} else {
throw new RuntimeException();
}
int[] covarianceDims = covariance.getDimensions();
assert 0 < covarianceDims[2] : Arrays.toString(covarianceDims);
int bandsIn = mean.getDimensions()[2];
assert 0 < bandsIn : Arrays.toString(mean.getDimensions());
int bandsOut = covarianceDims[2] / bandsIn;
assert 0 < bandsOut : Arrays.toString(covarianceDims) + " / " + bandsIn;
double rmsOfCov = covariance.rms();
double covAlpha = 0 == rmsOfCov ? 1 : (1.0 / rmsOfCov);
terms.add(new Tuple2<>(styleParams.cov, network.wrap(new MeanSqLossLayer().setAlpha(covAlpha), network.wrap(new ValueLayer(covariance), new DAGNode[] {}), network.wrap(ArtistryUtil.wrapTilesAvg(new GramianLayer()), recentered))));
}
if (styleParams.mean != 0) {
terms.add(new Tuple2<>(styleParams.mean, network.wrap(new MeanSqLossLayer().setAlpha(meanAlpha), negativeAverage, negativeTarget)));
}
}
return terms;
}
Use of com.simiacryptus.util.lang.Tuple2 in project MindsEye by SimiaCryptus:
class PerformanceTester, method testPerformance().
/**
 * Measures the wall-clock time of a forward pass (eval) and a backward pass
 * (accumulate with unit deltas) over a batch built by repeating the prototype.
 *
 * @param component the component
 * @param inputPrototype the input prototype
 * @return tuple of (forward time in seconds, backward time in seconds)
 */
@Nonnull
protected Tuple2<Double, Double> testPerformance(@Nonnull final Layer component, final Tensor... inputPrototype) {
// Repeat the prototype row 'batches' times to form the timed batch.
final Tensor[][] data = IntStream.range(0, batches).mapToObj(x -> x).flatMap(x -> Stream.<Tensor[]>of(inputPrototype)).toArray(i -> new Tensor[i][]);
@Nonnull TimedResult<Result> timedEval = TimedResult.time(() -> {
Result[] input = ConstantResult.batchResultArray(data);
@Nullable Result result;
try {
result = component.eval(input);
} finally {
// Inputs are released even if eval throws; both the Result and its data hold references.
for (@Nonnull Result nnResult : input) {
nnResult.freeRef();
nnResult.getData().freeRef();
}
}
return result;
});
final Result result = timedEval.result;
@Nonnull final DeltaSet<Layer> buffer = new DeltaSet<Layer>();
try {
long timedBackprop = TimedResult.time(() -> {
// Backprop with an all-ones delta shaped like the forward output.
@Nonnull TensorArray tensorArray = TensorArray.wrap(result.getData().stream().map(x -> {
return x.mapAndFree(v -> 1.0);
}).toArray(i -> new Tensor[i]));
result.accumulate(buffer, tensorArray);
// accumulate is expected to consume the delta entirely.
assert tensorArray.currentRefCount() == 0;
return buffer;
}).timeNanos;
// Convert nanoseconds to seconds.
return new Tuple2<>(timedEval.timeNanos / 1e9, timedBackprop / 1e9);
} finally {
buffer.freeRef();
result.freeRef();
result.getData().freeRef();
}
}
Use of com.simiacryptus.util.lang.Tuple2 in project MindsEye by SimiaCryptus:
class StyleTransfer, method getContentComponents().
/**
 * Builds the weighted content-loss terms for each configured layer: the mean-squared
 * distance between the layer's activations and the captured content target, scaled
 * by the reciprocal of the target's RMS.
 *
 * @param setup the setup
 * @param nodeMap the node map
 * @return the content components
 */
@Nonnull
public ArrayList<Tuple2<Double, DAGNode>> getContentComponents(NeuralSetup<T> setup, final Map<T, DAGNode> nodeMap) {
ArrayList<Tuple2<Double, DAGNode>> contentComponents = new ArrayList<>();
for (final T layerType : getLayerTypes()) {
final DAGNode node = nodeMap.get(layerType);
final double coeff_content = !setup.style.content.params.containsKey(layerType) ? 0 : setup.style.content.params.get(layerType);
final PipelineNetwork network1 = (PipelineNetwork) node.getNetwork();
if (coeff_content != 0) {
Tensor content = setup.contentTarget.content.get(layerType);
// Guard against a zero-RMS content target, which would otherwise yield an
// infinite alpha; mirrors the 0 == rms handling in getStyleComponents.
double contentRms = content.rms();
double alpha = 0 == contentRms ? 1 : (1.0 / contentRms);
contentComponents.add(new Tuple2<>(coeff_content, network1.wrap(new MeanSqLossLayer().setAlpha(alpha), node, network1.wrap(new ValueLayer(content), new DAGNode[] {}))));
}
}
return contentComponents;
}
Aggregations