Use of com.simiacryptus.mindseye.network.InnerNode in project MindsEye by SimiaCryptus: the class StyleTransfer, method getStyleComponents.
/**
 * Builds the weighted style-loss terms for one layer: graph nodes that measure how far the
 * statistics of the signal at {@code node} are from a target style's first- and second-order
 * statistics (mean and Gramian/covariance). Each returned tuple pairs a loss weight with the
 * DAG node that computes that loss. Wrapped layers are added to {@code network} as a side effect.
 *
 * @param node          the node whose activations are compared against the style statistics
 * @param network       the network into which the loss sub-graph is wired (mutated)
 * @param styleParams   the per-layer style weights; may be null or all-zero, in which case no
 *                      components are produced
 * @param mean          the target style's per-band mean tensor
 * @param covariance    the target style's covariance (Gramian) tensor
 * @param centeringMode how activations are re-centered before the covariance loss
 * @return the (possibly empty) list of (weight, loss-node) style components
 * @throws IllegalStateException if {@code centeringMode} is not a recognized constant
 */
@Nonnull
public ArrayList<Tuple2<Double, DAGNode>> getStyleComponents(final DAGNode node, final PipelineNetwork network, final LayerStyleParams styleParams, final Tensor mean, final Tensor covariance, final CenteringMode centeringMode) {
    ArrayList<Tuple2<Double, DAGNode>> styleComponents = new ArrayList<>();
    if (null != styleParams && (styleParams.cov != 0 || styleParams.mean != 0)) {
        // Normalize each loss by the RMS of its target tensor so the mean and covariance
        // terms are on comparable scales; guard against division by zero for all-zero targets.
        double meanRms = mean.rms();
        double meanScale = 0 == meanRms ? 1 : (1.0 / meanRms);
        // Negated target mean (a constant) and negated running band-average of the node.
        // Both are reused: for re-centering via GateBiasLayer (which adds its gate input)
        // and for the mean-matching loss below.
        InnerNode negTarget = network.wrap(new ValueLayer(mean.scale(-1)), new DAGNode[] {});
        InnerNode negAvg = network.wrap(new BandAvgReducerLayer().setAlpha(-1), node);
        if (styleParams.cov != 0) {
            DAGNode recentered;
            switch (centeringMode) {
                case Origin:
                    // No re-centering: use raw activations.
                    recentered = node;
                    break;
                case Dynamic:
                    // Subtract the image's own current band average.
                    recentered = network.wrap(new GateBiasLayer(), node, negAvg);
                    break;
                case Static:
                    // Subtract the style target's mean.
                    recentered = network.wrap(new GateBiasLayer(), node, negTarget);
                    break;
                default:
                    // Unreachable for the known enum constants; fail loudly with context
                    // if a new CenteringMode is ever added without updating this switch.
                    throw new IllegalStateException("Unsupported centering mode: " + centeringMode);
            }
            // Sanity-check band dimensions: the covariance band count must be a positive
            // multiple of the input band count.
            int[] covDim = covariance.getDimensions();
            assert 0 < covDim[2] : Arrays.toString(covDim);
            int inputBands = mean.getDimensions()[2];
            assert 0 < inputBands : Arrays.toString(mean.getDimensions());
            int outputBands = covDim[2] / inputBands;
            assert 0 < outputBands : Arrays.toString(covDim) + " / " + inputBands;
            double covRms = covariance.rms();
            double covScale = 0 == covRms ? 1 : (1.0 / covRms);
            // Covariance loss: mean-squared error between the target covariance and the
            // (tile-averaged) Gramian of the re-centered activations.
            styleComponents.add(new Tuple2<>(styleParams.cov, network.wrap(new MeanSqLossLayer().setAlpha(covScale), network.wrap(new ValueLayer(covariance), new DAGNode[] {}), network.wrap(ArtistryUtil.wrapTilesAvg(new GramianLayer()), recentered))));
        }
        if (styleParams.mean != 0) {
            // Mean loss: both inputs are negated, which leaves the squared distance unchanged.
            styleComponents.add(new Tuple2<>(styleParams.mean, network.wrap(new MeanSqLossLayer().setAlpha(meanScale), negAvg, negTarget)));
        }
    }
    return styleComponents;
}
Use of com.simiacryptus.mindseye.network.InnerNode in project MindsEye by SimiaCryptus: the class ExplodedConvolutionGrid, method add.
/**
 * Wires this exploded convolution into the DAG network that owns {@code input}: optionally
 * zero-pads the input for custom padding, applies either the single sub-layer or an
 * {@link ImgLinearSubnetLayer} fanning out over band ranges, then optionally crops back.
 *
 * @param input the input node; its owning network is mutated by this call
 * @return the output node of the wired sub-graph
 */
public DAGNode add(@Nonnull DAGNode input) {
    assertAlive();
    DAGNetwork network = input.getNetwork();
    int defaultPaddingX = 0;
    int defaultPaddingY = 0;
    boolean customPaddingX = this.convolutionParams.paddingX != null && convolutionParams.paddingX != defaultPaddingX;
    boolean customPaddingY = this.convolutionParams.paddingY != null && convolutionParams.paddingY != defaultPaddingY;
    final DAGNode paddedInput;
    if (customPaddingX || customPaddingY) {
        // Guard each axis individually: the previous code unboxed paddingX/paddingY
        // unconditionally here, which threw a NullPointerException whenever only ONE
        // axis had a custom (non-null) padding. A non-custom axis contributes 0, which
        // matches what the original arithmetic produced for padding == default.
        int x = customPaddingX ? prePaddingOffset(this.convolutionParams.paddingX, defaultPaddingX) : 0;
        int y = customPaddingY ? prePaddingOffset(this.convolutionParams.paddingY, defaultPaddingY) : 0;
        if (x != 0 || y != 0) {
            paddedInput = network.wrap(new ImgZeroPaddingLayer(x, y).setPrecision(convolutionParams.precision), input);
        } else {
            paddedInput = input;
        }
    } else {
        paddedInput = input;
    }
    InnerNode output;
    if (subLayers.size() == 1) {
        // Single band range: delegate directly, no fan-out layer needed.
        output = (InnerNode) subLayers.get(0).add(paddedInput);
    } else {
        // Multiple band ranges: each leg gets its own pipeline, combined by a linear subnet.
        ImgLinearSubnetLayer linearSubnetLayer = new ImgLinearSubnetLayer();
        subLayers.forEach(leg -> {
            PipelineNetwork subnet = new PipelineNetwork();
            leg.add(subnet.getHead());
            linearSubnetLayer.add(leg.fromBand, leg.toBand, subnet);
        });
        boolean isParallel = CudaSettings.INSTANCE.isConv_para_1();
        linearSubnetLayer.setPrecision(convolutionParams.precision).setParallel(isParallel);
        output = network.wrap(linearSubnetLayer, paddedInput).setParallel(isParallel);
    }
    if (customPaddingX || customPaddingY) {
        // Post-step only ever applies non-positive offsets; presumably a negative size
        // makes ImgZeroPaddingLayer crop rather than pad -- TODO confirm against that layer.
        int x = customPaddingX ? Math.min(0, this.convolutionParams.paddingX - defaultPaddingX) : 0;
        int y = customPaddingY ? Math.min(0, this.convolutionParams.paddingY - defaultPaddingY) : 0;
        if (x != 0 || y != 0) {
            return network.wrap(new ImgZeroPaddingLayer(x, y).setPrecision(convolutionParams.precision), output);
        }
    }
    return output;
}

/**
 * Pre-convolution padding offset for one axis: the signed amount by which the requested
 * padding's magnitude exceeds the default (0 when the request lies within the default).
 */
private static int prePaddingOffset(int padding, int defaultPadding) {
    if (padding < -defaultPadding) {
        return padding + defaultPadding;
    }
    if (padding > defaultPadding) {
        return padding - defaultPadding;
    }
    return 0;
}
Aggregations