Use of org.tribuo.math.la.SparseVector in project tribuo by oracle.
From the class MultiLabelConverter, method convertToTensor:
@Override
public Tensor convertToTensor(List<Example<MultiLabel>> examples, ImmutableOutputInfo<MultiLabel> outputIDInfo) {
    TFloat32 returnVal = TFloat32.tensorOf(Shape.of(examples.size(), outputIDInfo.size()));
    int i = 0;
    for (Example<MultiLabel> e : examples) {
        SparseVector vec = e.getOutput().convertToSparseVector(outputIDInfo);
        // Zero the row, then overwrite the active label positions.
        for (int j = 0; j < outputIDInfo.size(); j++) {
            returnVal.setFloat(0.0f, i, j);
        }
        for (VectorTuple v : vec) {
            returnVal.setFloat((float) v.value, i, v.index);
        }
        i++;
    }
    return returnVal;
}
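The zero-fill-then-overwrite pattern above can be illustrated without the TensorFlow types. The following is a minimal standalone sketch using plain Java arrays; the batch size, label count, and active label ids are invented for the example and are not taken from the Tribuo code.

// Sketch: build a batch of multi-hot rows by writing 1.0f at the active
// label indices of each (already zero-initialised) row. Data is hypothetical.
int batchSize = 2;
int numLabels = 4;
int[][] activeLabels = {{0, 2}, {3}};               // per-example active label ids (assumed)
float[][] batch = new float[batchSize][numLabels];  // Java arrays start at 0.0f
for (int row = 0; row < batchSize; row++) {
    for (int labelId : activeLabels[row]) {
        batch[row][labelId] = 1.0f;
    }
}
// batch is now {{1,0,1,0},{0,0,0,1}}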
Use of org.tribuo.math.la.SparseVector in project tribuo by oracle.
From the class MultiLabelConverter, method convertToTensor (single-example overload):
@Override
public Tensor convertToTensor(MultiLabel example, ImmutableOutputInfo<MultiLabel> outputIDInfo) {
    SparseVector vec = example.convertToSparseVector(outputIDInfo);
    TFloat32 returnVal = TFloat32.tensorOf(Shape.of(1, outputIDInfo.size()));
    // Zero the single row, then overwrite the active label positions.
    for (int j = 0; j < outputIDInfo.size(); j++) {
        returnVal.setFloat(0.0f, 0, j);
    }
    for (VectorTuple v : vec) {
        returnVal.setFloat((float) v.value, 0, v.index);
    }
    return returnVal;
}
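For intuition, the single-example path produces one multi-hot row. A tiny sketch of the expected contents, assuming a hypothetical output mapping {A=0, B=1, C=2, D=3} and an unscored MultiLabel carrying {A, C} where each present label contributes 1.0:

// Assumed label ids: A=0, B=1, C=2, D=3; the example carries labels {A, C}.
// convertToTensor would then return a (1, 4) TFloat32 containing:
float[][] expected = {{1.0f, 0.0f, 1.0f, 0.0f}};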
Use of org.tribuo.math.la.SparseVector in project tribuo by oracle.
From the class TreeModel, method predict:
@Override
public Prediction<T> predict(Example<T> example) {
    //
    // Ensures we handle collisions correctly
    SparseVector vec = SparseVector.createSparseVector(example, featureIDMap, false);
    if (vec.numActiveElements() == 0) {
        throw new IllegalArgumentException("No features found in Example " + example.toString());
    }
    // Walk down the tree until getNextNode returns null; oldNode then holds the last node visited.
    Node<T> oldNode = root;
    Node<T> curNode = root;
    while (curNode != null) {
        oldNode = curNode;
        curNode = oldNode.getNextNode(vec);
    }
    // oldNode must be a LeafNode.
    return ((LeafNode<T>) oldNode).getPrediction(vec.numActiveElements(), example);
}
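The walk-to-leaf loop is the core of this method. Below is a minimal sketch of the same pattern against a hypothetical node type; SimpleNode, its methods, and the feature array are assumptions for illustration, not Tribuo's Node<T>/LeafNode<T> classes.

// Hypothetical node type: next() returns null once we ask a leaf for its child.
interface SimpleNode {
    SimpleNode next(double[] features); // null when this node is a leaf
    String label();                      // only meaningful on leaves
}

static String predictLabel(SimpleNode root, double[] features) {
    SimpleNode old = root;
    SimpleNode cur = root;
    while (cur != null) {   // descend until we step past a leaf
        old = cur;
        cur = old.next(features);
    }
    return old.label();     // old now references the leaf we stopped at
}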
Use of org.tribuo.math.la.SparseVector in project tribuo by oracle.
From the class HeapMerger, method merge:
@Override
public DenseSparseMatrix merge(DenseSparseMatrix[] inputs) {
    int denseLength = inputs[0].getDimension1Size();
    int sparseLength = inputs[0].getDimension2Size();
    int[] totalLengths = new int[inputs[0].getDimension1Size()];
    // Count the total number of active elements in each row across all inputs.
    for (int i = 0; i < inputs.length; i++) {
        for (int j = 0; j < totalLengths.length; j++) {
            totalLengths[j] += inputs[i].numActiveElements(j);
        }
    }
    int maxLength = 0;
    for (int i = 0; i < totalLengths.length; i++) {
        if (totalLengths[i] > maxLength) {
            maxLength = totalLengths[i];
        }
    }
    // Reusable scratch buffers sized for the densest merged row.
    SparseVector[] output = new SparseVector[denseLength];
    int[] indicesBuffer = new int[maxLength];
    double[] valuesBuffer = new double[maxLength];
    List<SparseVector> vectors = new ArrayList<>();
    for (int i = 0; i < denseLength; i++) {
        vectors.clear();
        // Gather the non-empty i-th rows from every input, then merge them.
        for (DenseSparseMatrix m : inputs) {
            SparseVector vec = m.getRow(i);
            if (vec.numActiveElements() > 0) {
                vectors.add(vec);
            }
        }
        output[i] = merge(vectors, sparseLength, indicesBuffer, valuesBuffer);
    }
    return DenseSparseMatrix.createFromSparseVectors(output);
}
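The per-row merge delegated to at the end is not shown in this excerpt. The sketch below illustrates the underlying idea only, combining several sparse rows and summing values that share an index, using a TreeMap for clarity rather than the heap-and-buffer approach the class name implies; the index/value data in the comment is invented.

import java.util.TreeMap;

// Illustrative only: combine sparse rows, summing values that share an index.
static TreeMap<Integer, Double> mergeRows(int[][] indices, double[][] values) {
    TreeMap<Integer, Double> merged = new TreeMap<>();
    for (int r = 0; r < indices.length; r++) {
        for (int c = 0; c < indices[r].length; c++) {
            merged.merge(indices[r][c], values[r][c], Double::sum);
        }
    }
    return merged; // sorted index -> accumulated value
}
// Example: rows {0:1.0, 3:2.0} and {3:4.0, 5:1.5} merge to {0:1.0, 3:6.0, 5:1.5}.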
Use of org.tribuo.math.la.SparseVector in project tribuo by oracle.
From the class Hinge, method lossAndGradient:
/**
* Returns a {@link Pair} of {@link Double} and {@link SGDVector} representing the loss
* and per label gradients respectively.
* @param truth The true label id.
* @param prediction The prediction for each label id.
* @return The loss and per label gradient.
*/
@Override
public Pair<Double, SGDVector> lossAndGradient(Integer truth, SGDVector prediction) {
    // Subtract the margin from the true label's score, then find the highest scoring label.
    prediction.add(truth, -margin);
    int predIndex = prediction.indexOfMax();
    if (truth == predIndex) {
        // The true label wins by at least the margin: zero loss and an empty gradient.
        return new Pair<>(0.0, SparseVector.createSparseVector(prediction.size(), new int[0], new double[0]));
    } else {
        int[] indices = new int[2];
        double[] values = new double[2];
        // Keep the indices in ascending order when building the sparse gradient.
        if (truth < predIndex) {
            indices[0] = truth;
            values[0] = margin;
            indices[1] = predIndex;
            values[1] = -margin;
        } else {
            indices[0] = predIndex;
            values[0] = -margin;
            indices[1] = truth;
            values[1] = margin;
        }
        SparseVector output = SparseVector.createSparseVector(prediction.size(), indices, values);
        // The violating label's (margin-adjusted) score exceeds the true label's, so this is non-negative.
        double loss = prediction.get(predIndex) - prediction.get(truth);
        return new Pair<>(loss, output);
    }
}
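A standalone sketch of the same multiclass hinge computation on plain arrays may help; the method name, scores, label id, and margin below are invented for illustration and mirror the logic above rather than the Tribuo API.

// Sketch (invented data): multiclass hinge loss on a plain score array.
static double hingeLoss(double[] scores, int trueLabel, double margin) {
    scores = scores.clone();          // don't mutate the caller's array
    scores[trueLabel] -= margin;      // penalise the true label by the margin
    int best = 0;
    for (int k = 1; k < scores.length; k++) {
        if (scores[k] > scores[best]) {
            best = k;
        }
    }
    return (best == trueLabel) ? 0.0 : scores[best] - scores[trueLabel];
}
// e.g. hingeLoss(new double[]{0.2, 1.4, 0.9}, 2, 1.0) == 1.5, and the gradient
// would be non-zero only at label ids 1 and 2, matching the two-element SparseVector above.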