Use of com.alibaba.alink.common.linalg.tensor.FloatTensor in project Alink by alibaba:
class TFExampleConversionV2, method javaToFeature.
/**
 * Converts a Java object to a TensorFlow {@code Feature}.
 *
 * @param dt  declared data type of {@code val}; when it is the generic
 *            {@code TENSOR}, the concrete tensor type is resolved from the
 *            runtime class of {@code val}.
 * @param val the Java object to convert; must be compatible with {@code dt}.
 * @return the TensorFlow feature wrapping the value(s).
 * @throws RuntimeException if the data type is unsupported (e.g. BYTE_TENSOR).
 */
public static Feature javaToFeature(DataTypesV2 dt, Object val) {
    Feature.Builder featureBuilder = Feature.newBuilder();
    FloatList.Builder floatListBuilder = FloatList.newBuilder();
    Int64List.Builder int64ListBuilder = Int64List.newBuilder();
    // When dt is the generic TENSOR, refine it to the concrete tensor type first.
    if (DataTypesV2.TENSOR.equals(dt)) {
        if (val instanceof FloatTensor) {
            dt = DataTypesV2.FLOAT_TENSOR;
        } else if (val instanceof DoubleTensor) {
            dt = DataTypesV2.DOUBLE_TENSOR;
        } else if (val instanceof IntTensor) {
            dt = DataTypesV2.INT_TENSOR;
        } else if (val instanceof LongTensor) {
            dt = DataTypesV2.LONG_TENSOR;
        } else if (val instanceof BoolTensor) {
            dt = DataTypesV2.BOOLEAN_TENSOR;
        } else if (val instanceof UByteTensor) {
            dt = DataTypesV2.UBYTE_TENSOR;
        } else if (val instanceof StringTensor) {
            dt = DataTypesV2.STRING_TENSOR;
        } else if (val instanceof ByteTensor) {
            dt = DataTypesV2.BYTE_TENSOR;
        }
    }
    switch (dt) {
        case FLOAT_16:
        case FLOAT_32:
        case FLOAT_64: {
            // BUGFIX: val may be boxed as Float or Double depending on dt; the
            // previous blanket (Float) cast threw ClassCastException for FLOAT_64
            // values. Number.floatValue() handles both (narrowing for doubles).
            floatListBuilder.addValue(((Number) val).floatValue());
            featureBuilder.setFloatList(floatListBuilder);
            break;
        }
        case INT_8:
        case INT_16:
        case INT_32:
        case INT_64:
        case UINT_8:
        case UINT_16:
        case UINT_32:
        case UINT_64: {
            // All integral types are widened to int64, the only integer width
            // the TF Example proto supports.
            int64ListBuilder.addValue(castAsLong(val));
            featureBuilder.setInt64List(int64ListBuilder);
            break;
        }
        case STRING: {
            BytesList.Builder bb = BytesList.newBuilder();
            bb.addValue(castAsBytes(val));
            featureBuilder.setBytesList(bb);
            break;
        }
        case FLOAT_TENSOR: {
            // Tensors are flattened to a rank-1 shape before serialization; the
            // original shape is not preserved in the Feature.
            FloatTensor floatTensor = (FloatTensor) val;
            long size = floatTensor.size();
            floatTensor = floatTensor.reshape(new Shape(size));
            for (long i = 0; i < size; i += 1) {
                floatListBuilder.addValue(floatTensor.getFloat(i));
            }
            featureBuilder.setFloatList(floatListBuilder);
            break;
        }
        case DOUBLE_TENSOR: {
            DoubleTensor doubleTensor = (DoubleTensor) val;
            long size = doubleTensor.size();
            doubleTensor = doubleTensor.reshape(new Shape(size));
            for (long i = 0; i < size; i += 1) {
                // Lossy narrowing: FloatList only stores 32-bit floats.
                floatListBuilder.addValue((float) doubleTensor.getDouble(i));
            }
            featureBuilder.setFloatList(floatListBuilder);
            break;
        }
        case INT_TENSOR: {
            IntTensor intTensor = (IntTensor) val;
            long size = intTensor.size();
            intTensor = intTensor.reshape(new Shape(size));
            for (long i = 0; i < size; i += 1) {
                int64ListBuilder.addValue(intTensor.getInt(i));
            }
            featureBuilder.setInt64List(int64ListBuilder);
            break;
        }
        case LONG_TENSOR: {
            LongTensor longTensor = (LongTensor) val;
            long size = longTensor.size();
            longTensor = longTensor.reshape(new Shape(size));
            for (long i = 0; i < size; i += 1) {
                int64ListBuilder.addValue(longTensor.getLong(i));
            }
            featureBuilder.setInt64List(int64ListBuilder);
            break;
        }
        case BOOLEAN_TENSOR: {
            BoolTensor boolTensor = (BoolTensor) val;
            long size = boolTensor.size();
            boolTensor = boolTensor.reshape(new Shape(size));
            for (long i = 0; i < size; i += 1) {
                // Booleans are encoded as 0/1 int64 values.
                int64ListBuilder.addValue(boolTensor.getBoolean(i) ? 1 : 0);
            }
            featureBuilder.setInt64List(int64ListBuilder);
            break;
        }
        case UBYTE_TENSOR: {
            UByteTensor ubyteTensor = (UByteTensor) val;
            long size = ubyteTensor.size();
            ubyteTensor = ubyteTensor.reshape(new Shape(size));
            for (long i = 0; i < size; i += 1) {
                int64ListBuilder.addValue(ubyteTensor.getUByte(i));
            }
            featureBuilder.setInt64List(int64ListBuilder);
            break;
        }
        case STRING_TENSOR: {
            StringTensor stringTensor = (StringTensor) val;
            long size = stringTensor.size();
            stringTensor = stringTensor.reshape(new Shape(size));
            BytesList.Builder bb = BytesList.newBuilder();
            for (long i = 0; i < size; i += 1) {
                bb.addValue(castAsBytes(stringTensor.getString(i)));
            }
            featureBuilder.setBytesList(bb);
            break;
        }
        case BYTE_TENSOR:
        default:
            throw new RuntimeException("Unsupported data type for TF");
    }
    return featureBuilder.build();
}
Use of com.alibaba.alink.common.linalg.tensor.FloatTensor in project Alink by alibaba:
class DeepARModelMapper, method predictMultiVar.
/**
 * Predicts {@code predictNum} future vectors for a multivariate series with the
 * DeepAR TF model, one independent rollout per series component.
 *
 * @param historyTimes timestamps of the observed history, oldest first.
 * @param historyVals  observed vectors, one per timestamp; must be non-null.
 * @param predictNum   number of future steps to forecast.
 * @return (forecast vectors, serialized MTable of per-step sigma vectors), or
 *         (null, null) if the underlying TF model mapper throws.
 */
@Override
protected Tuple2<Vector[], String> predictMultiVar(Timestamp[] historyTimes, Vector[] historyVals, int predictNum) {
Timestamp[] predictTimes = TimeSeriesMapper.getPredictTimes(historyTimes, predictNum);
int window = historyVals.length;
int series = 0;
DenseVector[] vectors = new DenseVector[historyVals.length];
for (int i = 0; i < window; ++i) {
vectors[i] = VectorUtil.getDenseVector(historyVals[i]);
if (vectors[i] == null) {
throw new IllegalArgumentException("history values should not be null.");
}
// NOTE(review): `series` keeps the size of the LAST vector — assumes all
// history vectors have the same dimension; confirm upstream validation.
series = vectors[i].size();
}
// tensors[i][j] = [lagged value of series i at step j, time features(t_j)].
FloatTensor[][] tensors = new FloatTensor[series][window];
for (int i = 0; i < series; ++i) {
// The first step has no lagged value, so its z-slot is filled with zero.
tensors[i][0] = Tensor.cat(new FloatTensor[] { new FloatTensor(new float[] { 0.0f }), DeepARFeaturesGenerator.generateFromFrequency(calendar.get(), unit, historyTimes[0]) }, -1, null);
for (int j = 1; j < window; ++j) {
tensors[i][j] = Tensor.cat(new FloatTensor[] { new FloatTensor(new float[] { (float) vectors[j - 1].get(i) }), DeepARFeaturesGenerator.generateFromFrequency(calendar.get(), unit, historyTimes[j]) }, -1, null);
}
}
// One (window x feature) batch tensor per series.
FloatTensor[] batch = new FloatTensor[series];
for (int i = 0; i < series; ++i) {
batch[i] = Tensor.stack(tensors[i], 0, null);
}
Vector[] result = new Vector[predictNum];
Row[] sigmas = new Row[predictNum];
for (int i = 0; i < predictNum; ++i) {
result[i] = new DenseVector(series);
sigmas[i] = Row.of(new DenseVector(series));
}
for (int i = 0; i < series; ++i) {
// mu starts as the most recent observation and is fed back autoregressively.
float mu = (float) historyVals[window - 1].get(i);
// v[0] accumulates the normalization scale; v[1] appears to be a shift term
// that stays 0 on this path — TODO confirm against the training-side code.
FloatTensor v = new FloatTensor(new float[] { 0.0f, 0.0f });
int nonZero = 0;
for (int j = 0; j < window; ++j) {
float cell = batch[i].getFloat(j, 0);
if (cell != 0) {
nonZero += 1;
}
v.setFloat(v.getFloat(0) + cell, 0);
}
if (mu != 0) {
nonZero += 1;
v.setFloat(v.getFloat(0) + mu, 0);
}
// An all-zero series is skipped; its forecasts stay at the DenseVector
// default of zero.
if (nonZero == 0) {
continue;
}
// Scale = mean of the non-zero lag values plus 1.
v.setFloat(v.getFloat(0) / nonZero + 1.0f, 0);
// Normalize the lag column in place.
for (int j = 0; j < window; ++j) {
batch[i].setFloat(batch[i].getFloat(j, 0) / v.getFloat(0), j, 0);
}
mu = mu / v.getFloat(0);
// Autoregressive rollout: append the last prediction as the next lagged
// input, rerun the model, and read (mu, sigma) at row window + j.
for (int j = 0; j < predictNum; ++j) {
batch[i] = Tensor.cat(new FloatTensor[] { batch[i], Tensor.stack(new FloatTensor[] { Tensor.cat(new FloatTensor[] { new FloatTensor(new float[] { mu }), DeepARFeaturesGenerator.generateFromFrequency(calendar.get(), unit, predictTimes[j]) }, -1, null) }, 0, null) }, 0, null);
FloatTensor pred;
try {
pred = (FloatTensor) tfTableModelPredictModelMapper.map(Row.of(batch[i])).getField(0);
} catch (Exception e) {
// Model failure: signal with (null, null), matching the other predict paths.
return Tuple2.of(null, null);
}
mu = pred.getFloat(window + j, 0);
float sigma = pred.getFloat(window + j, 1);
// Denormalize back to the original scale.
result[j].set(i, mu * v.getFloat(0) + v.getFloat(1));
((Vector) (sigmas[j].getField(0))).set(i, sigma * v.getFloat(0));
}
}
return Tuple2.of(result, new MTable(Arrays.asList(sigmas), new String[] { "sigma" }, new TypeInformation<?>[] { VectorTypes.DENSE_VECTOR }).toString());
}
Use of com.alibaba.alink.common.linalg.tensor.FloatTensor in project Alink by alibaba:
class DeepARModelMapper, method predictSingleVar.
/**
 * Predicts {@code predictNum} future values for a single-variable series with
 * the DeepAR TF model.
 *
 * @param historyTimes timestamps of the observed history, oldest first.
 * @param historyVals  observed values, one per timestamp.
 * @param predictNum   number of future steps to forecast.
 * @return (forecast values, serialized MTable of per-step sigmas), or
 *         (null, null) if the underlying TF model mapper throws.
 */
@Override
protected Tuple2<double[], String> predictSingleVar(Timestamp[] historyTimes, double[] historyVals, int predictNum) {
    Timestamp[] predictTimes = TimeSeriesMapper.getPredictTimes(historyTimes, predictNum);
    int window = historyVals.length;
    // Row i of the input holds [lagged value z_{i-1}, time features(t_i)].
    FloatTensor[] tensors = new FloatTensor[window];
    // The first step has no lagged value, so its z-slot is filled with zero.
    tensors[0] = Tensor.cat(new FloatTensor[] { new FloatTensor(new float[] { 0.0f }), DeepARFeaturesGenerator.generateFromFrequency(calendar.get(), unit, historyTimes[0]) }, -1, null);
    // Remaining steps use the previous observation as the lagged value.
    for (int i = 1; i < window; ++i) {
        tensors[i] = Tensor.cat(new FloatTensor[] { new FloatTensor(new float[] { (float) historyVals[i - 1] }), DeepARFeaturesGenerator.generateFromFrequency(calendar.get(), unit, historyTimes[i]) }, -1, null);
    }
    FloatTensor batch = Tensor.stack(tensors, 0, null);
    // mu starts as the most recent observation and is fed back autoregressively.
    float mu = (float) historyVals[window - 1];
    // v[0] accumulates the normalization scale; v[1] appears to be a shift term
    // that stays 0 on this path — TODO confirm against the training-side code.
    FloatTensor v = new FloatTensor(new float[] { 0.0f, 0.0f });
    int nonZero = 0;
    for (int i = 0; i < window; ++i) {
        float cell = batch.getFloat(i, 0);
        if (cell != 0) {
            nonZero += 1;
        }
        v.setFloat(v.getFloat(0) + cell, 0);
    }
    if (mu != 0) {
        nonZero += 1;
        v.setFloat(v.getFloat(0) + mu, 0);
    }
    if (nonZero == 0) {
        // All-zero history: return a zero forecast with zero sigmas.
        double[] result = new double[predictNum];
        Row[] sigmas = new Row[predictNum];
        Arrays.fill(result, 0.0);
        // BUGFIX: use a distinct Row per entry holding a Double (0.0), instead of
        // a single shared Row holding the Integer 0 — the declared column type is
        // Types.DOUBLE and the normal path below also emits Double values.
        for (int i = 0; i < predictNum; ++i) {
            sigmas[i] = Row.of(0.0);
        }
        return Tuple2.of(result, new MTable(Arrays.asList(sigmas), new String[] { "sigma" }, new TypeInformation<?>[] { Types.DOUBLE }).toString());
    }
    // Scale = mean of the non-zero lag values plus 1.
    v.setFloat(v.getFloat(0) / nonZero + 1.0f, 0);
    // Normalize the lag column in place.
    for (int i = 0; i < window; ++i) {
        batch.setFloat(batch.getFloat(i, 0) / v.getFloat(0), i, 0);
    }
    mu = mu / v.getFloat(0);
    double[] result = new double[predictNum];
    Row[] sigmas = new Row[predictNum];
    Arrays.fill(result, 0.0);
    for (int i = 0; i < predictNum; ++i) {
        sigmas[i] = Row.of(0.0);
    }
    // Autoregressive rollout: append the last prediction as the next lagged
    // input, rerun the model, and read (mu, sigma) at row window + j.
    for (int j = 0; j < predictNum; ++j) {
        batch = Tensor.cat(new FloatTensor[] { batch, Tensor.stack(new FloatTensor[] { Tensor.cat(new FloatTensor[] { new FloatTensor(new float[] { mu }), DeepARFeaturesGenerator.generateFromFrequency(calendar.get(), unit, predictTimes[j]) }, -1, null) }, 0, null) }, 0, null);
        FloatTensor pred;
        try {
            pred = (FloatTensor) tfTableModelPredictModelMapper.map(Row.of(batch)).getField(0);
        } catch (Exception e) {
            // Model failure: signal with (null, null), matching the other predict paths.
            return Tuple2.of(null, null);
        }
        mu = pred.getFloat(window + j, 0);
        float sigma = pred.getFloat(window + j, 1);
        // Denormalize back to the original scale.
        result[j] = mu * v.getFloat(0) + v.getFloat(1);
        sigmas[j].setField(0, sigma * v.getFloat(0));
    }
    return Tuple2.of(result, new MTable(Arrays.asList(sigmas), new String[] { "sigma" }, new TypeInformation<?>[] { Types.DOUBLE }).toString());
}
Use of com.alibaba.alink.common.linalg.tensor.FloatTensor in project Alink by alibaba:
class LSTNetModelMapper, method predictMultiVar.
/**
 * Predicts future vectors for a multivariate series with the LSTNet TF model.
 * Returns (null, null) when the underlying model mapper fails.
 */
@Override
protected Tuple2<Vector[], String> predictMultiVar(Timestamp[] historyTimes, Vector[] historyVals, int predictNum) {
    // Pack the history into a single input tensor for the TF model.
    Tuple2<Timestamp[], FloatTensor> packed = toTensor(historyTimes, historyVals);
    FloatTensor forecast = null;
    try {
        forecast = (FloatTensor) tfTableModelPredictModelMapper.map(Row.of(packed.f1)).getField(0);
    } catch (Exception ignored) {
        // Best-effort inference: a failure is reported via the null result below.
    }
    if (forecast == null) {
        return Tuple2.of(null, null);
    }
    return Tuple2.of(new Vector[] { DoubleTensor.of(forecast).toVector() }, null);
}
Use of com.alibaba.alink.common.linalg.tensor.FloatTensor in project Alink by alibaba:
class FlattenMTableStreamTest, method linkFrom.
/**
 * Verifies that FlattenMTableStreamOp expands a serialized MTable column back
 * into rows, replicating the reserved "id" column onto every output row.
 */
@Test
public void linkFrom() throws Exception {
    // Build an MTable with null cells, dense/sparse vectors and a tensor column.
    List<Row> mTableRows = new ArrayList<>();
    mTableRows.add(Row.of(1, "2", 0, null, new SparseVector(3, new int[] { 1 }, new double[] { 2.0 }), new FloatTensor(new float[] { 3.0f })));
    mTableRows.add(Row.of(null, "2", 0, new DenseVector(new double[] { 0.0, 1.0 }), new SparseVector(4, new int[] { 2 }, new double[] { 3.0 }), new FloatTensor(new float[] { 3.0f })));
    mTableRows.add(Row.of(null, "2", 0, new DenseVector(new double[] { 0.1, 1.0 }), new SparseVector(4, new int[] { 2 }, new double[] { 3.0 }), new FloatTensor(new float[] { 3.0f })));
    String schemaStr = "col0 int, col1 string, label int" + ", d_vec DENSE_VECTOR" + ", s_vec SPARSE_VECTOR" + ", tensor FLOAT_TENSOR";
    MTable mTable = new MTable(mTableRows, schemaStr);

    // Wrap the serialized MTable in a one-row stream source.
    List<Row> sourceRows = new ArrayList<>();
    sourceRows.add(Row.of("id", mTable.toString()));
    StreamOperator<?> source = new MemSourceStreamOp(sourceRows, new String[] { "id", "mTable" });

    StreamOperator<?> flattened = source.link(
        new FlattenMTableStreamOp()
            .setSchemaStr(schemaStr)
            .setSelectedCol("mTable")
            .setReservedCols("id"));
    CollectSinkStreamOp sink = flattened.link(new CollectSinkStreamOp());
    StreamOperator.execute();

    // Every flattened row must carry the reserved id value.
    for (Row row : sink.getAndRemoveValues()) {
        Assert.assertEquals(row.getField(0), "id");
    }
}
Aggregations