Use of org.apache.flink.table.data.TimestampData in project flink by apache.
The class DataGenTableSourceFactoryTest, method testSource.
@Test
public void testSource() throws Exception {
    DescriptorProperties descriptor = new DescriptorProperties();
    descriptor.putString(FactoryUtil.CONNECTOR.key(), "datagen");
    descriptor.putLong(DataGenConnectorOptions.ROWS_PER_SECOND.key(), 100);

    // f0: random string of length 20
    descriptor.putString(DataGenConnectorOptionsUtil.FIELDS + ".f0." + DataGenConnectorOptionsUtil.KIND, DataGenConnectorOptionsUtil.RANDOM);
    descriptor.putLong(DataGenConnectorOptionsUtil.FIELDS + ".f0." + DataGenConnectorOptionsUtil.LENGTH, 20);

    // f1: random long in [10, 100]
    descriptor.putString(DataGenConnectorOptionsUtil.FIELDS + ".f1." + DataGenConnectorOptionsUtil.KIND, DataGenConnectorOptionsUtil.RANDOM);
    descriptor.putLong(DataGenConnectorOptionsUtil.FIELDS + ".f1." + DataGenConnectorOptionsUtil.MIN, 10);
    descriptor.putLong(DataGenConnectorOptionsUtil.FIELDS + ".f1." + DataGenConnectorOptionsUtil.MAX, 100);

    // f2: bounded sequence from 50 to 60 inclusive, hence exactly 11 rows
    descriptor.putString(DataGenConnectorOptionsUtil.FIELDS + ".f2." + DataGenConnectorOptionsUtil.KIND, DataGenConnectorOptionsUtil.SEQUENCE);
    descriptor.putLong(DataGenConnectorOptionsUtil.FIELDS + ".f2." + DataGenConnectorOptionsUtil.START, 50);
    descriptor.putLong(DataGenConnectorOptionsUtil.FIELDS + ".f2." + DataGenConnectorOptionsUtil.END, 60);

    // f3: random timestamp at most 5 seconds in the past
    descriptor.putString(DataGenConnectorOptionsUtil.FIELDS + ".f3." + DataGenConnectorOptionsUtil.KIND, DataGenConnectorOptionsUtil.RANDOM);
    descriptor.putString(DataGenConnectorOptionsUtil.FIELDS + ".f3." + DataGenConnectorOptionsUtil.MAX_PAST, "5s");

    final long begin = System.currentTimeMillis();
    List<RowData> results = runGenerator(SCHEMA, descriptor);
    final long end = System.currentTimeMillis();

    Assert.assertEquals(11, results.size());
    for (int i = 0; i < results.size(); i++) {
        RowData row = results.get(i);
        Assert.assertEquals(20, row.getString(0).toString().length());

        long f1 = row.getLong(1);
        Assert.assertTrue(f1 >= 10 && f1 <= 100);

        Assert.assertEquals(i + 50, row.getLong(2));

        // f3 must fall between (begin - max-past) and the end of the run
        final TimestampData f3 = row.getTimestamp(3, 3);
        Assert.assertTrue(f3.getMillisecond() >= begin - 5000 && f3.getMillisecond() <= end);
    }
}
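The descriptor keys exercised above map one-to-one onto the WITH options of a datagen table in SQL DDL. The following is a minimal sketch of the same configuration driven through a TableEnvironment, assuming a recent Flink release; it is illustrative rather than part of the test, and the table name gen plus the column types are assumptions (the test's SCHEMA constant is not shown on this page).

// Illustrative sketch, not part of DataGenTableSourceFactoryTest: the same datagen
// options expressed as SQL DDL. Table name and schema are assumed for the example.
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class DataGenDdlSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        tEnv.executeSql(
                "CREATE TABLE gen ("
                        + "  f0 STRING,"
                        + "  f1 BIGINT,"
                        + "  f2 BIGINT,"
                        + "  f3 TIMESTAMP(3)"
                        + ") WITH ("
                        + "  'connector' = 'datagen',"
                        + "  'rows-per-second' = '100',"
                        + "  'fields.f0.kind' = 'random',"
                        + "  'fields.f0.length' = '20',"
                        + "  'fields.f1.kind' = 'random',"
                        + "  'fields.f1.min' = '10',"
                        + "  'fields.f1.max' = '100',"
                        + "  'fields.f2.kind' = 'sequence',"
                        + "  'fields.f2.start' = '50',"
                        + "  'fields.f2.end' = '60',"
                        + "  'fields.f3.kind' = 'random',"
                        + "  'fields.f3.max-past' = '5s'"
                        + ")");
        // The bounded f2 sequence (50..60) limits the table to exactly 11 rows.
        tEnv.executeSql("SELECT * FROM gen").print();
    }
}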
Use of org.apache.flink.table.data.TimestampData in project flink by apache.
The class RowDataToAvroConverters, method createConverter.
// --------------------------------------------------------------------------------
// IMPORTANT! We use anonymous classes instead of lambdas for a reason here. It is
// necessary because the maven shade plugin cannot relocate classes in
// SerializedLambdas (MSHADE-260). On the other hand we want to relocate Avro for
// sql-client uber jars.
// --------------------------------------------------------------------------------
/**
* Creates a runtime converter according to the given logical type that converts objects of
* Flink Table & SQL internal data structures to corresponding Avro data structures.
*/
public static RowDataToAvroConverter createConverter(LogicalType type) {
    final RowDataToAvroConverter converter;
    switch (type.getTypeRoot()) {
        case NULL:
            converter = new RowDataToAvroConverter() {
                private static final long serialVersionUID = 1L;

                @Override
                public Object convert(Schema schema, Object object) {
                    return null;
                }
            };
            break;
        case TINYINT:
            converter = new RowDataToAvroConverter() {
                private static final long serialVersionUID = 1L;

                @Override
                public Object convert(Schema schema, Object object) {
                    return ((Byte) object).intValue();
                }
            };
            break;
        case SMALLINT:
            converter = new RowDataToAvroConverter() {
                private static final long serialVersionUID = 1L;

                @Override
                public Object convert(Schema schema, Object object) {
                    return ((Short) object).intValue();
                }
            };
            break;
        case BOOLEAN: // boolean
        case INTEGER: // int
        case INTERVAL_YEAR_MONTH: // long
        case BIGINT: // long
        case INTERVAL_DAY_TIME: // long
        case FLOAT: // float
        case DOUBLE: // double
        case TIME_WITHOUT_TIME_ZONE: // int
        case DATE: // int
            converter = new RowDataToAvroConverter() {
                private static final long serialVersionUID = 1L;

                @Override
                public Object convert(Schema schema, Object object) {
                    return object;
                }
            };
            break;
        case CHAR:
        case VARCHAR:
            converter = new RowDataToAvroConverter() {
                private static final long serialVersionUID = 1L;

                @Override
                public Object convert(Schema schema, Object object) {
                    return new Utf8(object.toString());
                }
            };
            break;
        case BINARY:
        case VARBINARY:
            converter = new RowDataToAvroConverter() {
                private static final long serialVersionUID = 1L;

                @Override
                public Object convert(Schema schema, Object object) {
                    return ByteBuffer.wrap((byte[]) object);
                }
            };
            break;
        case TIMESTAMP_WITHOUT_TIME_ZONE:
            converter = new RowDataToAvroConverter() {
                private static final long serialVersionUID = 1L;

                @Override
                public Object convert(Schema schema, Object object) {
                    return ((TimestampData) object).toInstant().toEpochMilli();
                }
            };
            break;
        case DECIMAL:
            converter = new RowDataToAvroConverter() {
                private static final long serialVersionUID = 1L;

                @Override
                public Object convert(Schema schema, Object object) {
                    return ByteBuffer.wrap(((DecimalData) object).toUnscaledBytes());
                }
            };
            break;
        case ARRAY:
            converter = createArrayConverter((ArrayType) type);
            break;
        case ROW:
            converter = createRowConverter((RowType) type);
            break;
        case MAP:
        case MULTISET:
            converter = createMapConverter(type);
            break;
        case RAW:
        default:
            throw new UnsupportedOperationException("Unsupported type: " + type);
    }

    // wrap into nullable converter
    return new RowDataToAvroConverter() {
        private static final long serialVersionUID = 1L;

        @Override
        public Object convert(Schema schema, Object object) {
            if (object == null) {
                return null;
            }
            // get actual schema if it is a nullable schema
            Schema actualSchema;
            if (schema.getType() == Schema.Type.UNION) {
                List<Schema> types = schema.getTypes();
                int size = types.size();
                if (size == 2 && types.get(1).getType() == Schema.Type.NULL) {
                    actualSchema = types.get(0);
                } else if (size == 2 && types.get(0).getType() == Schema.Type.NULL) {
                    actualSchema = types.get(1);
                } else {
                    throw new IllegalArgumentException(
                            "The Avro schema is not a nullable type: " + schema.toString());
                }
            } else {
                actualSchema = schema;
            }
            return converter.convert(actualSchema, object);
        }
    };
}
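For orientation, a hedged usage sketch of the converter follows. It is not taken from the Flink sources: the two-field row type, values, and surrounding class are illustrative, and it assumes AvroSchemaConverter.convertToSchema for deriving the matching Avro schema.

// Hedged usage sketch (not from RowDataToAvroConverters itself): convert one
// internal RowData into an Avro GenericRecord for ROW<name STRING, ts TIMESTAMP(3)>.
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.flink.formats.avro.RowDataToAvroConverters;
import org.apache.flink.formats.avro.typeutils.AvroSchemaConverter;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.TimestampData;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.TimestampType;
import org.apache.flink.table.types.logical.VarCharType;

public class RowDataToAvroSketch {
    public static void main(String[] args) {
        RowType rowType = RowType.of(
                new LogicalType[] {new VarCharType(VarCharType.MAX_LENGTH), new TimestampType(3)},
                new String[] {"name", "ts"});
        Schema avroSchema = AvroSchemaConverter.convertToSchema(rowType);
        RowDataToAvroConverters.RowDataToAvroConverter converter =
                RowDataToAvroConverters.createConverter(rowType);
        GenericRowData row = GenericRowData.of(
                StringData.fromString("flink"), TimestampData.fromEpochMillis(1_000L));
        // The nullable wrapper unwraps UNION schemas before delegating to the row converter.
        GenericRecord record = (GenericRecord) converter.convert(avroSchema, row);
        System.out.println(record);
    }
}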
Use of org.apache.flink.table.data.TimestampData in project flink by apache.
The class OrcBulkRowDataWriterTest, method readMap.
/**
 * Reads a MapColumnVector with the schema {@literal
 * map<string,struct<_col3_col0:string,_col3_col1:timestamp>>}.
 */
private static MapData readMap(MapColumnVector mapVector, int row) {
    int offset = (int) mapVector.offsets[row];
    StringData keyData = readStringData((BytesColumnVector) mapVector.keys, offset);

    GenericRowData valueData = new GenericRowData(2);
    StructColumnVector structVector = (StructColumnVector) mapVector.values;
    BytesColumnVector bytesVector = (BytesColumnVector) structVector.fields[0];
    TimestampColumnVector timestampVector = (TimestampColumnVector) structVector.fields[1];
    StringData strValueData = readStringData(bytesVector, offset);
    TimestampData timestampData = readTimestamp(timestampVector, offset);
    valueData.setField(0, strValueData);
    valueData.setField(1, timestampData);

    Map<StringData, RowData> mapDataMap = new HashMap<>();
    mapDataMap.put(keyData, valueData);
    return new GenericMapData(mapDataMap);
}
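The readStringData and readTimestamp helpers live elsewhere in OrcBulkRowDataWriterTest and are not shown on this page. A minimal sketch of readTimestamp, under the assumption that the ORC vector's asScratchTimestamp accessor is used, would be:

// Assumed helper shape, not the verbatim test code: convert one entry of an ORC
// TimestampColumnVector into Flink's internal TimestampData.
private static TimestampData readTimestamp(TimestampColumnVector vector, int row) {
    // asScratchTimestamp materializes the stored time/nanos pair as java.sql.Timestamp.
    return TimestampData.fromTimestamp(vector.asScratchTimestamp(row));
}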
Use of org.apache.flink.table.data.TimestampData in project flink by apache.
The class StreamRecordUtils, method binaryrow.
/**
 * Receives an object array and generates a BinaryRowData based on the array.
 *
 * @param fields input object array
 * @return the generated BinaryRowData
 */
public static BinaryRowData binaryrow(Object... fields) {
    BinaryRowData row = new BinaryRowData(fields.length);
    BinaryRowWriter writer = new BinaryRowWriter(row);
    for (int j = 0; j < fields.length; j++) {
        Object value = fields[j];
        if (value == null) {
            writer.setNullAt(j);
        } else if (value instanceof Byte) {
            writer.writeByte(j, (Byte) value);
        } else if (value instanceof Short) {
            writer.writeShort(j, (Short) value);
        } else if (value instanceof Integer) {
            writer.writeInt(j, (Integer) value);
        } else if (value instanceof String) {
            writer.writeString(j, StringData.fromString((String) value));
        } else if (value instanceof Double) {
            writer.writeDouble(j, (Double) value);
        } else if (value instanceof Float) {
            writer.writeFloat(j, (Float) value);
        } else if (value instanceof Long) {
            writer.writeLong(j, (Long) value);
        } else if (value instanceof Boolean) {
            writer.writeBoolean(j, (Boolean) value);
        } else if (value instanceof byte[]) {
            writer.writeBinary(j, (byte[]) value);
        } else if (value instanceof DecimalData) {
            DecimalData decimal = (DecimalData) value;
            writer.writeDecimal(j, decimal, decimal.precision());
        } else if (value instanceof TimestampData) {
            TimestampData timestamp = (TimestampData) value;
            writer.writeTimestamp(j, timestamp, 3);
        } else if (value instanceof Tuple2 && ((Tuple2) value).f0 instanceof TimestampData) {
            TimestampData timestamp = (TimestampData) ((Tuple2) value).f0;
            writer.writeTimestamp(j, timestamp, (int) ((Tuple2) value).f1);
        } else if (value instanceof Tuple2 && ((Tuple2) value).f0 instanceof ArrayData) {
            ArrayData array = (ArrayData) ((Tuple2) value).f0;
            ArrayDataSerializer serializer = (ArrayDataSerializer) ((Tuple2) value).f1;
            writer.writeArray(j, array, serializer);
        } else if (value instanceof Tuple2 && ((Tuple2) value).f0 instanceof RowData) {
            RowData rowData = (RowData) ((Tuple2) value).f0;
            RowDataSerializer serializer = (RowDataSerializer) ((Tuple2) value).f1;
            writer.writeRow(j, rowData, serializer);
        } else {
            throw new RuntimeException("Not support yet!");
        }
    }
    writer.complete();
    return row;
}
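A brief usage sketch (illustrative, not part of StreamRecordUtils): a bare TimestampData is written with the default precision 3, while wrapping it in a Tuple2 carries an explicit precision alongside the value.

// Illustrative call: the field values are assumptions, not test fixtures.
BinaryRowData row = binaryrow(
        "hello",                                              // STRING
        42L,                                                  // BIGINT
        TimestampData.fromEpochMillis(1024L),                 // TIMESTAMP(3), default precision
        Tuple2.of(TimestampData.fromEpochMillis(1024L), 9));  // TIMESTAMP(9), explicit precision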
Use of org.apache.flink.table.data.TimestampData in project flink by apache.
The class EqualiserCodeGeneratorTest, method testTimestamp.
@Test
public void testTimestamp() {
    RecordEqualiser equaliser =
            new EqualiserCodeGenerator(new LogicalType[] { new TimestampType() })
                    .generateRecordEqualiser("TIMESTAMP")
                    .newInstance(Thread.currentThread().getContextClassLoader());
    Function<TimestampData, BinaryRowData> func = o -> {
        BinaryRowData row = new BinaryRowData(1);
        BinaryRowWriter writer = new BinaryRowWriter(row);
        writer.writeTimestamp(0, o, 9);
        writer.complete();
        return row;
    };
    assertBoolean(equaliser, func, fromEpochMillis(1024), fromEpochMillis(1024), true);
    assertBoolean(equaliser, func, fromEpochMillis(1024), fromEpochMillis(1025), false);
}
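The assertBoolean helper is defined elsewhere in EqualiserCodeGeneratorTest. A sketch consistent with the call sites above, offered as an assumption rather than the verbatim helper, is:

// Assumed helper shape inferred from the calls above: map both timestamps to
// single-field binary rows and compare them with the generated equaliser.
private static void assertBoolean(
        RecordEqualiser equaliser,
        Function<TimestampData, BinaryRowData> toRow,
        TimestampData left,
        TimestampData right,
        boolean expected) {
    Assert.assertEquals(expected, equaliser.equals(toRow.apply(left), toRow.apply(right)));
}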