Use of org.apache.flink.table.types.logical.TimestampType in project flink by apache.
The init() method of the class ArrowUtilsTest.
@BeforeClass
public static void init() {
    testFields = new ArrayList<>();
    testFields.add(Tuple5.of("f1", new TinyIntType(), new ArrowType.Int(8, true), TinyIntWriter.TinyIntWriterForRow.class, ArrowTinyIntColumnVector.class));
    testFields.add(Tuple5.of("f2", new SmallIntType(), new ArrowType.Int(8 * 2, true), SmallIntWriter.SmallIntWriterForRow.class, ArrowSmallIntColumnVector.class));
    testFields.add(Tuple5.of("f3", new IntType(), new ArrowType.Int(8 * 4, true), IntWriter.IntWriterForRow.class, ArrowIntColumnVector.class));
    testFields.add(Tuple5.of("f4", new BigIntType(), new ArrowType.Int(8 * 8, true), BigIntWriter.BigIntWriterForRow.class, ArrowBigIntColumnVector.class));
    testFields.add(Tuple5.of("f5", new BooleanType(), new ArrowType.Bool(), BooleanWriter.BooleanWriterForRow.class, ArrowBooleanColumnVector.class));
    testFields.add(Tuple5.of("f6", new FloatType(), new ArrowType.FloatingPoint(FloatingPointPrecision.SINGLE), FloatWriter.FloatWriterForRow.class, ArrowFloatColumnVector.class));
    testFields.add(Tuple5.of("f7", new DoubleType(), new ArrowType.FloatingPoint(FloatingPointPrecision.DOUBLE), DoubleWriter.DoubleWriterForRow.class, ArrowDoubleColumnVector.class));
    testFields.add(Tuple5.of("f8", new VarCharType(), ArrowType.Utf8.INSTANCE, VarCharWriter.VarCharWriterForRow.class, ArrowVarCharColumnVector.class));
    testFields.add(Tuple5.of("f9", new VarBinaryType(), ArrowType.Binary.INSTANCE, VarBinaryWriter.VarBinaryWriterForRow.class, ArrowVarBinaryColumnVector.class));
    testFields.add(Tuple5.of("f10", new DecimalType(10, 3), new ArrowType.Decimal(10, 3), DecimalWriter.DecimalWriterForRow.class, ArrowDecimalColumnVector.class));
    testFields.add(Tuple5.of("f11", new DateType(), new ArrowType.Date(DateUnit.DAY), DateWriter.DateWriterForRow.class, ArrowDateColumnVector.class));
    testFields.add(Tuple5.of("f13", new TimeType(0), new ArrowType.Time(TimeUnit.SECOND, 32), TimeWriter.TimeWriterForRow.class, ArrowTimeColumnVector.class));
    testFields.add(Tuple5.of("f14", new TimeType(2), new ArrowType.Time(TimeUnit.MILLISECOND, 32), TimeWriter.TimeWriterForRow.class, ArrowTimeColumnVector.class));
    testFields.add(Tuple5.of("f15", new TimeType(4), new ArrowType.Time(TimeUnit.MICROSECOND, 64), TimeWriter.TimeWriterForRow.class, ArrowTimeColumnVector.class));
    testFields.add(Tuple5.of("f16", new TimeType(8), new ArrowType.Time(TimeUnit.NANOSECOND, 64), TimeWriter.TimeWriterForRow.class, ArrowTimeColumnVector.class));
    testFields.add(Tuple5.of("f17", new LocalZonedTimestampType(0), new ArrowType.Timestamp(TimeUnit.SECOND, null), TimestampWriter.TimestampWriterForRow.class, ArrowTimestampColumnVector.class));
    testFields.add(Tuple5.of("f18", new LocalZonedTimestampType(2), new ArrowType.Timestamp(TimeUnit.MILLISECOND, null), TimestampWriter.TimestampWriterForRow.class, ArrowTimestampColumnVector.class));
    testFields.add(Tuple5.of("f19", new LocalZonedTimestampType(4), new ArrowType.Timestamp(TimeUnit.MICROSECOND, null), TimestampWriter.TimestampWriterForRow.class, ArrowTimestampColumnVector.class));
    testFields.add(Tuple5.of("f20", new LocalZonedTimestampType(8), new ArrowType.Timestamp(TimeUnit.NANOSECOND, null), TimestampWriter.TimestampWriterForRow.class, ArrowTimestampColumnVector.class));
    testFields.add(Tuple5.of("f21", new TimestampType(0), new ArrowType.Timestamp(TimeUnit.SECOND, null), TimestampWriter.TimestampWriterForRow.class, ArrowTimestampColumnVector.class));
    testFields.add(Tuple5.of("f22", new TimestampType(2), new ArrowType.Timestamp(TimeUnit.MILLISECOND, null), TimestampWriter.TimestampWriterForRow.class, ArrowTimestampColumnVector.class));
    testFields.add(Tuple5.of("f23", new TimestampType(4), new ArrowType.Timestamp(TimeUnit.MICROSECOND, null), TimestampWriter.TimestampWriterForRow.class, ArrowTimestampColumnVector.class));
    testFields.add(Tuple5.of("f24", new TimestampType(8), new ArrowType.Timestamp(TimeUnit.NANOSECOND, null), TimestampWriter.TimestampWriterForRow.class, ArrowTimestampColumnVector.class));
    testFields.add(Tuple5.of("f25", new ArrayType(new VarCharType()), ArrowType.List.INSTANCE, ArrayWriter.ArrayWriterForRow.class, ArrowArrayColumnVector.class));

    RowType rowFieldType = new RowType(Arrays.asList(
            new RowType.RowField("a", new IntType()),
            new RowType.RowField("b", new VarCharType()),
            new RowType.RowField("c", new ArrayType(new VarCharType())),
            new RowType.RowField("d", new TimestampType(2)),
            new RowType.RowField("e", new RowType(Arrays.asList(
                    new RowType.RowField("e1", new IntType()),
                    new RowType.RowField("e2", new VarCharType()))))));
    testFields.add(Tuple5.of("f26", rowFieldType, ArrowType.Struct.INSTANCE, RowWriter.RowWriterForRow.class, ArrowRowColumnVector.class));

    List<RowType.RowField> rowFields = new ArrayList<>();
    for (Tuple5<String, LogicalType, ArrowType, Class<?>, Class<?>> field : testFields) {
        rowFields.add(new RowType.RowField(field.f0, field.f1));
    }
    rowType = new RowType(rowFields);
    allocator = ArrowUtils.getRootAllocator().newChildAllocator("stdout", 0, Long.MAX_VALUE);
}
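Each tuple above pairs a Flink LogicalType with the ArrowType, field writer, and column vector it is expected to map to. As a minimal sketch of how the fixture can be consumed, assuming ArrowUtils.toArrowSchema(RowType) and JUnit's assertEquals are available as in the surrounding test class (this particular method body is illustrative, not verbatim):

@Test
public void testToArrowSchemaMapping() {
    // derive the Arrow schema from the combined row type built in init()
    Schema schema = ArrowUtils.toArrowSchema(rowType);
    for (int i = 0; i < testFields.size(); i++) {
        // f0 = field name, f2 = expected ArrowType
        assertEquals(testFields.get(i).f0, schema.getFields().get(i).getName());
        assertEquals(testFields.get(i).f2, schema.getFields().get(i).getType());
    }
}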
Use of org.apache.flink.table.types.logical.TimestampType in project flink by apache.
The getTestOperator() method of the class PythonStreamGroupWindowAggregateOperatorTest.
@Override
OneInputStreamOperator getTestOperator(Configuration config) {
    long size = 10000L;
    long slide = 5000L;
    SlidingWindowAssigner windowAssigner =
            SlidingWindowAssigner.of(Duration.ofMillis(size), Duration.ofMillis(slide)).withEventTime();
    WindowReference windowRef = new WindowReference("w$", new TimestampType(3));
    return new PassThroughPythonStreamGroupWindowAggregateOperator(
            config, getInputType(), getOutputType(),
            new PythonAggregateFunctionInfo[] {
                new PythonAggregateFunctionInfo(
                        PythonScalarFunctionOperatorTestBase.DummyPythonFunction.INSTANCE,
                        new Integer[] { 2 }, -1, false)
            },
            getGrouping(), -1, false, false, 3, windowAssigner,
            FlinkFnApi.GroupWindow.WindowType.SLIDING_GROUP_WINDOW,
            true, true, size, slide, 0L, 0L,
            new NamedWindowProperty[] {
                new NamedWindowProperty("start", new WindowStart(null)),
                new NamedWindowProperty("end", new WindowEnd(null))
            },
            UTC_ZONE_ID);
}
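For orientation: the operator is configured with a 10 000 ms window sliding every 5 000 ms, so each element lands in size / slide = 2 overlapping windows. A hypothetical standalone illustration of that geometry (the arithmetic mirrors the usual sliding-window start computation; it is not part of the test itself):

long size = 10_000L;      // window size in ms
long slide = 5_000L;      // slide interval in ms
long timestamp = 12_345L; // an arbitrary event timestamp

// start of the latest window containing the timestamp, then walk backwards
long lastStart = timestamp - (timestamp % slide);
for (long start = lastStart; start > timestamp - size; start -= slide) {
    // prints [10000, 20000) and [5000, 15000)
    System.out.printf("window [%d, %d)%n", start, start + size);
}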
Use of org.apache.flink.table.types.logical.TimestampType in project flink by apache.
The getPickledBytesFromJavaObject() method of the class PythonBridgeUtils.
private static Object getPickledBytesFromJavaObject(Object obj, LogicalType dataType)
        throws IOException {
    Pickler pickler = new Pickler();
    initialize();
    if (obj == null) {
        return new byte[0];
    } else {
        if (dataType instanceof DateType) {
            long time;
            if (obj instanceof LocalDate) {
                time = ((LocalDate) obj).toEpochDay();
            } else {
                time = ((Date) obj).toLocalDate().toEpochDay();
            }
            return pickler.dumps(time);
        } else if (dataType instanceof TimeType) {
            long time;
            if (obj instanceof LocalTime) {
                time = ((LocalTime) obj).toNanoOfDay();
            } else {
                time = ((Time) obj).toLocalTime().toNanoOfDay();
            }
            time = time / 1000;
            return pickler.dumps(time);
        } else if (dataType instanceof TimestampType) {
            if (obj instanceof LocalDateTime) {
                return pickler.dumps(Timestamp.valueOf((LocalDateTime) obj));
            } else {
                return pickler.dumps(obj);
            }
        } else if (dataType instanceof RowType) {
            Row tmpRow = (Row) obj;
            LogicalType[] tmpRowFieldTypes = ((RowType) dataType).getChildren().toArray(new LogicalType[0]);
            List<Object> rowFieldBytes = new ArrayList<>(tmpRow.getArity() + 1);
            rowFieldBytes.add(new byte[] { tmpRow.getKind().toByteValue() });
            for (int i = 0; i < tmpRow.getArity(); i++) {
                rowFieldBytes.add(getPickledBytesFromJavaObject(tmpRow.getField(i), tmpRowFieldTypes[i]));
            }
            return rowFieldBytes;
        } else if (dataType instanceof MapType) {
            List<List<Object>> serializedMapKV = new ArrayList<>(2);
            MapType mapType = (MapType) dataType;
            Map<Object, Object> mapObj = (Map) obj;
            List<Object> keyBytesList = new ArrayList<>(mapObj.size());
            List<Object> valueBytesList = new ArrayList<>(mapObj.size());
            for (Map.Entry entry : mapObj.entrySet()) {
                keyBytesList.add(getPickledBytesFromJavaObject(entry.getKey(), mapType.getKeyType()));
                valueBytesList.add(getPickledBytesFromJavaObject(entry.getValue(), mapType.getValueType()));
            }
            serializedMapKV.add(keyBytesList);
            serializedMapKV.add(valueBytesList);
            return pickler.dumps(serializedMapKV);
        } else if (dataType instanceof ArrayType) {
            Object[] objects = (Object[]) obj;
            List<Object> serializedElements = new ArrayList<>(objects.length);
            ArrayType arrayType = (ArrayType) dataType;
            LogicalType elementType = arrayType.getElementType();
            for (Object object : objects) {
                serializedElements.add(getPickledBytesFromJavaObject(object, elementType));
            }
            return pickler.dumps(serializedElements);
        }
        if (dataType instanceof FloatType) {
            return pickler.dumps(String.valueOf(obj));
        } else {
            return pickler.dumps(obj);
        }
    }
}
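Two details are worth calling out: a null value is encoded as an empty byte array rather than pickled, and TIME values are divided by 1000 because LocalTime.toNanoOfDay() returns nanoseconds while the pickled representation carries microseconds. The trailing FLOAT branch pickles String.valueOf(obj), presumably to sidestep float-to-double widening artifacts on the Python side. A small worked example of the time conversion (values chosen for illustration only):

// 01:02:03.456789 as microseconds of day
LocalTime t = LocalTime.of(1, 2, 3, 456_789_000);
long nanos = t.toNanoOfDay(); // 3_723_456_789_000 ns
long micros = nanos / 1000;   // 3_723_456_789 µs, the value pickled above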
Use of org.apache.flink.table.types.logical.TimestampType in project flink by apache.
The columnWidthsByType() method of the class TableauStyle.
// Package private and private static methods to deal with complexity of string writing and
// formatting
/**
 * Try to derive column widths based on the column types. If the result set is not small enough
 * to be stored in Java heap memory, we can't determine column widths based on column values.
 */
static int[] columnWidthsByType(List<Column> columns, int maxColumnWidth, boolean printNullAsEmpty, boolean printRowKind) {
    // fill width with field names first
    final int[] colWidths = columns.stream().mapToInt(col -> col.getName().length()).toArray();

    // determine proper column width based on types
    for (int i = 0; i < columns.size(); ++i) {
        LogicalType type = columns.get(i).getDataType().getLogicalType();
        int len;
        switch (type.getTypeRoot()) {
            case TINYINT:
                // extra for negative value
                len = TinyIntType.PRECISION + 1;
                break;
            case SMALLINT:
                // extra for negative value
                len = SmallIntType.PRECISION + 1;
                break;
            case INTEGER:
                // extra for negative value
                len = IntType.PRECISION + 1;
                break;
            case BIGINT:
                // extra for negative value
                len = BigIntType.PRECISION + 1;
                break;
            case DECIMAL:
                // extra for negative value and decimal point
                len = ((DecimalType) type).getPrecision() + 2;
                break;
            case BOOLEAN:
                // "true" or "false"
                len = 5;
                break;
            case DATE:
                // e.g. 9999-12-31
                len = 10;
                break;
            case TIME_WITHOUT_TIME_ZONE:
                int precision = ((TimeType) type).getPrecision();
                // 23:59:59[.999999999]
                len = precision == 0 ? 8 : precision + 9;
                break;
            case TIMESTAMP_WITHOUT_TIME_ZONE:
                precision = ((TimestampType) type).getPrecision();
                len = timestampTypeColumnWidth(precision);
                break;
            case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
                precision = ((LocalZonedTimestampType) type).getPrecision();
                len = timestampTypeColumnWidth(precision);
                break;
            default:
                len = maxColumnWidth;
        }
        // adjust column width with potential null values
        len = printNullAsEmpty ? len : Math.max(len, PrintStyle.NULL_VALUE.length());
        colWidths[i] = Math.max(colWidths[i], len);
    }

    // add an extra column for row kind if necessary
    if (printRowKind) {
        final int[] ret = new int[columns.size() + 1];
        ret[0] = ROW_KIND_COLUMN.length();
        System.arraycopy(colWidths, 0, ret, 1, columns.size());
        return ret;
    } else {
        return colWidths;
    }
}
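The timestampTypeColumnWidth helper is referenced above but not shown here. A plausible sketch of its logic, assuming timestamps are rendered as yyyy-MM-dd HH:mm:ss with an optional dot and precision fractional digits, in line with the TIME formula above (an illustration, not the verbatim Flink implementation):

static int timestampTypeColumnWidth(int precision) {
    int base = 19; // e.g. "9999-12-31 23:59:59"
    // one extra char for the decimal point, plus the fractional digits
    return precision == 0 ? base : base + 1 + precision;
}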
Use of org.apache.flink.table.types.logical.TimestampType in project flink by apache.
The write() method of the class BinaryWriter.
// --------------------------------------------------------------------------------------------

/**
 * @deprecated Use {@link #createValueSetter(LogicalType)} to avoid dealing with logical types
 *     at runtime.
 */
@Deprecated
static void write(BinaryWriter writer, int pos, Object o, LogicalType type, TypeSerializer<?> serializer) {
    switch (type.getTypeRoot()) {
        case BOOLEAN:
            writer.writeBoolean(pos, (boolean) o);
            break;
        case TINYINT:
            writer.writeByte(pos, (byte) o);
            break;
        case SMALLINT:
            writer.writeShort(pos, (short) o);
            break;
        case INTEGER:
        case DATE:
        case TIME_WITHOUT_TIME_ZONE:
        case INTERVAL_YEAR_MONTH:
            writer.writeInt(pos, (int) o);
            break;
        case BIGINT:
        case INTERVAL_DAY_TIME:
            writer.writeLong(pos, (long) o);
            break;
        case TIMESTAMP_WITHOUT_TIME_ZONE:
            TimestampType timestampType = (TimestampType) type;
            writer.writeTimestamp(pos, (TimestampData) o, timestampType.getPrecision());
            break;
        case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
            LocalZonedTimestampType lzTs = (LocalZonedTimestampType) type;
            writer.writeTimestamp(pos, (TimestampData) o, lzTs.getPrecision());
            break;
        case FLOAT:
            writer.writeFloat(pos, (float) o);
            break;
        case DOUBLE:
            writer.writeDouble(pos, (double) o);
            break;
        case CHAR:
        case VARCHAR:
            writer.writeString(pos, (StringData) o);
            break;
        case DECIMAL:
            DecimalType decimalType = (DecimalType) type;
            writer.writeDecimal(pos, (DecimalData) o, decimalType.getPrecision());
            break;
        case ARRAY:
            writer.writeArray(pos, (ArrayData) o, (ArrayDataSerializer) serializer);
            break;
        case MAP:
        case MULTISET:
            writer.writeMap(pos, (MapData) o, (MapDataSerializer) serializer);
            break;
        case ROW:
        case STRUCTURED_TYPE:
            writer.writeRow(pos, (RowData) o, (RowDataSerializer) serializer);
            break;
        case RAW:
            writer.writeRawValue(pos, (RawValueData<?>) o, (RawValueDataSerializer<?>) serializer);
            break;
        case BINARY:
        case VARBINARY:
            writer.writeBinary(pos, (byte[]) o);
            break;
        default:
            throw new UnsupportedOperationException("Unsupported type: " + type);
    }
}
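A minimal usage sketch of both the deprecated entry point and the replacement named in its deprecation note. BinaryRowData and BinaryRowWriter are Flink's concrete binary row and writer; the values are hypothetical, and the ValueSetter usage follows from the createValueSetter(LogicalType) reference above rather than from this excerpt:

BinaryRowData row = new BinaryRowData(2);
BinaryRowWriter writer = new BinaryRowWriter(row);

// deprecated path: dispatches on the logical type for every call
BinaryWriter.write(writer, 0, 42, new IntType(), null);
BinaryWriter.write(writer, 1, TimestampData.fromEpochMillis(0L), new TimestampType(3), null);
writer.complete();

// preferred path: resolve the setter once, then reuse it per record
writer.reset();
BinaryWriter.ValueSetter setter = BinaryWriter.createValueSetter(new IntType());
setter.setValue(writer, 0, 42);
writer.complete();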