Example use of org.apache.flink.table.data.GenericRowData in the Apache Flink project: the createElement method of the RowDataFieldsKinesisPartitionKeyGeneratorTest class.
/**
 * Builds a seven-field test row matching {@code ROW_TYPE}: the event timestamp,
 * the ip and route strings, and the string-encoded day count, year, month and
 * day-of-month derived from the timestamp.
 */
private RowData createElement(LocalDateTime time, String ip, String route) {
    final GenericRowData row = new GenericRowData(ROW_TYPE.getFieldCount());
    int pos = 0;
    row.setField(pos++, TimestampData.fromLocalDateTime(time));
    row.setField(pos++, StringData.fromString(ip));
    row.setField(pos++, StringData.fromString(route));
    row.setField(pos++, StringData.fromString(String.valueOf(days(time))));
    row.setField(pos++, StringData.fromString(String.valueOf(year(time))));
    row.setField(pos++, StringData.fromString(String.valueOf(monthOfYear(time))));
    row.setField(pos, StringData.fromString(String.valueOf(dayOfMonth(time))));
    return row;
}
Example use of org.apache.flink.table.data.GenericRowData in the Apache Flink project: the checkCacheReload method of the FileSystemLookupFunction class.
/**
 * Reloads the lookup-join cache if its TTL ({@code reloadInterval}) has elapsed.
 *
 * <p>On failure the load is retried up to {@code MAX_RETRIES} times with a linear
 * backoff of {@code numRetry * RETRY_INTERVAL}; if all retries fail, a
 * {@link FlinkRuntimeException} carrying the last cause is thrown.
 */
private void checkCacheReload() {
    // Fast path: cache is still fresh, nothing to do.
    if (nextLoadTime > System.currentTimeMillis()) {
        return;
    }
    if (nextLoadTime > 0) {
        LOG.info("Lookup join cache has expired after {} minute(s), reloading", reloadInterval.toMinutes());
    } else {
        LOG.info("Populating lookup join cache");
    }
    int numRetry = 0;
    while (true) {
        // A failed attempt may have partially populated the cache; start clean.
        cache.clear();
        try {
            long count = 0;
            GenericRowData reuse = new GenericRowData(rowType.getFieldCount());
            partitionReader.open(partitionFetcher.fetch(fetcherContext));
            RowData row;
            while ((row = partitionReader.read(reuse)) != null) {
                count++;
                // Copy each row: the reader may reuse the same object for every record.
                RowData rowData = serializer.copy(row);
                RowData key = extractLookupKey(rowData);
                List<RowData> rows = cache.computeIfAbsent(key, k -> new ArrayList<>());
                rows.add(rowData);
            }
            partitionReader.close();
            nextLoadTime = System.currentTimeMillis() + reloadInterval.toMillis();
            LOG.info("Loaded {} row(s) into lookup join cache", count);
            return;
        } catch (Exception e) {
            if (numRetry >= MAX_RETRIES) {
                throw new FlinkRuntimeException(String.format("Failed to load table into cache after %d retries", numRetry), e);
            }
            numRetry++;
            // Linear backoff: 1x, 2x, 3x ... RETRY_INTERVAL between attempts.
            long toSleep = numRetry * RETRY_INTERVAL.toMillis();
            LOG.warn(String.format("Failed to load table into cache, will retry in %d seconds", toSleep / 1000), e);
            try {
                Thread.sleep(toSleep);
            } catch (InterruptedException ex) {
                // Restore the interrupt flag so upstream code can observe the interruption.
                Thread.currentThread().interrupt();
                LOG.warn("Interrupted while waiting to retry failed cache load, aborting");
                throw new FlinkRuntimeException(ex);
            }
        }
    }
}
Example use of org.apache.flink.table.data.GenericRowData in the Apache Flink project: the convertToNewRow method of the HBaseSerde class.
/**
 * Converts an HBase {@link Result} into a freshly allocated {@link RowData}.
 *
 * <p>Note: this method is thread-safe.
 */
public RowData convertToNewRow(Result result) {
    // Allocate brand-new row objects on every call so the returned instance can
    // safely be cached by the caller without being clobbered by a later conversion.
    final GenericRowData topRow = new GenericRowData(fieldLength);
    final GenericRowData[] perFamilyRows = new GenericRowData[families.length];
    for (int familyIndex = 0; familyIndex < perFamilyRows.length; familyIndex++) {
        perFamilyRows[familyIndex] = new GenericRowData(qualifiers[familyIndex].length);
    }
    return convertToRow(result, topRow, perFamilyRows);
}
Example use of org.apache.flink.table.data.GenericRowData in the Apache Flink project: the debeziumRow2RowData method of the DebeziumAvroSerDeSchemaTest class.
/** Builds the fixed four-field sample row (id, name, description, weight) used by the Debezium tests. */
private static RowData debeziumRow2RowData() {
    final GenericRowData row = new GenericRowData(4);
    int field = 0;
    row.setField(field++, 107L);
    row.setField(field++, StringData.fromString("rocks"));
    row.setField(field++, StringData.fromString("box of assorted rocks"));
    row.setField(field, 5.3D);
    return row;
}
Example use of org.apache.flink.table.data.GenericRowData in the Apache Flink project: the testSerializationWithTypesMismatch method of the AvroRowDataDeSerializationSchemaTest class.
/**
 * Verifies that serializing a row whose field value does not match the declared
 * Avro schema type fails with an error message naming the offending field (f1).
 */
@Test
public void testSerializationWithTypesMismatch() throws Exception {
    AvroRowDataSerializationSchema serializationSchema = createSerializationSchema(ROW(FIELD("f0", INT()), FIELD("f1", STRING())).notNull());
    GenericRowData rowData = new GenericRowData(2);
    rowData.setField(0, 1);
    // Deliberately put an Integer where the schema expects a STRING (StringData)
    // so serialization fails at field f1. (The original snippet set field 0 twice,
    // leaving f1 null and never exercising the intended type mismatch.)
    rowData.setField(1, 2);
    String errorMessage = "Fail to serialize at field: f1.";
    try {
        serializationSchema.serialize(rowData);
        fail("expecting exception message: " + errorMessage);
    } catch (Throwable t) {
        assertThat(t, FlinkMatchers.containsMessage(errorMessage));
    }
}
Aggregations