use of org.apache.flink.table.data.RowData in project flink by apache.
the class EnrichedRowDataTest method testEnrichedRow.
@Test
public void testEnrichedRow() {
    final List<String> completeRowFields =
            Arrays.asList(
                    "fixedRow1", "mutableRow1", "mutableRow3", "mutableRow2", "fixedRow2", "mutableRow4");
    final List<String> mutableRowFields =
            Arrays.asList("mutableRow1", "mutableRow2", "mutableRow3", "mutableRow4");
    final List<String> fixedRowFields = Arrays.asList("fixedRow1", "fixedRow2");

    final RowData fixedRowData = GenericRowData.of(1L, 2L);
    final EnrichedRowData enrichedRowData =
            EnrichedRowData.from(fixedRowData, completeRowFields, mutableRowFields, fixedRowFields);
    final RowData mutableRowData = GenericRowData.of(3L, 4L, 5L, 6L);
    enrichedRowData.replaceMutableRow(mutableRowData);

    assertEquals(RowKind.INSERT, enrichedRowData.getRowKind());
    assertEquals(6, enrichedRowData.getArity());
    assertEquals(1L, enrichedRowData.getLong(0));
    assertEquals(3L, enrichedRowData.getLong(1));
    assertEquals(5L, enrichedRowData.getLong(2));
    assertEquals(4L, enrichedRowData.getLong(3));
    assertEquals(2L, enrichedRowData.getLong(4));
    assertEquals(6L, enrichedRowData.getLong(5));

    final RowData newMutableRowData = GenericRowData.of(7L, 8L, 9L, 10L);
    enrichedRowData.replaceMutableRow(newMutableRowData);

    assertEquals(1L, enrichedRowData.getLong(0));
    assertEquals(7L, enrichedRowData.getLong(1));
    assertEquals(9L, enrichedRowData.getLong(2));
    assertEquals(8L, enrichedRowData.getLong(3));
    assertEquals(2L, enrichedRowData.getLong(4));
    assertEquals(10L, enrichedRowData.getLong(5));
}
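The assertion order above follows directly from how each position of the complete row is resolved by field name against either the fixed or the mutable field list. A minimal sketch of that mapping, using only the lists from the test (the helper below is hypothetical, not part of EnrichedRowData's API):

// Hypothetical illustration: for each complete-row position, decide whether the value comes
// from the fixed row or the mutable row, and at which index, by looking the field name up.
static String describeMapping(
        List<String> completeRowFields, List<String> mutableRowFields, List<String> fixedRowFields) {
    StringBuilder sb = new StringBuilder();
    for (int pos = 0; pos < completeRowFields.size(); pos++) {
        String field = completeRowFields.get(pos);
        int fixedIdx = fixedRowFields.indexOf(field);
        if (fixedIdx >= 0) {
            sb.append(pos).append(" -> fixedRow[").append(fixedIdx).append("]\n");
        } else {
            sb.append(pos).append(" -> mutableRow[").append(mutableRowFields.indexOf(field)).append("]\n");
        }
    }
    return sb.toString();
}
// For the lists used in the test this yields:
//   0 -> fixedRow[0], 1 -> mutableRow[0], 2 -> mutableRow[2],
//   3 -> mutableRow[1], 4 -> fixedRow[1], 5 -> mutableRow[3]
// which is exactly why getLong(0..5) returns 1, 3, 5, 4, 2, 6.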
use of org.apache.flink.table.data.RowData in project flink by apache.
the class StreamingFileWriterTest method testCommitFileWhenPartitionIsCommittableByPartitionTime.
@Test
public void testCommitFileWhenPartitionIsCommittableByPartitionTime() throws Exception {
    // The rolling policy neither rolls by file size nor rolls before one day has passed,
    // so in this test a file can be closed only once its partition becomes committable.
    FileSystemTableSink.TableRollingPolicy tableRollingPolicy =
            new FileSystemTableSink.TableRollingPolicy(
                    false,
                    Long.MAX_VALUE,
                    Duration.ofDays(1).toMillis(),
                    Duration.ofDays(1).toMillis());
    List<String> partitionKeys = Collections.singletonList("d");
    // commit delay is 1 day with the partition-time trigger
    Configuration conf = getPartitionCommitTriggerConf(Duration.ofDays(1).toMillis());

    long currentTimeMillis = System.currentTimeMillis();
    Date nextYear = new Date(currentTimeMillis + Duration.ofDays(365).toMillis());
    String nextYearPartition = "d=" + dateFormat.format(nextYear);
    Date yesterday = new Date(currentTimeMillis - Duration.ofDays(1).toMillis());
    String yesterdayPartition = "d=" + dateFormat.format(yesterday);
    Date today = new Date(currentTimeMillis);
    String todayPartition = "d=" + dateFormat.format(today);
    Date tomorrow = new Date(currentTimeMillis + Duration.ofDays(1).toMillis());
    String tomorrowPartition = "d=" + dateFormat.format(tomorrow);

    OperatorSubtaskState state;
    try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness =
            create(tableRollingPolicy, partitionKeys, conf)) {
        harness.setup();
        harness.initializeEmptyState();
        harness.open();
        harness.processElement(row(yesterdayPartition), 0);
        harness.processWatermark(currentTimeMillis);
        state = harness.snapshot(1, 1);
        harness.notifyOfCompletedCheckpoint(1);
        // assert the yesterday partition file is committed
        Assert.assertTrue(isPartitionFileCommitted(yesterdayPartition, 0, 0));
    }

    // first restart: restore from the snapshot and continue processing
    try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness =
            create(tableRollingPolicy, partitionKeys, conf)) {
        harness.setup();
        harness.initializeState(state);
        harness.open();
        harness.processElement(row(tomorrowPartition), 0);
        harness.processElement(row(todayPartition), 0);
        // simulate waiting for one day
        currentTimeMillis += Duration.ofDays(1).toMillis();
        harness.processWatermark(currentTimeMillis);
        harness.snapshot(2, 2);
        harness.notifyOfCompletedCheckpoint(2);
        // assert the today partition file is committed
        Assert.assertTrue(isPartitionFileCommitted(todayPartition, 0, 2));
        // assert the tomorrow partition file is not committed yet
        Assert.assertFalse(isPartitionFileCommitted(tomorrowPartition, 0, 1));
        // simulate waiting for one more day; now the tomorrow partition is committable
        currentTimeMillis += Duration.ofDays(1).toMillis();
        harness.processWatermark(currentTimeMillis);
        state = harness.snapshot(3, 3);
        harness.notifyOfCompletedCheckpoint(3);
        Assert.assertTrue(isPartitionFileCommitted(tomorrowPartition, 0, 1));
        harness.processElement(row(nextYearPartition), 0);
    }

    // second restart: restore from the latest snapshot and end the input
    try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness =
            create(tableRollingPolicy, partitionKeys, conf)) {
        harness.setup();
        harness.initializeState(state);
        harness.open();
        harness.processElement(row(nextYearPartition), 0);
        harness.processElement(row(tomorrowPartition), 0);
        harness.endInput();
        // assert the files in all partitions have been committed
        Assert.assertTrue(isPartitionFileCommitted(tomorrowPartition, 0, 4));
        Assert.assertTrue(isPartitionFileCommitted(nextYearPartition, 0, 3));
    }
}
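The simulated waiting works because, with the partition-time trigger, a partition is considered committable once the watermark has passed the partition's time plus the configured commit delay. A simplified sketch of that condition, assuming the trigger compares watermark against partition time plus delay (the real trigger additionally deals with the watermark time zone and a configurable partition-time extractor):

// Simplified sketch of the commit condition relied on by the test above.
static boolean isPartitionCommittable(long watermark, long partitionTimeMillis, long commitDelayMillis) {
    // e.g. with a 1-day delay, the "today" partition becomes committable
    // only after the watermark has advanced by roughly one more day.
    return watermark > partitionTimeMillis + commitDelayMillis;
}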
use of org.apache.flink.table.data.RowData in project flink by apache.
the class CompactFileWriterTest method testEmitEndCheckpointAfterEndInput.
@Test
public void testEmitEndCheckpointAfterEndInput() throws Exception {
    CompactFileWriter<RowData> compactFileWriter =
            new CompactFileWriter<>(
                    1000, StreamingFileSink.forRowFormat(folder, new SimpleStringEncoder<>()));
    try (OneInputStreamOperatorTestHarness<RowData, CoordinatorInput> harness =
            new OneInputStreamOperatorTestHarness<>(compactFileWriter)) {
        harness.setup();
        harness.open();

        harness.processElement(row("test"), 0);
        harness.snapshot(1, 1);
        harness.notifyOfCompletedCheckpoint(1);

        List<CoordinatorInput> coordinatorInputs = harness.extractOutputValues();
        Assert.assertEquals(2, coordinatorInputs.size());
        // assert that an InputFile is emitted first
        Assert.assertTrue(coordinatorInputs.get(0) instanceof InputFile);
        // assert that an EndCheckpoint for checkpoint 1 is emitted next
        Assert.assertEquals(1, ((EndCheckpoint) coordinatorInputs.get(1)).getCheckpointId());

        harness.processElement(row("test1"), 0);
        harness.processElement(row("test2"), 0);
        harness.getOutput().clear();

        // end the input
        harness.endInput();
        coordinatorInputs = harness.extractOutputValues();
        // assert that an EndCheckpoint with Long.MAX_VALUE is emitted last
        EndCheckpoint endCheckpoint =
                (EndCheckpoint) coordinatorInputs.get(coordinatorInputs.size() - 1);
        Assert.assertEquals(Long.MAX_VALUE, endCheckpoint.getCheckpointId());
    }
}
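When inspecting the harness output in tests like this, it can help to separate the emitted CoordinatorInput records by type before asserting on them. A small hypothetical helper (not part of Flink's test utilities), shown only to illustrate the idea:

// Hypothetical helper: collect only the EndCheckpoint records from the harness output,
// so assertions such as "the last one carries Long.MAX_VALUE" stay readable.
static List<EndCheckpoint> endCheckpointsOf(List<CoordinatorInput> outputs) {
    return outputs.stream()
            .filter(input -> input instanceof EndCheckpoint)
            .map(input -> (EndCheckpoint) input)
            .collect(Collectors.toList());
}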
use of org.apache.flink.table.data.RowData in project flink by apache.
the class JdbcOutputFormatTest method testIncompatibleTypes.
@Test
public void testIncompatibleTypes() {
    try {
        JdbcConnectorOptions jdbcOptions = JdbcConnectorOptions.builder()
                .setDriverName(DERBY_EBOOKSHOP_DB.getDriverClass())
                .setDBUrl(DERBY_EBOOKSHOP_DB.getUrl())
                .setTableName(INPUT_TABLE)
                .build();
        JdbcDmlOptions dmlOptions = JdbcDmlOptions.builder()
                .withTableName(jdbcOptions.getTableName())
                .withDialect(jdbcOptions.getDialect())
                .withFieldNames(fieldNames)
                .build();
        outputFormat = new JdbcOutputFormatBuilder()
                .setJdbcOptions(jdbcOptions)
                .setFieldDataTypes(fieldDataTypes)
                .setJdbcDmlOptions(dmlOptions)
                .setJdbcExecutionOptions(JdbcExecutionOptions.builder().build())
                .setRowDataTypeInfo(rowDataTypeInfo)
                .build();
        setRuntimeContext(outputFormat, false);
        outputFormat.open(0, 1);
        // the last field carries a value of the wrong type, so writing the record must fail
        RowData row = buildGenericData(4, "hello", "world", 0.99, "imthewrongtype");
        outputFormat.writeRecord(row);
        outputFormat.close();
        fail("Expected exception is not thrown.");
    } catch (Exception e) {
        assertTrue(findThrowable(e, ClassCastException.class).isPresent());
    }
}
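The catch block searches the exception's cause chain rather than checking only the top-level exception, since the ClassCastException may be wrapped by the output format before it surfaces. A rough sketch of such a cause-chain search, as a simplified stand-in for Flink's ExceptionUtils.findThrowable (which returns an Optional of the matching throwable):

// Simplified illustration of walking the cause chain for a particular exception type.
static boolean hasCause(Throwable t, Class<? extends Throwable> type) {
    while (t != null) {
        if (type.isInstance(t)) {
            return true;
        }
        t = t.getCause();
    }
    return false;
}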
use of org.apache.flink.table.data.RowData in project flink by apache.
the class JdbcRowDataLookupFunctionTest method testEvalWithCacheMissingKeyPositive.
@Test
public void testEvalWithCacheMissingKeyPositive() throws Exception {
    JdbcLookupOptions lookupOptions = JdbcLookupOptions.builder()
            .setCacheMissingKey(true)
            .setCacheExpireMs(60000)
            .setCacheMaxSize(10)
            .build();
    JdbcRowDataLookupFunction lookupFunction = buildRowDataLookupFunction(lookupOptions);
    ListOutputCollector collector = new ListOutputCollector();
    lookupFunction.setCollector(collector);
    lookupFunction.open(null);

    lookupFunction.eval(4, StringData.fromString("9"));
    RowData keyRow = GenericRowData.of(4, StringData.fromString("9"));
    Cache<RowData, List<RowData>> cache = lookupFunction.getCache();
    // the empty lookup result should be cached because cache-missing-key is enabled
    assertEquals(cache.getIfPresent(keyRow), Collections.<RowData>emptyList());

    // insert a database entry for keyRow; the cached result stays empty until the TTL expires
    insert("INSERT INTO " + LOOKUP_TABLE
            + " (id1, id2, comment1, comment2) VALUES (4, '9', '49-c1', '49-c2')");
    lookupFunction.eval(4, StringData.fromString("9"));
    assertEquals(cache.getIfPresent(keyRow), Collections.<RowData>emptyList());
}
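The behavior asserted above is plain negative caching: with cache-missing-key enabled, a lookup that returns no rows stores an empty list under the key, and later lookups are served from the cache until the entry expires, even if the backing table has been updated in the meantime. A minimal standalone sketch of that pattern with a Guava cache (illustrative only, not the lookup function's actual code; queryBackingStore is a hypothetical helper):

// Minimal sketch of negative caching: a miss in the backing store is remembered
// as an empty list and served from the cache until the entry expires.
Cache<String, List<String>> cache = CacheBuilder.newBuilder()
        .expireAfterWrite(60_000, TimeUnit.MILLISECONDS)
        .maximumSize(10)
        .build();

List<String> lookup(String key) {
    List<String> cached = cache.getIfPresent(key);
    if (cached != null) {
        return cached; // may be an empty list marking a previously missing key
    }
    List<String> fromStore = queryBackingStore(key); // hypothetical query helper
    cache.put(key, fromStore); // empty results are cached too ("cache missing key")
    return fromStore;
}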