
Example 61 with RowData

Use of org.apache.flink.table.data.RowData in project flink by apache.

The class EnrichedRowDataTest, method testEnrichedRow.

@Test
public void testEnrichedRow() {
    final List<String> completeRowFields =
            Arrays.asList(
                    "fixedRow1", "mutableRow1", "mutableRow3", "mutableRow2", "fixedRow2", "mutableRow4");
    final List<String> mutableRowFields =
            Arrays.asList("mutableRow1", "mutableRow2", "mutableRow3", "mutableRow4");
    final List<String> fixedRowFields = Arrays.asList("fixedRow1", "fixedRow2");
    final RowData fixedRowData = GenericRowData.of(1L, 2L);
    final EnrichedRowData enrichedRowData =
            EnrichedRowData.from(fixedRowData, completeRowFields, mutableRowFields, fixedRowFields);
    final RowData mutableRowData = GenericRowData.of(3L, 4L, 5L, 6L);
    enrichedRowData.replaceMutableRow(mutableRowData);
    assertEquals(RowKind.INSERT, enrichedRowData.getRowKind());
    assertEquals(6, enrichedRowData.getArity());
    // Each index of the enriched row is resolved by field name: positions 0 and 4
    // read from the fixed row, the rest from the mutable row.
    assertEquals(1L, enrichedRowData.getLong(0)); // fixedRow1
    assertEquals(3L, enrichedRowData.getLong(1)); // mutableRow1
    assertEquals(5L, enrichedRowData.getLong(2)); // mutableRow3
    assertEquals(4L, enrichedRowData.getLong(3)); // mutableRow2
    assertEquals(2L, enrichedRowData.getLong(4)); // fixedRow2
    assertEquals(6L, enrichedRowData.getLong(5)); // mutableRow4
    // Swapping in a new mutable row leaves the fixed fields untouched.
    final RowData newMutableRowData = GenericRowData.of(7L, 8L, 9L, 10L);
    enrichedRowData.replaceMutableRow(newMutableRowData);
    assertEquals(1L, enrichedRowData.getLong(0)); // fixedRow1 (unchanged)
    assertEquals(7L, enrichedRowData.getLong(1)); // mutableRow1
    assertEquals(9L, enrichedRowData.getLong(2)); // mutableRow3
    assertEquals(8L, enrichedRowData.getLong(3)); // mutableRow2
    assertEquals(2L, enrichedRowData.getLong(4)); // fixedRow2 (unchanged)
    assertEquals(10L, enrichedRowData.getLong(5)); // mutableRow4
}
Also used : GenericRowData(org.apache.flink.table.data.GenericRowData) RowData(org.apache.flink.table.data.RowData) JoinedRowData(org.apache.flink.table.data.utils.JoinedRowData) Test(org.junit.jupiter.api.Test)
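
The index assertions above follow directly from how the three field lists line up. A minimal, self-contained sketch (plain Java, illustrating the mapping the test asserts, not EnrichedRowData's actual internals) that prints which source row backs each position of the complete row:

import java.util.Arrays;
import java.util.List;

public class FieldMappingSketch {
    public static void main(String[] args) {
        List<String> complete = Arrays.asList(
                "fixedRow1", "mutableRow1", "mutableRow3", "mutableRow2", "fixedRow2", "mutableRow4");
        List<String> mutable = Arrays.asList("mutableRow1", "mutableRow2", "mutableRow3", "mutableRow4");
        List<String> fixed = Arrays.asList("fixedRow1", "fixedRow2");
        for (int i = 0; i < complete.size(); i++) {
            String field = complete.get(i);
            int fixedIdx = fixed.indexOf(field);
            // Prints "0 -> fixed[0]", "1 -> mutable[0]", "2 -> mutable[2]", ...
            // matching the getLong assertions in the test above.
            System.out.println(i + " -> " + (fixedIdx >= 0
                    ? "fixed[" + fixedIdx + "]"
                    : "mutable[" + mutable.indexOf(field) + "]"));
        }
    }
}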

Example 62 with RowData

Use of org.apache.flink.table.data.RowData in project flink by apache.

The class StreamingFileWriterTest, method testCommitFileWhenPartitionIsCommittableByPartitionTime.

@Test
public void testCommitFileWhenPartitionIsCommittableByPartitionTime() throws Exception {
    // The rolling policy rolls neither on file size nor before one day has elapsed,
    // which ensures that in this test a file can only be closed once its partition
    // is committable.
    FileSystemTableSink.TableRollingPolicy tableRollingPolicy =
            new FileSystemTableSink.TableRollingPolicy(
                    false,
                    Long.MAX_VALUE,
                    Duration.ofDays(1).toMillis(),
                    Duration.ofDays(1).toMillis());
    List<String> partitionKeys = Collections.singletonList("d");
    // commit delay is 1 day with partition-time trigger
    Configuration conf = getPartitionCommitTriggerConf(Duration.ofDays(1).toMillis());
    long currentTimeMillis = System.currentTimeMillis();
    Date nextYear = new Date(currentTimeMillis + Duration.ofDays(365).toMillis());
    String nextYearPartition = "d=" + dateFormat.format(nextYear);
    Date yesterday = new Date(currentTimeMillis - Duration.ofDays(1).toMillis());
    String yesterdayPartition = "d=" + dateFormat.format(yesterday);
    Date today = new Date(currentTimeMillis);
    String todayPartition = "d=" + dateFormat.format(today);
    Date tomorrow = new Date(currentTimeMillis + Duration.ofDays(1).toMillis());
    String tomorrowPartition = "d=" + dateFormat.format(tomorrow);
    OperatorSubtaskState state;
    try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness = create(tableRollingPolicy, partitionKeys, conf)) {
        harness.setup();
        harness.initializeEmptyState();
        harness.open();
        harness.processElement(row(yesterdayPartition), 0);
        harness.processWatermark(currentTimeMillis);
        state = harness.snapshot(1, 1);
        harness.notifyOfCompletedCheckpoint(1);
        // assert that yesterday's partition file is committed
        Assert.assertTrue(isPartitionFileCommitted(yesterdayPartition, 0, 0));
    }
    // first restart: restore from the snapshot taken above
    try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness = create(tableRollingPolicy, partitionKeys, conf)) {
        harness.setup();
        harness.initializeState(state);
        harness.open();
        harness.processElement(row(tomorrowPartition), 0);
        harness.processElement(row(todayPartition), 0);
        // simulate waiting for 1 day
        currentTimeMillis += Duration.ofDays(1).toMillis();
        harness.processWatermark(currentTimeMillis);
        harness.snapshot(2, 2);
        harness.notifyOfCompletedCheckpoint(2);
        // assert today's partition file is committed
        Assert.assertTrue(isPartitionFileCommitted(todayPartition, 0, 2));
        // assert tomorrow's partition file is not committed yet
        Assert.assertFalse(isPartitionFileCommitted(tomorrowPartition, 0, 1));
        // simulate waiting one more day; the tomorrow partition is now committable
        currentTimeMillis += Duration.ofDays(1).toMillis();
        harness.processWatermark(currentTimeMillis);
        state = harness.snapshot(3, 3);
        harness.notifyOfCompletedCheckpoint(3);
        Assert.assertTrue(isPartitionFileCommitted(tomorrowPartition, 0, 1));
        harness.processElement(row(nextYearPartition), 0);
    }
    // second restart: restore from the latest snapshot
    try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness = create(tableRollingPolicy, partitionKeys, conf)) {
        harness.setup();
        harness.initializeState(state);
        harness.open();
        harness.processElement(row(nextYearPartition), 0);
        harness.processElement(row(tomorrowPartition), 0);
        harness.endInput();
        // assert the files in all remaining partitions have been committed
        Assert.assertTrue(isPartitionFileCommitted(tomorrowPartition, 0, 4));
        Assert.assertTrue(isPartitionFileCommitted(nextYearPartition, 0, 3));
    }
}
Also used : GenericRowData(org.apache.flink.table.data.GenericRowData) RowData(org.apache.flink.table.data.RowData) Configuration(org.apache.flink.configuration.Configuration) FileSystemTableSink(org.apache.flink.connector.file.table.FileSystemTableSink) Date(java.util.Date) OperatorSubtaskState(org.apache.flink.runtime.checkpoint.OperatorSubtaskState) Test(org.junit.Test)
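
The getPartitionCommitTriggerConf helper used above is not part of this excerpt. A plausible sketch, assuming the filesystem connector's documented partition-commit option keys; the exact helper body and the success-file policy kind are assumptions:

import org.apache.flink.configuration.Configuration;

// Hypothetical reconstruction: configure a partition-time commit trigger with the given delay.
static Configuration getPartitionCommitTriggerConf(long commitDelayMillis) {
    Configuration conf = new Configuration();
    // commit a partition once the watermark passes its partition time plus the delay
    conf.setString("sink.partition-commit.trigger", "partition-time");
    conf.setString("sink.partition-commit.delay", commitDelayMillis + "ms");
    // assumption: success-file policy, matching the isPartitionFileCommitted checks
    conf.setString("sink.partition-commit.policy.kind", "success-file");
    return conf;
}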

Example 63 with RowData

Use of org.apache.flink.table.data.RowData in project flink by apache.

The class CompactFileWriterTest, method testEmitEndCheckpointAfterEndInput.

@Test
public void testEmitEndCheckpointAfterEndInput() throws Exception {
    CompactFileWriter<RowData> compactFileWriter =
            new CompactFileWriter<>(
                    1000, StreamingFileSink.forRowFormat(folder, new SimpleStringEncoder<>()));
    try (OneInputStreamOperatorTestHarness<RowData, CoordinatorInput> harness = new OneInputStreamOperatorTestHarness<>(compactFileWriter)) {
        harness.setup();
        harness.open();
        harness.processElement(row("test"), 0);
        harness.snapshot(1, 1);
        harness.notifyOfCompletedCheckpoint(1);
        List<CoordinatorInput> coordinatorInputs = harness.extractOutputValues();
        Assert.assertEquals(2, coordinatorInputs.size());
        // the first emitted element is the InputFile for the written record
        Assert.assertTrue(coordinatorInputs.get(0) instanceof InputFile);
        // the second is the EndCheckpoint for checkpoint 1
        Assert.assertEquals(1, ((EndCheckpoint) coordinatorInputs.get(1)).getCheckpointId());
        harness.processElement(row("test1"), 0);
        harness.processElement(row("test2"), 0);
        harness.getOutput().clear();
        // end input
        harness.endInput();
        coordinatorInputs = harness.extractOutputValues();
        // assert that the last emitted element is an EndCheckpoint with checkpoint id Long.MAX_VALUE
        EndCheckpoint endCheckpoint = (EndCheckpoint) coordinatorInputs.get(coordinatorInputs.size() - 1);
        Assert.assertEquals(Long.MAX_VALUE, endCheckpoint.getCheckpointId());
    }
}
Also used : GenericRowData(org.apache.flink.table.data.GenericRowData) RowData(org.apache.flink.table.data.RowData) CoordinatorInput(org.apache.flink.connector.file.table.stream.compact.CompactMessages.CoordinatorInput) EndCheckpoint(org.apache.flink.connector.file.table.stream.compact.CompactMessages.EndCheckpoint) OneInputStreamOperatorTestHarness(org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness) SimpleStringEncoder(org.apache.flink.api.common.serialization.SimpleStringEncoder) InputFile(org.apache.flink.connector.file.table.stream.compact.CompactMessages.InputFile) Test(org.junit.Test)
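
The row(...) helper in these harness tests is likewise a fixture outside the excerpt. A minimal sketch, assuming a single-column string row, built with Flink's standard internal data factories:

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;

// Hypothetical stand-in for the tests' row(...) fixture: a one-column row whose
// Java String is converted to Flink's internal StringData representation.
static RowData row(String value) {
    return GenericRowData.of(StringData.fromString(value));
}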

Example 64 with RowData

Use of org.apache.flink.table.data.RowData in project flink by apache.

The class JdbcOutputFormatTest, method testIncompatibleTypes.

@Test
public void testIncompatibleTypes() {
    try {
        JdbcConnectorOptions jdbcOptions =
                JdbcConnectorOptions.builder()
                        .setDriverName(DERBY_EBOOKSHOP_DB.getDriverClass())
                        .setDBUrl(DERBY_EBOOKSHOP_DB.getUrl())
                        .setTableName(INPUT_TABLE)
                        .build();
        JdbcDmlOptions dmlOptions =
                JdbcDmlOptions.builder()
                        .withTableName(jdbcOptions.getTableName())
                        .withDialect(jdbcOptions.getDialect())
                        .withFieldNames(fieldNames)
                        .build();
        outputFormat =
                new JdbcOutputFormatBuilder()
                        .setJdbcOptions(jdbcOptions)
                        .setFieldDataTypes(fieldDataTypes)
                        .setJdbcDmlOptions(dmlOptions)
                        .setJdbcExecutionOptions(JdbcExecutionOptions.builder().build())
                        .setRowDataTypeInfo(rowDataTypeInfo)
                        .build();
        setRuntimeContext(outputFormat, false);
        outputFormat.open(0, 1);
        RowData row = buildGenericData(4, "hello", "world", 0.99, "imthewrongtype");
        outputFormat.writeRecord(row);
        outputFormat.close();
        fail("Expected exception is not thrown.");
    } catch (Exception e) {
        assertTrue(findThrowable(e, ClassCastException.class).isPresent());
    }
}
Also used : RowData(org.apache.flink.table.data.RowData) JdbcConnectorOptions(org.apache.flink.connector.jdbc.internal.options.JdbcConnectorOptions) JdbcDmlOptions(org.apache.flink.connector.jdbc.internal.options.JdbcDmlOptions) SQLException(java.sql.SQLException) IOException(java.io.IOException) Test(org.junit.Test)
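
buildGenericData is another fixture not shown above. A reasonable sketch, converting Java strings to StringData so every field is in Flink's internal representation; the deliberately mistyped last argument then surfaces as the ClassCastException the test expects when the record is written:

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.StringData;

// Hypothetical sketch of buildGenericData: box each argument into a GenericRowData,
// mapping String to StringData and passing all other values through unchanged.
static GenericRowData buildGenericData(Object... values) {
    Object[] fields = new Object[values.length];
    for (int i = 0; i < values.length; i++) {
        Object v = values[i];
        fields[i] = v instanceof String ? StringData.fromString((String) v) : v;
    }
    return GenericRowData.of(fields);
}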

Example 65 with RowData

Use of org.apache.flink.table.data.RowData in project flink by apache.

The class JdbcRowDataLookupFunctionTest, method testEvalWithCacheMissingKeyPositive.

@Test
public void testEvalWithCacheMissingKeyPositive() throws Exception {
    JdbcLookupOptions lookupOptions =
            JdbcLookupOptions.builder()
                    .setCacheMissingKey(true)
                    .setCacheExpireMs(60000)
                    .setCacheMaxSize(10)
                    .build();
    JdbcRowDataLookupFunction lookupFunction = buildRowDataLookupFunction(lookupOptions);
    ListOutputCollector collector = new ListOutputCollector();
    lookupFunction.setCollector(collector);
    lookupFunction.open(null);
    lookupFunction.eval(4, StringData.fromString("9"));
    RowData keyRow = GenericRowData.of(4, StringData.fromString("9"));
    Cache<RowData, List<RowData>> cache = lookupFunction.getCache();
    // the empty lookup result itself should be cached
    assertEquals(Collections.<RowData>emptyList(), cache.getIfPresent(keyRow));
    // insert a matching DB row for keyRow; because the empty result was cached,
    // lookups keep returning the empty list until the cache entry expires
    insert("INSERT INTO " + LOOKUP_TABLE + " (id1, id2, comment1, comment2) VALUES (4, '9', '49-c1', '49-c2')");
    lookupFunction.eval(4, StringData.fromString("9"));
    assertEquals(Collections.<RowData>emptyList(), cache.getIfPresent(keyRow));
}
Also used : RowData(org.apache.flink.table.data.RowData) GenericRowData(org.apache.flink.table.data.GenericRowData) JdbcLookupOptions(org.apache.flink.connector.jdbc.internal.options.JdbcLookupOptions) ArrayList(java.util.ArrayList) List(java.util.List) Test(org.junit.Test)
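
The cache returned by lookupFunction.getCache() follows the Guava Cache API, configured from the JdbcLookupOptions above. A minimal sketch of an equivalent cache (illustrative only; Flink wires this up internally using a shaded Guava build):

import java.util.List;
import java.util.concurrent.TimeUnit;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;

public class LookupCacheSketch {
    public static void main(String[] args) {
        // Same expiry and size bounds as the options above request.
        Cache<RowData, List<RowData>> cache = CacheBuilder.newBuilder()
                .expireAfterWrite(60_000, TimeUnit.MILLISECONDS) // setCacheExpireMs(60000)
                .maximumSize(10)                                 // setCacheMaxSize(10)
                .build();
        // Guava caches return null for keys never written; the lookup function
        // stores an empty list for misses when cacheMissingKey is enabled.
        System.out.println(cache.getIfPresent(GenericRowData.of()) == null); // true
    }
}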

Aggregations

RowData (org.apache.flink.table.data.RowData): 602
Test (org.junit.Test): 201
GenericRowData (org.apache.flink.table.data.GenericRowData): 178
ArrayList (java.util.ArrayList): 109
RowType (org.apache.flink.table.types.logical.RowType): 105
JoinedRowData (org.apache.flink.table.data.utils.JoinedRowData): 90
Watermark (org.apache.flink.streaming.api.watermark.Watermark): 84
ConcurrentLinkedQueue (java.util.concurrent.ConcurrentLinkedQueue): 72
Transformation (org.apache.flink.api.dag.Transformation): 70
Configuration (org.apache.flink.configuration.Configuration): 68
BinaryRowData (org.apache.flink.table.data.binary.BinaryRowData): 67
List (java.util.List): 65
ExecEdge (org.apache.flink.table.planner.plan.nodes.exec.ExecEdge): 54
DataType (org.apache.flink.table.types.DataType): 52
Map (java.util.Map): 42
LogicalType (org.apache.flink.table.types.logical.LogicalType): 41
TableException (org.apache.flink.table.api.TableException): 34
OneInputTransformation (org.apache.flink.streaming.api.transformations.OneInputTransformation): 33
RowDataKeySelector (org.apache.flink.table.runtime.keyselector.RowDataKeySelector): 32
OperatorSubtaskState (org.apache.flink.runtime.checkpoint.OperatorSubtaskState): 31