Use of org.apache.flink.table.data.RowData in project flink by apache.
The class BatchArrowPythonOverWindowAggregateFunctionOperatorTest, method testFinishBundleTriggeredByCount.
@Test
public void testFinishBundleTriggeredByCount() throws Exception {
    Configuration conf = new Configuration();
    conf.setInteger(PythonOptions.MAX_BUNDLE_SIZE, 3);
    OneInputStreamOperatorTestHarness<RowData, RowData> testHarness = getTestHarness(conf);
    long initialTime = 0L;
    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    testHarness.open();
    testHarness.processElement(new StreamRecord<>(newBinaryRow(true, "c1", "c2", 0L, 0L), initialTime + 1));
    testHarness.processElement(new StreamRecord<>(newBinaryRow(true, "c1", "c4", 1L, 0L), initialTime + 2));
    testHarness.processElement(new StreamRecord<>(newBinaryRow(true, "c1", "c6", 2L, 10L), initialTime + 3));
    assertOutputEquals("FinishBundle should not be triggered.", expectedOutput, testHarness.getOutput());
    testHarness.processElement(new StreamRecord<>(newBinaryRow(true, "c2", "c8", 3L, 0L), initialTime + 3));
    expectedOutput.add(new StreamRecord<>(newRow(true, "c1", "c2", 0L, 0L, 0L)));
    expectedOutput.add(new StreamRecord<>(newRow(true, "c1", "c4", 1L, 0L, 0L)));
    expectedOutput.add(new StreamRecord<>(newRow(true, "c1", "c6", 2L, 10L, 2L)));
    assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());
    testHarness.close();
    expectedOutput.add(new StreamRecord<>(newRow(true, "c2", "c8", 3L, 0L, 3L)));
    assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());
}
Use of org.apache.flink.table.data.RowData in project flink by apache.
The class PythonTableFunctionOperatorTest, method newRow.
@Override
public RowData newRow(boolean accumulateMsg, Object... fields) {
    if (accumulateMsg) {
        return row(fields);
    } else {
        RowData row = row(fields);
        row.setRowKind(RowKind.DELETE);
        return row;
    }
}
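For illustration, the override makes accumulateMsg control the changelog kind of the produced row: true keeps the default INSERT kind, false turns the row into a retraction. A quick, hypothetical usage with made-up field values (assuming the test helper row(...) produces an INSERT row by default, which is the GenericRowData default):

RowData insertRow = newRow(true, "c1", 1L);   // RowKind.INSERT (default kind)
RowData deleteRow = newRow(false, "c1", 1L);  // RowKind.DELETE, i.e. a retraction message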
Use of org.apache.flink.table.data.RowData in project flink by apache.
The class JdbcOutputFormatBuilder, method createBufferReduceExecutor.
private static JdbcBatchStatementExecutor<RowData> createBufferReduceExecutor(
        JdbcDmlOptions opt, RuntimeContext ctx, TypeInformation<RowData> rowDataTypeInfo, LogicalType[] fieldTypes) {
    checkArgument(opt.getKeyFields().isPresent());
    JdbcDialect dialect = opt.getDialect();
    String tableName = opt.getTableName();
    String[] pkNames = opt.getKeyFields().get();
    int[] pkFields = Arrays.stream(pkNames).mapToInt(Arrays.asList(opt.getFieldNames())::indexOf).toArray();
    LogicalType[] pkTypes = Arrays.stream(pkFields).mapToObj(f -> fieldTypes[f]).toArray(LogicalType[]::new);
    final TypeSerializer<RowData> typeSerializer = rowDataTypeInfo.createSerializer(ctx.getExecutionConfig());
    final Function<RowData, RowData> valueTransform =
            ctx.getExecutionConfig().isObjectReuseEnabled() ? typeSerializer::copy : Function.identity();
    return new TableBufferReducedStatementExecutor(
            createUpsertRowExecutor(dialect, tableName, opt.getFieldNames(), fieldTypes, pkFields, pkNames, pkTypes),
            createDeleteExecutor(dialect, tableName, pkNames, pkTypes),
            createRowKeyExtractor(fieldTypes, pkFields),
            valueTransform);
}
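The one non-obvious step above is the primary-key index mapping: each key field name is resolved to its position in the full field-name array via java.util.Arrays. A small illustration with made-up field names (not taken from the snippet):

String[] fieldNames = {"id", "name", "ts"};   // hypothetical table columns
String[] pkNames = {"id"};                    // hypothetical primary key
int[] pkFields = Arrays.stream(pkNames)
        .mapToInt(Arrays.asList(fieldNames)::indexOf)
        .toArray();                           // -> {0}, the position of "id" in fieldNames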
Use of org.apache.flink.table.data.RowData in project flink by apache.
The class JdbcRowDataLookupFunction, method eval.
/**
 * This is a lookup method which is called by the Flink framework at runtime.
 *
 * @param keys lookup keys
 */
public void eval(Object... keys) {
    RowData keyRow = GenericRowData.of(keys);
    if (cache != null) {
        // Serve the lookup from cache if this key row has been seen before.
        List<RowData> cachedRows = cache.getIfPresent(keyRow);
        if (cachedRows != null) {
            for (RowData cachedRow : cachedRows) {
                collect(cachedRow);
            }
            return;
        }
    }
    for (int retry = 0; retry <= maxRetryTimes; retry++) {
        try {
            statement.clearParameters();
            statement = lookupKeyRowConverter.toExternal(keyRow, statement);
            try (ResultSet resultSet = statement.executeQuery()) {
                if (cache == null) {
                    while (resultSet.next()) {
                        collect(jdbcRowConverter.toInternal(resultSet));
                    }
                } else {
                    // Collect the rows and remember them for subsequent lookups of the same key.
                    ArrayList<RowData> rows = new ArrayList<>();
                    while (resultSet.next()) {
                        RowData row = jdbcRowConverter.toInternal(resultSet);
                        rows.add(row);
                        collect(row);
                    }
                    rows.trimToSize();
                    if (!rows.isEmpty() || cacheMissingKey) {
                        cache.put(keyRow, rows);
                    }
                }
            }
            break;
        } catch (SQLException e) {
            LOG.error(String.format("JDBC executeBatch error, retry times = %d", retry), e);
            if (retry >= maxRetryTimes) {
                throw new RuntimeException("Execution of JDBC statement failed.", e);
            }
            try {
                // Re-establish the connection and statement if the connection is no longer valid.
                if (!connectionProvider.isConnectionValid()) {
                    statement.close();
                    connectionProvider.closeConnection();
                    establishConnectionAndStatement();
                }
            } catch (SQLException | ClassNotFoundException exception) {
                LOG.error("JDBC connection is not valid, and reestablish connection failed", exception);
                throw new RuntimeException("Reestablish JDBC connection failed", exception);
            }
            try {
                // Back off before the next retry; the pause grows with the retry count.
                Thread.sleep(1000 * retry);
            } catch (InterruptedException e1) {
                throw new RuntimeException(e1);
            }
        }
    }
}
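The cache consulted above maps a lookup key row to the list of rows it matched. A minimal sketch of how such a cache could be built with Guava's CacheBuilder; the cacheMaxSize and cacheExpireMs settings are assumptions for illustration, not taken from the snippet, and plain Guava imports are used here even though Flink ships a shaded copy:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.List;
import java.util.concurrent.TimeUnit;

// Sketch only: cache lookup results per key row, bounded in size and age.
Cache<RowData, List<RowData>> cache = CacheBuilder.newBuilder()
        .expireAfterWrite(cacheExpireMs, TimeUnit.MILLISECONDS)  // assumed TTL setting
        .maximumSize(cacheMaxSize)                                // assumed size bound
        .build();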
Use of org.apache.flink.table.data.RowData in project flink by apache.
The class JdbcRowDataInputFormat, method nextRecord.
/**
 * Converts the next resultSet row into a {@link RowData}.
 *
 * @param reuse row to be reused (not used by this implementation).
 * @return the next {@link RowData}, or null if no more rows are available
 * @throws IOException if reading from the result set fails
 */
@Override
public RowData nextRecord(RowData reuse) throws IOException {
    try {
        if (!hasNext) {
            return null;
        }
        RowData row = rowConverter.toInternal(resultSet);
        // update hasNext after we've read the record
        hasNext = resultSet.next();
        return row;
    } catch (SQLException se) {
        throw new IOException("Couldn't read data - " + se.getMessage(), se);
    } catch (NullPointerException npe) {
        throw new IOException("Couldn't access resultSet", npe);
    }
}
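For orientation, an input format like this is driven through the standard Flink open()/reachedEnd()/nextRecord()/close() contract. A hypothetical driver loop (sketch only; the format and split variables are assumed to be set up elsewhere):

format.openInputFormat();                 // e.g. establishes the JDBC connection
format.open(split);                       // runs the query for one input split
RowData reuse = null;                     // this implementation does not use the reuse argument
while (!format.reachedEnd()) {
    RowData row = format.nextRecord(reuse);
    // ... consume row ...
}
format.close();
format.closeInputFormat();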