Use of org.apache.flink.connector.jdbc.dialect.JdbcDialect in the Apache Flink project.
From the class JdbcOutputFormatBuilder, the method createBufferReduceExecutor. It builds a buffering executor that reduces rows by primary key and flushes them as upserts and deletes rendered through the dialect.
private static JdbcBatchStatementExecutor<RowData> createBufferReduceExecutor(
        JdbcDmlOptions opt,
        RuntimeContext ctx,
        TypeInformation<RowData> rowDataTypeInfo,
        LogicalType[] fieldTypes) {
    checkArgument(opt.getKeyFields().isPresent());
    JdbcDialect dialect = opt.getDialect();
    String tableName = opt.getTableName();
    String[] pkNames = opt.getKeyFields().get();
    // Resolve each primary-key name to its index in the full field list.
    int[] pkFields =
            Arrays.stream(pkNames).mapToInt(Arrays.asList(opt.getFieldNames())::indexOf).toArray();
    LogicalType[] pkTypes = Arrays.stream(pkFields).mapToObj(f -> fieldTypes[f]).toArray(LogicalType[]::new);
    final TypeSerializer<RowData> typeSerializer = rowDataTypeInfo.createSerializer(ctx.getExecutionConfig());
    // With object reuse enabled, rows must be deep-copied before they are buffered.
    final Function<RowData, RowData> valueTransform =
            ctx.getExecutionConfig().isObjectReuseEnabled() ? typeSerializer::copy : Function.identity();
    return new TableBufferReducedStatementExecutor(
            createUpsertRowExecutor(dialect, tableName, opt.getFieldNames(), fieldTypes, pkFields, pkNames, pkTypes),
            createDeleteExecutor(dialect, tableName, pkNames, pkTypes),
            createRowKeyExtractor(fieldTypes, pkFields),
            valueTransform);
}
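The only non-obvious step above is the key-index mapping: each primary-key name is resolved to its position in the full field list via indexOf. A minimal standalone sketch of the same idiom, with hypothetical column names:

import java.util.Arrays;

public class PkIndexSketch {
    public static void main(String[] args) {
        String[] fieldNames = {"id", "name", "ts"}; // hypothetical table columns
        String[] pkNames = {"ts", "id"};            // hypothetical key columns
        // Same idiom as createBufferReduceExecutor: indexOf over the field list.
        int[] pkFields = Arrays.stream(pkNames)
                .mapToInt(Arrays.asList(fieldNames)::indexOf)
                .toArray();
        System.out.println(Arrays.toString(pkFields)); // prints [2, 0]
    }
}

Note that indexOf returns -1 for a key name that is not among the field names, so this mapping relies on the key fields having been validated beforehand.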
Use of org.apache.flink.connector.jdbc.dialect.JdbcDialect in the Apache Flink project.
From the class JdbcDynamicTableSource, the method getScanRuntimeProvider. It uses the dialect to render the scan query: the base SELECT, an optional BETWEEN predicate for partitioned reads, and an optional LIMIT clause.
@Override
public ScanRuntimeProvider getScanRuntimeProvider(ScanContext runtimeProviderContext) {
    final JdbcRowDataInputFormat.Builder builder =
            JdbcRowDataInputFormat.builder()
                    .setDrivername(options.getDriverName())
                    .setDBUrl(options.getDbURL())
                    .setUsername(options.getUsername().orElse(null))
                    .setPassword(options.getPassword().orElse(null))
                    .setAutoCommit(readOptions.getAutoCommit());
    if (readOptions.getFetchSize() != 0) {
        builder.setFetchSize(readOptions.getFetchSize());
    }
    final JdbcDialect dialect = options.getDialect();
    // Base projection query, e.g. SELECT <fields> FROM <table>, rendered by the dialect.
    String query =
            dialect.getSelectFromStatement(
                    options.getTableName(),
                    DataType.getFieldNames(physicalRowDataType).toArray(new String[0]),
                    new String[0]);
    if (readOptions.getPartitionColumnName().isPresent()) {
        long lowerBound = readOptions.getPartitionLowerBound().get();
        long upperBound = readOptions.getPartitionUpperBound().get();
        int numPartitions = readOptions.getNumPartitions().get();
        // Split the numeric range into one BETWEEN query per partition.
        builder.setParametersProvider(
                new JdbcNumericBetweenParametersProvider(lowerBound, upperBound)
                        .ofBatchNum(numPartitions));
        query +=
                " WHERE "
                        + dialect.quoteIdentifier(readOptions.getPartitionColumnName().get())
                        + " BETWEEN ? AND ?";
    }
    if (limit >= 0) {
        // Push the LIMIT down into the query using the dialect's syntax.
        query = String.format("%s %s", query, dialect.getLimitClause(limit));
    }
    builder.setQuery(query);
    final RowType rowType = (RowType) physicalRowDataType.getLogicalType();
    builder.setRowConverter(dialect.getRowConverter(rowType));
    builder.setRowDataTypeInfo(runtimeProviderContext.createTypeInformation(physicalRowDataType));
    return InputFormatProvider.of(builder.build());
}
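To make the query assembly concrete, here is a standalone sketch of the string the builder ends up with. The backtick quoting and the table and column names are assumptions for illustration, not taken from a specific Flink dialect:

public class ScanQuerySketch {
    // Stand-in for dialect.quoteIdentifier with MySQL-style backticks (an assumption).
    static String quote(String id) {
        return "`" + id + "`";
    }

    public static void main(String[] args) {
        // Roughly what getSelectFromStatement yields for a plain projection.
        String query = "SELECT " + quote("id") + ", " + quote("name") + " FROM " + quote("orders");
        // Partitioned read: the placeholders are bound per split by the parameters provider.
        query += " WHERE " + quote("seq") + " BETWEEN ? AND ?";
        // LIMIT pushdown, appended the same way as dialect.getLimitClause(limit).
        query = String.format("%s %s", query, "LIMIT 10");
        System.out.println(query);
        // SELECT `id`, `name` FROM `orders` WHERE `seq` BETWEEN ? AND ? LIMIT 10
    }
}

Each of the numPartitions splits then runs this query with its own slice of the numeric range bound to the two placeholders.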
Use of org.apache.flink.connector.jdbc.dialect.JdbcDialect in the Apache Flink project.
From the class JdbcDynamicTableFactory, the method validateDataTypeWithJdbcDialect. It loads the dialect matching a JDBC URL and asks it to validate the table's row type.
private static void validateDataTypeWithJdbcDialect(DataType dataType, String url) {
    // Resolve the dialect from the JDBC URL, then let it check every column type.
    final JdbcDialect dialect = JdbcDialectLoader.load(url);
    dialect.validate((RowType) dataType.getLogicalType());
}
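A minimal sketch of driving this validation path directly, assuming the same connector version as the snippet above (JdbcDialectLoader.load with a single URL argument; newer releases also take a ClassLoader). The URL and column types are hypothetical:

import org.apache.flink.connector.jdbc.dialect.JdbcDialect;
import org.apache.flink.connector.jdbc.dialect.JdbcDialectLoader;
import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.VarCharType;

public class DialectValidateSketch {
    public static void main(String[] args) {
        // Hypothetical schema: BIGINT, VARCHAR(255).
        RowType rowType = RowType.of(new BigIntType(), new VarCharType(255));
        // The loader picks the dialect from the URL prefix (jdbc:mysql, jdbc:postgresql, ...).
        JdbcDialect dialect = JdbcDialectLoader.load("jdbc:mysql://localhost:3306/demo");
        // Throws a ValidationException if the dialect cannot map one of the column types.
        dialect.validate(rowType);
    }
}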