Use of org.apache.kafka.connect.data.SchemaBuilder in project ksql by confluentinc.
From class LogicalPlanner, method buildProjectNode:
private ProjectNode buildProjectNode(final Schema inputSchema, final PlanNode sourcePlanNode) {
  SchemaBuilder projectionSchema = SchemaBuilder.struct();
  ExpressionTypeManager expressionTypeManager =
      new ExpressionTypeManager(inputSchema, functionRegistry);
  // Add one field per SELECT expression, named by its alias and typed by
  // resolving the expression against the input schema.
  for (int i = 0; i < analysis.getSelectExpressions().size(); i++) {
    Expression expression = analysis.getSelectExpressions().get(i);
    String alias = analysis.getSelectExpressionAlias().get(i);
    Schema expressionType = expressionTypeManager.getExpressionType(expression);
    projectionSchema = projectionSchema.field(alias, expressionType);
  }
  return new ProjectNode(
      new PlanNodeId("Project"), sourcePlanNode, projectionSchema,
      analysis.getSelectExpressions());
}
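The method accumulates one field per projected column on a SchemaBuilder.struct(), named by alias and typed by the resolved expression. A minimal, self-contained sketch of the same idiom, with invented aliases and types:

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;

public class ProjectionSchemaExample {
  public static void main(String[] args) {
    // Hypothetical projection: SELECT USERID AS ID, AMOUNT * 2 AS TOTAL
    SchemaBuilder projectionSchema = SchemaBuilder.struct();
    projectionSchema = projectionSchema.field("ID", Schema.OPTIONAL_INT64_SCHEMA);
    projectionSchema = projectionSchema.field("TOTAL", Schema.OPTIONAL_FLOAT64_SCHEMA);
    Schema schema = projectionSchema.build();
    // Prints "ID : INT64" and "TOTAL : FLOAT64"
    schema.fields().forEach(f -> System.out.println(f.name() + " : " + f.schema().type()));
  }
}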
Use of org.apache.kafka.connect.data.SchemaBuilder in project ksql by confluentinc.
From class AggregateNode, method buildStream:
@Override
public SchemaKStream buildStream(
    final StreamsBuilder builder, final KsqlConfig ksqlConfig,
    final KafkaTopicClient kafkaTopicClient, final FunctionRegistry functionRegistry,
    final Map<String, Object> props, final SchemaRegistryClient schemaRegistryClient) {
  final StructuredDataSourceNode streamSourceNode = getTheSourceNode();
  final SchemaKStream sourceSchemaKStream = getSource().buildStream(
      builder, ksqlConfig, kafkaTopicClient, functionRegistry, props, schemaRegistryClient);
  if (sourceSchemaKStream instanceof SchemaKTable) {
    throw new KsqlException(
        "Unsupported aggregation. KSQL currently only supports aggregation on a Stream.");
  }

  // Pre-aggregate computations: project the required columns and the
  // aggregate function arguments into a single stream.
  final List<Pair<String, Expression>> aggArgExpansionList = new ArrayList<>();
  final Map<String, Integer> expressionNames = new HashMap<>();
  collectAggregateArgExpressions(getRequiredColumnList(), aggArgExpansionList, expressionNames);
  collectAggregateArgExpressions(
      getAggregateFunctionArguments(), aggArgExpansionList, expressionNames);
  final SchemaKStream aggregateArgExpanded = sourceSchemaKStream.select(aggArgExpansionList);

  KsqlTopicSerDe ksqlTopicSerDe =
      streamSourceNode.getStructuredDataSource().getKsqlTopic().getKsqlTopicSerDe();
  final Serde<GenericRow> genericRowSerde = ksqlTopicSerDe.getGenericRowSerde(
      aggregateArgExpanded.getSchema(), ksqlConfig, true, schemaRegistryClient);
  final SchemaKGroupedStream schemaKGroupedStream =
      aggregateArgExpanded.groupBy(Serdes.String(), genericRowSerde, getGroupByExpressions());

  // Aggregate computations: build the intermediate schema and run the UDAFs.
  final SchemaBuilder aggregateSchema = SchemaBuilder.struct();
  final Map<Integer, Integer> aggValToValColumnMap =
      createAggregateValueToValueColumnMap(aggregateArgExpanded, aggregateSchema);
  final Schema aggStageSchema =
      buildAggregateSchema(aggregateArgExpanded.getSchema(), functionRegistry);
  final Serde<GenericRow> aggValueGenericRowSerde = ksqlTopicSerDe.getGenericRowSerde(
      aggStageSchema, ksqlConfig, true, schemaRegistryClient);
  final KudafInitializer initializer = new KudafInitializer(aggValToValColumnMap.size());
  final SchemaKTable schemaKTable = schemaKGroupedStream.aggregate(
      initializer,
      new KudafAggregator(
          createAggValToFunctionMap(expressionNames, aggregateArgExpanded, aggregateSchema,
              initializer, aggValToValColumnMap.size(), functionRegistry),
          aggValToValColumnMap),
      getWindowExpression(),
      aggValueGenericRowSerde);

  SchemaKTable result = new SchemaKTable(
      aggStageSchema, schemaKTable.getKtable(), schemaKTable.getKeyField(),
      schemaKTable.getSourceSchemaKStreams(), schemaKTable.isWindowed(),
      SchemaKStream.Type.AGGREGATE, functionRegistry, schemaRegistryClient);
  if (getHavingExpressions() != null) {
    result = result.filter(getHavingExpressions());
  }
  return result.select(getFinalSelectExpressions());
}
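The SchemaBuilder above (aggregateSchema) accumulates the intermediate aggregate layout: pass-through columns first, then one appended field per aggregate function result. A simplified sketch of that layout, assuming invented column names; buildIntermediateSchema is a hypothetical stand-in, not the KSQL method:

import org.apache.kafka.connect.data.Field;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;

public class AggregateSchemaSketch {
  // Hypothetical helper: copy the pass-through columns, then append one
  // field per aggregate result (e.g. COUNT(*) and SUM(AMOUNT)).
  static Schema buildIntermediateSchema(Schema inputSchema) {
    SchemaBuilder aggregateSchema = SchemaBuilder.struct();
    for (Field field : inputSchema.fields()) {
      aggregateSchema.field(field.name(), field.schema());
    }
    aggregateSchema.field("KSQL_AGG_0", Schema.OPTIONAL_INT64_SCHEMA);
    aggregateSchema.field("KSQL_AGG_1", Schema.OPTIONAL_FLOAT64_SCHEMA);
    return aggregateSchema.build();
  }

  public static void main(String[] args) {
    Schema input = SchemaBuilder.struct()
        .field("USERID", Schema.OPTIONAL_STRING_SCHEMA)
        .field("AMOUNT", Schema.OPTIONAL_FLOAT64_SCHEMA)
        .build();
    // Prints USERID, AMOUNT, KSQL_AGG_0, KSQL_AGG_1
    buildIntermediateSchema(input).fields().forEach(f -> System.out.println(f.name()));
  }
}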
Use of org.apache.kafka.connect.data.SchemaBuilder in project ksql by confluentinc.
From class JoinNode, method buildSchema:
private Schema buildSchema(final PlanNode left, final PlanNode right) {
  Schema leftSchema = left.getSchema();
  Schema rightSchema = right.getSchema();
  SchemaBuilder schemaBuilder = SchemaBuilder.struct();
  // Qualify every field with its side's alias so the two sources
  // cannot collide on a field name.
  for (Field field : leftSchema.fields()) {
    String fieldName = leftAlias + "." + field.name();
    schemaBuilder.field(fieldName, field.schema());
  }
  for (Field field : rightSchema.fields()) {
    String fieldName = rightAlias + "." + field.name();
    schemaBuilder.field(fieldName, field.schema());
  }
  return schemaBuilder.build();
}
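A self-contained sketch of the same aliasing pattern, using invented schemas and the aliases O and U:

import org.apache.kafka.connect.data.Field;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;

public class JoinSchemaExample {
  public static void main(String[] args) {
    Schema ordersSchema = SchemaBuilder.struct()
        .field("ORDERID", Schema.OPTIONAL_INT64_SCHEMA)
        .field("USERID", Schema.OPTIONAL_STRING_SCHEMA)
        .build();
    Schema usersSchema = SchemaBuilder.struct()
        .field("USERID", Schema.OPTIONAL_STRING_SCHEMA)
        .field("NAME", Schema.OPTIONAL_STRING_SCHEMA)
        .build();
    // Prefix each side's fields with its alias so the joined schema
    // has no collisions (O.USERID vs U.USERID).
    SchemaBuilder joined = SchemaBuilder.struct();
    for (Field f : ordersSchema.fields()) {
      joined.field("O." + f.name(), f.schema());
    }
    for (Field f : usersSchema.fields()) {
      joined.field("U." + f.name(), f.schema());
    }
    // Prints O.ORDERID, O.USERID, U.USERID, U.NAME
    joined.build().fields().forEach(f -> System.out.println(f.name()));
  }
}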
Use of org.apache.kafka.connect.data.SchemaBuilder in project ksql by confluentinc.
From class SchemaUtil, method addImplicitRowTimeRowKeyToSchema:
public static Schema addImplicitRowTimeRowKeyToSchema(Schema schema) {
  SchemaBuilder schemaBuilder = SchemaBuilder.struct();
  // The implicit metadata columns always come first.
  schemaBuilder.field(SchemaUtil.ROWTIME_NAME, Schema.INT64_SCHEMA);
  schemaBuilder.field(SchemaUtil.ROWKEY_NAME, Schema.STRING_SCHEMA);
  for (Field field : schema.fields()) {
    // Copy the source fields, skipping any pre-existing ROWTIME/ROWKEY
    // so the implicit columns are not added twice.
    if (!field.name().equals(SchemaUtil.ROWKEY_NAME)
        && !field.name().equals(SchemaUtil.ROWTIME_NAME)) {
      schemaBuilder.field(field.name(), field.schema());
    }
  }
  return schemaBuilder.build();
}
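The equals() guard matters because Connect's SchemaBuilder rejects duplicate field names. A small demonstration of why pre-existing ROWTIME/ROWKEY fields must be skipped:

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.errors.SchemaBuilderException;

public class DuplicateFieldExample {
  public static void main(String[] args) {
    SchemaBuilder builder = SchemaBuilder.struct()
        .field("ROWTIME", Schema.INT64_SCHEMA);
    try {
      builder.field("ROWTIME", Schema.INT64_SCHEMA); // same name again
    } catch (SchemaBuilderException e) {
      // Connect refuses duplicate field names, hence the skip above.
      System.out.println("Rejected duplicate field: " + e.getMessage());
    }
  }
}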
Use of org.apache.kafka.connect.data.SchemaBuilder in project kafka-connect-cdc-mssql by jcustenborder.
From class MsSqlTableMetadataProvider, method generateSchema:
Schema generateSchema(ResultSet resultSet, final ChangeKey changeKey, final String columnName)
    throws SQLException {
  boolean optional = resultSet.getBoolean(2);
  String dataType = resultSet.getString(3);
  int scale = resultSet.getInt(4);
  SchemaBuilder builder;
  log.trace("{}: columnName='{}' dataType='{}' scale={} optional={}",
      changeKey, columnName, dataType, scale, optional);
  switch (dataType) {
    case "bigint":
      builder = SchemaBuilder.int64();
      break;
    case "bit":
      builder = SchemaBuilder.bool();
      break;
    case "char":
    case "varchar":
    case "text":
    case "nchar":
    case "nvarchar":
    case "ntext":
    case "uniqueidentifier":
      builder = SchemaBuilder.string();
      break;
    case "smallmoney":
    case "money":
    case "decimal":
    case "numeric":
      builder = Decimal.builder(scale);
      break;
    case "binary":
    case "image":
    case "varbinary":
      builder = SchemaBuilder.bytes();
      break;
    case "date":
      builder = Date.builder();
      break;
    case "datetime":
    case "datetime2":
    case "smalldatetime":
      builder = Timestamp.builder();
      break;
    case "time":
      builder = Time.builder();
      break;
    case "int":
      builder = SchemaBuilder.int32();
      break;
    case "smallint":
      builder = SchemaBuilder.int16();
      break;
    case "tinyint":
      builder = SchemaBuilder.int8();
      break;
    case "real":
      builder = SchemaBuilder.float32();
      break;
    case "float":
      builder = SchemaBuilder.float64();
      break;
    default:
      throw new DataException(String.format(
          "Could not process (dataType = '%s', optional = %s, scale = %d) for %s.",
          dataType, optional, scale, changeKey));
  }
  log.trace("{}: columnName='{}' schema.type='{}' schema.name='{}'",
      changeKey, columnName, builder.type(), builder.name());
  builder.parameters(ImmutableMap.of(Change.ColumnValue.COLUMN_NAME, columnName));
  if (optional) {
    builder.optional();
  }
  return builder.build();
}
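A self-contained sketch of the decimal branch: Decimal.builder(scale) yields a logical BYTES schema, and the column name rides along as a schema parameter. The column name, scale, and parameter key here are invented:

import java.util.Collections;
import org.apache.kafka.connect.data.Decimal;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;

public class DecimalColumnExample {
  public static void main(String[] args) {
    int scale = 4; // e.g. an MS SQL money column
    SchemaBuilder builder = Decimal.builder(scale);
    builder.parameters(Collections.singletonMap("columnName", "UNIT_PRICE"));
    builder.optional(); // the source column is NULLable
    Schema schema = builder.build();
    System.out.println(schema.name()); // org.apache.kafka.connect.data.Decimal
    System.out.println(schema.type()); // BYTES
    System.out.println(schema.parameters()); // contains scale=4 and columnName=UNIT_PRICE
  }
}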