Use of org.apache.storm.sql.runtime.FieldInfo in project storm by apache.
The class StormSqlImpl, method handleCreateTableForTrident.

private void handleCreateTableForTrident(SqlCreateTable n, Map<String, ISqlTridentDataSource> dataSources) {
    List<FieldInfo> fields = updateSchema(n);
    // Look up a Trident data source for the table's URI; the registry returns null when no provider matches.
    ISqlTridentDataSource ds = DataSourcesRegistry.constructTridentDataSource(
            n.location(), n.inputFormatClass(), n.outputFormatClass(), n.properties(), fields);
    if (ds == null) {
        throw new RuntimeException("Failed to find data source for " + n.tableName() + " URI: " + n.location());
    } else if (dataSources.containsKey(n.tableName())) {
        throw new RuntimeException("Duplicated definition for table " + n.tableName());
    }
    dataSources.put(n.tableName(), ds);
}
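
For orientation, FieldInfo is the small value object threaded through every call site on this page. Below is a minimal sketch inferred only from the constructor and accessors used in these snippets (name(), type(), isPrimary()); treat the exact class shape as an assumption rather than the storm-sql-runtime source.

import java.io.Serializable;

// Sketch only: fields inferred from new FieldInfo(name, javaType, isPrimary)
// and the f.name() / f.isPrimary() calls in the Kafka providers below.
public class FieldInfo implements Serializable {
    private final String name;
    private final Class<?> type;
    private final boolean isPrimary;

    public FieldInfo(String name, Class<?> type, boolean isPrimary) {
        this.name = name;
        this.type = type;
        this.isPrimary = isPrimary;
    }

    public String name() { return name; }
    public Class<?> type() { return type; }
    public boolean isPrimary() { return isPrimary; }
}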
Use of org.apache.storm.sql.runtime.FieldInfo in project storm by apache.
The class StormSqlImpl, method updateSchema.

private List<FieldInfo> updateSchema(SqlCreateTable n) {
    TableBuilderInfo builder = new TableBuilderInfo(typeFactory);
    List<FieldInfo> fields = new ArrayList<>();
    for (ColumnDefinition col : n.fieldList()) {
        builder.field(col.name(), col.type(), col.constraint());
        // Map the SQL column type to its Java representation via the Calcite type factory.
        RelDataType dataType = col.type().deriveType(typeFactory);
        Class<?> javaType = (Class<?>) typeFactory.getJavaClass(dataType);
        // instanceof is already null-safe, so no separate null check is needed.
        boolean isPrimary = col.constraint() instanceof ColumnConstraint.PrimaryKey;
        fields.add(new FieldInfo(col.name(), javaType, isPrimary));
    }
    if (n.parallelism() != null) {
        builder.parallelismHint(n.parallelism());
    }
    Table table = builder.build();
    schema.add(n.tableName(), table);
    return fields;
}
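
To make the mapping concrete, here is a hand-built equivalent of what updateSchema derives for a hypothetical statement. The table name, columns, and the assumption that INT maps to java.lang.Integer through the type factory are all illustrative, not taken from the Storm source.

import java.util.ArrayList;
import java.util.List;

public class UpdateSchemaDemo {
    public static void main(String[] args) {
        // For a hypothetical DDL statement such as
        //   CREATE EXTERNAL TABLE ORDERS (ID INT PRIMARY KEY, UNIT_PRICE INT) LOCATION '...'
        // updateSchema would return a list equivalent to:
        List<FieldInfo> fields = new ArrayList<>();
        fields.add(new FieldInfo("ID", Integer.class, true));          // PRIMARY KEY column
        fields.add(new FieldInfo("UNIT_PRICE", Integer.class, false)); // ordinary column
        fields.forEach(f -> System.out.println(f.name() + " primary=" + f.isPrimary()));
    }
}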
Use of org.apache.storm.sql.runtime.FieldInfo in project storm by apache.
The class KafkaDataSourcesProvider, method constructTrident.

@Override
public ISqlTridentDataSource constructTrident(URI uri, String inputFormatClass, String outputFormatClass,
        Properties properties, List<FieldInfo> fields) {
    int port = uri.getPort() != -1 ? uri.getPort() : DEFAULT_ZK_PORT;
    ZkHosts zk = new ZkHosts(uri.getHost() + ":" + port, uri.getPath());
    Map<String, String> values = parseURIParams(uri.getQuery());
    String topic = values.get("topic");
    Preconditions.checkNotNull(topic, "No topic specified for the spout");
    TridentKafkaConfig conf = new TridentKafkaConfig(zk, topic);
    // Collect the field names and remember which column is the primary key.
    List<String> fieldNames = new ArrayList<>();
    int primaryIndex = -1;
    for (int i = 0; i < fields.size(); ++i) {
        FieldInfo f = fields.get(i);
        fieldNames.add(f.name());
        if (f.isPrimary()) {
            primaryIndex = i;
        }
    }
    Preconditions.checkState(primaryIndex != -1, "Kafka stream table must have a primary key");
    Scheme scheme = SerdeUtils.getScheme(inputFormatClass, properties, fieldNames);
    conf.scheme = new SchemeAsMultiScheme(scheme);
    IOutputSerializer serializer = SerdeUtils.getSerializer(outputFormatClass, properties, fieldNames);
    return new KafkaTridentDataSource(conf, topic, primaryIndex, properties, serializer);
}
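
The parseURIParams helper used above is not shown on this page. A plausible sketch, under the assumption that it simply splits the URI query string into key/value pairs; the real implementation in KafkaDataSourcesProvider may differ.

import java.util.HashMap;
import java.util.Map;

// Assumed behavior: "topic=orders&foo=bar" -> {topic=orders, foo=bar}.
private static Map<String, String> parseURIParams(String query) {
    Map<String, String> res = new HashMap<>();
    if (query == null) {
        return res;
    }
    for (String pair : query.split("&")) {
        int idx = pair.indexOf('=');
        if (idx > 0) {
            res.put(pair.substring(0, idx), pair.substring(idx + 1));
        }
    }
    return res;
}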
Use of org.apache.storm.sql.runtime.FieldInfo in project storm by apache.
The class StormSqlImpl, method handleCreateTable.

private void handleCreateTable(SqlCreateTable n, Map<String, DataSource> dataSources) {
    List<FieldInfo> fields = updateSchema(n);
    DataSource ds = DataSourcesRegistry.construct(n.location(), n.inputFormatClass(), n.outputFormatClass(), fields);
    if (ds == null) {
        throw new RuntimeException("Cannot construct data source for " + n.tableName());
    } else if (dataSources.containsKey(n.tableName())) {
        throw new RuntimeException("Duplicated definition for table " + n.tableName());
    }
    dataSources.put(n.tableName(), ds);
}
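
handleCreateTable and handleCreateTableForTrident repeat the same null check and duplicate check. A hypothetical refactoring into a shared helper (not present in the Storm codebase) could fold both into one step:

import java.util.Map;

// Hypothetical helper: putIfAbsent combines the duplicate check and the insert.
private static <T> void registerDataSource(Map<String, T> dataSources, String tableName, T ds) {
    if (ds == null) {
        throw new RuntimeException("Cannot construct data source for " + tableName);
    }
    if (dataSources.putIfAbsent(tableName, ds) != null) {
        throw new RuntimeException("Duplicated definition for table " + tableName);
    }
}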
Use of org.apache.storm.sql.runtime.FieldInfo in project storm by apache.
The class KafkaDataSourcesProvider, method constructStreams.

@Override
public ISqlStreamsDataSource constructStreams(URI uri, String inputFormatClass, String outputFormatClass,
        Properties properties, List<FieldInfo> fields) {
    // Collect the field names and remember which column is the primary key.
    List<String> fieldNames = new ArrayList<>();
    int primaryIndex = -1;
    for (int i = 0; i < fields.size(); ++i) {
        FieldInfo f = fields.get(i);
        fieldNames.add(f.name());
        if (f.isPrimary()) {
            primaryIndex = i;
        }
    }
    Preconditions.checkState(primaryIndex != -1, "Kafka stream table must have a primary key");
    Scheme scheme = SerdeUtils.getScheme(inputFormatClass, properties, fieldNames);
    Map<String, String> values = parseUriParams(uri.getQuery());
    String bootstrapServers = values.get(URI_PARAMS_BOOTSTRAP_SERVERS);
    Preconditions.checkNotNull(bootstrapServers, "bootstrap-servers must be specified");
    String topic = uri.getHost();
    KafkaSpoutConfig<ByteBuffer, ByteBuffer> kafkaSpoutConfig =
            new KafkaSpoutConfig.Builder<ByteBuffer, ByteBuffer>(bootstrapServers, topic)
                    .setProp(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteBufferDeserializer.class)
                    .setProp(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteBufferDeserializer.class)
                    .setProp(ConsumerConfig.GROUP_ID_CONFIG, "storm-sql-kafka-" + UUID.randomUUID().toString())
                    .setRecordTranslator(new RecordTranslatorSchemeAdapter(scheme))
                    .build();
    IOutputSerializer serializer = SerdeUtils.getSerializer(outputFormatClass, properties, fieldNames);
    return new KafkaStreamsDataSource(kafkaSpoutConfig, bootstrapServers, topic, properties, serializer);
}
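
Note the URI shape this method expects: unlike the ZooKeeper-style URI in constructTrident, the topic sits in the host position and the brokers in the query string. A quick check, using a made-up topic and broker address:

import java.net.URI;

public class KafkaUriShapeDemo {
    public static void main(String[] args) {
        // e.g. CREATE EXTERNAL TABLE ... LOCATION 'kafka://orders?bootstrap-servers=broker1:9092'
        URI uri = URI.create("kafka://orders?bootstrap-servers=broker1:9092");
        System.out.println(uri.getHost());  // orders -> topic
        System.out.println(uri.getQuery()); // bootstrap-servers=broker1:9092
    }
}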