Use of io.confluent.ksql.GenericRow in project ksql by confluentinc.
The class DataGenProducer, method populateTopic.
public void populateTopic(Properties props, Generator generator, String kafkaTopicName, String key, int messageCount, long maxInterval) {
  if (maxInterval < 0) {
    maxInterval = INTER_MESSAGE_MAX_INTERVAL;
  }
  Schema avroSchema = generator.schema();
  org.apache.kafka.connect.data.Schema kafkaSchema = new AvroData(1).toConnectSchema(avroSchema);
  Serializer<GenericRow> serializer = getSerializer(avroSchema, kafkaSchema, kafkaTopicName);
  final KafkaProducer<String, GenericRow> producer = new KafkaProducer<>(props, new StringSerializer(), serializer);
  SessionManager sessionManager = new SessionManager();
  for (int i = 0; i < messageCount; i++) {
    Object generatedObject = generator.generate();
    if (!(generatedObject instanceof GenericRecord)) {
      throw new RuntimeException(String.format("Expected Avro Random Generator to return instance of GenericRecord, found %s instead", generatedObject.getClass().getName()));
    }
    GenericRecord randomAvroMessage = (GenericRecord) generatedObject;
    List<Object> genericRowValues = new ArrayList<>();
    SimpleDateFormat timeformatter = null;
    // Populate the record entries.
    String sessionisationValue = null;
    for (Schema.Field field : avroSchema.getFields()) {
      boolean isSession = field.schema().getProp("session") != null;
      boolean isSessionSiblingIntHash = field.schema().getProp("session-sibling-int-hash") != null;
      String timeFormatFromLong = field.schema().getProp("format_as_time");
      if (isSession) {
        String currentValue = (String) randomAvroMessage.get(field.name());
        String newCurrentValue = handleSessionisationOfValue(sessionManager, currentValue);
        sessionisationValue = newCurrentValue;
        genericRowValues.add(newCurrentValue);
      } else if (isSessionSiblingIntHash && sessionisationValue != null) {
        // super cheeky hack to link int-ids to session-values - if anything fails then we use
        // the 'avro-gen' randomised version
        handleSessionSiblingField(randomAvroMessage, genericRowValues, sessionisationValue, field);
      } else if (timeFormatFromLong != null) {
        Date date = new Date(System.currentTimeMillis());
        if (timeFormatFromLong.equals("unix_long")) {
          genericRowValues.add(date.getTime());
        } else {
          if (timeformatter == null) {
            timeformatter = new SimpleDateFormat(timeFormatFromLong);
          }
          genericRowValues.add(timeformatter.format(date));
        }
      } else {
        genericRowValues.add(randomAvroMessage.get(field.name()));
      }
    }
    GenericRow genericRow = new GenericRow(genericRowValues);
    String keyString = randomAvroMessage.get(key).toString();
    ProducerRecord<String, GenericRow> producerRecord = new ProducerRecord<>(kafkaTopicName, keyString, genericRow);
    producer.send(producerRecord);
    System.err.println(keyString + " --> (" + genericRow + ")");
    try {
      Thread.sleep((long) (maxInterval * Math.random()));
    } catch (InterruptedException e) {
      // Ignore the exception.
    }
  }
  producer.flush();
  producer.close();
}
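For orientation, a minimal sketch of how this method might be driven is shown below. It is not taken from the snippet above: JsonProducer as the concrete DataGenProducer, the Generator(File, Random) constructor, the schema file, and the topic, key, count and interval values are all assumptions for illustration.
// Hypothetical driver for populateTopic; JsonProducer, the schema file and the
// topic/key/count/interval values are illustrative assumptions.
Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");
Generator generator = new Generator(new File("pageviews_schema.avro"), new Random());
DataGenProducer dataGenProducer = new JsonProducer();
// Produce 100 messages to the "pageviews" topic, keyed by the "pageid" field,
// pausing up to 500 ms between messages.
dataGenProducer.populateTopic(props, generator, "pageviews", "pageid", 100, 500L);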
Use of io.confluent.ksql.GenericRow in project ksql by confluentinc.
The class ConsoleTest, method testPrintGenericStreamedRow.
@Test
public void testPrintGenericStreamedRow() throws IOException {
  StreamedRow row = new StreamedRow(new GenericRow(Arrays.asList("col_1", "col_2")));
  terminal.printStreamedRow(row);
}
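The test above only checks that printing a generic row does not throw. Since a GenericRow is essentially an ordered list of column values, a slightly more explicit sketch of the same construction can also assert on the columns via getColumns(), the accessor used by the integration tests below; the expected values here are illustrative.
// Sketch: GenericRow wraps an ordered list of column values.
GenericRow row = new GenericRow(Arrays.asList("col_1", "col_2"));
assertEquals(2, row.getColumns().size());
assertEquals("col_1", row.getColumns().get(0));
terminal.printStreamedRow(new StreamedRow(row));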
Use of io.confluent.ksql.GenericRow in project ksql by confluentinc.
The class EndToEndIntegrationTest, method shouldSelectFromPageViewsWithSpecificColumn.
@Test
public void shouldSelectFromPageViewsWithSpecificColumn() throws Exception {
  final QueuedQueryMetadata queryMetadata = executeQuery("SELECT pageid from %s;", pageViewStream);
  BlockingQueue<KeyValue<String, GenericRow>> rowQueue = queryMetadata.getRowQueue();
  List<String> actualPages = new ArrayList<>();
  List<String> expectedPages = Arrays.asList("PAGE_1", "PAGE_2", "PAGE_3", "PAGE_4", "PAGE_5", "PAGE_5", "PAGE_5");
  while (actualPages.size() < expectedPages.size()) {
    KeyValue<String, GenericRow> nextRow = rowQueue.poll();
    if (nextRow != null) {
      List<Object> columns = nextRow.value.getColumns();
      assertEquals(1, columns.size());
      String page = (String) columns.get(0);
      actualPages.add(page);
    }
  }
  assertEquals(expectedPages, actualPages);
  queryMetadata.getKafkaStreams().close();
}
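The loop above busy-polls rowQueue until enough pages have arrived. A hedged alternative for the same step uses the blocking poll with a timeout from java.util.concurrent.BlockingQueue; the 30-second budget is an illustrative value, not taken from the test.
// Sketch: bounded wait per row instead of spin-polling, so the test fails fast
// if the query never produces output.
KeyValue<String, GenericRow> nextRow = rowQueue.poll(30, TimeUnit.SECONDS);
if (nextRow == null) {
  fail("Timed out waiting for a row from the query");
}
actualPages.add((String) nextRow.value.getColumns().get(0));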
Use of io.confluent.ksql.GenericRow in project ksql by confluentinc.
The class EndToEndIntegrationTest, method shouldSelectAllFromUsers.
@Test
public void shouldSelectAllFromUsers() throws Exception {
  final QueuedQueryMetadata queryMetadata = executeQuery("SELECT * from %s;", userTable);
  BlockingQueue<KeyValue<String, GenericRow>> rowQueue = queryMetadata.getRowQueue();
  Set<String> actualUsers = new HashSet<>();
  Set<String> expectedUsers = Utils.mkSet("USER_0", "USER_1", "USER_2", "USER_3", "USER_4");
  while (actualUsers.size() < expectedUsers.size()) {
    KeyValue<String, GenericRow> nextRow = rowQueue.poll();
    if (nextRow != null) {
      List<Object> columns = nextRow.value.getColumns();
      assertEquals(6, columns.size());
      actualUsers.add((String) columns.get(1));
    }
  }
  assertEquals(expectedUsers, actualUsers);
}
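Each row here carries all six columns of the users table, but only column 1 is inspected. When an assertion like this fails it can help to print the whole row; a small sketch, assuming getColumns() returns values in schema order as the tests above rely on.
// Sketch: dump every column of a GenericRow while debugging a failed assertion.
List<Object> columns = nextRow.value.getColumns();
for (int i = 0; i < columns.size(); i++) {
  System.err.printf("column[%d] = %s%n", i, columns.get(i));
}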
Use of io.confluent.ksql.GenericRow in project ksql by confluentinc.
The class JoinIntTest, method shouldLeftJoinOrderAndItems.
private void shouldLeftJoinOrderAndItems(String testStreamName, String orderStreamTopic, String orderStreamName, String itemTableName, DataSource.DataSourceSerDe dataSourceSerDe) throws Exception {
  final String queryString = String.format("CREATE STREAM %s AS SELECT ORDERID, ITEMID, ORDERUNITS, DESCRIPTION FROM %s LEFT JOIN" + " %s on %s.ITEMID = %s.ID WHERE %s.ITEMID = 'ITEM_1' ;", testStreamName, orderStreamName, itemTableName, orderStreamName, itemTableName, orderStreamName);
  ksqlContext.sql(queryString);
  Schema resultSchema = ksqlContext.getMetaStore().getSource(testStreamName).getSchema();
  Map<String, GenericRow> expectedResults = Collections.singletonMap("ITEM_1", new GenericRow(Arrays.asList(null, null, "ORDER_1", "ITEM_1", 10.0, "home cinema")));
  final Map<String, GenericRow> results = new HashMap<>();
  TestUtils.waitForCondition(() -> {
    results.putAll(testHarness.consumeData(testStreamName, resultSchema, 1, new StringDeserializer(), IntegrationTestHarness.RESULTS_POLL_MAX_TIME_MS, dataSourceSerDe));
    final boolean success = results.equals(expectedResults);
    if (!success) {
      try {
        // The join may not be triggered the first time around due to the order in which
        // the consumer pulls the records back, so we publish again to make the stream
        // trigger the join.
        testHarness.publishTestData(orderStreamTopic, orderDataProvider, now, dataSourceSerDe);
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
    return success;
  }, IntegrationTestHarness.RESULTS_POLL_MAX_TIME_MS * 2 + 30000, "failed to complete join correctly");
}
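The expected GenericRow above begins with two nulls, which appear to be placeholders for result columns the test does not assert on (most likely the implicit ROWTIME and ROWKEY values); that reading is an inference, not stated in the test. A hypothetical helper along these lines would make additional expected rows easier to build.
// Hypothetical helper: the two leading nulls stand in for the columns the test
// ignores (probably the implicit ROWTIME/ROWKEY values of the result stream).
private static GenericRow expectedJoinRow(String orderId, String itemId, double units, String description) {
  return new GenericRow(Arrays.asList(null, null, orderId, itemId, units, description));
}
// Usage, reproducing the expected map from the test above:
Map<String, GenericRow> expectedResults =
    Collections.singletonMap("ITEM_1", expectedJoinRow("ORDER_1", "ITEM_1", 10.0, "home cinema"));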