Use of org.apache.flink.api.common.serialization.SimpleStringSchema in project flink by apache.
From the class KafkaShortRetentionTestBase, method runAutoOffsetResetTest.
public void runAutoOffsetResetTest() throws Exception {
    final String topic = "auto-offset-reset-test";
    final int parallelism = 1;
    final int elementsPerPartition = 50000;

    Properties tprops = new Properties();
    tprops.setProperty("retention.ms", "250");
    kafkaServer.createTestTopic(topic, parallelism, 1, tprops);

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(parallelism);
    // fail immediately
    env.setRestartStrategy(RestartStrategies.noRestart());

    // ----------- add producer dataflow ----------
    DataStream<String> stream = env.addSource(new RichParallelSourceFunction<String>() {

        private boolean running = true;

        @Override
        public void run(SourceContext<String> ctx) throws InterruptedException {
            int cnt = getRuntimeContext().getIndexOfThisSubtask() * elementsPerPartition;
            int limit = cnt + elementsPerPartition;
            while (running && !stopProducer && cnt < limit) {
                ctx.collect("element-" + cnt);
                cnt++;
                Thread.sleep(10);
            }
            LOG.info("Stopping producer");
        }

        @Override
        public void cancel() {
            running = false;
        }
    });

    Properties props = new Properties();
    props.putAll(standardProps);
    props.putAll(secureProps);
    kafkaServer.produceIntoKafka(stream, topic, new SimpleStringSchema(), props, null);

    // ----------- add consumer dataflow ----------
    NonContinousOffsetsDeserializationSchema deserSchema = new NonContinousOffsetsDeserializationSchema();
    FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer(topic, deserSchema, props);
    DataStreamSource<String> consuming = env.addSource(source);
    consuming.addSink(new DiscardingSink<String>());

    tryExecute(env, "run auto offset reset test");

    kafkaServer.deleteTestTopic(topic);
}
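For orientation: SimpleStringSchema appears on both sides of this dataflow because it simply round-trips between String and UTF-8 bytes. A minimal standalone sketch of that round trip (not part of the test above):

import java.io.IOException;
import org.apache.flink.api.common.serialization.SimpleStringSchema;

public class SimpleStringSchemaRoundTrip {
    public static void main(String[] args) throws IOException {
        SimpleStringSchema schema = new SimpleStringSchema();
        byte[] bytes = schema.serialize("element-42"); // String -> UTF-8 bytes (producer side)
        String back = schema.deserialize(bytes);       // UTF-8 bytes -> String (consumer side)
        System.out.println("element-42".equals(back)); // prints: true
    }
}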
Use of org.apache.flink.api.common.serialization.SimpleStringSchema in project flink by apache.
From the class KafkaShortRetentionTestBase, method runFailOnAutoOffsetResetNoneEager.
public void runFailOnAutoOffsetResetNoneEager() throws Exception {
    final String topic = "auto-offset-reset-none-test";
    final int parallelism = 1;

    kafkaServer.createTestTopic(topic, parallelism, 1);

    // ----------- add consumer ----------
    Properties customProps = new Properties();
    customProps.putAll(standardProps);
    customProps.putAll(secureProps);
    // test that "none" leads to an exception
    customProps.setProperty("auto.offset.reset", "none");

    try {
        kafkaServer.getConsumer(topic, new SimpleStringSchema(), customProps);
        fail("should fail with an exception");
    } catch (IllegalArgumentException e) {
        // expected
        assertTrue(e.getMessage().contains("none"));
    }

    kafkaServer.deleteTestTopic(topic);
}
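The test relies on the consumer constructor validating auto.offset.reset eagerly, before any connection to Kafka is made. A hedged illustration of the shape of such a check (the class name, method name, and accepted values here are assumptions for illustration, not Flink's actual internals):

import java.util.Properties;

final class AutoOffsetResetCheckSketch {
    // Illustrative only: rejects "none" (and any other unsupported value) at construction time.
    static void validate(Properties config) {
        String reset = config.getProperty("auto.offset.reset", "largest");
        if (!"smallest".equals(reset) && !"largest".equals(reset)) {
            // the message contains the offending value, which the test asserts on
            throw new IllegalArgumentException("Unsupported auto.offset.reset value: " + reset);
        }
    }
}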
Use of org.apache.flink.api.common.serialization.SimpleStringSchema in project flink by apache.
From the class KafkaRecordSerializationSchemaBuilderTest, method testSerializeRecordWithPartitioner.
@Test
public void testSerializeRecordWithPartitioner() throws Exception {
    AtomicBoolean opened = new AtomicBoolean(false);
    final int partition = 5;
    final FlinkKafkaPartitioner<Object> partitioner = new ConstantPartitioner<>(opened, partition);
    final KafkaRecordSerializationSchema<String> schema =
            KafkaRecordSerializationSchema.builder()
                    .setTopic(DEFAULT_TOPIC)
                    .setValueSerializationSchema(new SimpleStringSchema())
                    .setPartitioner(partitioner)
                    .build();
    final KafkaRecordSerializationSchema.KafkaSinkContext sinkContext = new TestSinkContext();
    schema.open(null, sinkContext);
    final ProducerRecord<byte[], byte[]> record = schema.serialize("a", sinkContext, null);
    assertEquals(partition, record.partition());
    assertTrue(opened.get());
}
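ConstantPartitioner is a test helper whose body is not shown on this page. A plausible sketch, assuming it records the open() call through the shared flag and always returns the fixed partition:

import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;

// Plausible reconstruction of the helper; the actual test class may differ.
class ConstantPartitioner<T> extends FlinkKafkaPartitioner<T> {

    private final AtomicBoolean opened;
    private final int partition;

    ConstantPartitioner(AtomicBoolean opened, int partition) {
        this.opened = opened;
        this.partition = partition;
    }

    @Override
    public void open(int parallelInstanceId, int parallelInstances) {
        opened.set(true); // lets the test assert that the framework called open()
    }

    @Override
    public int partition(T record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
        return partition; // every record goes to the fixed partition (5 in the test)
    }
}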
Use of org.apache.flink.api.common.serialization.SimpleStringSchema in project flink by apache.
From the class FlinkKafkaProducerBaseTest, method testInstantiationFailsWhenBootstrapServersMissing.
/**
 * Tests that the constructor eagerly checks that bootstrap servers are set in the config.
 */
@Test(expected = IllegalArgumentException.class)
public void testInstantiationFailsWhenBootstrapServersMissing() throws Exception {
    // no bootstrap servers set in props
    Properties props = new Properties();
    // should throw IllegalArgumentException
    new DummyFlinkKafkaProducer<>(props, new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null);
}
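For contrast, the same constructor succeeds once bootstrap.servers is present. A minimal sketch of the passing case (the broker address is a placeholder; DummyFlinkKafkaProducer is the test-local subclass used above):

Properties validProps = new Properties();
validProps.setProperty("bootstrap.servers", "localhost:9092"); // placeholder address
// Passes the eager check; no broker connection is attempted at construction time.
new DummyFlinkKafkaProducer<>(validProps, new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null);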
Use of org.apache.flink.api.common.serialization.SimpleStringSchema in project flink by apache.
From the class KinesisFirehoseSinkITCase, method firehoseSinkWritesCorrectDataToMockAWSServices.
@Test
public void firehoseSinkWritesCorrectDataToMockAWSServices() throws Exception {
    LOG.info("1 - Creating the bucket for Firehose to deliver into...");
    createBucket(s3AsyncClient, BUCKET_NAME);
    LOG.info("2 - Creating the IAM Role for Firehose to write into the s3 bucket...");
    createIAMRole(iamAsyncClient, ROLE_NAME);
    LOG.info("3 - Creating the Firehose delivery stream...");
    createDeliveryStream(STREAM_NAME, BUCKET_NAME, ROLE_ARN, firehoseAsyncClient);

    KinesisFirehoseSink<String> kdsSink =
            KinesisFirehoseSink.<String>builder()
                    .setSerializationSchema(new SimpleStringSchema())
                    .setDeliveryStreamName(STREAM_NAME)
                    .setMaxBatchSize(1)
                    .setFirehoseClientProperties(createConfig(mockFirehoseContainer.getEndpoint()))
                    .build();

    KinesisFirehoseTestUtils.getSampleDataGenerator(env, NUMBER_OF_ELEMENTS).sinkTo(kdsSink);
    env.execute("Integration Test");

    List<S3Object> objects =
            listBucketObjects(createS3Client(mockFirehoseContainer.getEndpoint(), httpClient), BUCKET_NAME);
    assertThat(objects.size()).isEqualTo(NUMBER_OF_ELEMENTS);
    assertThat(
            readObjectsFromS3Bucket(
                    s3AsyncClient, objects, BUCKET_NAME, response -> new String(response.asByteArrayUnsafe())))
            .containsAll(KinesisFirehoseTestUtils.getSampleData(NUMBER_OF_ELEMENTS));
}
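KinesisFirehoseTestUtils.getSampleDataGenerator is not shown on this page. A plausible sketch of the kind of bounded source it returns, assuming it emits NUMBER_OF_ELEMENTS plain strings (the fromSequence/map shape is an assumption, not the helper's actual body):

// Hypothetical stand-in for the test utility; env and kdsSink are as in the test above.
DataStream<String> sampleData = env
        .fromSequence(1, NUMBER_OF_ELEMENTS) // bounded source emitting 1..N
        .map(l -> "element-" + l);           // one plain string per element
sampleData.sinkTo(kdsSink);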