Use of com.amazonaws.services.kinesis.producer.UserRecordResult in project flink by apache.
Snippet from class FlinkKinesisProducer, method open.
// --------------------------- Lifecycle methods ---------------------------

/**
 * Initializes the producer: opens the serialization schema, validates and applies the
 * producer configuration, creates the KPL {@code KinesisProducer}, registers metrics
 * (backpressure-cycle counter and outstanding-records gauge), and installs the
 * record-completion callback that releases the backpressure latch and records failures.
 *
 * @param parameters the task configuration passed by the Flink runtime
 * @throws Exception if the schema cannot be opened or the producer configuration is invalid
 */
@Override
public void open(Configuration parameters) throws Exception {
    super.open(parameters);

    schema.open(
            RuntimeContextInitializationContextAdapters.serializationAdapter(
                    getRuntimeContext(), metricGroup -> metricGroup.addGroup("user")));

    // check and pass the configuration properties
    KinesisProducerConfiguration producerConfig =
            KinesisConfigUtil.getValidatedProducerConfiguration(configProps);

    producer = getKinesisProducer(producerConfig);

    final MetricGroup kinesisMetricGroup =
            getRuntimeContext().getMetricGroup().addGroup(KINESIS_PRODUCER_METRIC_GROUP);
    this.backpressureCycles = kinesisMetricGroup.counter(METRIC_BACKPRESSURE_CYCLES);
    kinesisMetricGroup.gauge(
            METRIC_OUTSTANDING_RECORDS_COUNT, producer::getOutstandingRecordsCount);

    backpressureLatch = new TimeoutLatch();
    callback =
            new FutureCallback<UserRecordResult>() {
                @Override
                public void onSuccess(UserRecordResult result) {
                    // wake any thread blocked on the backpressure latch
                    backpressureLatch.trigger();
                    if (!result.isSuccessful()) {
                        if (failOnError) {
                            // only remember the first thrown exception
                            if (thrownException == null) {
                                thrownException =
                                        new RuntimeException("Record was not sent successfully");
                            }
                        } else {
                            LOG.warn("Record was not sent successfully");
                        }
                    }
                }

                @Override
                public void onFailure(Throwable t) {
                    backpressureLatch.trigger();
                    if (failOnError) {
                        // NOTE(review): unlike onSuccess, this overwrites any previously
                        // recorded exception rather than keeping the first one — confirm
                        // whether that asymmetry is intentional before changing it.
                        thrownException = t;
                    } else {
                        LOG.warn("An exception occurred while processing a record", t);
                    }
                }
            };

    if (this.customPartitioner != null) {
        this.customPartitioner.initialize(
                getRuntimeContext().getIndexOfThisSubtask(),
                getRuntimeContext().getNumberOfParallelSubtasks());
    }

    // ensure KPL native resources are released when the user code classloader is disposed
    final RuntimeContext ctx = getRuntimeContext();
    ctx.registerUserCodeClassLoaderReleaseHookIfAbsent(
            KINESIS_PRODUCER_RELEASE_HOOK_NAME,
            () -> this.runClassLoaderReleaseHook(ctx.getUserCodeClassLoader()));

    LOG.info("Started Kinesis producer instance for region '{}'", producerConfig.getRegion());
}
Use of com.amazonaws.services.kinesis.producer.UserRecordResult in project flink by apache.
Snippet from class FlinkKinesisProducerTest, method testBackpressure.
/**
 * Test ensuring that the producer blocks if the queue limit is exceeded, until the queue length
 * drops below the limit; we set a timeout because the test will not finish if the logic is
 * broken.
 */
@Test(timeout = 10000)
public void testBackpressure() throws Throwable {
    final Deadline deadline = Deadline.fromNow(Duration.ofSeconds(10));

    final DummyFlinkKinesisProducer<String> producer =
            new DummyFlinkKinesisProducer<>(new SimpleStringSchema());
    // a queue limit of 1 means a second in-flight record must block the caller
    producer.setQueueLimit(1);

    OneInputStreamOperatorTestHarness<String, Object> testHarness =
            new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer));
    testHarness.open();

    UserRecordResult result = mock(UserRecordResult.class);
    when(result.isSuccessful()).thenReturn(true);

    CheckedThread msg1 = new CheckedThread() {
        @Override
        public void go() throws Exception {
            testHarness.processElement(new StreamRecord<>("msg-1"));
        }
    };
    msg1.start();
    msg1.trySync(deadline.timeLeftIfAny().toMillis());
    assertFalse("Flush triggered before reaching queue limit", msg1.isAlive());

    // consume msg-1 so that queue is empty again
    producer.getPendingRecordFutures().get(0).set(result);

    CheckedThread msg2 = new CheckedThread() {
        @Override
        public void go() throws Exception {
            testHarness.processElement(new StreamRecord<>("msg-2"));
        }
    };
    msg2.start();
    msg2.trySync(deadline.timeLeftIfAny().toMillis());
    assertFalse("Flush triggered before reaching queue limit", msg2.isAlive());

    CheckedThread moreElementsThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            // this should block until msg-2 is consumed
            testHarness.processElement(new StreamRecord<>("msg-3"));
            // this should block until msg-3 is consumed
            testHarness.processElement(new StreamRecord<>("msg-4"));
        }
    };
    moreElementsThread.start();

    // NOTE(review): checked immediately after start() — the thread is expected to be
    // alive here whether or not it has reached the blocking call yet.
    assertTrue("Producer should still block, but doesn't", moreElementsThread.isAlive());

    // consume msg-2 from the queue, leaving msg-3 in the queue and msg-4 blocked
    while (producer.getPendingRecordFutures().size() < 2) {
        Thread.sleep(50);
    }
    producer.getPendingRecordFutures().get(1).set(result);

    assertTrue("Producer should still block, but doesn't", moreElementsThread.isAlive());

    // consume msg-3, blocked msg-4 can be inserted into the queue and block is released
    while (producer.getPendingRecordFutures().size() < 3) {
        Thread.sleep(50);
    }
    producer.getPendingRecordFutures().get(2).set(result);

    moreElementsThread.trySync(deadline.timeLeftIfAny().toMillis());
    assertFalse("Producer still blocks although the queue is flushed", moreElementsThread.isAlive());

    producer.getPendingRecordFutures().get(3).set(result);
    testHarness.close();
}
Aggregations