Use of com.amazonaws.services.kinesis.model.ProvisionedThroughputExceededException in project hazelcast (by Hazelcast) —
from the class KinesisSinkP, method checkIfSendingFinished:
private void checkIfSendingFinished() {
if (sendResult.isDone()) {
PutRecordsResult result;
try {
result = KinesisUtil.readResult(this.sendResult);
} catch (ProvisionedThroughputExceededException pte) {
dealWithThroughputExceeded("Data throughput rate exceeded. Backing off and retrying in %d ms");
return;
} catch (SdkClientException sce) {
dealWithSendFailure(sce);
return;
} catch (Throwable t) {
throw rethrow(t);
} finally {
sendResult = null;
}
pruneSentFromBuffer(result);
if (result.getFailedRecordCount() > 0) {
dealWithThroughputExceeded("Failed to send " + result.getFailedRecordCount() + " (out of " + result.getRecords().size() + ") record(s) to stream '" + stream + "'. Sending will be retried in %d ms, message reordering is likely.");
} else {
long sleepTimeNanos = throughputController.markSuccess();
this.nextSendTime += sleepTimeNanos;
this.sleepMetric.set(NANOSECONDS.toMillis(sleepTimeNanos));
sendRetryTracker.reset();
}
}
}
Use of com.amazonaws.services.kinesis.model.ProvisionedThroughputExceededException in project flink (by Apache) —
from the class KinesisProxyTest, method testIsRecoverableExceptionWithProvisionedThroughputExceeded:
@Test
public void testIsRecoverableExceptionWithProvisionedThroughputExceeded() {
final ProvisionedThroughputExceededException ex = new ProvisionedThroughputExceededException("asdf");
ex.setErrorType(ErrorType.Client);
assertTrue(KinesisProxy.isRecoverableException(ex));
}
Use of com.amazonaws.services.kinesis.model.ProvisionedThroughputExceededException in project druid (by druid-io) —
from the class KinesisRecordSupplier, method getSequenceNumber:
/**
* Given a partition and a {@link ShardIteratorType}, create a shard iterator and fetch
* {@link #GET_SEQUENCE_NUMBER_RECORD_COUNT} records and return the first sequence number from the result set.
* This method is thread safe as it does not depend on the internal state of the supplier (it doesn't use the
* {@link PartitionResource} which have been assigned to the supplier), and the Kinesis client is thread safe.
*/
@Nullable
private String getSequenceNumber(StreamPartition<String> partition, ShardIteratorType iteratorEnum) {
return wrapExceptions(() -> {
String shardIterator = kinesis.getShardIterator(partition.getStream(), partition.getPartitionId(), iteratorEnum.toString()).getShardIterator();
long timeoutMillis = System.currentTimeMillis() + fetchSequenceNumberTimeout;
GetRecordsResult recordsResult = null;
while (shardIterator != null && System.currentTimeMillis() < timeoutMillis) {
if (closed) {
log.info("KinesisRecordSupplier closed while fetching sequenceNumber");
return null;
}
final String currentShardIterator = shardIterator;
final GetRecordsRequest request = new GetRecordsRequest().withShardIterator(currentShardIterator).withLimit(GET_SEQUENCE_NUMBER_RECORD_COUNT);
recordsResult = RetryUtils.retry(() -> kinesis.getRecords(request), (throwable) -> {
if (throwable instanceof ProvisionedThroughputExceededException) {
log.warn(throwable, "encountered ProvisionedThroughputExceededException while fetching records, this means " + "that the request rate for the stream is too high, or the requested data is too large for " + "the available throughput. Reduce the frequency or size of your requests. Consider increasing " + "the number of shards to increase throughput.");
return true;
}
if (throwable instanceof AmazonClientException) {
AmazonClientException ase = (AmazonClientException) throwable;
return AWSClientUtil.isClientExceptionRecoverable(ase);
}
return false;
}, GET_SEQUENCE_NUMBER_RETRY_COUNT);
List<Record> records = recordsResult.getRecords();
if (!records.isEmpty()) {
return records.get(0).getSequenceNumber();
}
shardIterator = recordsResult.getNextShardIterator();
}
if (shardIterator == null) {
log.info("Partition[%s] returned a null shard iterator, is the shard closed?", partition.getPartitionId());
return KinesisSequenceNumber.END_OF_SHARD_MARKER;
}
// if we reach here, it usually means either the shard has no more records, or records have not been
// added to this shard
log.warn("timed out while trying to fetch position for shard[%s], millisBehindLatest is [%s], likely no more records in shard", partition.getPartitionId(), recordsResult != null ? recordsResult.getMillisBehindLatest() : "UNKNOWN");
return null;
});
}
Aggregations