use of io.vertx.junit5.Timeout in project hono by eclipse.
the class HonoKafkaConsumerIT method testConsumerReadsLatestRecordsPublishedAfterTopicSubscriptionConfirmed.
/**
* Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy and a topic pattern
* subscription only receives records published after the consumer <em>start()</em> method has completed.
* <p>
* Also verifies that all records published after the consumer <em>ensureTopicIsAmongSubscribedTopicPatternTopics()</em>
* method has completed are received by the consumer, even if the topic was only created after the consumer
* <em>start()</em> method has completed.
*
* @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
* @param ctx The vert.x test context.
* @throws InterruptedException if test execution gets interrupted.
*/
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsLatestRecordsPublishedAfterTopicSubscriptionConfirmed(final String partitionAssignmentStrategy, final VertxTestContext ctx) throws InterruptedException {
final String patternPrefix = "test_" + UUID.randomUUID() + "_";
final int numTopics = 2;
final Pattern topicPattern = Pattern.compile(Pattern.quote(patternPrefix) + ".*");
final int numPartitions = 5;
final int numTestRecordsPerTopic = 20;
final Set<String> topics = IntStream.range(0, numTopics).mapToObj(i -> patternPrefix + i).collect(Collectors.toSet());
final VertxTestContext setup = new VertxTestContext();
createTopics(topics, numPartitions).compose(v -> publishRecords(numTestRecordsPerTopic, "key_", topics)).onComplete(setup.succeedingThenComplete());
assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
if (setup.failed()) {
ctx.failNow(setup.causeOfFailure());
return;
}
LOG.debug("topics created and (to be ignored) test records published");
// prepare consumer
final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
final AtomicReference<Promise<Void>> nextRecordReceivedPromiseRef = new AtomicReference<>();
final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
receivedRecords.add(record);
Optional.ofNullable(nextRecordReceivedPromiseRef.get()).ifPresent(Promise::complete);
};
kafkaConsumer = new HonoKafkaConsumer(vertx, topicPattern, recordHandler, consumerConfig);
// start consumer
kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
ctx.verify(() -> {
assertThat(receivedRecords.size()).isEqualTo(0);
});
final Promise<Void> nextRecordReceivedPromise = Promise.promise();
nextRecordReceivedPromiseRef.set(nextRecordReceivedPromise);
LOG.debug("consumer started, create new topic implicitly by invoking ensureTopicIsAmongSubscribedTopicPatternTopics()");
final String newTopic = patternPrefix + "new";
final String recordKey = "addedAfterStartKey";
kafkaConsumer.ensureTopicIsAmongSubscribedTopicPatternTopics(newTopic).onComplete(ctx.succeeding(v2 -> {
LOG.debug("publish record to be received by the consumer");
publish(newTopic, recordKey, Buffer.buffer("testPayload"));
}));
nextRecordReceivedPromise.future().onComplete(ar -> {
ctx.verify(() -> {
assertThat(receivedRecords.size()).isEqualTo(1);
assertThat(receivedRecords.get(0).key()).isEqualTo(recordKey);
});
ctx.completeNow();
});
}));
}
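The behaviour exercised above follows from plain Kafka semantics: with auto.offset.reset set to "latest" a consumer only sees records written after its partitions have been assigned, and a pattern subscription only picks up topics created later once the consumer's metadata has been refreshed, which is the gap that ensureTopicIsAmongSubscribedTopicPatternTopics() closes. Below is a minimal sketch of those two settings using the plain Apache Kafka client; the broker address, group id and the short metadata refresh interval are illustrative placeholders, not values taken from the test.
import java.time.Duration;
import java.util.Properties;
import java.util.regex.Pattern;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class PatternSubscriptionSketch {

    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "pattern-demo");            // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // "latest": records that already exist when the partitions get assigned are skipped
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        // topics created after subscribe() only become visible on a metadata refresh;
        // a short max age shrinks the window in which a new matching topic is not yet subscribed
        props.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "5000");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Pattern.compile("test_.*"));
            for (int i = 0; i < 30; i++) {
                consumer.poll(Duration.ofSeconds(1))
                        .forEach(r -> System.out.printf("%s %s=%s%n", r.topic(), r.key(), r.value()));
            }
        }
    }
}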
use of io.vertx.junit5.Timeout in project hono by eclipse.
the class HonoKafkaConsumerIT method testConsumerReadsLatestRecordsPublishedAfterStart.
/**
* Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy only receives
* records published after the consumer <em>start()</em> method has completed.
*
* @param partitionAssignmentStrategy The partition assignment strategy to use for the consumer.
* @param ctx The vert.x test context.
* @throws InterruptedException if test execution gets interrupted.
*/
@ParameterizedTest
@MethodSource("partitionAssignmentStrategies")
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsLatestRecordsPublishedAfterStart(final String partitionAssignmentStrategy, final VertxTestContext ctx) throws InterruptedException {
final int numTopics = 2;
final int numPartitions = 5;
final int numTestRecordsPerTopic = 20;
final Set<String> topics = IntStream.range(0, numTopics).mapToObj(i -> "test_" + i + "_" + UUID.randomUUID()).collect(Collectors.toSet());
final String publishTestTopic = topics.iterator().next();
final VertxTestContext setup = new VertxTestContext();
createTopics(topics, numPartitions).compose(v -> publishRecords(numTestRecordsPerTopic, "key_", topics)).onComplete(setup.succeedingThenComplete());
assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
if (setup.failed()) {
ctx.failNow(setup.causeOfFailure());
return;
}
LOG.debug("topics created and (to be ignored) test records published");
// prepare consumer
final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
applyPartitionAssignmentStrategy(consumerConfig, partitionAssignmentStrategy);
consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
final String publishedAfterStartRecordKey = "publishedAfterStartKey";
final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
// verify received record
ctx.verify(() -> assertThat(record.key()).isEqualTo(publishedAfterStartRecordKey));
ctx.completeNow();
};
kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler, consumerConfig);
// start consumer
kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
LOG.debug("consumer started, publish record to be received by the consumer");
publish(publishTestTopic, publishedAfterStartRecordKey, Buffer.buffer("testPayload"));
}));
if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) {
ctx.failNow(new IllegalStateException("timeout waiting for record to be received"));
}
}
use of io.vertx.junit5.Timeout in project hono by eclipse.
the class ApplicationClientIT method testConnectFailsWithClientErrorIfTlsHandshakeFails.
/**
* Verifies that a connection attempt for which the TLS handshake cannot be completed fails after
* two retries with a ClientErrorException with status code 400.
*
* @param ctx The vert.x test context.
*/
@Test
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConnectFailsWithClientErrorIfTlsHandshakeFails(final VertxTestContext ctx) {
// GIVEN a client that is configured to try to connect using an unsupported TLS version
final ClientConfigProperties downstreamProps = new ClientConfigProperties();
downstreamProps.setHost(IntegrationTestSupport.DOWNSTREAM_HOST);
downstreamProps.setPort(IntegrationTestSupport.DOWNSTREAM_PORT);
downstreamProps.setTlsEnabled(true);
downstreamProps.setSecureProtocols(List.of("TLSv1.1"));
downstreamProps.setReconnectAttempts(2);
client = new ProtonBasedApplicationClient(HonoConnection.newConnection(vertx, downstreamProps));
// WHEN the client tries to connect
client.connect().onComplete(ctx.failing(t -> {
// THEN the connection attempt fails because the TLS handshake cannot be completed
ctx.verify(() -> {
assertThat(t).isInstanceOf(ClientErrorException.class);
assertThat(((ClientErrorException) t).getErrorCode()).isEqualTo(HttpURLConnection.HTTP_BAD_REQUEST);
});
ctx.completeNow();
}));
}
use of io.vertx.junit5.Timeout in project hono by eclipse.
the class KafkaBasedCommandConsumerFactoryImplIT method testCommandsGetForwardedIfOneConsumerInstanceGetsClosed.
/**
* Verifies that records published on the tenant-specific Kafka command topic get received
* and forwarded by consumers created by factory instances, even if one factory and the consumer
* it contains get closed in the middle of processing some of the commands.
*
* @param ctx The vert.x test context.
* @throws InterruptedException if test execution gets interrupted.
*/
@Test
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testCommandsGetForwardedIfOneConsumerInstanceGetsClosed(final VertxTestContext ctx) throws InterruptedException {
final String tenantId = "tenant_" + UUID.randomUUID();
final VertxTestContext setup = new VertxTestContext();
// Scenario to test:
// - first command gets sent, forwarded and received without any imposed delay
// - second command gets sent, received by the factory consumer instance; processing gets blocked
// while trying to get the target adapter instance
// - for the rest of the commands, retrieval of the target adapter instance is successful, but they won't
// get forwarded until processing of the second command is finished
// - now the factory consumer gets closed and a new factory/consumer gets started; at that point
// also the processing of the second command gets finished
//
// Expected outcome:
// - processing of the second command and all following commands by the first consumer gets aborted, so that
// these commands don't get forwarded on the internal command topic
// - instead, the second consumer takes over at the offset of the first command (position must have been committed
// when closing the first consumer) and processes and forwards all commands starting with the second command
final int numTestCommands = 10;
final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
final Promise<Void> firstRecordReceivedPromise = Promise.promise();
final Promise<Void> allRecordsReceivedPromise = Promise.promise();
final List<String> receivedCommandSubjects = new ArrayList<>();
final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
receivedRecords.add(record);
LOG.trace("received {}", record);
receivedCommandSubjects.add(KafkaRecordHelper.getSubject(record.headers()).orElse(""));
if (receivedRecords.size() == 1) {
firstRecordReceivedPromise.complete();
}
if (receivedRecords.size() == numTestCommands) {
allRecordsReceivedPromise.tryComplete();
}
};
final Promise<Void> firstConsumerAllGetAdapterInstanceInvocationsDone = Promise.promise();
final LinkedList<Promise<Void>> firstConsumerGetAdapterInstancePromisesQueue = new LinkedList<>();
// don't let getting the target adapter instance finish immediately
final Supplier<Future<Void>> firstConsumerGetAdapterInstanceSupplier = () -> {
final Promise<Void> resultPromise = Promise.promise();
firstConsumerGetAdapterInstancePromisesQueue.addFirst(resultPromise);
// don't complete the future for the second command here yet
if (firstConsumerGetAdapterInstancePromisesQueue.size() != 2) {
resultPromise.complete();
}
if (firstConsumerGetAdapterInstancePromisesQueue.size() == numTestCommands) {
firstConsumerAllGetAdapterInstanceInvocationsDone.complete();
}
return resultPromise.future();
};
final AtomicReference<KafkaBasedCommandConsumerFactoryImpl> consumerFactory1Ref = new AtomicReference<>();
final Context vertxContext = vertx.getOrCreateContext();
vertxContext.runOnContext(v0 -> {
final HonoKafkaConsumer internalConsumer = getInternalCommandConsumer(recordHandler);
final KafkaBasedCommandConsumerFactoryImpl consumerFactory1 = getKafkaBasedCommandConsumerFactory(firstConsumerGetAdapterInstanceSupplier, tenantId);
consumerFactory1Ref.set(consumerFactory1);
CompositeFuture.join(internalConsumer.start(), consumerFactory1.start()).compose(f -> createCommandConsumer(tenantId, consumerFactory1)).onComplete(setup.succeedingThenComplete());
});
assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
if (setup.failed()) {
ctx.failNow(setup.causeOfFailure());
return;
}
LOG.debug("command consumer started");
final List<String> sentCommandSubjects = new ArrayList<>();
IntStream.range(0, numTestCommands).forEach(i -> {
final String subject = "cmd_" + i;
sentCommandSubjects.add(subject);
sendOneWayCommand(tenantId, "myDeviceId", subject);
});
final AtomicInteger secondConsumerGetAdapterInstanceInvocations = new AtomicInteger();
// wait for first record on internal topic to have been received ...
CompositeFuture.join(firstConsumerAllGetAdapterInstanceInvocationsDone.future(), firstRecordReceivedPromise.future()).compose(v -> {
// ... and wait some more, making sure that the offset of the first record has been committed
final Promise<Void> delayPromise = Promise.promise();
vertx.setTimer(500, tid -> delayPromise.complete());
return delayPromise.future();
}).onComplete(v -> {
LOG.info("stopping first consumer factory");
consumerFactory1Ref.get().stop().onComplete(ctx.succeeding(ar -> {
LOG.info("factory stopped");
// no delay on getting the target adapter instance added here
final KafkaBasedCommandConsumerFactoryImpl consumerFactory2 = getKafkaBasedCommandConsumerFactory(() -> {
secondConsumerGetAdapterInstanceInvocations.incrementAndGet();
return Future.succeededFuture();
}, tenantId);
consumerFactory2.start().onComplete(ctx.succeeding(ar2 -> {
LOG.info("creating command consumer in new consumer factory");
createCommandConsumer(tenantId, consumerFactory2).onComplete(ctx.succeeding(ar3 -> {
LOG.debug("consumer created");
firstConsumerGetAdapterInstancePromisesQueue.forEach(Promise::tryComplete);
}));
}));
}));
});
final long timerId = vertx.setTimer(8000, tid -> {
LOG.info("received records:\n{}", receivedRecords.stream().map(Object::toString).collect(Collectors.joining(",\n")));
allRecordsReceivedPromise.tryFail(String.format("only received %d out of %d expected messages after 8s", receivedRecords.size(), numTestCommands));
});
allRecordsReceivedPromise.future().onComplete(ctx.succeeding(v -> {
vertx.cancelTimer(timerId);
ctx.verify(() -> {
assertThat(receivedCommandSubjects).isEqualTo(sentCommandSubjects);
// all but the first command should have been processed by the second consumer
assertThat(secondConsumerGetAdapterInstanceInvocations.get()).isEqualTo(numTestCommands - 1);
});
ctx.completeNow();
}));
}
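The handover between the two factory consumers relies on ordinary consumer-group semantics: the first consumer's committed position determines where the replacement resumes after the rebalance. Below is a minimal sketch of that commit-on-close behaviour with the plain Apache Kafka client; the broker, group id and topic name are illustrative placeholders, not Hono's internal topic layout.
import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class CommitOnCloseSketch {

    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "command-consumers");       // group shared with the successor
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");         // commit explicitly
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("command-topic")); // placeholder topic name
            consumer.poll(Duration.ofSeconds(1))
                    .forEach(r -> System.out.println("processed " + r.key()));
            // commit the position reached by the poll above, so that after close() the
            // rebalanced successor in the same group resumes right behind the processed records
            consumer.commitSync();
        } // close() makes the group rebalance; remaining records go to the next member
    }
}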
use of org.alfresco.repo.rendition2.RenditionDefinition2.TIMEOUT in project alfresco-repository by Alfresco.
the class TransformationOptionsConverter method getTransformationOptions.
/**
* @deprecated as we do not plan to use TransformationOptions moving forward, because local transformations will
* also use the same options as the Transform Service.
*/
@Deprecated
TransformationOptions getTransformationOptions(String renditionName, Map<String, String> options) {
TransformationOptions transformationOptions = null;
Set<String> optionNames = options.keySet();
// The "pdf" rendition is special as it was incorrectly set up as an SWFTransformationOptions in 6.0
// It should have been simply a TransformationOptions.
boolean isPdfRendition = "pdf".equals(renditionName);
Set<String> subclassOptionNames = new HashSet<>(optionNames);
subclassOptionNames.removeAll(LIMIT_OPTIONS);
subclassOptionNames.remove(INCLUDE_CONTENTS);
boolean hasOptions = !subclassOptionNames.isEmpty();
if (isPdfRendition || hasOptions) {
// The "pdf" rendition used the wrong TransformationOptions subclass.
if (isPdfRendition || FLASH_OPTIONS.containsAll(subclassOptionNames)) {
SWFTransformationOptions opts = new SWFTransformationOptions();
transformationOptions = opts;
opts.setFlashVersion(isPdfRendition ? "9" : options.get(FLASH_VERSION));
} else if (IMAGE_OPTIONS.containsAll(subclassOptionNames) || PDF_OPTIONS.containsAll(subclassOptionNames)) {
// image and pdf renditions use ImageTransformationOptions to specify width, height etc.
ImageTransformationOptions opts = new ImageTransformationOptions();
transformationOptions = opts;
if (containsAny(subclassOptionNames, RESIZE_OPTIONS)) {
ImageResizeOptions imageResizeOptions = new ImageResizeOptions();
opts.setResizeOptions(imageResizeOptions);
// PDF
ifSet(options, WIDTH, (v) -> imageResizeOptions.setWidth(Integer.parseInt(v)));
ifSet(options, HEIGHT, (v) -> imageResizeOptions.setHeight(Integer.parseInt(v)));
// ImageMagick
ifSet(options, RESIZE_WIDTH, (v) -> imageResizeOptions.setWidth(Integer.parseInt(v)));
ifSet(options, RESIZE_HEIGHT, (v) -> imageResizeOptions.setHeight(Integer.parseInt(v)));
ifSet(options, THUMBNAIL, (v) -> imageResizeOptions.setResizeToThumbnail(Boolean.parseBoolean(v)));
ifSet(options, RESIZE_PERCENTAGE, (v) -> imageResizeOptions.setPercentResize(Boolean.parseBoolean(v)));
set(options, ALLOW_ENLARGEMENT, (v) -> imageResizeOptions.setAllowEnlargement(Boolean.parseBoolean(v == null ? "true" : v)));
set(options, MAINTAIN_ASPECT_RATIO, (v) -> imageResizeOptions.setMaintainAspectRatio(Boolean.parseBoolean(v == null ? "true" : v)));
}
// ALPHA_REMOVE can be ignored as it is automatically added in the legacy code if the sourceMimetype is jpeg
set(options, AUTO_ORIENT, (v) -> opts.setAutoOrient(Boolean.parseBoolean(v == null ? "true" : v)));
boolean containsPaged = containsAny(subclassOptionNames, PAGED_OPTIONS);
boolean containsCrop = containsAny(subclassOptionNames, CROP_OPTIONS);
boolean containsTemporal = containsAny(subclassOptionNames, TEMPORAL_OPTIONS);
if (containsPaged || containsCrop || containsTemporal) {
List<TransformationSourceOptions> sourceOptionsList = new ArrayList<>();
if (containsPaged) {
// The legacy transformer options start at page 1, whereas ImageMagick and the local
// transforms start at 0.
PagedSourceOptions pagedSourceOptions = new PagedSourceOptions();
sourceOptionsList.add(pagedSourceOptions);
ifSet(options, START_PAGE, (v) -> pagedSourceOptions.setStartPageNumber(Integer.parseInt(v) + 1));
ifSet(options, END_PAGE, (v) -> pagedSourceOptions.setEndPageNumber(Integer.parseInt(v) + 1));
ifSet(options, PAGE, (v) -> {
int i = Integer.parseInt(v) + 1;
pagedSourceOptions.setStartPageNumber(i);
pagedSourceOptions.setEndPageNumber(i);
});
}
if (containsCrop) {
CropSourceOptions cropSourceOptions = new CropSourceOptions();
sourceOptionsList.add(cropSourceOptions);
ifSet(options, CROP_GRAVITY, (v) -> cropSourceOptions.setGravity(v));
ifSet(options, CROP_PERCENTAGE, (v) -> cropSourceOptions.setPercentageCrop(Boolean.parseBoolean(v)));
ifSet(options, CROP_WIDTH, (v) -> cropSourceOptions.setWidth(Integer.parseInt(v)));
ifSet(options, CROP_HEIGHT, (v) -> cropSourceOptions.setHeight(Integer.parseInt(v)));
ifSet(options, CROP_X_OFFSET, (v) -> cropSourceOptions.setXOffset(Integer.parseInt(v)));
ifSet(options, CROP_Y_OFFSET, (v) -> cropSourceOptions.setYOffset(Integer.parseInt(v)));
}
if (containsTemporal) {
TemporalSourceOptions temporalSourceOptions = new TemporalSourceOptions();
sourceOptionsList.add(temporalSourceOptions);
ifSet(options, DURATION, (v) -> temporalSourceOptions.setDuration(v));
ifSet(options, OFFSET, (v) -> temporalSourceOptions.setOffset(v));
}
opts.setSourceOptionsList(sourceOptionsList);
}
}
} else {
// This is what the "pdf" rendition should have used in 6.0 and it is not unreasonable for a custom transformer
// and rendition to do the same.
transformationOptions = new TransformationOptions();
}
if (transformationOptions == null) {
StringJoiner sj = new StringJoiner("\n ");
sj.add("The RenditionDefinition2 " + renditionName + " contains options that cannot be mapped to TransformationOptions used by local transformers. " + " The TransformOptionConverter may need to be sub classed to support this conversion.");
HashSet<String> otherNames = new HashSet<>(optionNames);
otherNames.removeAll(FLASH_OPTIONS);
otherNames.removeAll(IMAGE_OPTIONS);
otherNames.removeAll(PDF_OPTIONS);
otherNames.removeAll(LIMIT_OPTIONS);
otherNames.forEach(sj::add);
sj.add("---");
optionNames.forEach(sj::add);
throw new IllegalArgumentException(sj.toString());
}
final TransformationOptions opts = transformationOptions;
ifSet(options, INCLUDE_CONTENTS, (v) -> opts.setIncludeEmbedded(Boolean.parseBoolean(v)));
if (containsAny(optionNames, LIMIT_OPTIONS)) {
TransformationOptionLimits limits = new TransformationOptionLimits();
transformationOptions.setLimits(limits);
ifSet(options, TIMEOUT, (v) -> limits.setTimeoutMs(Long.parseLong(v)));
limits.setMaxSourceSizeKBytes(maxSourceSizeKBytes);
limits.setReadLimitKBytes(readLimitKBytes);
limits.setReadLimitTimeMs(readLimitTimeMs);
limits.setMaxPages(maxPages);
limits.setPageLimit(pageLimit);
}
transformationOptions.setUse(renditionName);
return transformationOptions;
}
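For orientation, here is a hypothetical caller of the converter: it hands in the flat option map of a RenditionDefinition2 and gets back the matching TransformationOptions subclass. The rendition name and option values below are made up, the option keys are assumed to match the usual RenditionDefinition2 constant values, and the sketch sits in the same package because getTransformationOptions is package-visible.
package org.alfresco.repo.rendition2;

import java.util.HashMap;
import java.util.Map;

import org.alfresco.service.cmr.repository.TransformationOptions;

public class TransformationOptionsConverterUsageSketch {

    // converter is assumed to be the configured TransformationOptionsConverter instance
    static TransformationOptions convertExample(final TransformationOptionsConverter converter) {
        final Map<String, String> options = new HashMap<>();
        options.put("resizeWidth", "960");          // RESIZE_WIDTH
        options.put("resizeHeight", "540");         // RESIZE_HEIGHT
        options.put("maintainAspectRatio", "true"); // MAINTAIN_ASPECT_RATIO
        options.put("timeout", "20000");            // TIMEOUT in ms, one of the LIMIT_OPTIONS

        // With these options the method above returns an ImageTransformationOptions whose
        // ImageResizeOptions carry the width/height and whose TransformationOptionLimits
        // carry the 20 second timeout.
        return converter.getTransformationOptions("exampleThumbnail", options);
    }
}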