use of io.vertx.junit5.Timeout in project hono by eclipse.
the class CredentialsApiTests method testGetCredentialsWithAutoProvisioning.
/**
* Verifies that if no credentials are found and the client context in the Get request contains a serialized X.509
* certificate, the credentials and device are created (i.e., automatic provisioning is performed).
* <p>
* It also verifies that an auto-provisioning event is successfully sent and the device registration
* is updated accordingly.
*
* @param ctx The vert.x test context.
* @throws CertificateException if the self-signed certificate cannot be created.
* @throws FileNotFoundException if the self-signed certificate cannot be read.
*/
@Timeout(value = 5, timeUnit = TimeUnit.SECONDS)
@Test
public void testGetCredentialsWithAutoProvisioning(final VertxTestContext ctx) throws CertificateException, FileNotFoundException {
// GIVEN a tenant with auto-provisioning enabled and a client context that contains a client certificate
// while device has not been registered and no credentials are stored yet
final X509Certificate cert = createCertificate();
final var tenant = Tenants.createTenantForTrustAnchor(cert);
tenant.getTrustedCertificateAuthorities().get(0).setAutoProvisioningEnabled(true);
testAutoProvisioningSucceeds(ctx, tenant, cert, false, null);
}
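Note: createCertificate() is a helper of the test class that is not shown in this snippet. A minimal sketch of such a helper, assuming the self-signed certificate is read from a PEM file (the file path below is an assumption made for illustration), could look like this using only the JDK:

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.security.cert.CertificateException;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;

final class CertificateSupport {

    private CertificateSupport() {
    }

    // Reads a self-signed certificate from a PEM file; the file path is a placeholder.
    static X509Certificate createCertificate() throws CertificateException, FileNotFoundException {
        final CertificateFactory factory = CertificateFactory.getInstance("X.509");
        return (X509Certificate) factory.generateCertificate(
                new FileInputStream("target/certs/device-identity-cert.pem"));
    }
}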
use of io.vertx.junit5.Timeout in project hono by eclipse.
the class CredentialsApiTests method testGetCredentialsWithGatewayAutoProvisioningUsingDeviceIdTemplate.
/**
* Verifies that if no credentials are found, auto-provisioning of gateways is enabled, a device ID template is
* configured in the corresponding tenant's CA entry and the client context contains a serialized X.509
* certificate, then a gateway is auto-provisioned (i.e., the gateway is registered and its corresponding
* credentials are stored).
* <p>
* It also verifies that the gateway is auto-provisioned with a device ID generated in accordance
* with the configured device ID template.
* <p>
* It also verifies that an auto-provisioning event is successfully sent and the device registration's
* property {@value RegistryManagementConstants#FIELD_AUTO_PROVISIONING_NOTIFICATION_SENT} is updated to
* {@code true}.
*
* @param ctx The vert.x test context.
* @throws CertificateException if the self-signed certificate cannot be created.
* @throws FileNotFoundException if the self-signed certificate cannot be read.
*/
@Timeout(value = 5, timeUnit = TimeUnit.SECONDS)
@Test
public void testGetCredentialsWithGatewayAutoProvisioningUsingDeviceIdTemplate(final VertxTestContext ctx) throws CertificateException, FileNotFoundException {
// GIVEN a tenant's trusted CA entry with auto-provisioning and auto-provision as gateway enabled
// while gateway has not been registered and no credentials are stored yet
final X509Certificate cert = createCertificate();
final var tenant = Tenants.createTenantForTrustAnchor(cert);
tenant.getTrustedCertificateAuthorities().get(0)
        .setAutoProvisioningEnabled(true)
        .setAutoProvisioningAsGatewayEnabled(true)
        .setAutoProvisioningDeviceIdTemplate("test-device-{{subject-cn}}");
final String expectedDeviceId = "test-device-"
        + AuthenticationConstants.getCommonName(cert.getSubjectX500Principal().getName(X500Principal.RFC2253));
testAutoProvisioningSucceeds(ctx, tenant, cert, true, expectedDeviceId);
}
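The expected device ID is derived by substituting the common name of the certificate's subject DN into the configured template. A minimal sketch of that substitution using only the JDK (an illustration of the concept, not Hono's actual template engine) could look like this:

import javax.naming.InvalidNameException;
import javax.naming.ldap.LdapName;
import javax.security.auth.x500.X500Principal;

final class DeviceIdTemplates {

    private DeviceIdTemplates() {
    }

    // Resolves the {{subject-cn}} placeholder of a device-id template with the
    // common name of the given subject DN.
    static String resolve(final String template, final X500Principal subject) throws InvalidNameException {
        final LdapName dn = new LdapName(subject.getName(X500Principal.RFC2253));
        final String cn = dn.getRdns().stream()
                .filter(rdn -> "CN".equalsIgnoreCase(rdn.getType()))
                .map(rdn -> rdn.getValue().toString())
                .findFirst()
                .orElseThrow(() -> new IllegalArgumentException("subject DN contains no CN"));
        return template.replace("{{subject-cn}}", cn);
    }
}

For example, resolve("test-device-{{subject-cn}}", cert.getSubjectX500Principal()) would yield the same value as the expectedDeviceId computed in the test above.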
use of io.vertx.junit5.Timeout in project hono by eclipse.
the class CredentialsApiTests method testGetCredentialsFailsForDisabledCredentials.
/**
* Verifies that the service fails when the credentials set is disabled.
*
* @param ctx The vert.x test context.
*/
@Timeout(value = 5, timeUnit = TimeUnit.SECONDS)
@Test
public void testGetCredentialsFailsForDisabledCredentials(final VertxTestContext ctx) {
final String deviceId = getHelper().getRandomDeviceId(tenantId);
final String authId = UUID.randomUUID().toString();
final CommonCredential credential = getRandomHashedPasswordCredential(authId);
credential.setEnabled(false);
getHelper().registry.registerDevice(tenantId, deviceId)
        .compose(ok -> getHelper().registry.addCredentials(tenantId, deviceId, Collections.singleton(credential)))
        .compose(ok -> getClient().get(tenantId, CredentialsConstants.SECRETS_TYPE_HASHED_PASSWORD, authId, spanContext))
        .onComplete(ctx.failing(t -> {
ctx.verify(() -> assertErrorCode(t, HttpURLConnection.HTTP_NOT_FOUND));
ctx.completeNow();
}));
}
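getRandomHashedPasswordCredential(authId) is another helper of the test class that is not shown here. As a rough sketch of what a hashed-password secret contains, Hono's Credentials API documentation defines the pwd-hash as the Base64 encoding of the hash function applied to the salt followed by the UTF-8 encoded clear-text password; the helper below is an assumption, only the JSON field names follow that documentation:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.util.Base64;

import io.vertx.core.json.JsonObject;

final class HashedPasswordSecrets {

    private HashedPasswordSecrets() {
    }

    // Creates a sha-256 hashed-password secret: pwd-hash = Base64(SHA-256(salt + password bytes)).
    static JsonObject newSecret(final String password) throws NoSuchAlgorithmException {
        final byte[] salt = new byte[16];
        new SecureRandom().nextBytes(salt);
        final MessageDigest digest = MessageDigest.getInstance("SHA-256");
        digest.update(salt);
        digest.update(password.getBytes(StandardCharsets.UTF_8));
        return new JsonObject()
                .put("hash-function", "sha-256")
                .put("salt", Base64.getEncoder().encodeToString(salt))
                .put("pwd-hash", Base64.getEncoder().encodeToString(digest.digest()));
    }
}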
use of io.vertx.junit5.Timeout in project hono by eclipse.
the class CredentialsApiTests method testGetCredentialsByClientContext.
/**
* Verifies that the service returns credentials for a given type, authentication ID and matching client context.
*
* @param ctx The vert.x test context.
*/
@Timeout(value = 5, timeUnit = TimeUnit.SECONDS)
@Test
public void testGetCredentialsByClientContext(final VertxTestContext ctx) {
final String deviceId = getHelper().getRandomDeviceId(tenantId);
final String authId = UUID.randomUUID().toString();
final CommonCredential credentials = getRandomHashedPasswordCredential(authId).putExtension("client-id", "gateway-one");
final JsonObject clientContext = new JsonObject().put("client-id", "gateway-one");
getHelper().registry.registerDevice(tenantId, deviceId)
        .compose(httpResponse -> getHelper().registry.addCredentials(tenantId, deviceId, List.of(credentials)))
        .compose(ok -> getClient().get(tenantId, CredentialsConstants.SECRETS_TYPE_HASHED_PASSWORD, authId, clientContext, spanContext))
        .onComplete(ctx.succeeding(result -> {
ctx.verify(() -> {
assertStandardProperties(result, deviceId, authId, CredentialsConstants.SECRETS_TYPE_HASHED_PASSWORD, 2);
});
ctx.completeNow();
}));
}
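The lookup succeeds because the client context matches the credential's extension properties (set via putExtension above). A minimal sketch of such a matching rule (an illustration of the concept, not Hono's actual implementation) could look like this:

import java.util.Map;
import java.util.Objects;

import io.vertx.core.json.JsonObject;

final class ClientContextMatching {

    private ClientContextMatching() {
    }

    // Returns true if every property of the client context is present in the
    // credential's extension properties with an equal value.
    static boolean matches(final JsonObject clientContext, final Map<String, Object> extensions) {
        return clientContext.stream()
                .allMatch(entry -> Objects.equals(extensions.get(entry.getKey()), entry.getValue()));
    }
}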
use of io.vertx.junit5.Timeout in project hono by eclipse.
the class HonoKafkaConsumerIT method testConsumerReadsLatestRecordsPublishedAfterOutOfRangeOffsetReset.
/**
* Verifies that a HonoKafkaConsumer configured with "latest" as offset reset strategy will receive
* all still available records after the committed offset position has gone out of range
* (because records have been deleted according to the retention config) and the consumer is restarted.
*
* @param ctx The vert.x test context.
* @throws InterruptedException if test execution gets interrupted.
*/
@Test
@Timeout(value = 10, timeUnit = TimeUnit.SECONDS)
public void testConsumerReadsLatestRecordsPublishedAfterOutOfRangeOffsetReset(final VertxTestContext ctx) throws InterruptedException {
final int numTopics = 1;
final int numTestRecordsPerTopicPerRound = 20;
// has to be 1 here because we expect partition 0 to contain *all* the records published for a topic
final int numPartitions = 1;
// prepare topics
final Set<String> topics = IntStream.range(0, numTopics).mapToObj(i -> "test_" + i + "_" + UUID.randomUUID()).collect(Collectors.toSet());
final String publishTestTopic = topics.iterator().next();
final VertxTestContext setup = new VertxTestContext();
final Map<String, String> topicsConfig = Map.of(TopicConfig.RETENTION_MS_CONFIG, "300", TopicConfig.SEGMENT_BYTES_CONFIG, SMALL_TOPIC_SEGMENT_SIZE_BYTES);
createTopics(topics, numPartitions, topicsConfig).onComplete(setup.succeedingThenComplete());
assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
if (setup.failed()) {
ctx.failNow(setup.causeOfFailure());
return;
}
// prepare consumer
final Map<String, String> consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig().getConsumerConfig("test");
consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
final VertxTestContext firstConsumerInstanceStartedAndStopped = new VertxTestContext();
final List<KafkaConsumerRecord<String, Buffer>> receivedRecords = new ArrayList<>();
final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler = record -> {
receivedRecords.add(record);
if (receivedRecords.size() == numTestRecordsPerTopicPerRound * topics.size()) {
LOG.trace("first round of records received; stop consumer; committed offset afterwards shall be {}", numTestRecordsPerTopicPerRound);
kafkaConsumer.stop().onFailure(ctx::failNow).onSuccess(v2 -> {
LOG.trace("publish 2nd round of records (shall be deleted before the to-be-restarted consumer is able to receive them)");
publishRecords(numTestRecordsPerTopicPerRound, "round2_", topics).onFailure(ctx::failNow).onSuccess(v3 -> {
LOG.trace("wait until records of first two rounds have been deleted according to the retention policy (committed offset will be out-of-range then)");
final int beginningOffsetToWaitFor = numTestRecordsPerTopicPerRound * 2;
waitForLogDeletion(new TopicPartition(publishTestTopic, 0), beginningOffsetToWaitFor, Duration.ofSeconds(5))
        .onComplete(firstConsumerInstanceStartedAndStopped.succeedingThenComplete());
});
});
}
};
kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler, consumerConfig);
// first start of consumer, letting it commit offsets
kafkaConsumer.start().onComplete(ctx.succeeding(v -> {
LOG.trace("consumer started, publish first round of records to be received by the consumer (so that it has offsets to commit)");
publishRecords(numTestRecordsPerTopicPerRound, "round1_", topics);
}));
assertThat(firstConsumerInstanceStartedAndStopped.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue();
if (firstConsumerInstanceStartedAndStopped.failed()) {
ctx.failNow(firstConsumerInstanceStartedAndStopped.causeOfFailure());
return;
}
// preparation done, now start same consumer again and verify it reads all still available records - even though committed offset is out-of-range now
receivedRecords.clear();
final String lastRecordKey = "lastKey";
// restarted consumer is expected to receive 3rd round of records + one extra record published after consumer start
final int expectedNumberOfRecords = (numTestRecordsPerTopicPerRound * topics.size()) + 1;
final Handler<KafkaConsumerRecord<String, Buffer>> recordHandler2 = record -> {
receivedRecords.add(record);
if (receivedRecords.size() == expectedNumberOfRecords) {
ctx.verify(() -> {
assertThat(receivedRecords.get(0).key()).startsWith("round3");
assertThat(receivedRecords.get(receivedRecords.size() - 1).key()).isEqualTo(lastRecordKey);
});
ctx.completeNow();
}
};
LOG.trace("publish 3nd round of records (shall be received by to-be-restarted consumer)");
publishRecords(numTestRecordsPerTopicPerRound, "round3_", topics).onFailure(ctx::failNow).onSuccess(v -> {
kafkaConsumer = new HonoKafkaConsumer(vertx, topics, recordHandler2, consumerConfig);
kafkaConsumer.start().onComplete(ctx.succeeding(v2 -> {
LOG.debug("consumer started, publish another record to be received by the consumer");
publish(publishTestTopic, lastRecordKey, Buffer.buffer("testPayload"));
}));
});
if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) {
ctx.failNow(new IllegalStateException(String.format("timeout waiting for expected number of records (%d) to be received; received records: %d", expectedNumberOfRecords, receivedRecords.size())));
}
}
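waitForLogDeletion(...) is a helper of the test class that is not shown here. A rough, blocking sketch of the underlying idea, polling the partition's beginning offset with the plain Kafka consumer API until the broker's retention policy has removed the old records (the config map and the poll interval are assumptions), could look like this:

import java.time.Duration;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

final class LogDeletionAwaiter {

    private LogDeletionAwaiter() {
    }

    // Blocks until the partition's beginning offset has reached the given value,
    // i.e. until all records below that offset have been deleted by the broker.
    static void awaitLogDeletion(final Map<String, Object> config, final TopicPartition partition,
            final long expectedBeginningOffset, final Duration timeout) throws InterruptedException {

        final long deadline = System.currentTimeMillis() + timeout.toMillis();
        try (var consumer = new KafkaConsumer<>(config, new StringDeserializer(), new StringDeserializer())) {
            while (System.currentTimeMillis() < deadline) {
                final long beginningOffset = consumer
                        .beginningOffsets(List.of(partition))
                        .get(partition);
                if (beginningOffset >= expectedBeginningOffset) {
                    return;
                }
                Thread.sleep(100);
            }
        }
        throw new IllegalStateException("timed out waiting for log deletion");
    }
}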