Example usage of io.confluent.ksql.rest.entity.KsqlErrorMessage in the ksql project by confluentinc.
From the class StreamedQueryResourceTest, method shouldReturnForbiddenKafkaAccessIfKsqlTopicAuthorizationException.
@Test
public void shouldReturnForbiddenKafkaAccessIfKsqlTopicAuthorizationException() {
  // Given: the statement parses as a push query, but the authorization check
  // rejects it with a topic-level READ denial.
  when(mockStatementParser.<Query>parseSingleStatement(PUSH_QUERY_STRING)).thenReturn(query);
  doThrow(new KsqlTopicAuthorizationException(AclOperation.READ, Collections.singleton(TOPIC_NAME)))
      .when(authorizationValidator).checkAuthorization(any(), any(), any());

  // When:
  final EndpointResponse response = testResource.streamQuery(
      securityContext,
      new KsqlRequest(PUSH_QUERY_STRING, Collections.emptyMap(), Collections.emptyMap(), null),
      new CompletableFuture<>(),
      Optional.empty(),
      new MetricsCallbackHolder(),
      context);

  // Then: the response matches the canonical authorization-error response.
  // JUnit's assertEquals takes (expected, actual); the original had the
  // arguments swapped, which produces misleading failure messages.
  final KsqlErrorMessage responseEntity = (KsqlErrorMessage) response.getEntity();
  final KsqlErrorMessage expectedEntity = (KsqlErrorMessage) AUTHORIZATION_ERROR_RESPONSE.getEntity();
  assertEquals(AUTHORIZATION_ERROR_RESPONSE.getStatus(), response.getStatus());
  assertEquals(expectedEntity.getMessage(), responseEntity.getMessage());
}
Example usage of io.confluent.ksql.rest.entity.KsqlErrorMessage in the ksql project by confluentinc.
From the class KsqlClientTest, method shouldHandleErrorMessageOnGetRequests.
@Test
public void shouldHandleErrorMessageOnGetRequests() {
  // Given: the test server is primed to answer with HTTP 400 and a
  // structured KsqlErrorMessage payload.
  server.setResponseObject(new KsqlErrorMessage(40000, "ouch"));
  server.setErrorCode(400);

  // When: a server-info GET is issued against that server.
  final RestResponse<ServerInfo> serverInfoResponse =
      ksqlClient.target(serverUri).getServerInfo();

  // Then: the client surfaces status code, error code, and message intact.
  assertThat(serverInfoResponse.getStatusCode(), is(400));
  assertThat(serverInfoResponse.getErrorMessage().getErrorCode(), is(40000));
  assertThat(serverInfoResponse.getErrorMessage().getMessage(), is("ouch"));
}
Example usage of io.confluent.ksql.rest.entity.KsqlErrorMessage in the ksql project by confluentinc.
From the class DistributingExecutorTest, method shouldThrowIfRateLimitHit.
@Test
public void shouldThrowIfRateLimitHit() {
  // Given: a distributor whose command-topic rate limit is 0.5 statements/sec,
  // so a second execute() call inside the same window must be rejected.
  final DistributingExecutor rateLimitedDistributor = new DistributingExecutor(
      new KsqlConfig(ImmutableMap.of(KsqlRestConfig.KSQL_COMMAND_TOPIC_RATE_LIMIT_CONFIG, 0.5)),
      queue,
      DURATION_10_MS,
      (ec, sc) -> InjectorChain.of(schemaInjector, topicInjector),
      Optional.of(authorizationValidator),
      validatedCommandFactory,
      errorHandler,
      commandRunnerWarning);

  // When: the first statement consumes the rate-limit budget.
  rateLimitedDistributor.execute(CONFIGURED_STATEMENT, executionContext, securityContext);

  // Then: the next statement is rejected with HTTP 429 (Too Many Requests).
  final KsqlRestException e = assertThrows(
      KsqlRestException.class,
      () -> rateLimitedDistributor.execute(CONFIGURED_STATEMENT, executionContext, securityContext));
  // assertEquals takes (expected, actual); the original had them swapped.
  assertEquals(429, e.getResponse().getStatus());
  final KsqlErrorMessage errorMessage = (KsqlErrorMessage) e.getResponse().getEntity();
  assertTrue(errorMessage.getMessage().contains(
      "DDL/DML rate is crossing the configured rate limit of statements/second"));
}
Example usage of io.confluent.ksql.rest.entity.KsqlErrorMessage in the ksql project by confluentinc.
From the class TestKsqlRestApp, method terminateQueries.
/**
 * Issues {@code TERMINATE <id>;} for every query in {@code queryIds}, retrying the
 * failures on each pass until all succeed. If a full pass terminates nothing, the
 * method gives up and surfaces the most recent error.
 *
 * @param queryIds ids of the queries to terminate
 * @param client   the REST client used to submit TERMINATE statements
 */
private void terminateQueries(final Set<String> queryIds, final KsqlRestClient client) {
  final Set<String> pending = new HashSet<>(queryIds);
  while (!pending.isEmpty()) {
    KsqlErrorMessage mostRecentError = null;
    final Set<String> terminated = new HashSet<>();
    for (final String queryId : pending) {
      final RestResponse<KsqlEntityList> result =
          makeKsqlRequest(client, "TERMINATE " + queryId + ";");
      if (!result.isSuccessful()) {
        // Remember the failure; it may succeed on a later pass once
        // other queries have been torn down.
        mostRecentError = result.getErrorMessage();
      } else {
        terminated.add(queryId);
      }
    }
    if (terminated.isEmpty()) {
      // No progress this pass — avoid looping forever.
      throw new AssertionError("Failed to terminate queries. lastError:" + mostRecentError);
    }
    pending.removeAll(terminated);
  }
}
Example usage of io.confluent.ksql.rest.entity.KsqlErrorMessage in the ksql project by confluentinc.
From the class PullQueryRoutingFunctionalTest, method shouldFilterLaggyServers.
@Test
public void shouldFilterLaggyServers() throws Exception {
  // Given: a cluster (router / active / standby) fully discovered, with lag
  // information reported for all three hosts.
  ClusterFormation clusterFormation = findClusterFormation(TEST_APP_0, TEST_APP_1, TEST_APP_2);
  waitForClusterToBeDiscovered(clusterFormation.router.getApp(), 3, USER_CREDS);
  waitForRemoteServerToChangeStatus(clusterFormation.router.getApp(), clusterFormation.router.getHost(), HighAvailabilityTestUtil.lagsReported(3), USER_CREDS);
  waitForRemoteServerToChangeStatus(clusterFormation.router.getApp(), clusterFormation.active.getHost(), HighAvailabilityTestUtil::remoteServerIsUp, USER_CREDS);
  waitForRemoteServerToChangeStatus(clusterFormation.router.getApp(), clusterFormation.standBy.getHost(), HighAvailabilityTestUtil::remoteServerIsUp, USER_CREDS);
  waitForRemoteServerToChangeStatus(clusterFormation.router.getApp(), clusterFormation.router.getHost(), HighAvailabilityTestUtil.lagsReported(clusterFormation.standBy.getHost(), Optional.of(5L), 5), USER_CREDS);
  // Cut off standby from Kafka to simulate lag
  clusterFormation.standBy.getShutoffs().setKafkaPauseOffset(0);
  Thread.sleep(2000);
  // Produce more data that will now only be available on active since standby is cut off
  TEST_HARNESS.produceRows(topic, USER_PROVIDER, FormatFactory.KAFKA, FormatFactory.JSON, timestampSupplier::getAndIncrement);
  // Make sure that the lags get reported before we kill active
  waitForRemoteServerToChangeStatus(clusterFormation.router.getApp(), clusterFormation.router.getHost(), HighAvailabilityTestUtil.lagsReported(clusterFormation.active.getHost(), Optional.of(10L), 10), USER_CREDS);
  // Partition active off
  clusterFormation.active.getShutoffs().shutOffAll();
  waitForRemoteServerToChangeStatus(clusterFormation.router.getApp(), clusterFormation.standBy.getHost(), HighAvailabilityTestUtil::remoteServerIsUp, USER_CREDS);
  waitForRemoteServerToChangeStatus(clusterFormation.router.getApp(), clusterFormation.active.getHost(), HighAvailabilityTestUtil::remoteServerIsDown, USER_CREDS);
  // When: querying through the router with a lag filter generous enough (6)
  // to still allow the lagging standby.
  final List<StreamedRow> rows_0 = makePullQueryRequest(clusterFormation.router.getApp(), sql, LAG_FILTER_6, USER_CREDS);
  // Then: the row is served by the standby host.
  assertThat(rows_0, hasSize(HEADER + 1));
  KsqlHostInfoEntity host = rows_0.get(1).getSourceHost().get();
  assertThat(host.getHost(), is(clusterFormation.standBy.getHost().getHost()));
  assertThat(host.getPort(), is(clusterFormation.standBy.getHost().getPort()));
  assertThat(rows_0.get(1).getRow(), is(not(Optional.empty())));
  // This line ensures that we've not processed the new data
  assertThat(rows_0.get(1).getRow().get().getColumns(), is(ImmutableList.of(KEY, 1)));
  // A stricter lag filter (3) excludes the standby too, so the query must fail.
  KsqlErrorMessage errorMessage = makePullQueryRequestWithError(clusterFormation.router.getApp(), sql, LAG_FILTER_3);
  // Use Hamcrest assertThat throughout for consistency with the rest of the
  // method (the original mixed in a lone JUnit Assert.assertEquals).
  assertThat(errorMessage.getErrorCode(), is(40001));
  assertThat(errorMessage.getMessage(), containsString("Partition 0 failed to find valid host."));
  assertThat(errorMessage.getMessage(), containsString("was not selected because Host is not alive as of "));
  assertThat(errorMessage.getMessage(), containsString("was not selected because Host excluded because lag 5 exceeds maximum allowed lag 3"));
}
Aggregations