Usage of io.confluent.ksql.util.KsqlServerException in the ksql project by confluentinc:
class TestExecutor, method validateCreatedMessage.
/**
 * Asserts that an actually-produced record matches the expected test record.
 *
 * <p>Compares key, value, headers and timestamp, throwing a single pre-built
 * {@link AssertionError} (with a descriptive topic/index message) on the first mismatch.
 *
 * @param topicName the topic the record was produced to (used in the failure message)
 * @param expectedRecord the expected record from the test spec
 * @param actualProducerRecord the record actually produced
 * @param ranWithInsertStatements whether the test ran via INSERT statements; if so, an
 *        expected timestamp of 0 is treated as "don't care"
 * @param messageIndex position of the record within the topic (used in the failure message)
 */
private static void validateCreatedMessage(
    final String topicName,
    final Record expectedRecord,
    final ProducerRecord<?, ?> actualProducerRecord,
    final boolean ranWithInsertStatements,
    final int messageIndex) {
  final Object actualKey = coerceRecordFields(actualProducerRecord.key());
  final Object actualValue = coerceRecordFields(actualProducerRecord.value());
  final long actualTimestamp = actualProducerRecord.timestamp();
  final Headers actualHeaders = actualProducerRecord.headers();

  final JsonNode expectedKey = expectedRecord.getJsonKey().orElse(NullNode.getInstance());
  final JsonNode expectedValue = expectedRecord.getJsonValue()
      .orElseThrow(() -> new KsqlServerException(
          "could not get expected value from test record: " + expectedRecord));
  // If the spec gives no timestamp, the actual one is accepted as-is.
  final long expectedTimestamp = expectedRecord.timestamp().orElse(actualTimestamp);
  final List<TestHeader> expectedHeaders = expectedRecord.headers().orElse(ImmutableList.of());

  final AssertionError error = new AssertionError(
      "Topic '" + topicName + "', message " + messageIndex
          + ": Expected <" + expectedKey + ", " + expectedValue + "> "
          + "with timestamp=" + expectedTimestamp
          + " and headers=[" + printHeaders(expectedHeaders)
          + "] but was " + getProducerRecordInString(actualProducerRecord));

  if (expectedRecord.getWindow() != null) {
    // Windowed keys: compare the window bounds and the unwrapped key separately.
    final Windowed<?> windowed = (Windowed<?>) actualKey;
    if (!new WindowData(windowed).equals(expectedRecord.getWindow())
        || !ExpectedRecordComparator.matches(windowed.key(), expectedKey)) {
      throw error;
    }
  } else {
    if (!ExpectedRecordComparator.matches(actualKey, expectedKey)) {
      throw error;
    }
  }
  if (!ExpectedRecordComparator.matches(actualValue, expectedValue)) {
    throw error;
  }
  if (!ExpectedRecordComparator.matches(actualHeaders.toArray(), expectedHeaders)) {
    throw error;
  }
  // Timestamp mismatch is tolerated only for INSERT-statement runs with an expected
  // timestamp of 0 (INSERTs cannot pin a producer timestamp).
  if ((actualTimestamp != expectedTimestamp)
      && (!ranWithInsertStatements || expectedTimestamp != 0L)) {
    throw error;
  }
}
Usage of io.confluent.ksql.util.KsqlServerException in the ksql project by confluentinc:
class LocalCommandsTest, method shouldNotThrowWhenFailToCleanup.
@Test
public void shouldNotThrowWhenFailToCleanup() throws IOException {
  // Given: reading the local-commands file will fail
  final File dir = commandsDir.newFolder();
  final LocalCommands firstCommands = LocalCommands.open(ksqlEngine, dir);
  doThrow(new KsqlServerException("Error")).when(localCommandsFile).readRecords();

  // When:
  firstCommands.write(metadata1);
  // Open a second LocalCommands instance so processing does not skip the
  // "current" file that was just written.
  final LocalCommands secondCommands = LocalCommands.open(ksqlEngine, dir);
  secondCommands.write(metadata3);
  secondCommands.processLocalCommandFiles(serviceContext);

  // Then: no exception propagates, and cleanup still runs for the first query's app id.
  verify(ksqlEngine).cleanupOrphanedInternalTopics(any(), eq(ImmutableSet.of(QUERY_APP_ID1)));
}
Usage of io.confluent.ksql.util.KsqlServerException in the ksql project by confluentinc:
class KafkaClusterUtil, method getConfig.
/**
 * Fetches the broker-level {@link Config} from the Kafka cluster.
 *
 * <p>Uses the first broker returned by {@code describeCluster} and retries the
 * {@code describeConfigs} call on retryable errors.
 *
 * @param adminClient the admin client used to query the cluster
 * @return the config of the first discovered broker
 * @throws KsqlServerException if the cluster is empty, authorization is missing,
 *         or the config cannot be retrieved for any other reason
 */
public static Config getConfig(final Admin adminClient) {
  try {
    final Collection<Node> clusterNodes = adminClient.describeCluster().nodes().get();
    final Node firstBroker = Iterables.getFirst(clusterNodes, null);
    if (firstBroker == null) {
      LOG.warn("No available broker found to fetch config info.");
      throw new KsqlServerException("AdminClient discovered an empty Kafka Cluster. "
          + "Check that Kafka is deployed and KSQL is properly configured.");
    }

    final ConfigResource brokerResource =
        new ConfigResource(ConfigResource.Type.BROKER, firstBroker.idString());
    final Map<ConfigResource, Config> configsByResource = ExecutorUtil.executeWithRetries(
        () -> adminClient.describeConfigs(Collections.singleton(brokerResource)).all().get(),
        ExecutorUtil.RetryBehaviour.ON_RETRYABLE);
    return configsByResource.get(brokerResource);
  } catch (final KsqlServerException e) {
    // Already in our domain exception type; re-throw untouched.
    throw e;
  } catch (final ClusterAuthorizationException e) {
    throw new KsqlServerException("Could not get Kafka cluster configuration. "
        + "Please ensure the ksql principal has " + AclOperation.DESCRIBE_CONFIGS + " rights "
        + "on the Kafka cluster." + System.lineSeparator()
        + "See " + DocumentationLinks.SECURITY_REQUIRED_ACLS_DOC_URL + " for more info.", e);
  } catch (final Exception e) {
    throw new KsqlServerException("Could not get Kafka cluster configuration!", e);
  }
}
Usage of io.confluent.ksql.util.KsqlServerException in the ksql project by confluentinc:
class DefaultConnectClient, method delete.
/**
 * Deletes the named connector via the Kafka Connect REST API.
 *
 * <p>A delete error is logged (not thrown); any transport-level failure is wrapped
 * in a {@link KsqlServerException}.
 */
@Override
public ConnectResponse<String> delete(final String connector) {
  try {
    LOG.debug("Issuing request to Kafka Connect at URI {} to delete {}", connectUri, connector);

    final ConnectResponse<String> response = withRetries(() ->
        Request.delete(resolveUri(String.format("%s/%s", CONNECTORS, connector)))
            .setHeaders(requestHeaders)
            .responseTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
            .connectTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
            .execute(httpClient)
            .handleResponse(createHandler(
                // Connect returns 204 on success; accept 200 as well.
                ImmutableList.of(HttpStatus.SC_NO_CONTENT, HttpStatus.SC_OK),
                new TypeReference<Object>() {
                },
                foo -> connector)));

    response.error()
        .ifPresent(error -> LOG.warn("Could not delete connector: {}.", error));
    return response;
  } catch (final Exception e) {
    throw new KsqlServerException(e);
  }
}
Usage of io.confluent.ksql.util.KsqlServerException in the ksql project by confluentinc:
class DefaultConnectClient, method describe.
/**
 * Fetches the {@link ConnectorInfo} (config/status) for the named connector from the
 * Kafka Connect REST API.
 *
 * <p>A describe error is logged (not thrown); any transport-level failure is wrapped
 * in a {@link KsqlServerException}.
 */
@Override
public ConnectResponse<ConnectorInfo> describe(final String connector) {
  try {
    LOG.debug("Issuing request to Kafka Connect at URI {} to get config for {}",
        connectUri, connector);

    final ConnectResponse<ConnectorInfo> connectResponse = withRetries(() ->
        Request.get(resolveUri(String.format("%s/%s", CONNECTORS, connector)))
            .setHeaders(requestHeaders)
            .responseTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
            .connectTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
            .execute(httpClient)
            .handleResponse(createHandler(
                HttpStatus.SC_OK,
                new TypeReference<ConnectorInfo>() {
                },
                Function.identity())));

    // Fix: previous message said "Could not list connectors", copied from the list()
    // method; this is the describe path.
    connectResponse.error()
        .ifPresent(error -> LOG.warn("Could not describe connector: {}.", error));
    return connectResponse;
  } catch (final Exception e) {
    throw new KsqlServerException(e);
  }
}
Aggregations