Use of io.confluent.ksql.util.KsqlServerException in project ksql by confluentinc.
The class DefaultConnectClient, method validate.
@Override
public ConnectResponse<ConfigInfos> validate(final String plugin, final Map<String, String> config) {
  try {
    LOG.debug("Issuing validate request to Kafka Connect at URI {} for plugin {} and config {}",
        connectUri, plugin, config);
    final ConnectResponse<ConfigInfos> connectResponse = withRetries(() -> Request
        .put(resolveUri(String.format(VALIDATE_CONNECTOR, plugin)))
        .setHeaders(requestHeaders)
        .responseTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
        .connectTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
        .bodyString(MAPPER.writeValueAsString(config), ContentType.APPLICATION_JSON)
        .execute(httpClient)
        .handleResponse(createHandler(
            HttpStatus.SC_OK,
            new TypeReference<ConfigInfos>() {},
            Function.identity())));
    connectResponse.error().ifPresent(error ->
        LOG.warn("Did not VALIDATE connector configuration for plugin {} and config {}: {}",
            plugin, config, error));
    return connectResponse;
  } catch (final Exception e) {
    throw new KsqlServerException(e);
  }
}
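For orientation, here is a hedged caller-side sketch. The connectClient variable, the plugin name, and the config values are hypothetical; it assumes ConnectResponse exposes a datum() accessor alongside the error() accessor used above, and that ConfigInfos.errorCount() from the Kafka Connect REST entities is available.

// Hypothetical caller: validate a connector config before attempting to create the connector.
final Map<String, String> config = Map.of(
    "connector.class", "io.confluent.connect.jdbc.JdbcSourceConnector",  // placeholder plugin
    "connection.url", "jdbc:postgresql://localhost/db");                 // placeholder config
final ConnectResponse<ConfigInfos> response =
    connectClient.validate("JdbcSourceConnector", config);
response.error().ifPresent(err -> LOG.warn("Validation request failed: {}", err));
response.datum().ifPresent(infos -> {                    // datum() assumed to hold the parsed body
  if (infos.errorCount() > 0) {
    LOG.warn("Connector config has {} invalid value(s)", infos.errorCount());
  }
});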
Use of io.confluent.ksql.util.KsqlServerException in project ksql by confluentinc.
The class DefaultConnectClient, method connectorPlugins.
@Override
public ConnectResponse<List<ConnectorPluginInfo>> connectorPlugins() {
  try {
    LOG.debug("Issuing request to Kafka Connect at URI {} to list connector plugins", connectUri);
    final ConnectResponse<List<ConnectorPluginInfo>> connectResponse = withRetries(() -> Request
        .get(resolveUri(CONNECTOR_PLUGINS))
        .setHeaders(requestHeaders)
        .responseTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
        .connectTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
        .execute(httpClient)
        .handleResponse(createHandler(
            HttpStatus.SC_OK,
            new TypeReference<List<ConnectorPluginInfo>>() {},
            Function.identity())));
    connectResponse.error().ifPresent(error ->
        LOG.warn("Could not list connector plugins: {}.", error));
    return connectResponse;
  } catch (final Exception e) {
    throw new KsqlServerException(e);
  }
}
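A hedged usage sketch follows, again assuming a datum() accessor on ConnectResponse and assuming ConnectorPluginInfo exposes className(); connectClient is a hypothetical instance.

// Hypothetical caller: log every connector plugin installed on the Connect worker.
final ConnectResponse<List<ConnectorPluginInfo>> plugins = connectClient.connectorPlugins();
plugins.error().ifPresent(err -> LOG.warn("Plugin listing failed: {}", err));
plugins.datum().ifPresent(list ->                         // datum() assumed to hold the parsed body
    list.forEach(plugin -> LOG.info("Installed connector plugin: {}", plugin.className())));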
Use of io.confluent.ksql.util.KsqlServerException in project ksql by confluentinc.
The class DefaultConnectClient, method status.
@Override
public ConnectResponse<ConnectorStateInfo> status(final String connector) {
  try {
    LOG.debug("Issuing status request to Kafka Connect at URI {} with name {}", connectUri, connector);
    final ConnectResponse<ConnectorStateInfo> connectResponse = withRetries(() -> Request
        .get(resolveUri(CONNECTORS + "/" + connector + STATUS))
        .setHeaders(requestHeaders)
        .responseTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
        .connectTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
        .execute(httpClient)
        .handleResponse(createHandler(
            HttpStatus.SC_OK,
            new TypeReference<ConnectorStateInfo>() {},
            Function.identity())));
    connectResponse.error().ifPresent(error ->
        LOG.warn("Could not query status of connector {}: {}", connector, error));
    return connectResponse;
  } catch (final Exception e) {
    throw new KsqlServerException(e);
  }
}
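One way a caller might use this is to poll until the connector reports RUNNING. This is a hedged sketch: connectClient and the connector name are hypothetical, datum() on ConnectResponse is assumed, and connector().state() comes from Kafka Connect's ConnectorStateInfo.

// Hypothetical caller: poll status() until the connector is RUNNING or the retry budget is spent.
// The enclosing method is assumed to declare throws InterruptedException for the sleep below.
for (int attempt = 0; attempt < 10; attempt++) {
  final ConnectResponse<ConnectorStateInfo> status = connectClient.status("my-connector");
  final boolean running = status.datum()
      .map(info -> "RUNNING".equals(info.connector().state()))
      .orElse(false);
  if (running) {
    break;
  }
  Thread.sleep(500L);
}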
Use of io.confluent.ksql.util.KsqlServerException in project ksql by confluentinc.
The class DistributingExecutor, method execute.
/**
 * The transactional protocol for sending a command to the command topic is to
 * initTransactions(), beginTransaction(), wait for the commandRunner to finish processing all
 * previous commands that were present at the start of the transaction, validate the current
 * command, enqueue the command in the command topic, and commit the transaction.
 * Only successfully committed commands can be read by the command topic consumer.
 * If any exception is thrown during this protocol, the transaction is aborted.
 * If a new transactional producer is initialized while the current transaction is incomplete,
 * the old producer is fenced off and unable to continue with its transaction.
 */
// CHECKSTYLE_RULES.OFF: NPathComplexity
public StatementExecutorResponse execute(
    final ConfiguredStatement<? extends Statement> statement,
    final KsqlExecutionContext executionContext,
    final KsqlSecurityContext securityContext
) {
  final String commandRunnerWarningString = commandRunnerWarning.get();
  if (!commandRunnerWarningString.equals("")) {
    throw new KsqlServerException("Failed to handle Ksql Statement."
        + System.lineSeparator() + commandRunnerWarningString);
  }
  final ConfiguredStatement<?> injected = injectorFactory
      .apply(executionContext, securityContext.getServiceContext())
      .inject(statement);
  if (injected.getStatement() instanceof InsertInto) {
    validateInsertIntoQueries(executionContext.getMetaStore(), (InsertInto) injected.getStatement());
  }
  final Optional<StatementExecutorResponse> response =
      checkIfNotExistsResponse(executionContext, statement);
  if (response.isPresent()) {
    return response.get();
  }
  checkAuthorization(injected, securityContext, executionContext);
  final Producer<CommandId, Command> transactionalProducer =
      commandQueue.createTransactionalProducer();
  try {
    transactionalProducer.initTransactions();
  } catch (final TimeoutException e) {
    throw new KsqlServerException(errorHandler.transactionInitTimeoutErrorMessage(e), e);
  } catch (final Exception e) {
    throw new KsqlServerException(String.format(
        "Could not write the statement '%s' into the command topic: %s",
        statement.getStatementText(), e.getMessage()), e);
  }
  if (!rateLimiter.tryAcquire(1, TimeUnit.SECONDS)) {
    throw new KsqlRestException(Errors.tooManyRequests(
        "DDL/DML rate is crossing the configured rate limit of statements/second"));
  }
  CommandId commandId = null;
  try {
    transactionalProducer.beginTransaction();
    commandQueue.waitForCommandConsumer();
    commandId = commandIdAssigner.getCommandId(statement.getStatement());
    final Command command = validatedCommandFactory.create(
        injected,
        executionContext.createSandbox(executionContext.getServiceContext()));
    final QueuedCommandStatus queuedCommandStatus =
        commandQueue.enqueueCommand(commandId, command, transactionalProducer);
    transactionalProducer.commitTransaction();
    final CommandStatus commandStatus =
        queuedCommandStatus.tryWaitForFinalStatus(distributedCmdResponseTimeout);
    return StatementExecutorResponse.handled(Optional.of(new CommandStatusEntity(
        injected.getStatementText(),
        queuedCommandStatus.getCommandId(),
        commandStatus,
        queuedCommandStatus.getCommandSequenceNumber(),
        getDeprecatedWarnings(executionContext.getMetaStore(), injected))));
  } catch (final ProducerFencedException | OutOfOrderSequenceException | AuthorizationException e) {
    // This catch doesn't abortTransaction() since doing so would throw another exception.
    if (commandId != null) {
      commandQueue.abortCommand(commandId);
    }
    throw new KsqlServerException(String.format(
        "Could not write the statement '%s' into the command topic.",
        statement.getStatementText()), e);
  } catch (final Exception e) {
    transactionalProducer.abortTransaction();
    if (commandId != null) {
      commandQueue.abortCommand(commandId);
    }
    throw new KsqlServerException(String.format(
        "Could not write the statement '%s' into the command topic.",
        statement.getStatementText()), e);
  } finally {
    transactionalProducer.close();
  }
}
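The transactional sequence described in the Javadoc above maps onto the plain Kafka producer API roughly as follows. This is a simplified sketch: the properties, topic name, and record contents are placeholders, and the command-runner wait and command validation steps from the real implementation are reduced to a comment.

// Simplified sketch of the transactional write protocol described above (placeholder topic/records).
final Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "command-topic-producer");   // placeholder id
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
final KafkaProducer<String, String> producer = new KafkaProducer<>(props);
producer.initTransactions();        // fences off any older producer using the same transactional.id
try {
  producer.beginTransaction();
  // ... wait for previously enqueued commands to be processed and validate the new command here ...
  producer.send(new ProducerRecord<>("placeholder_command_topic", "commandId", "command"));
  producer.commitTransaction();     // only committed records are visible to read_committed consumers
} catch (final ProducerFencedException | OutOfOrderSequenceException | AuthorizationException e) {
  producer.close();                 // fatal: this producer can no longer be used, so don't abort
  throw e;
} catch (final KafkaException e) {
  producer.abortTransaction();      // recoverable: abort so nothing from this transaction is exposed
  throw e;
}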
Use of io.confluent.ksql.util.KsqlServerException in project ksql by confluentinc.
The class ValidatedCommandFactory, method ensureDeserializable.
/**
 * Ensure any command written to the command topic can be deserialized.
 *
 * <p>Any command that can't be deserialized is a bug. However, given that a non-deserializable
 * command will kill the command runner thread, this is a safety net to ensure commands written
 * to the command topic can be deserialized.
 *
 * @param command the command to test.
 * @return the passed in command.
 */
private static Command ensureDeserializable(final Command command) {
  try {
    final String json = PlanJsonMapper.INSTANCE.get().writeValueAsString(command);
    PlanJsonMapper.INSTANCE.get().readValue(json, Command.class);
    return command;
  } catch (final JsonProcessingException e) {
    throw new KsqlServerException("Did not write the command to the command topic "
        + "as it could not be deserialized. This is a bug! Please raise a GitHub issue "
        + "containing the series of commands you ran to get to this point."
        + System.lineSeparator() + e.getMessage());
  }
}
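The same round-trip guard can be reproduced with a plain Jackson ObjectMapper. This is a generic sketch, not ksql's PlanJsonMapper; the method name and the exception it throws are illustrative.

// Generic round-trip check: serialize, then deserialize, and fail fast if either step breaks.
private static <T> T ensureRoundTrips(final ObjectMapper mapper, final T value, final Class<T> type) {
  try {
    final String json = mapper.writeValueAsString(value);   // serialize to JSON
    mapper.readValue(json, type);                           // prove it can be read back
    return value;
  } catch (final IOException e) {                           // covers Jackson's checked exceptions
    throw new IllegalStateException(
        "Value does not survive a JSON round-trip: " + e.getMessage(), e);
  }
}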