Use of io.confluent.ksql.rest.entity.KsqlHostInfoEntity in project ksql by confluentinc.
From the class ExplainExecutorTest, method shouldExplainQueryId:
@Test
public void shouldExplainQueryId() {
  // Given:
  // Note: this call uses the test's engine field; the local KsqlEngine mock
  // declared below shadows it, which is why the service context is later
  // fetched via this.engine.
  final ConfiguredStatement<Explain> explain =
      (ConfiguredStatement<Explain>) engine.configure("EXPLAIN id;");
  final PersistentQueryMetadata metadata = givenPersistentQuery("id");
  final KsqlEngine engine = mock(KsqlEngine.class);
  when(engine.getPersistentQuery(metadata.getQueryId()))
      .thenReturn(Optional.of(metadata));

  // When:
  final QueryDescriptionEntity query = (QueryDescriptionEntity) customExecutors.explain()
      .execute(explain, sessionProperties, engine, this.engine.getServiceContext())
      .getEntity()
      .orElseThrow(IllegalStateException::new);

  // Then:
  assertThat(query.getQueryDescription(), equalTo(QueryDescriptionFactory.forQueryMetadata(
      metadata,
      Collections.singletonMap(new KsqlHostInfoEntity(LOCAL_HOST), STATE))));
}
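The test leans on a givenPersistentQuery helper that is not shown here. A minimal sketch of what such a helper might look like, assuming Mockito; the helper name comes from the snippet, but the body is an assumption:

  // Hypothetical sketch only: the real helper in ExplainExecutorTest stubs
  // more of PersistentQueryMetadata than just the query ID.
  private static PersistentQueryMetadata givenPersistentQuery(final String id) {
    final PersistentQueryMetadata metadata = mock(PersistentQueryMetadata.class);
    when(metadata.getQueryId()).thenReturn(new QueryId(id));
    return metadata;
  }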
Use of io.confluent.ksql.rest.entity.KsqlHostInfoEntity in project ksql by confluentinc.
From the class LagReportingAgent, method receiveHostLag:
/**
 * Stores the host lag received from a remote Ksql server.
 *
 * @param lagReportingMessage the host lag information sent directly from the other node
 */
public void receiveHostLag(final LagReportingMessage lagReportingMessage) {
  final HostStoreLags hostStoreLags = lagReportingMessage.getHostStoreLags();
  final long updateTimeMs = hostStoreLags.getUpdateTimeMs();
  final KsqlHostInfoEntity ksqlHostInfoEntity = lagReportingMessage.getKsqlHost();
  final KsqlHostInfo ksqlHostInfo = ksqlHostInfoEntity.toKsqlHost();
  LOG.debug("Receive lag at: {} from host: {} lag: {}",
      updateTimeMs, ksqlHostInfoEntity, hostStoreLags.getStateStoreLags());
  // Last-writer-wins: keep the stored lags unless this report is at least as new.
  receivedLagInfo.compute(ksqlHostInfo, (hi, previousHostLagInfo) ->
      previousHostLagInfo != null && previousHostLagInfo.getUpdateTimeMs() > updateTimeMs
          ? previousHostLagInfo
          : hostStoreLags);
}
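The compute call implements a last-writer-wins merge keyed on update time. A minimal, self-contained sketch of the same rule using plain types; the class and map here are illustrative stand-ins, not part of LagReportingAgent:

  import java.util.Map;
  import java.util.concurrent.ConcurrentHashMap;

  public final class LastWriterWins {
    // Illustrative stand-in for receivedLagInfo: host name -> last update time.
    private final Map<String, Long> latestUpdateMs = new ConcurrentHashMap<>();

    // Keep the stored value unless the incoming report is at least as new.
    public void receive(final String host, final long updateTimeMs) {
      latestUpdateMs.compute(host, (h, previous) ->
          previous != null && previous > updateTimeMs ? previous : updateTimeMs);
    }
  }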
Use of io.confluent.ksql.rest.entity.KsqlHostInfoEntity in project ksql by confluentinc.
From the class KsqlResourceTest, method shouldShowQueriesExtended:
@Test
public void shouldShowQueriesExtended() {
  // Given:
  final Map<String, Object> overriddenProperties =
      Collections.singletonMap("ksql.streams.auto.offset.reset", "earliest");
  final List<PersistentQueryMetadata> queryMetadata = createQueries(
      "CREATE STREAM test_describe_1 AS SELECT * FROM test_stream;"
          + "CREATE STREAM test_describe_2 AS SELECT * FROM test_stream;",
      overriddenProperties);

  // When:
  final QueryDescriptionList descriptionList =
      makeSingleRequest("SHOW QUERIES EXTENDED;", QueryDescriptionList.class);
  final Map<KsqlHostInfoEntity, KsqlConstants.KsqlQueryStatus> queryHostState = ImmutableMap.of(
      new KsqlHostInfoEntity(APPLICATION_HOST, APPLICATION_PORT),
      KsqlConstants.KsqlQueryStatus.RUNNING);

  // Then:
  assertThat(descriptionList.getQueryDescriptions(), containsInAnyOrder(
      QueryDescriptionFactory.forQueryMetadata(queryMetadata.get(0), queryHostState),
      QueryDescriptionFactory.forQueryMetadata(queryMetadata.get(1), queryHostState)));
}
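Both constructor styles for KsqlHostInfoEntity appear across these snippets: a (host, port) pair here, and a single argument with LOCAL_HOST in the ExplainExecutorTest snippet. A minimal sketch of the two forms; that the single-string form accepts a "host:port" value is my assumption, not stated in the snippets:

  // Two-argument form, as used with APPLICATION_HOST / APPLICATION_PORT above.
  final KsqlHostInfoEntity fromParts = new KsqlHostInfoEntity("localhost", 8088);
  // Single-argument form; assumed to accept a "host:port" style value.
  final KsqlHostInfoEntity parsed = new KsqlHostInfoEntity("localhost:8088");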
Use of io.confluent.ksql.rest.entity.KsqlHostInfoEntity in project ksql by confluentinc.
From the class ListQueriesExecutor, method mergeExtended:
private static Map<QueryId, QueryDescription> mergeExtended(
    final Map<QueryId, QueryDescription> allResults,
    final Pair<Map<HostInfo, KsqlEntity>, Set<HostInfo>> remoteResults) {
  final List<QueryDescription> remoteQueryDescriptions = remoteResults.getLeft()
      .values()
      .stream()
      .map(QueryDescriptionList.class::cast)
      .map(QueryDescriptionList::getQueryDescriptions)
      .flatMap(List::stream)
      .collect(Collectors.toList());

  for (QueryDescription q : remoteQueryDescriptions) {
    final QueryId queryId = q.getId();
    if (allResults.containsKey(queryId)) {
      // The query is already known locally: merge in the remote per-host
      // statuses and the streams metadata task set.
      for (Map.Entry<KsqlHostInfoEntity, KsqlQueryStatus> entry
          : q.getKsqlHostQueryStatus().entrySet()) {
        allResults.get(queryId).updateKsqlHostQueryStatus(entry.getKey(), entry.getValue());
      }
      allResults.get(queryId).updateTaskMetadata(q.getTasksMetadata());
    } else {
      allResults.put(queryId, q);
    }
  }

  // Hosts that failed to respond are marked UNRESPONSIVE on every query.
  final Set<HostInfo> unresponsiveRemoteHosts = remoteResults.getRight();
  for (HostInfo hostInfo : unresponsiveRemoteHosts) {
    for (QueryDescription queryDescription : allResults.values()) {
      queryDescription.updateKsqlHostQueryStatus(
          new KsqlHostInfoEntity(hostInfo.host(), hostInfo.port()),
          KsqlQueryStatus.UNRESPONSIVE);
    }
  }
  return allResults;
}
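In other words, mergeExtended applies three rules: merge remote per-host statuses into queries already known locally, adopt remote-only queries wholesale, and flag every query as UNRESPONSIVE for hosts that never answered. A simplified, self-contained model of those rules; plain strings stand in for the ksql entity types:

  import java.util.Map;
  import java.util.Set;

  final class MergeSketch {
    enum Status { RUNNING, UNRESPONSIVE }

    // queryId -> (host -> status); strings stand in for QueryId and
    // KsqlHostInfoEntity. The local map and its values must be mutable.
    static Map<String, Map<String, Status>> merge(
        final Map<String, Map<String, Status>> local,
        final Map<String, Map<String, Status>> remote,
        final Set<String> unresponsiveHosts) {
      for (final Map.Entry<String, Map<String, Status>> e : remote.entrySet()) {
        // Rules 1 and 2: merge statuses for shared queries, adopt unseen ones.
        local.merge(e.getKey(), e.getValue(), (mine, theirs) -> {
          mine.putAll(theirs);
          return mine;
        });
      }
      // Rule 3: unresponsive hosts are flagged on every query.
      for (final String host : unresponsiveHosts) {
        local.values().forEach(statuses -> statuses.put(host, Status.UNRESPONSIVE));
      }
      return local;
    }
  }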
Use of io.confluent.ksql.rest.entity.KsqlHostInfoEntity in project ksql by confluentinc.
From the class PullQuerySingleNodeFunctionalTest, method restoreAfterClearState:
@Ignore
@Test
public void restoreAfterClearState() {
  waitForStreamsMetadataToInitialize(REST_APP_0, ImmutableList.of(host0));
  waitForRemoteServerToChangeStatus(REST_APP_0, host0,
      HighAvailabilityTestUtil.lagsReported(host0, Optional.empty(), 5));

  // When:
  final List<StreamedRow> rows_0 = makePullQueryRequest(REST_APP_0, sql, LAG_FILTER_3);

  // Then:
  assertThat(rows_0, hasSize(HEADER + 1));
  KsqlHostInfoEntity host = rows_0.get(1).getSourceHost().get();
  assertThat(host.getHost(), is(host0.getHost()));
  assertThat(host.getPort(), is(host0.getPort()));
  assertThat(rows_0.get(1).getRow(), is(not(Optional.empty())));
  assertThat(rows_0.get(1).getRow().get().getColumns(), is(ImmutableList.of(KEY, 1)));

  // Stop the server and blow away its state.
  LOG.info("Shutting down the server " + host0.toString());
  REST_APP_0.stop();
  String stateDir = (String) REST_APP_0.getBaseConfig()
      .get(KSQL_STREAMS_PREFIX + StreamsConfig.STATE_DIR_CONFIG);
  clearDir(stateDir);

  // Pause incoming Kafka consumption after offset 2, then restart.
  APP_SHUTOFFS_0.setKafkaPauseOffset(2);
  LOG.info("Restarting the server " + host0.toString());
  REST_APP_0.start();
  waitForStreamsMetadataToInitialize(REST_APP_0, ImmutableList.of(host0));
  waitForRemoteServerToChangeStatus(REST_APP_0, host0,
      HighAvailabilityTestUtil.lagsReported(host0, Optional.of(2L), 5));

  ClusterStatusResponse clusterStatusResponse =
      HighAvailabilityTestUtil.sendClusterStatusRequest(REST_APP_0);
  Pair<Long, Long> pair = getOffsets(host0, clusterStatusResponse.getClusterStatus());
  assertThat(pair.left, is(2L));
  assertThat(pair.right, is(5L));

  final List<StreamedRow> sameRows = makePullQueryRequest(REST_APP_0, sql, LAG_FILTER_3);
  host = sameRows.get(1).getSourceHost().get();
  assertThat(host.getHost(), is(host0.getHost()));
  assertThat(host.getPort(), is(host0.getPort()));
  assertThat(sameRows.get(1).getRow(), is(not(Optional.empty())));
  // The server still hasn't consumed the update yet.
  assertThat(sameRows.get(1).getRow().get().getColumns(), is(ImmutableList.of(KEY, 1)));

  // Row not found!
  final List<StreamedRow> headerOnly = makePullQueryRequest(REST_APP_0, sqlKey3, LAG_FILTER_3);
  assertThat(headerOnly.size(), is(1));

  // Unpause incoming Kafka consumption. We then expect the active to catch back up.
  APP_SHUTOFFS_0.setKafkaPauseOffset(-1);
  waitForRemoteServerToChangeStatus(REST_APP_0, host0,
      HighAvailabilityTestUtil.lagsReported(host0, Optional.of(5L), 5));
  clusterStatusResponse = HighAvailabilityTestUtil.sendClusterStatusRequest(REST_APP_0);
  pair = getOffsets(host0, clusterStatusResponse.getClusterStatus());
  assertThat(pair.left, is(5L));
  assertThat(pair.right, is(5L));

  final List<StreamedRow> updatedRows = makePullQueryRequest(REST_APP_0, sqlKey3, LAG_FILTER_3);
  // Now the row is found!
  host = updatedRows.get(1).getSourceHost().get();
  assertThat(host.getHost(), is(host0.getHost()));
  assertThat(host.getPort(), is(host0.getPort()));
  assertThat(updatedRows.get(1).getRow(), is(not(Optional.empty())));
  assertThat(updatedRows.get(1).getRow().get().getColumns(), is(ImmutableList.of(KEY_3, 1)));
}
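The LAG_FILTER_3 request properties are what let the pull query be served while the restored server is still behind. A sketch of what such a property map plausibly looks like, assuming the ksql.query.pull.max.allowed.offset.lag request config; the exact constant the test uses is not shown here:

  // Assumed shape of LAG_FILTER_3: allow serving pull queries from a host
  // whose state store is at most 3 offsets behind the end of the changelog.
  final Map<String, Object> lagFilter3 =
      ImmutableMap.of("ksql.query.pull.max.allowed.offset.lag", 3L);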