Use of io.stackgres.apiweb.distributedlogs.FullTextSearchQuery in project stackgres by ongres.
Class NamespacedClusterLogsResource, method logs.
/**
 * Query distributed logs and return a list of {@code ClusterLogEntry}.
 */
@Operation(responses = {
    @ApiResponse(responseCode = "200", description = "OK",
        content = { @Content(mediaType = "application/json",
            array = @ArraySchema(schema = @Schema(implementation = ClusterLogEntryDto.class))) }) })
@CommonApiResponses
@GET
@Path("{name}/logs")
public List<ClusterLogEntryDto> logs(
    @PathParam("namespace") String namespace, @PathParam("name") String name,
    @QueryParam("records") Integer records, @QueryParam("from") String from,
    @QueryParam("to") String to, @QueryParam("sort") String sort,
    @QueryParam("text") String text, @QueryParam("logType") List<String> logType,
    @QueryParam("podName") List<String> podName, @QueryParam("role") List<String> role,
    @QueryParam("errorLevel") List<String> errorLevel, @QueryParam("userName") List<String> userName,
    @QueryParam("databaseName") List<String> databaseName,
    @QueryParam("fromInclusive") Boolean fromInclusive) {
  final ClusterDto cluster = clusterFinder.findByNameAndNamespace(name, namespace)
      .orElseThrow(NotFoundException::new);
  final int calculatedRecords = records != null ? records : 50;
  if (calculatedRecords <= 0) {
    throw new BadRequestException("records should be a positive number");
  }
  final Optional<Tuple2<Instant, Integer>> fromTuple;
  final Optional<Tuple2<Instant, Integer>> toTuple;
  if (!Optional.ofNullable(cluster.getSpec())
      .map(ClusterSpec::getDistributedLogs)
      .map(ClusterDistributedLogs::getDistributedLogs)
      .isPresent()) {
    throw new BadRequestException("Distributed logs are not configured for specified cluster");
  }
  final var filters = ImmutableMap.<String, ImmutableList<String>>builder();
  addFilter("logType", logType, filters);
  addFilter("podName", podName, filters);
  addFilter("role", role, filters);
  addFilter("errorLevel", errorLevel, filters);
  addFilter("userName", userName, filters);
  addFilter("databaseName", databaseName, filters);
  try {
    fromTuple = Optional.ofNullable(from)
        .map(s -> s.split(","))
        .map(ss -> Tuple.tuple(ss[0], ss.length > 1 ? ss[1] : String.valueOf(Integer.valueOf(0))))
        .map(t -> t.map1(Instant::parse))
        .map(t -> t.map2(Integer::valueOf));
  } catch (Exception ex) {
    throw new BadRequestException("from should be a timestamp"
        + " or a timestamp and an index separated by character ','", ex);
  }
  try {
    toTuple = Optional.ofNullable(to)
        .map(s -> s.split(","))
        .map(ss -> Tuple.tuple(ss[0], ss.length > 1 ? ss[1] : String.valueOf(Integer.MAX_VALUE)))
        .map(t -> t.map1(Instant::parse))
        .map(t -> t.map2(Integer::valueOf));
  } catch (Exception ex) {
    throw new BadRequestException("to should be a timestamp"
        + " or a timestamp and an index separated by character ','", ex);
  }
  if (sort != null && !sort.equals("asc") && !sort.equals("desc")) {
    throw new BadRequestException("sort only accept asc or desc values");
  }
  DistributedLogsQueryParameters logs = ImmutableDistributedLogsQueryParameters.builder()
      .cluster(cluster)
      .records(calculatedRecords)
      .fromTimeAndIndex(fromTuple)
      .toTimeAndIndex(toTuple)
      .filters(filters.build())
      .isSortAsc(Objects.equals("asc", sort))
      .fullTextSearchQuery(Optional.ofNullable(text).map(FullTextSearchQuery::new))
      .isFromInclusive(fromInclusive != null && fromInclusive)
      .build();
  return distributedLogsFetcher.logs(logs);
}
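The "from" and "to" query parameters accept either a single ISO-8601 timestamp or a timestamp and an entry index separated by a comma; anything else is rejected with a BadRequestException in the try/catch blocks above. A minimal standalone sketch of that format (plain Java, not part of StackGres; the class and method names are illustrative only):

import java.time.Instant;

// Illustrative sketch only: parses the "<timestamp>" or "<timestamp>,<index>"
// format accepted by the "from"/"to" query parameters of the logs endpoint.
public class LogRangeParamSketch {

  public static void main(String[] args) {
    parse("2021-03-01T10:15:30Z");    // timestamp only, index falls back to a default
    parse("2021-03-01T10:15:30Z,5");  // timestamp plus explicit entry index
  }

  static void parse(String value) {
    String[] parts = value.split(",");
    Instant timestamp = Instant.parse(parts[0]);  // Instant.parse expects an ISO-8601 instant
    int index = parts.length > 1 ? Integer.parseInt(parts[1]) : 0;
    System.out.println(timestamp + " @ index " + index);
  }
}

In the resource above, a bare timestamp defaults the index to 0 on the "from" side and to Integer.MAX_VALUE on the "to" side.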
Use of io.stackgres.apiweb.distributedlogs.FullTextSearchQuery in project stackgres by ongres.
Class ClusterResourceMockedTest, method getLogsWithTextShouldNotFail.
@Test
void getLogsWithTextShouldNotFail() {
  clusterMocks();
  when(finder.findByNameAndNamespace(getResourceName(), getResourceNamespace()))
      .thenReturn(Optional.of(customResources.getItems().get(0)));
  doAnswer(new Answer<List<ClusterLogEntryDto>>() {
    @Override
    public List<ClusterLogEntryDto> answer(InvocationOnMock invocation) throws Throwable {
      DistributedLogsQueryParameters parameters = invocation.getArgument(0);
      assertNotNull(parameters);
      checkDto(parameters.getCluster(), customResources.getItems().get(0));
      assertEquals(50, parameters.getRecords());
      assertEquals(Optional.empty(), parameters.getFromTimeAndIndex());
      assertEquals(Optional.empty(), parameters.getToTimeAndIndex());
      assertEquals(parameters.getFilters(), ImmutableMap.of());
      assertEquals(parameters.getFullTextSearchQuery(), Optional.of(new FullTextSearchQuery("test")));
      assertFalse(parameters.isSortAsc());
      assertFalse(parameters.isFromInclusive());
      return logList;
    }
  }).when(distributedLogsFetcher).logs(any());
  Integer records = null;
  String from = null;
  String to = null;
  String sort = null;
  String text = "test";
  List<String> logType = null;
  List<String> podName = null;
  List<String> role = null;
  List<String> errorLevel = null;
  List<String> userName = null;
  List<String> databaseName = null;
  Boolean fromInclusive = null;
  List<ClusterLogEntryDto> logs = getClusterLogsResource().logs(getResourceNamespace(),
      getResourceName(), records, from, to, sort, text, logType, podName, role, errorLevel,
      userName, databaseName, fromInclusive);
  assertIterableEquals(logList, logs);
}
Use of io.stackgres.apiweb.distributedlogs.FullTextSearchQuery in project stackgres by ongres.
Class ClusterResourceMockedTest, method getLogsWithEmptyTextShouldNotFail.
@Test
void getLogsWithEmptyTextShouldNotFail() {
  clusterMocks();
  when(finder.findByNameAndNamespace(getResourceName(), getResourceNamespace()))
      .thenReturn(Optional.of(customResources.getItems().get(0)));
  doAnswer(new Answer<List<ClusterLogEntryDto>>() {
    @Override
    public List<ClusterLogEntryDto> answer(InvocationOnMock invocation) throws Throwable {
      DistributedLogsQueryParameters parameters = invocation.getArgument(0);
      assertNotNull(parameters);
      checkDto(parameters.getCluster(), customResources.getItems().get(0));
      assertEquals(50, parameters.getRecords());
      assertEquals(Optional.empty(), parameters.getFromTimeAndIndex());
      assertEquals(Optional.empty(), parameters.getToTimeAndIndex());
      assertEquals(parameters.getFilters(), ImmutableMap.of());
      assertEquals(parameters.getFullTextSearchQuery(), Optional.of(new FullTextSearchQuery("")));
      assertFalse(parameters.isSortAsc());
      assertFalse(parameters.isFromInclusive());
      return logList;
    }
  }).when(distributedLogsFetcher).logs(any());
  Integer records = null;
  String from = null;
  String to = null;
  String sort = null;
  String text = "";
  List<String> logType = null;
  List<String> podName = null;
  List<String> role = null;
  List<String> errorLevel = null;
  List<String> userName = null;
  List<String> databaseName = null;
  Boolean fromInclusive = null;
  List<ClusterLogEntryDto> logs = getClusterLogsResource().logs(getResourceNamespace(),
      getResourceName(), records, from, to, sort, text, logType, podName, role, errorLevel,
      userName, databaseName, fromInclusive);
  assertIterableEquals(logList, logs);
}
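Both tests stub distributedLogsFetcher with an anonymous Answer so they can assert on the DistributedLogsQueryParameters built by the resource. As a sketch, assuming the same mocks and fields as above, the same stubbing could also be expressed with a lambda via Mockito's thenAnswer:

// Sketch only, reusing the mocks from the tests above; equivalent to the
// anonymous Answer, just written as a lambda.
when(distributedLogsFetcher.logs(any())).thenAnswer(invocation -> {
  DistributedLogsQueryParameters parameters = invocation.getArgument(0);
  assertEquals(Optional.of(new FullTextSearchQuery("test")), parameters.getFullTextSearchQuery());
  return logList;
});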