Usage example of com.jayway.restassured.response.Header in the zalando/nakadi project.
From class EventStreamReadingAT, method whenReadFromTheEndThenLatestOffsetsInStream.
@Test(timeout = 10000)
@SuppressWarnings("unchecked")
public void whenReadFromTheEndThenLatestOffsetsInStream() {
    // ACT //
    // no X-nakadi-cursors header is sent, so nakadi is expected to start consuming at the very end
    final Response response = given()
            .param("stream_timeout", "2")
            .param("batch_flush_timeout", "2")
            .when()
            .get(streamEndpoint);

    // ASSERT //
    response.then()
            .statusCode(HttpStatus.OK.value())
            .header(HttpHeaders.TRANSFER_ENCODING, "chunked");

    final List<Map<String, Object>> batches = deserializeBatches(response.print());

    // one batch per partition, each with the expected structure
    assertThat(batches, hasSize(PARTITIONS_NUM));
    batches.forEach(batch -> validateBatchStructure(batch, DUMMY_EVENT));

    // the cursor of every batch must point at the newest offset of its partition
    final Set<Cursor> offsets = Sets.newHashSet();
    for (final Map<String, Object> batch : batches) {
        final Map<String, String> cursor = (Map<String, String>) batch.get("cursor");
        offsets.add(new Cursor(cursor.get("partition"), cursor.get("offset")));
    }
    assertThat(offsets, equalTo(Sets.newHashSet(initialCursors)));
}
Usage example of com.jayway.restassured.response.Header in the zalando/nakadi project.
From class EventStreamReadingAT, method whenAcceptEncodingGzipReceiveCompressedStream.
@Test(timeout = 10000)
public void whenAcceptEncodingGzipReceiveCompressedStream() throws ExecutionException, InterruptedException {
    // ARRANGE //
    // publish a couple of events to one of the partitions
    final int eventsPushed = 2;
    kafkaHelper.writeMultipleMessageToPartition(TEST_PARTITION, topicName, DUMMY_EVENT, eventsPushed);

    // ACT //
    // ask for the stream while advertising gzip support
    final Header cursorsHeader = new Header("X-nakadi-cursors", xNakadiCursors);
    final Header gzipHeader = new Header("Accept-Encoding", "gzip");
    final Response response = given()
            .header(cursorsHeader)
            .header(gzipHeader)
            .param("batch_limit", "5")
            .param("stream_timeout", "2")
            .param("batch_flush_timeout", "2")
            .when()
            .get(streamEndpoint);

    // ASSERT //
    // stream must be chunked and actually compressed
    response.then()
            .statusCode(HttpStatus.OK.value())
            .header(HttpHeaders.TRANSFER_ENCODING, "chunked")
            .header("Content-Encoding", "gzip");
}
Usage example of com.jayway.restassured.response.Header in the zalando/nakadi project.
From class EventStreamReadingAT, method whenPushedAmountOfEventsMoreThanBatchSizeAndReadThenGetEventsInMultipleBatches.
@Test(timeout = 10000)
@SuppressWarnings("unchecked")
public void whenPushedAmountOfEventsMoreThanBatchSizeAndReadThenGetEventsInMultipleBatches() throws ExecutionException, InterruptedException {
    // ARRANGE //
    // publish more events than fit into a single batch, so the stream has to split them
    final int batchLimit = 5;
    final int eventsPushed = 8;
    kafkaHelper.writeMultipleMessageToPartition(TEST_PARTITION, topicName, DUMMY_EVENT, eventsPushed);

    // ACT //
    final Response response = given()
            .header(new Header("X-nakadi-cursors", xNakadiCursors))
            .param("batch_limit", batchLimit)
            .param("stream_timeout", "2")
            .param("batch_flush_timeout", "2")
            .when()
            .get(streamEndpoint);

    // ASSERT //
    response.then()
            .statusCode(HttpStatus.OK.value())
            .header(HttpHeaders.TRANSFER_ENCODING, "chunked");

    final List<Map<String, Object>> batches = deserializeBatches(response.print());

    // every partition contributes one batch, plus one extra for the overflowing partition
    assertThat(batches, hasSize(PARTITIONS_NUM + 1));
    batches.forEach(batch -> validateBatchStructure(batch, DUMMY_EVENT));

    // pick out the batches that belong to the partition we pushed to
    final List<Map<String, Object>> batchesToCheck = batches.stream()
            .filter(isForPartition(TEST_PARTITION))
            .collect(Collectors.toList());
    assertThat(batchesToCheck, hasSize(2));

    // derive the offsets the two batches are expected to report
    final Cursor partitionCursor = kafkaInitialNextOffsets.stream()
            .filter(cursor -> TEST_PARTITION.equals(cursor.getPartition()))
            .findFirst()
            .orElseThrow(() -> new AssertionError("Failed to find cursor for needed partition"));
    final String expectedOffset1 =
            TestUtils.toTimelineOffset(Long.parseLong(partitionCursor.getOffset()) - 1 + batchLimit);
    final String expectedOffset2 =
            TestUtils.toTimelineOffset(Long.parseLong(partitionCursor.getOffset()) - 1 + eventsPushed);

    // first batch is full; the second holds the remainder
    validateBatch(batchesToCheck.get(0), TEST_PARTITION, expectedOffset1, batchLimit);
    validateBatch(batchesToCheck.get(1), TEST_PARTITION, expectedOffset2, eventsPushed - batchLimit);
}
Usage example of com.jayway.restassured.response.Header in the zalando/nakadi project.
From class UserJourneyAT, method userJourneyM1.
/**
 * End-to-end journey over the event-type API: read and list the event type, update it,
 * publish two events, inspect partitions, stream the events back, and exercise the
 * cursor-distance / shifted-cursors / cursors-lag endpoints.
 * The expected offsets ("001-0001-…") assume a freshly created single-partition event type.
 */
@SuppressWarnings("unchecked")
@Test(timeout = 15000)
public void userJourneyM1() throws InterruptedException, IOException {
// get the event type and verify its definition round-trips unchanged
jsonRequestSpec().when().get("/event-types/" + eventTypeName).then().statusCode(OK.value()).body("name", equalTo(eventTypeName)).body("owning_application", equalTo(owningApp)).body("category", equalTo("undefined")).body("schema.type", equalTo("json_schema")).body("schema.schema", equalTo("{\"type\": \"object\", \"properties\": " + "{\"foo\": {\"type\": \"string\"}}, \"required\": [\"foo\"]}"));
// list event types — only checks that the collection is non-empty and fields are populated
jsonRequestSpec().when().get("/event-types").then().statusCode(OK.value()).body("size()", Matchers.greaterThan(0)).body("name[0]", notNullValue()).body("owning_application[0]", notNullValue()).body("category[0]", notNullValue()).body("schema.type[0]", notNullValue()).body("schema.schema[0]", notNullValue());
final String updateEventTypeBody = getUpdateEventType();
// update the event type (PUT returns an empty body on success)
jsonRequestSpec().body(updateEventTypeBody).when().put("/event-types/" + eventTypeName).then().body(equalTo("")).statusCode(OK.value());
// Updates should eventually cause a cache invalidation, so we must retry
executeWithRetry(() -> {
// get event type to check that the update is visible (retention_time applied)
jsonRequestSpec().when().get("/event-types/" + eventTypeName).then().statusCode(OK.value()).body("options.retention_time", equalTo(86400000));
}, new RetryForSpecifiedTimeStrategy<Void>(5000).withExceptionsThatForceRetry(AssertionError.class).withWaitBetweenEachTry(500));
// push two events to the event type
postEvents(EVENT1, EVENT2);
// get offsets for partition 0 — two events published, so newest offset is …01
jsonRequestSpec().when().get("/event-types/" + eventTypeName + "/partitions/0").then().statusCode(OK.value()).body("partition", equalTo("0")).body("oldest_available_offset", equalTo("001-0001-000000000000000000")).body("newest_available_offset", equalTo("001-0001-000000000000000001"));
// get offsets for all partitions (exactly one partition expected)
jsonRequestSpec().when().get("/event-types/" + eventTypeName + "/partitions").then().statusCode(OK.value()).body("size()", equalTo(1)).body("partition[0]", notNullValue()).body("oldest_available_offset[0]", notNullValue()).body("newest_available_offset[0]", notNullValue());
// read the events back from BEGIN: both arrive in one batch, cursor at the newest offset
requestSpec().header(new Header("X-nakadi-cursors", "[{\"partition\": \"0\", \"offset\": \"BEGIN\"}]")).param("batch_limit", "2").param("stream_limit", "2").when().get("/event-types/" + eventTypeName + "/events").then().statusCode(OK.value()).body(equalTo("{\"cursor\":{\"partition\":\"0\",\"offset\":\"001-0001-000000000000000001\"}," + "\"events\":[" + EVENT1 + "," + EVENT2 + "]}\n"));
// get distance between the two event cursors (expected distance: 1)
jsonRequestSpec().body("[{\"initial_cursor\": {\"partition\": \"0\", \"offset\":\"001-0001-000000000000000000\"}, " + "\"final_cursor\": {\"partition\": \"0\", \"offset\":\"001-0001-000000000000000001\"}}]").when().post("/event-types/" + eventTypeName + "/cursor-distances").then().statusCode(OK.value()).body("size()", equalTo(1)).body("initial_cursor[0].offset", equalTo("001-0001-000000000000000000")).body("final_cursor[0].offset", equalTo("001-0001-000000000000000001")).body("distance[0]", equalTo(1));
// shift cursors forward and backward by one and verify they swap positions
jsonRequestSpec().body("[{\"partition\": \"0\", \"offset\":\"001-0001-000000000000000000\", \"shift\": 1}, " + "{\"partition\": \"0\", \"offset\":\"001-0001-000000000000000001\", \"shift\": -1}]").when().post("/event-types/" + eventTypeName + "/shifted-cursors").then().statusCode(OK.value()).body("size()", equalTo(2)).body("offset[0]", equalTo("001-0001-000000000000000001")).body("offset[1]", equalTo("001-0001-000000000000000000"));
// query for lag from the first offset — one event still unconsumed
jsonRequestSpec().body("[{\"partition\": \"0\", \"offset\":\"001-0001-000000000000000000\"}]").when().post("/event-types/" + eventTypeName + "/cursors-lag").then().statusCode(OK.value()).body("size()", equalTo(1)).body("newest_available_offset[0]", equalTo("001-0001-000000000000000001")).body("oldest_available_offset[0]", equalTo("001-0001-000000000000000000")).body("unconsumed_events[0]", equalTo(1));
}
Usage example of com.jayway.restassured.response.Header in the zalando/nakadi project.
From class TimelineConsumptionTest, method readCursors.
/**
 * Streams events of {@code eventTypeName} starting at {@code startOffset} (partition 0) and
 * returns, in arrival order, the cursor offset of every batch that actually contained events;
 * keep-alive batches (no "events" field) are skipped.
 *
 * @param eventTypeName name of the event type to consume from
 * @param startOffset   offset to start reading partition 0 from
 * @param streamLimit   maximum number of events to read before the stream is closed
 * @return offsets of the non-empty batches, in the order they were received
 * @throws IOException if a streamed batch line cannot be parsed as JSON
 */
private static String[] readCursors(final String eventTypeName, final String startOffset, final int streamLimit) throws IOException {
    final Response response = given()
            .header(new Header("X-nakadi-cursors", "[{\"partition\": \"0\", \"offset\": \"" + startOffset + "\"}]"))
            .param("batch_limit", "1")
            .param("batch_flush_timeout", "1")
            .param("stream_limit", streamLimit)
            .param("stream_timeout", 60)
            .when()
            .get("/event-types/" + eventTypeName + "/events");
    response.then().statusCode(HttpStatus.SC_OK);

    // ObjectMapper is expensive to construct — create it once instead of once per batch line
    final ObjectMapper mapper = new ObjectMapper();
    final List<String> result = new ArrayList<>();
    // each line of the chunked response body is one JSON-encoded batch
    for (final String line : response.print().split("\n")) {
        final ObjectNode batch = (ObjectNode) mapper.readTree(line);
        if (batch.get("events") == null) {
            // keep-alive batch without events — no cursor worth recording
            continue;
        }
        final ObjectNode cursor = (ObjectNode) batch.get("cursor");
        result.add(cursor.get("offset").asText());
    }
    return result.toArray(new String[0]);
}
Aggregations