Use of io.jans.scim.model.scim2.ErrorResponse in the project kafka-rest by confluentinc:
class ProduceActionIntegrationTest, method produceAvroWithRecordSchemaSubjectStrategyAndSchemaVersion_returnsBadRequest.
@Test
public void produceAvroWithRecordSchemaSubjectStrategyAndSchemaVersion_returnsBadRequest() throws Exception {
// The request names both a subject_name_strategy and an explicit schema_version;
// the server is expected to reject this combination with a 400 error code.
String body = "{ \"key\": { \"subject_name_strategy\": \"RECORD_NAME\", \"schema_version\": 1 } }";
String recordsPath =
    "/v3/clusters/" + testEnv.kafkaCluster().getClusterId() + "/topics/" + TOPIC_NAME + "/records";
Response produceResponse =
    testEnv
        .kafkaRest()
        .target()
        .path(recordsPath)
        .request()
        .accept(MediaType.APPLICATION_JSON)
        .post(Entity.entity(body, MediaType.APPLICATION_JSON));
// The produce endpoint answers HTTP 200; the per-record failure is carried in the body.
assertEquals(Status.OK.getStatusCode(), produceResponse.getStatus());
ErrorResponse error = produceResponse.readEntity(ErrorResponse.class);
assertEquals(400, error.getErrorCode());
}
Use of io.jans.scim.model.scim2.ErrorResponse in the project kafka-rest by confluentinc:
class ProduceActionIntegrationTest, method produceAvroWithRawSchemaAndSchemaId_returnsBadRequest.
@Test
public void produceAvroWithRawSchemaAndSchemaId_returnsBadRequest() throws Exception {
// Supplying both an inline raw schema and a registered schema_id in the same
// record is expected to be rejected with a 400 error code.
String body = "{ \"key\": { \"schema\": \"{ \\\"type\\\": \\\"string\\\" }\", \"schema_id\": 1 } }";
String recordsPath =
    "/v3/clusters/" + testEnv.kafkaCluster().getClusterId() + "/topics/" + TOPIC_NAME + "/records";
Response produceResponse =
    testEnv
        .kafkaRest()
        .target()
        .path(recordsPath)
        .request()
        .accept(MediaType.APPLICATION_JSON)
        .post(Entity.entity(body, MediaType.APPLICATION_JSON));
// HTTP status is 200; the rejection surfaces as an error code in the response body.
assertEquals(Status.OK.getStatusCode(), produceResponse.getStatus());
ErrorResponse error = produceResponse.readEntity(ErrorResponse.class);
assertEquals(400, error.getErrorCode());
}
Use of io.jans.scim.model.scim2.ErrorResponse in the project kafka-rest by confluentinc:
class KafkaModuleOverridingTest, method producerIsCreatedPerRequestAndDisposedAfterRequest.
@Test
public void producerIsCreatedPerRequestAndDisposedAfterRequest() {
// NOTE(review): the X-Teapot-Context header presumably activates the overriding
// test module configured by this class; the teapot status is then reported via
// the error payload rather than the HTTP status line — confirm against the
// test's module setup.
Response teapotResponse =
    kafkaRest
        .target()
        .path("/v3/clusters/foo/topics/bar/records")
        .request()
        .header("X-Teapot-Context", "")
        .accept(MediaType.APPLICATION_JSON)
        .post(Entity.entity("{}", MediaType.APPLICATION_JSON_TYPE));
assertEquals(Response.Status.OK.getStatusCode(), teapotResponse.getStatus());
ErrorResponse payload = teapotResponse.readEntity(ErrorResponse.class);
assertEquals(I_M_A_TEAPOT_STATUS_CODE, payload.getErrorCode());
}
Use of io.jans.scim.model.scim2.ErrorResponse in the project kafka-rest by confluentinc:
class ProduceActionTest, method produceWithCountLimit.
// Verifies per-request count rate limiting: the first produce call succeeds and
// its result is written to the chunked output; on the second call the count
// limiter throws RateLimitExceededException, so a 429 error payload is written
// instead. EasyMock records expectations in order below — do not reorder.
@Test
public void produceWithCountLimit() throws Exception {
// config
// Generous limits so only the mocked limiter's thrown exception (recorded
// further down) triggers the failure path, not the config values themselves.
final int TOTAL_NUMBER_OF_PRODUCE_CALLS = 2;
Properties properties = new Properties();
properties.put(PRODUCE_MAX_REQUESTS_PER_SECOND, "100");
properties.put(PRODUCE_MAX_BYTES_PER_SECOND, // first record is 25 bytes long
Integer.toString(30));
properties.put(PRODUCE_RATE_LIMIT_CACHE_EXPIRY_MS, "3600000");
properties.put(PRODUCE_RATE_LIMIT_ENABLED, "true");
// setup
// Mocked chunked output shared by both produce calls; the factory hands it out
// TOTAL_NUMBER_OF_PRODUCE_CALLS times.
ChunkedOutputFactory chunkedOutputFactory = mock(ChunkedOutputFactory.class);
ChunkedOutput<ResultOrError> mockedChunkedOutput = getChunkedOutput(chunkedOutputFactory, TOTAL_NUMBER_OF_PRODUCE_CALLS);
// Providers for the four limiters: per-tenant count/bytes and global count/bytes.
Provider<RequestRateLimiter> countLimitProvider = mock(Provider.class);
Provider<RequestRateLimiter> bytesLimitProvider = mock(Provider.class);
Provider<RequestRateLimiter> countLimiterGlobalProvider = mock(Provider.class);
Provider<RequestRateLimiter> bytesLimiterGlobalProvider = mock(Provider.class);
RequestRateLimiter countLimiterGlobal = mock(RequestRateLimiter.class);
RequestRateLimiter bytesLimiterGlobal = mock(RequestRateLimiter.class);
RequestRateLimiter rateLimiterForCount = mock(RequestRateLimiter.class);
RequestRateLimiter rateLimiterForBytes = mock(RequestRateLimiter.class);
// --- First produce call: every limiter is obtained and rateLimit() passes. ---
expect(countLimitProvider.get()).andReturn(rateLimiterForCount);
expect(bytesLimitProvider.get()).andReturn(rateLimiterForBytes);
expect(countLimiterGlobalProvider.get()).andReturn(countLimiterGlobal);
expect(bytesLimiterGlobalProvider.get()).andReturn(bytesLimiterGlobal);
rateLimiterForCount.rateLimit(anyInt());
rateLimiterForBytes.rateLimit(anyInt());
bytesLimiterGlobal.rateLimit(anyInt());
countLimiterGlobal.rateLimit(anyInt());
// --- Second produce call: global limiters pass, then the per-tenant count
// limiter throws, so the bytes limiter is never consulted. ---
expect(countLimiterGlobalProvider.get()).andReturn(countLimiterGlobal);
expect(bytesLimiterGlobalProvider.get()).andReturn(bytesLimiterGlobal);
bytesLimiterGlobal.rateLimit(anyInt());
countLimiterGlobal.rateLimit(anyInt());
rateLimiterForCount.rateLimit(anyInt());
EasyMock.expectLastCall().andThrow(new RateLimitExceededException());
replay(countLimitProvider, bytesLimitProvider, rateLimiterForCount, rateLimiterForBytes, countLimiterGlobal, bytesLimiterGlobal, countLimiterGlobalProvider, bytesLimiterGlobalProvider);
ProduceAction produceAction = getProduceAction(properties, chunkedOutputFactory, 1, countLimitProvider, bytesLimitProvider, countLimiterGlobalProvider, bytesLimiterGlobalProvider);
MappingIterator<ProduceRequest> requests = getProduceRequestsMappingIterator(TOTAL_NUMBER_OF_PRODUCE_CALLS);
// expected results
ProduceResponse produceResponse = getProduceResponse(0);
ResultOrError resultOrErrorOK = ResultOrError.result(produceResponse);
expect(mockedChunkedOutput.isClosed()).andReturn(false);
// successful first produce
mockedChunkedOutput.write(resultOrErrorOK);
mockedChunkedOutput.close();
// Second call must yield a 429 error payload on the stream.
ErrorResponse err = ErrorResponse.create(429, "Request rate limit exceeded: The rate limit of requests per second has been exceeded.");
ResultOrError resultOrErrorFail = ResultOrError.error(err);
expect(mockedChunkedOutput.isClosed()).andReturn(false);
// failing second produce
mockedChunkedOutput.write(resultOrErrorFail);
// error close
mockedChunkedOutput.close();
replay(mockedChunkedOutput, chunkedOutputFactory);
// run test
FakeAsyncResponse fakeAsyncResponse = new FakeAsyncResponse();
produceAction.produce(fakeAsyncResponse, "clusterId", "topicName", new JsonStream<>(() -> requests));
FakeAsyncResponse fakeAsyncResponse2 = new FakeAsyncResponse();
produceAction.produce(fakeAsyncResponse2, "clusterId", "topicName", new JsonStream<>(() -> requests));
// check results
// verify() asserts that every recorded expectation above was satisfied.
verify(requests, mockedChunkedOutput, countLimitProvider, bytesLimitProvider, rateLimiterForCount, rateLimiterForBytes, countLimiterGlobal, bytesLimiterGlobal);
}
Use of io.jans.scim.model.scim2.ErrorResponse in the project kafka-rest by confluentinc:
class ProduceActionIntegrationTest, method produceJsonschemaWithRawSchemaAndInvalidData_throwsBadRequest.
@Test
public void produceJsonschemaWithRawSchemaAndInvalidData_throwsBadRequest() throws Exception {
String clusterId = testEnv.kafkaCluster().getClusterId();
// Key and value both declare a JSON Schema of type "string" but carry integer
// payloads, so the server should report a 400 error for the record.
ProduceRequestData invalidKey =
    ProduceRequestData.builder()
        .setFormat(EmbeddedFormat.JSONSCHEMA)
        .setRawSchema("{\"type\": \"string\"}")
        .setData(IntNode.valueOf(1))
        .build();
ProduceRequestData invalidValue =
    ProduceRequestData.builder()
        .setFormat(EmbeddedFormat.JSONSCHEMA)
        .setRawSchema("{\"type\": \"string\"}")
        .setData(IntNode.valueOf(2))
        .build();
ProduceRequest produceRequest =
    ProduceRequest.builder()
        .setKey(invalidKey)
        .setValue(invalidValue)
        .setOriginalSize(0L)
        .build();
Response produceResponse =
    testEnv
        .kafkaRest()
        .target()
        .path("/v3/clusters/" + clusterId + "/topics/" + TOPIC_NAME + "/records")
        .request()
        .accept(MediaType.APPLICATION_JSON)
        .post(Entity.entity(produceRequest, MediaType.APPLICATION_JSON));
// HTTP status is 200 for this endpoint; the 400 arrives in the error payload.
assertEquals(Status.OK.getStatusCode(), produceResponse.getStatus());
ErrorResponse error = produceResponse.readEntity(ErrorResponse.class);
assertEquals(400, error.getErrorCode());
}
Aggregations