Use of java.util.Collections.singletonList in project kafka by apache.
The class KafkaAdminClientTest, method testListOffsetsMaxTimestampUnsupportedMultipleOffsetSpec.
@Test
public void testListOffsetsMaxTimestampUnsupportedMultipleOffsetSpec() throws Exception {
    Node node = new Node(0, "localhost", 8120);
    List<Node> nodes = Collections.singletonList(node);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node, new Node[] {node}, new Node[] {node}));
    pInfos.add(new PartitionInfo("foo", 1, node, new Node[] {node}, new Node[] {node}));
    final Cluster cluster = new Cluster("mockClusterId", nodes, pInfos,
            Collections.emptySet(), Collections.emptySet(), node);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("foo", 1);

    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster,
            AdminClientConfig.RETRIES_CONFIG, "2")) {
        env.kafkaClient().setNodeApiVersions(
                NodeApiVersions.create(ApiKeys.LIST_OFFSETS.id, (short) 0, (short) 6));
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));

        // ListOffsets response from broker 0
        env.kafkaClient().prepareUnsupportedVersionResponse(
                request -> request instanceof ListOffsetsRequest);
        ListOffsetsTopicResponse topicResponse = ListOffsetsResponse
                .singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 345L, 543);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData()
                .setThrottleTimeMs(0)
                .setTopics(Arrays.asList(topicResponse));
        // ensure that no max timestamp requests are retried
        env.kafkaClient().prepareResponseFrom(
                request -> request instanceof ListOffsetsRequest
                        && ((ListOffsetsRequest) request).topics().stream()
                                .flatMap(t -> t.partitions().stream())
                                .noneMatch(p -> p.timestamp() == ListOffsetsRequest.MAX_TIMESTAMP),
                new ListOffsetsResponse(responseData), node);

        ListOffsetsResult result = env.adminClient().listOffsets(
                new HashMap<TopicPartition, OffsetSpec>() {
                    {
                        put(tp0, OffsetSpec.maxTimestamp());
                        put(tp1, OffsetSpec.latest());
                    }
                });

        TestUtils.assertFutureThrows(result.partitionResult(tp0), UnsupportedVersionException.class);
        ListOffsetsResultInfo tp1Offset = result.partitionResult(tp1).get();
        assertEquals(345L, tp1Offset.offset());
        assertEquals(543, tp1Offset.leaderEpoch().get().intValue());
        assertEquals(-1L, tp1Offset.timestamp());
    }
}
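This test describes a one-broker cluster with Collections.singletonList(node). As a reminder of the semantics every example on this page relies on, here is a minimal, self-contained sketch (class name and values are illustrative, not from Kafka): the returned list holds exactly one element and is immutable, which makes it a cheap, safe fixture for any API that accepts a List.

import java.util.Collections;
import java.util.List;

public class SingletonListDemo {
    public static void main(String[] args) {
        List<String> nodes = Collections.singletonList("localhost:8120");
        System.out.println(nodes.size()); // prints 1
        try {
            // the list is immutable: structural modification fails
            nodes.add("localhost:8121");
        } catch (UnsupportedOperationException e) {
            System.out.println("cannot add to a singleton list");
        }
    }
}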
Use of java.util.Collections.singletonList in project kafka by apache.
The class FetcherTest, method testReturnAbortedTransactionsinUncommittedMode.
@Test
public void testReturnAbortedTransactionsinUncommittedMode() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(),
            new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);

    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
            new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()),
            new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()));
    abortTransaction(buffer, 1L, currentOffset);
    buffer.flip();

    List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
            new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records,
            abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    // In READ_UNCOMMITTED mode, records from the aborted transaction are returned.
    assertTrue(fetchedRecords.containsKey(tp0));
}
Use of java.util.Collections.singletonList in project kafka by apache.
The class FetcherTest, method testConsumerPositionUpdatedWhenSkippingAbortedTransactions.
@Test
public void testConsumerPositionUpdatedWhenSkippingAbortedTransactions() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(),
            new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);

    ByteBuffer buffer = ByteBuffer.allocate(1024);
    long currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
            new SimpleRecord(time.milliseconds(), "abort1-1".getBytes(), "value".getBytes()),
            new SimpleRecord(time.milliseconds(), "abort1-2".getBytes(), "value".getBytes()));
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    buffer.flip();

    List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
            new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records,
            abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    // Ensure that we don't return any of the aborted records, but still advance the consumer position.
    assertFalse(fetchedRecords.containsKey(tp0));
    assertEquals(currentOffset, subscriptions.position(tp0).offset);
}
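Both Fetcher tests build their one-element aborted-transaction fixture the same way. When a fixture needs more than one element, the nearby alternatives behave slightly differently; a short sketch of the trade-offs (values are illustrative; List.of requires Java 9+):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class FixtureListsDemo {
    public static void main(String[] args) {
        List<String> one = Collections.singletonList("a"); // immutable, exactly one element
        List<String> two = Arrays.asList("a", "b");        // fixed-size: set() works, add() throws
        List<String> three = List.of("a", "b", "c");       // immutable (Java 9+)
        two.set(0, "x");                                   // allowed only for Arrays.asList
        System.out.println(one + " " + two + " " + three);
    }
}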
Use of java.util.Collections.singletonList in project dhis2-core by dhis2.
The class AbstractCrudController, method patchObject.
// --------------------------------------------------------------------------
// PATCH
// --------------------------------------------------------------------------

/**
 * Adds support for HTTP PATCH using JSON Patch (RFC 6902). The updated
 * object is run through the normal metadata importer, so internally the
 * request behaves like a normal PUT (after the JSON Patch has been
 * applied).
 *
 * For now we only support the official mimetype
 * "application/json-patch+json", but in future releases we might also want
 * to support "application/json" once the old patch behavior has been
 * removed.
 */
@ResponseBody
@PatchMapping(path = "/{uid}", consumes = "application/json-patch+json")
public WebMessage patchObject(@PathVariable("uid") String pvUid,
        @RequestParam Map<String, String> rpParameters,
        @CurrentUser User currentUser,
        HttpServletRequest request) throws Exception {
    WebOptions options = new WebOptions(rpParameters);
    List<T> entities = getEntity(pvUid, options);
    if (entities.isEmpty()) {
        return notFound(getEntityClass(), pvUid);
    }

    final T persistedObject = entities.get(0);
    if (!aclService.canUpdate(currentUser, persistedObject)) {
        throw new UpdateAccessDeniedException(
                "You don't have the proper permissions to update this object.");
    }

    manager.resetNonOwnerProperties(persistedObject);
    prePatchEntity(persistedObject);

    final JsonPatch patch = jsonMapper.readValue(request.getInputStream(), JsonPatch.class);
    final T patchedObject = jsonPatchManager.apply(patch, persistedObject);

    // we don't allow changing IDs
    ((BaseIdentifiableObject) patchedObject).setId(persistedObject.getId());
    // we don't allow changing UIDs
    ((BaseIdentifiableObject) patchedObject).setUid(persistedObject.getUid());
    // only the new sharing format is supported
    ((BaseIdentifiableObject) patchedObject).clearLegacySharingCollections();

    prePatchEntity(persistedObject, patchedObject);

    Map<String, List<String>> parameterValuesMap = contextService.getParameterValuesMap();
    if (!parameterValuesMap.containsKey("importReportMode")) {
        parameterValuesMap.put("importReportMode", Collections.singletonList("ERRORS_NOT_OWNER"));
    }

    MetadataImportParams params = importService.getParamsFromMap(parameterValuesMap);
    params.setUser(currentUser)
            .setImportStrategy(ImportStrategy.UPDATE)
            .addObject(patchedObject);

    ImportReport importReport = importService.importMetadata(params);
    WebMessage webMessage = objectReport(importReport);

    if (importReport.getStatus() == Status.OK) {
        T entity = manager.get(getEntityClass(), pvUid);
        postPatchEntity(entity);
    } else {
        webMessage.setStatus(Status.ERROR);
    }
    return webMessage;
}
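For illustration, a request to this endpoint carries an RFC 6902 document as its body. A hypothetical call (the UID, resource path, and property are made up, not taken from dhis2-core) could look like:

PATCH /api/dataElements/abcDE12345f HTTP/1.1
Content-Type: application/json-patch+json

[
    { "op": "replace", "path": "/name", "value": "Updated name" }
]

The patched copy is then re-imported with ImportStrategy.UPDATE, and the id/uid overrides in the method above guarantee that a patch can never move the object to a different identity.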
Use of java.util.Collections.singletonList in project open-kilda by telstra.
The class SwitchManagerTest, method mockGetMetersRequest.
private void mockGetMetersRequest(List<Long> meterIds, boolean supportsPkts, long rate,
        long burstSize) throws Exception {
    List<OFMeterConfig> meterConfigs = new ArrayList<>(meterIds.size());
    for (Long meterId : meterIds) {
        OFMeterBandDrop bandDrop = mock(OFMeterBandDrop.class);
        expect(bandDrop.getRate()).andStubReturn(rate);
        expect(bandDrop.getBurstSize()).andStubReturn(burstSize);

        OFMeterConfig meterConfig = mock(OFMeterConfig.class);
        expect(meterConfig.getEntries()).andStubReturn(Collections.singletonList(bandDrop));
        expect(meterConfig.getMeterId()).andStubReturn(meterId);
        Set<OFMeterFlags> flags = ImmutableSet.of(OFMeterFlags.STATS, OFMeterFlags.BURST,
                supportsPkts ? OFMeterFlags.PKTPS : OFMeterFlags.KBPS);
        expect(meterConfig.getFlags()).andStubReturn(flags);

        replay(bandDrop, meterConfig);
        meterConfigs.add(meterConfig);
    }

    OFMeterConfigStatsReply statsReply = mock(OFMeterConfigStatsReply.class);
    expect(statsReply.getEntries()).andStubReturn(meterConfigs);

    ListenableFuture<List<OFMeterConfigStatsReply>> ofStatsFuture = mock(ListenableFuture.class);
    expect(ofStatsFuture.get(anyLong(), anyObject())).andStubReturn(Collections.singletonList(statsReply));
    replay(statsReply, ofStatsFuture);

    expect(iofSwitch.writeStatsRequest(isA(OFMeterConfigStatsRequest.class))).andStubReturn(ofStatsFuture);
}
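The mock above stubs several methods to return one-element lists via Collections.singletonList. The same pattern in isolation, as a minimal EasyMock sketch (the BandSource interface and its values are invented for illustration):

import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.mock;
import static org.easymock.EasyMock.replay;

import java.util.Collections;
import java.util.List;

public class SingletonStubDemo {
    interface BandSource {
        List<String> getEntries();
    }

    public static void main(String[] args) {
        BandSource source = mock(BandSource.class);
        // andStubReturn answers any number of calls with the same value
        expect(source.getEntries()).andStubReturn(Collections.singletonList("drop"));
        replay(source);
        System.out.println(source.getEntries()); // prints [drop]
    }
}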