Example usage of java.util.Arrays.asList from the Apache Kafka project (class KafkaAdminClientTest, method testListOffsetsMaxTimestampUnsupportedMultipleOffsetSpec):
/**
 * Verifies that when the broker advertises only ListOffsets versions 0-6, a
 * listOffsets() call mixing MAX_TIMESTAMP and LATEST specs fails only the
 * MAX_TIMESTAMP partition (tp0) with UnsupportedVersionException, while the
 * LATEST lookup for tp1 still completes with the mocked offset/epoch/timestamp.
 */
@Test
public void testListOffsetsMaxTimestampUnsupportedMultipleOffsetSpec() throws Exception {
    Node node = new Node(0, "localhost", 8120);
    List<Node> nodes = Collections.singletonList(node);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node, new Node[] { node }, new Node[] { node }));
    pInfos.add(new PartitionInfo("foo", 1, node, new Node[] { node }, new Node[] { node }));
    final Cluster cluster = new Cluster("mockClusterId", nodes, pInfos, Collections.emptySet(), Collections.emptySet(), node);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("foo", 1);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster, AdminClientConfig.RETRIES_CONFIG, "2")) {
        // Broker supports ListOffsets only up to v6, which is what makes the
        // MAX_TIMESTAMP request unsupported in this scenario.
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(ApiKeys.LIST_OFFSETS.id, (short) 0, (short) 6));
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // listoffsets response from broker 0
        env.kafkaClient().prepareUnsupportedVersionResponse(request -> request instanceof ListOffsetsRequest);
        ListOffsetsTopicResponse topicResponse = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 345L, 543);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(topicResponse));
        env.kafkaClient().prepareResponseFrom(// ensure that no max timestamp requests are retried
            request -> request instanceof ListOffsetsRequest && ((ListOffsetsRequest) request).topics().stream().flatMap(t -> t.partitions().stream()).noneMatch(p -> p.timestamp() == ListOffsetsRequest.MAX_TIMESTAMP), new ListOffsetsResponse(responseData), node);
        // Build the spec map explicitly instead of the original double-brace
        // initialization, which creates an anonymous HashMap subclass holding
        // a hidden reference to the enclosing test instance.
        HashMap<TopicPartition, OffsetSpec> offsetSpecs = new HashMap<>();
        offsetSpecs.put(tp0, OffsetSpec.maxTimestamp());
        offsetSpecs.put(tp1, OffsetSpec.latest());
        ListOffsetsResult result = env.adminClient().listOffsets(offsetSpecs);
        // tp0 asked for MAX_TIMESTAMP and must fail; tp1 asked for LATEST and succeeds.
        TestUtils.assertFutureThrows(result.partitionResult(tp0), UnsupportedVersionException.class);
        ListOffsetsResultInfo tp1Offset = result.partitionResult(tp1).get();
        assertEquals(345L, tp1Offset.offset());
        assertEquals(543, tp1Offset.leaderEpoch().get().intValue());
        assertEquals(-1L, tp1Offset.timestamp());
    }
}
Example usage of java.util.Arrays.asList from the Apache Kafka project (class FetcherTest, method testReadCommittedWithCompactedTopic):
/**
 * READ_COMMITTED consumer reading a compacted log: batches sit at gapped
 * offsets and the aborted transactions reported in the fetch response begin
 * at offsets no longer present in the log. Only records from committed
 * transactions (producer 3 here) may be returned to the application.
 */
@Test
public void testReadCommittedWithCompactedTopic() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new StringDeserializer(), new StringDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    final ByteBuffer logBuffer = ByteBuffer.allocate(1024);
    final long producerId1 = 1L;
    final long producerId2 = 2L;
    final long producerId3 = 3L;
    // Interleave transactional batches from three producers at non-contiguous
    // base offsets, mimicking what compaction leaves behind.
    appendTransactionalRecords(logBuffer, producerId3, 3L, new SimpleRecord("3".getBytes(), "value".getBytes()), new SimpleRecord("4".getBytes(), "value".getBytes()));
    appendTransactionalRecords(logBuffer, producerId2, 15L, new SimpleRecord("15".getBytes(), "value".getBytes()), new SimpleRecord("16".getBytes(), "value".getBytes()), new SimpleRecord("17".getBytes(), "value".getBytes()));
    appendTransactionalRecords(logBuffer, producerId1, 22L, new SimpleRecord("22".getBytes(), "value".getBytes()), new SimpleRecord("23".getBytes(), "value".getBytes()));
    abortTransaction(logBuffer, producerId2, 28L);
    appendTransactionalRecords(logBuffer, producerId3, 30L, new SimpleRecord("30".getBytes(), "value".getBytes()), new SimpleRecord("31".getBytes(), "value".getBytes()), new SimpleRecord("32".getBytes(), "value".getBytes()));
    commitTransaction(logBuffer, producerId3, 35L);
    appendTransactionalRecords(logBuffer, producerId1, 39L, new SimpleRecord("39".getBytes(), "value".getBytes()), new SimpleRecord("40".getBytes(), "value".getBytes()));
    // transaction from pid1 is aborted, but the marker is not included in the fetch
    logBuffer.flip();
    // send the fetch
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    // prepare the response. the aborted transactions begin at offsets which are no longer in the log
    FetchResponseData.AbortedTransaction abortedByProducer2 = new FetchResponseData.AbortedTransaction().setProducerId(producerId2).setFirstOffset(6);
    FetchResponseData.AbortedTransaction abortedByProducer1 = new FetchResponseData.AbortedTransaction().setProducerId(producerId1).setFirstOffset(0);
    List<FetchResponseData.AbortedTransaction> abortedTransactions = Arrays.asList(abortedByProducer2, abortedByProducer1);
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(MemoryRecords.readableRecords(logBuffer), abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<String, String>>> recordsByPartition = fetchedRecords();
    assertTrue(recordsByPartition.containsKey(tp0));
    List<ConsumerRecord<String, String>> committedRecords = recordsByPartition.get(tp0);
    // Only producer 3's two transactions were committed, so exactly their
    // five records come back, in offset order.
    assertEquals(5, committedRecords.size());
    assertEquals(Arrays.asList(3L, 4L, 30L, 31L, 32L), collectRecordOffsets(committedRecords));
}
Example usage of java.util.Arrays.asList from the Apache Kafka project (class FetcherTest, method testConsumingViaIncrementalFetchRequests):
/**
 * Exercises a fetch session across three responses: an initial response with
 * data for tp0 only, an empty incremental response, and a later incremental
 * response carrying new tp0 records. Asserts that fetch positions advance
 * only when records are actually returned and that tp1 never yields data.
 */
@Test
public void testConsumingViaIncrementalFetchRequests() {
    // buildFetcher(2): presumably caps records returned per drain at two —
    // consistent with the 2-then-1 drains below, but confirm against the helper.
    buildFetcher(2);
    List<ConsumerRecord<byte[], byte[]>> records;
    assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1)));
    // Start tp0 at offset 0 and tp1 at offset 1.
    subscriptions.seekValidated(tp0, new SubscriptionState.FetchPosition(0, Optional.empty(), metadata.currentLeader(tp0)));
    subscriptions.seekValidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1)));
    // Fetch some records and establish an incremental fetch session.
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions1 = new LinkedHashMap<>();
    partitions1.put(tidp0, new FetchResponseData.PartitionData().setPartitionIndex(tp0.partition()).setHighWatermark(2).setLastStableOffset(2).setLogStartOffset(0).setRecords(this.records));
    partitions1.put(tidp1, new FetchResponseData.PartitionData().setPartitionIndex(tp1.partition()).setHighWatermark(100).setLogStartOffset(0).setRecords(emptyRecords));
    // Session id 123 ties all three responses to the same fetch session.
    FetchResponse resp1 = FetchResponse.of(Errors.NONE, 0, 123, partitions1);
    client.prepareResponse(resp1);
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    // tp1's partition data held emptyRecords, so only tp0 produces anything.
    assertFalse(fetchedRecords.containsKey(tp1));
    records = fetchedRecords.get(tp0);
    assertEquals(2, records.size());
    assertEquals(3L, subscriptions.position(tp0).offset);
    assertEquals(1L, subscriptions.position(tp1).offset);
    assertEquals(1, records.get(0).offset());
    assertEquals(2, records.get(1).offset());
    // There is still a buffered record.
    assertEquals(0, fetcher.sendFetches());
    fetchedRecords = fetchedRecords();
    assertFalse(fetchedRecords.containsKey(tp1));
    records = fetchedRecords.get(tp0);
    assertEquals(1, records.size());
    assertEquals(3, records.get(0).offset());
    assertEquals(4L, subscriptions.position(tp0).offset);
    // The second response contains no new records.
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions2 = new LinkedHashMap<>();
    FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123, partitions2);
    client.prepareResponse(resp2);
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(time.timer(0));
    fetchedRecords = fetchedRecords();
    // An empty incremental response yields no records and must not move positions.
    assertTrue(fetchedRecords.isEmpty());
    assertEquals(4L, subscriptions.position(tp0).offset);
    assertEquals(1L, subscriptions.position(tp1).offset);
    // The third response contains some new records for tp0.
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions3 = new LinkedHashMap<>();
    partitions3.put(tidp0, new FetchResponseData.PartitionData().setPartitionIndex(tp0.partition()).setHighWatermark(100).setLastStableOffset(4).setLogStartOffset(0).setRecords(this.nextRecords));
    FetchResponse resp3 = FetchResponse.of(Errors.NONE, 0, 123, partitions3);
    client.prepareResponse(resp3);
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(time.timer(0));
    fetchedRecords = fetchedRecords();
    assertFalse(fetchedRecords.containsKey(tp1));
    records = fetchedRecords.get(tp0);
    // nextRecords delivered offsets 4 and 5; tp0 advances to 6, tp1 untouched.
    assertEquals(2, records.size());
    assertEquals(6L, subscriptions.position(tp0).offset);
    assertEquals(1L, subscriptions.position(tp1).offset);
    assertEquals(4, records.get(0).offset());
    assertEquals(5, records.get(1).offset());
}
Example usage of java.util.Arrays.asList from the Apache Ignite project (class MetricCommand, method execute):
/**
 * {@inheritDoc}
 */
@Override
public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception {
    try {
        Map<String, ?> metrics;
        // Client is scoped to the task invocation only.
        try (GridClient client = Command.startClient(clientCfg)) {
            metrics = executeTaskByNameOnNode(client, VisorMetricTask.class.getName(), taskArg, nodeId, clientCfg);
        }
        if (metrics == null)
            log.info("No metric with specified name was found [name=" + taskArg.name() + "]");
        else {
            // Render each metric as a two-column row: name, value.
            List<List<?>> rows = metrics.entrySet()
                .stream()
                .map(kv -> Arrays.asList(kv.getKey(), kv.getValue()))
                .collect(Collectors.toList());
            printTable(asList("metric", "value"), asList(STRING, STRING), rows, log);
        }
        return metrics;
    } catch (Throwable t) {
        // Log before rethrowing so the failure is visible in the command output.
        log.severe("Failed to perform operation.");
        log.severe(CommandLogger.errorMessage(t));
        throw t;
    }
}
Example usage of java.util.Arrays.asList from the Drools project (class AccumulateMvelDialectTest, method testAccFunctionOpaqueJoins):
// This is unsupported as the declared type Data is loosely typed
/*
 * Builds a DRL with two accumulate rules computing the same sum — rule M via
 * MVEL indexed property access ($data.values[$index]) and rule J via plain
 * getters — and asserts both globals collect identical totals as the Tick
 * index advances. The expected values follow from the inserted Data facts:
 * tick 0 -> (1+1)+(4+2)=8, tick 1 -> (2+1)+(5+2)=10, tick 2 -> (3+1)+(6+2)=12.
 */
private void testAccFunctionOpaqueJoins(final PropertySpecificOption propertySpecificOption) {
    final String drl = "package org.test; " + "import java.util.*; " + "global List list; " + "global List list2; " + "declare Tick " + " tick : int " + "end " + "declare Data " + " values : List " + " bias : int = 0 " + "end " + "rule Init " + "when " + "then " + " insert( new Data( Arrays.asList( 1, 2, 3 ), 1 ) ); " + " insert( new Data( Arrays.asList( 4, 5, 6 ), 2 ) ); " + " insert( new Tick( 0 ) );" + "end " + "rule Update " + " no-loop " + "when " + " $i : Integer() " + " $t : Tick() " + "then " + " System.out.println( 'Set tick to ' + $i ); " + " modify( $t ) { " + " setTick( $i ); " + " } " + "end " + "rule M " + " dialect 'mvel' " + "when " + " Tick( $index : tick ) " + " accumulate ( $data : Data( $bias : bias )," + " $tot : sum( ((Integer) $data.values[ $index ]) + $bias ) ) " + "then " + " System.out.println( $tot + ' for J ' + $index ); " + " list.add( $tot.intValue() ); " + "end " + "rule J " + "when " + " Tick( $index : tick ) " + " accumulate ( $data : Data( $bias : bias )," + " $tot : sum( ((Integer)$data.getValues().get( $index )) + $bias ) ) " + "then " + " System.out.println( $tot + ' for M ' + $index ); " + " list2.add( $tot.intValue() ); " + "end ";
    final ReleaseId releaseId1 = KieServices.get().newReleaseId("org.kie", "accumulate-test", "1");
    final Map<String, String> kieModuleConfigurationProperties = new HashMap<>();
    // Property reactivity mode under test is injected via module configuration.
    kieModuleConfigurationProperties.put(PropertySpecificOption.PROPERTY_NAME, propertySpecificOption.toString());
    final KieModule kieModule = KieUtil.getKieModuleFromDrls(releaseId1, kieBaseTestConfiguration, KieSessionTestConfiguration.STATEFUL_REALTIME, kieModuleConfigurationProperties, drl);
    final KieContainer kieContainer = KieServices.get().newKieContainer(kieModule.getReleaseId());
    final KieBase kbase = kieContainer.getKieBase();
    final KieSession ks = kbase.newKieSession();
    try {
        final List list = new ArrayList();
        ks.setGlobal("list", list);
        final List list2 = new ArrayList();
        ks.setGlobal("list2", list2);
        // init data
        ks.fireAllRules();
        // Tick starts at 0: both dialects should sum to 8.
        assertEquals(Collections.singletonList(8), list);
        assertEquals(Collections.singletonList(8), list2);
        ks.insert(1);
        ks.fireAllRules();
        // Inserting 1 triggers rule Update, moving the tick index to 1.
        assertEquals(asList(8, 10), list);
        assertEquals(asList(8, 10), list2);
        ks.insert(2);
        ks.fireAllRules();
        assertEquals(asList(8, 10, 12), list);
        assertEquals(asList(8, 10, 12), list2);
    } finally {
        // Always release session resources even when assertions fail.
        ks.dispose();
    }
}
Aggregations — end of collected Arrays.asList usage examples.