
Example 76 with Collection

Use of java.util.Collection in the Apache Kafka project.

From the class StreamThreadTest, method testPartitionAssignmentChange.

@SuppressWarnings("unchecked")
@Test
public void testPartitionAssignmentChange() throws Exception {
    StreamsConfig config = new StreamsConfig(configProps());
    StateListenerStub stateListener = new StateListenerStub();
    TopologyBuilder builder = new TopologyBuilder().setApplicationId("X");
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addSource("source3", "topic3");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source2", "source3");
    MockClientSupplier mockClientSupplier = new MockClientSupplier();
    StreamThread thread = new StreamThread(builder, config, mockClientSupplier, applicationId, clientId, processId, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) {

        @Override
        protected StreamTask createStreamTask(TaskId id, Collection<TopicPartition> partitionsForTask) {
            ProcessorTopology topology = builder.build(id.topicGroupId);
            return new TestStreamTask(id, applicationId, partitionsForTask, topology, consumer, producer, restoreConsumer, config, new MockStreamsMetrics(new Metrics()), stateDirectory);
        }
    };
    thread.setStateListener(stateListener);
    assertEquals(thread.state(), StreamThread.State.RUNNING);
    initPartitionGrouper(config, thread, mockClientSupplier);
    ConsumerRebalanceListener rebalanceListener = thread.rebalanceListener;
    assertTrue(thread.tasks().isEmpty());
    List<TopicPartition> revokedPartitions;
    List<TopicPartition> assignedPartitions;
    Set<TopicPartition> expectedGroup1;
    Set<TopicPartition> expectedGroup2;
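    // First rebalance: revoke nothing and assign t1p1; the thread should move RUNNING -> PARTITIONS_REVOKED -> ASSIGNING_PARTITIONS -> RUNNING.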
    revokedPartitions = Collections.emptyList();
    assignedPartitions = Collections.singletonList(t1p1);
    expectedGroup1 = new HashSet<>(Arrays.asList(t1p1));
    rebalanceListener.onPartitionsRevoked(revokedPartitions);
    assertEquals(thread.state(), StreamThread.State.PARTITIONS_REVOKED);
    Assert.assertEquals(stateListener.numChanges, 1);
    Assert.assertEquals(stateListener.oldState, StreamThread.State.RUNNING);
    Assert.assertEquals(stateListener.newState, StreamThread.State.PARTITIONS_REVOKED);
    rebalanceListener.onPartitionsAssigned(assignedPartitions);
    assertEquals(thread.state(), StreamThread.State.RUNNING);
    Assert.assertEquals(stateListener.numChanges, 3);
    Assert.assertEquals(stateListener.oldState, StreamThread.State.ASSIGNING_PARTITIONS);
    Assert.assertEquals(stateListener.newState, StreamThread.State.RUNNING);
    assertTrue(thread.tasks().containsKey(task1));
    assertEquals(expectedGroup1, thread.tasks().get(task1).partitions());
    assertEquals(1, thread.tasks().size());
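    // Second rebalance: swap t1p1 for t1p2; task1 is removed and task2 is created.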
    revokedPartitions = assignedPartitions;
    assignedPartitions = Collections.singletonList(t1p2);
    expectedGroup2 = new HashSet<>(Arrays.asList(t1p2));
    rebalanceListener.onPartitionsRevoked(revokedPartitions);
    assertFalse(thread.tasks().containsKey(task1));
    assertEquals(0, thread.tasks().size());
    rebalanceListener.onPartitionsAssigned(assignedPartitions);
    assertTrue(thread.tasks().containsKey(task2));
    assertEquals(expectedGroup2, thread.tasks().get(task2).partitions());
    assertEquals(1, thread.tasks().size());
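    // Third rebalance: assign t1p1 and t1p2 together, producing one task per partition.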
    revokedPartitions = assignedPartitions;
    assignedPartitions = Arrays.asList(t1p1, t1p2);
    expectedGroup1 = new HashSet<>(Collections.singleton(t1p1));
    expectedGroup2 = new HashSet<>(Collections.singleton(t1p2));
    rebalanceListener.onPartitionsRevoked(revokedPartitions);
    rebalanceListener.onPartitionsAssigned(assignedPartitions);
    assertTrue(thread.tasks().containsKey(task1));
    assertTrue(thread.tasks().containsKey(task2));
    assertEquals(expectedGroup1, thread.tasks().get(task1).partitions());
    assertEquals(expectedGroup2, thread.tasks().get(task2).partitions());
    assertEquals(2, thread.tasks().size());
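    // Fourth rebalance: switch to topics 2 and 3; same-numbered partitions of topic2 and topic3 share a task (task4 and task5).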
    revokedPartitions = assignedPartitions;
    assignedPartitions = Arrays.asList(t2p1, t2p2, t3p1, t3p2);
    expectedGroup1 = new HashSet<>(Arrays.asList(t2p1, t3p1));
    expectedGroup2 = new HashSet<>(Arrays.asList(t2p2, t3p2));
    rebalanceListener.onPartitionsRevoked(revokedPartitions);
    rebalanceListener.onPartitionsAssigned(assignedPartitions);
    assertTrue(thread.tasks().containsKey(task4));
    assertTrue(thread.tasks().containsKey(task5));
    assertEquals(expectedGroup1, thread.tasks().get(task4).partitions());
    assertEquals(expectedGroup2, thread.tasks().get(task5).partitions());
    assertEquals(2, thread.tasks().size());
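    // Fifth rebalance: mix topic1 with topics 2 and 3; t1p1 maps to task1 and {t2p1, t3p1} to task4.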
    revokedPartitions = assignedPartitions;
    assignedPartitions = Arrays.asList(t1p1, t2p1, t3p1);
    expectedGroup1 = new HashSet<>(Arrays.asList(t1p1));
    expectedGroup2 = new HashSet<>(Arrays.asList(t2p1, t3p1));
    rebalanceListener.onPartitionsRevoked(revokedPartitions);
    rebalanceListener.onPartitionsAssigned(assignedPartitions);
    assertTrue(thread.tasks().containsKey(task1));
    assertTrue(thread.tasks().containsKey(task4));
    assertEquals(expectedGroup1, thread.tasks().get(task1).partitions());
    assertEquals(expectedGroup2, thread.tasks().get(task4).partitions());
    assertEquals(2, thread.tasks().size());
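    // Sixth rebalance: the same assignment again; the resulting task set is unchanged.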
    revokedPartitions = assignedPartitions;
    assignedPartitions = Arrays.asList(t1p1, t2p1, t3p1);
    expectedGroup1 = new HashSet<>(Arrays.asList(t1p1));
    expectedGroup2 = new HashSet<>(Arrays.asList(t2p1, t3p1));
    rebalanceListener.onPartitionsRevoked(revokedPartitions);
    rebalanceListener.onPartitionsAssigned(assignedPartitions);
    assertTrue(thread.tasks().containsKey(task1));
    assertTrue(thread.tasks().containsKey(task4));
    assertEquals(expectedGroup1, thread.tasks().get(task1).partitions());
    assertEquals(expectedGroup2, thread.tasks().get(task4).partitions());
    assertEquals(2, thread.tasks().size());
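    // Final rebalance: revoke everything and assign nothing, then shut the thread down.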
    revokedPartitions = assignedPartitions;
    assignedPartitions = Collections.emptyList();
    rebalanceListener.onPartitionsRevoked(revokedPartitions);
    rebalanceListener.onPartitionsAssigned(assignedPartitions);
    assertTrue(thread.tasks().isEmpty());
    thread.close();
    assertTrue((thread.state() == StreamThread.State.PENDING_SHUTDOWN) || (thread.state() == StreamThread.State.NOT_RUNNING));
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) TopologyBuilder(org.apache.kafka.streams.processor.TopologyBuilder) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) Metrics(org.apache.kafka.common.metrics.Metrics) StreamsMetrics(org.apache.kafka.streams.StreamsMetrics) MockProcessorSupplier(org.apache.kafka.test.MockProcessorSupplier) MockClientSupplier(org.apache.kafka.test.MockClientSupplier) TopicPartition(org.apache.kafka.common.TopicPartition) Collection(java.util.Collection) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Test(org.junit.Test)
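
The Collection in this example is the Collection<TopicPartition> that the consumer's rebalance callbacks hand to the stream thread. For reference, here is a minimal standalone ConsumerRebalanceListener (our own illustration, not part of the test above) that receives those same collections; the logging bodies are only illustrative.

import java.util.Collection;

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

public class LoggingRebalanceListener implements ConsumerRebalanceListener {

    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Invoked before a rebalance; in the test this drives the thread into PARTITIONS_REVOKED.
        System.out.println("Revoked: " + partitions);
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        // Invoked after a rebalance; in the test the thread rebuilds its tasks from this collection.
        System.out.println("Assigned: " + partitions);
    }
}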

Example 77 with Collection

Use of java.util.Collection in the Apache Storm project.

From the class ThroughputVsLatency, method main.

public static void main(String[] args) throws Exception {
    long ratePerSecond = 500;
    if (args != null && args.length > 0) {
        ratePerSecond = Long.valueOf(args[0]);
    }
    int parallelism = 4;
    if (args != null && args.length > 1) {
        parallelism = Integer.valueOf(args[1]);
    }
    int numMins = 5;
    if (args != null && args.length > 2) {
        numMins = Integer.valueOf(args[2]);
    }
    String name = "wc-test";
    if (args != null && args.length > 3) {
        name = args[3];
    }
    Config conf = new Config();
    HttpForwardingMetricsServer metricServer = new HttpForwardingMetricsServer(conf) {

        @Override
        public void handle(TaskInfo taskInfo, Collection<DataPoint> dataPoints) {
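            // Fold the incoming data points into the shared latency histogram and the CPU, GC and per-worker memory counters.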
            String worker = taskInfo.srcWorkerHost + ":" + taskInfo.srcWorkerPort;
            for (DataPoint dp : dataPoints) {
                if ("comp-lat-histo".equals(dp.name) && dp.value instanceof Histogram) {
                    synchronized (_histo) {
                        _histo.add((Histogram) dp.value);
                    }
                } else if ("CPU".equals(dp.name) && dp.value instanceof Map) {
                    Map<Object, Object> m = (Map<Object, Object>) dp.value;
                    Object sys = m.get("sys-ms");
                    if (sys instanceof Number) {
                        _systemCPU.getAndAdd(((Number) sys).longValue());
                    }
                    Object user = m.get("user-ms");
                    if (user instanceof Number) {
                        _userCPU.getAndAdd(((Number) user).longValue());
                    }
                } else if (dp.name.startsWith("GC/") && dp.value instanceof Map) {
                    Map<Object, Object> m = (Map<Object, Object>) dp.value;
                    Object count = m.get("count");
                    if (count instanceof Number) {
                        _gcCount.getAndAdd(((Number) count).longValue());
                    }
                    Object time = m.get("timeMs");
                    if (time instanceof Number) {
                        _gcMs.getAndAdd(((Number) time).longValue());
                    }
                } else if (dp.name.startsWith("memory/") && dp.value instanceof Map) {
                    Map<Object, Object> m = (Map<Object, Object>) dp.value;
                    Object val = m.get("usedBytes");
                    if (val instanceof Number) {
                        MemMeasure mm = _memoryBytes.get(worker);
                        if (mm == null) {
                            mm = new MemMeasure();
                            MemMeasure tmp = _memoryBytes.putIfAbsent(worker, mm);
                            mm = tmp == null ? mm : tmp;
                        }
                        mm.update(((Number) val).longValue());
                    }
                }
            }
        }
    };
    metricServer.serve();
    String url = metricServer.getUrl();
    C cluster = new C(conf);
    conf.setNumWorkers(parallelism);
    conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class);
    conf.registerMetricsConsumer(org.apache.storm.metric.HttpForwardingMetricsConsumer.class, url, 1);
    Map<String, String> workerMetrics = new HashMap<String, String>();
    if (!cluster.isLocal()) {
        //sigar uses JNI and does not work in local mode
        workerMetrics.put("CPU", "org.apache.storm.metrics.sigar.CPUMetric");
    }
    conf.put(Config.TOPOLOGY_WORKER_METRICS, workerMetrics);
    conf.put(Config.TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS, 10);
    conf.put(Config.TOPOLOGY_WORKER_GC_CHILDOPTS, "-XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:NewSize=128m -XX:CMSInitiatingOccupancyFraction=70 -XX:-CMSConcurrentMTEnabled");
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-Xmx2g");
    TopologyBuilder builder = new TopologyBuilder();
    int numEach = 4 * parallelism;
    builder.setSpout("spout", new FastRandomSentenceSpout(ratePerSecond / numEach), numEach);
    builder.setBolt("split", new SplitSentence(), numEach).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), numEach).fieldsGrouping("split", new Fields("word"));
    try {
        cluster.submitTopology(name, conf, builder.createTopology());
        for (int i = 0; i < numMins * 2; i++) {
            Thread.sleep(30 * 1000);
            printMetrics(cluster, name);
        }
    } finally {
        kill(cluster, name);
    }
    System.exit(0);
}
Also used : HttpForwardingMetricsServer(org.apache.storm.metric.HttpForwardingMetricsServer) Histogram(org.HdrHistogram.Histogram) HashMap(java.util.HashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) Config(org.apache.storm.Config) DataPoint(org.apache.storm.metric.api.IMetricsConsumer.DataPoint) TaskInfo(org.apache.storm.metric.api.IMetricsConsumer.TaskInfo) Fields(org.apache.storm.tuple.Fields) DataPoint(org.apache.storm.metric.api.IMetricsConsumer.DataPoint) Collection(java.util.Collection) HashMap(java.util.HashMap) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap)
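
The Collection here is the batch of DataPoint objects delivered to HttpForwardingMetricsServer.handle. A small hypothetical helper (the class and method names are ours) isolates the iteration pattern the handler uses when it checks names such as "GC/" and "memory/":

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.storm.metric.api.IMetricsConsumer.DataPoint;

public final class DataPointFilter {

    private DataPointFilter() {
    }

    // Returns the data points whose name starts with the given prefix, e.g. "GC/" or "memory/".
    public static List<DataPoint> withPrefix(Collection<DataPoint> dataPoints, String prefix) {
        List<DataPoint> matches = new ArrayList<>();
        for (DataPoint dp : dataPoints) {
            if (dp.name != null && dp.name.startsWith(prefix)) {
                matches.add(dp);
            }
        }
        return matches;
    }
}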

Example 78 with Collection

Use of java.util.Collection in the Apache Storm project.

From the class Nimbus, method mkAssignments.

private void mkAssignments(String scratchTopoId) throws Exception {
    if (!isLeader()) {
        LOG.info("not a leader, skipping assignments");
        return;
    }
    // get existing assignment (just the topologyToExecutorToNodePort map) -> default to {}
    // filter out ones which have a executor timeout
    // figure out available slots on cluster. add to that the used valid slots to get total slots. figure out how many executors should be in each slot (e.g., 4, 4, 4, 5)
    // only keep existing slots that satisfy one of those slots. for rest, reassign them across remaining slots
    // edge case for slots with no executor timeout but with supervisor timeout... just treat these as valid slots that can be reassigned to. worst comes to worst the executor will timeout and won't assign here next time around
    IStormClusterState state = stormClusterState;
    //read all the topologies
    Map<String, StormBase> bases;
    Map<String, TopologyDetails> tds = new HashMap<>();
    synchronized (submitLock) {
        bases = state.topologyBases();
        for (Iterator<Entry<String, StormBase>> it = bases.entrySet().iterator(); it.hasNext(); ) {
            Entry<String, StormBase> entry = it.next();
            String id = entry.getKey();
            try {
                tds.put(id, readTopologyDetails(id, entry.getValue()));
            } catch (KeyNotFoundException e) {
                //A race happened and it is probably not running
                it.remove();
            }
        }
    }
    Topologies topologies = new Topologies(tds);
    List<String> assignedTopologyIds = state.assignments(null);
    Map<String, Assignment> existingAssignments = new HashMap<>();
    for (String id : assignedTopologyIds) {
        // will be treated as free slot in the scheduler code.
        if (!id.equals(scratchTopoId)) {
            existingAssignments.put(id, state.assignmentInfo(id, null));
        }
    }
    // make the new assignments for topologies
    Map<String, SchedulerAssignment> newSchedulerAssignments = null;
    synchronized (schedLock) {
        newSchedulerAssignments = computeNewSchedulerAssignments(existingAssignments, topologies, bases, scratchTopoId);
        Map<String, Map<List<Long>, List<Object>>> topologyToExecutorToNodePort = computeNewTopoToExecToNodePort(newSchedulerAssignments, existingAssignments);
        for (String id : assignedTopologyIds) {
            if (!topologyToExecutorToNodePort.containsKey(id)) {
                topologyToExecutorToNodePort.put(id, null);
            }
        }
        Map<String, Map<List<Object>, List<Double>>> newAssignedWorkerToResources = computeTopoToNodePortToResources(newSchedulerAssignments);
        int nowSecs = Time.currentTimeSecs();
        Map<String, SupervisorDetails> basicSupervisorDetailsMap = basicSupervisorDetailsMap(state);
        //construct the final Assignments by adding start-times etc into it
        Map<String, Assignment> newAssignments = new HashMap<>();
        for (Entry<String, Map<List<Long>, List<Object>>> entry : topologyToExecutorToNodePort.entrySet()) {
            String topoId = entry.getKey();
            Map<List<Long>, List<Object>> execToNodePort = entry.getValue();
            Assignment existingAssignment = existingAssignments.get(topoId);
            Set<String> allNodes = new HashSet<>();
            if (execToNodePort != null) {
                for (List<Object> nodePort : execToNodePort.values()) {
                    allNodes.add((String) nodePort.get(0));
                }
            }
            Map<String, String> allNodeHost = new HashMap<>();
            if (existingAssignment != null) {
                allNodeHost.putAll(existingAssignment.get_node_host());
            }
            for (String node : allNodes) {
                String host = inimbus.getHostName(basicSupervisorDetailsMap, node);
                if (host != null) {
                    allNodeHost.put(node, host);
                }
            }
            Map<List<Long>, NodeInfo> execNodeInfo = null;
            if (existingAssignment != null) {
                execNodeInfo = existingAssignment.get_executor_node_port();
            }
            List<List<Long>> reassignExecutors = changedExecutors(execNodeInfo, execToNodePort);
            Map<List<Long>, Long> startTimes = new HashMap<>();
            if (existingAssignment != null) {
                startTimes.putAll(existingAssignment.get_executor_start_time_secs());
            }
            for (List<Long> id : reassignExecutors) {
                startTimes.put(id, (long) nowSecs);
            }
            Map<List<Object>, List<Double>> workerToResources = newAssignedWorkerToResources.get(topoId);
            Assignment newAssignment = new Assignment((String) conf.get(Config.STORM_LOCAL_DIR));
            Map<String, String> justAssignedKeys = new HashMap<>(allNodeHost);
            //Modifies justAssignedKeys
            justAssignedKeys.keySet().retainAll(allNodes);
            newAssignment.set_node_host(justAssignedKeys);
            //convert NodePort to NodeInfo (again!!!).
            Map<List<Long>, NodeInfo> execToNodeInfo = new HashMap<>();
            for (Entry<List<Long>, List<Object>> execAndNodePort : execToNodePort.entrySet()) {
                List<Object> nodePort = execAndNodePort.getValue();
                NodeInfo ni = new NodeInfo();
                ni.set_node((String) nodePort.get(0));
                ni.add_to_port((Long) nodePort.get(1));
                execToNodeInfo.put(execAndNodePort.getKey(), ni);
            }
            newAssignment.set_executor_node_port(execToNodeInfo);
            newAssignment.set_executor_start_time_secs(startTimes);
            //do another conversion (lets just make this all common)
            Map<NodeInfo, WorkerResources> workerResources = new HashMap<>();
            for (Entry<List<Object>, List<Double>> wr : workerToResources.entrySet()) {
                List<Object> nodePort = wr.getKey();
                NodeInfo ni = new NodeInfo();
                ni.set_node((String) nodePort.get(0));
                ni.add_to_port((Long) nodePort.get(1));
                List<Double> r = wr.getValue();
                WorkerResources resources = new WorkerResources();
                resources.set_mem_on_heap(r.get(0));
                resources.set_mem_off_heap(r.get(1));
                resources.set_cpu(r.get(2));
                workerResources.put(ni, resources);
            }
            newAssignment.set_worker_resources(workerResources);
            newAssignments.put(topoId, newAssignment);
        }
        if (!newAssignments.equals(existingAssignments)) {
            LOG.debug("RESETTING id->resources and id->worker-resources cache!");
            idToResources.set(new HashMap<>());
            idToWorkerResources.set(new HashMap<>());
        }
        // only log/set when there's been a change to the assignment
        for (Entry<String, Assignment> entry : newAssignments.entrySet()) {
            String topoId = entry.getKey();
            Assignment assignment = entry.getValue();
            Assignment existingAssignment = existingAssignments.get(topoId);
            //NOT Used TopologyDetails topologyDetails = topologies.getById(topoId);
            if (assignment.equals(existingAssignment)) {
                LOG.debug("Assignment for {} hasn't changed", topoId);
            } else {
                LOG.info("Setting new assignment for topology id {}: {}", topoId, assignment);
                state.setAssignment(topoId, assignment);
            }
        }
        Map<String, Collection<WorkerSlot>> addedSlots = new HashMap<>();
        for (Entry<String, Assignment> entry : newAssignments.entrySet()) {
            String topoId = entry.getKey();
            Assignment assignment = entry.getValue();
            Assignment existingAssignment = existingAssignments.get(topoId);
            if (existingAssignment == null) {
                existingAssignment = new Assignment();
                existingAssignment.set_executor_node_port(new HashMap<>());
                existingAssignment.set_executor_start_time_secs(new HashMap<>());
            }
            Set<WorkerSlot> newSlots = newlyAddedSlots(existingAssignment, assignment);
            addedSlots.put(topoId, newSlots);
        }
        inimbus.assignSlots(topologies, addedSlots);
    }
}
Also used : HashMap(java.util.HashMap) StormBase(org.apache.storm.generated.StormBase) Assignment(org.apache.storm.generated.Assignment) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) ArrayList(java.util.ArrayList) List(java.util.List) IStormClusterState(org.apache.storm.cluster.IStormClusterState) HashSet(java.util.HashSet) WorkerResources(org.apache.storm.generated.WorkerResources) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) NodeInfo(org.apache.storm.generated.NodeInfo) AtomicLong(java.util.concurrent.atomic.AtomicLong) Collection(java.util.Collection) Map(java.util.Map) TimeCacheMap(org.apache.storm.utils.TimeCacheMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) Entry(java.util.Map.Entry) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) DataPoint(org.apache.storm.metric.api.DataPoint) KeyNotFoundException(org.apache.storm.generated.KeyNotFoundException)
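
The Collection appears as the value type of addedSlots (Map<String, Collection<WorkerSlot>>), which holds the slots that are new relative to the previous assignment. Assuming newlyAddedSlots boils down to a set difference over the two assignments' slots, the core operation looks roughly like this generic sketch (names are ours):

import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

public final class CollectionDiff {

    private CollectionDiff() {
    }

    // Elements present in the current collection but absent from the previous one.
    public static <T> Set<T> added(Collection<T> previous, Collection<T> current) {
        Set<T> result = new HashSet<>(current);
        result.removeAll(previous);
        return result;
    }
}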

Example 79 with Collection

Use of java.util.Collection in the Apache Storm project.

From the class Nimbus, method makeClusterMetricsConsumerExecutors.

@SuppressWarnings("unchecked")
private static List<ClusterMetricsConsumerExecutor> makeClusterMetricsConsumerExecutors(Map<String, Object> conf) {
    Collection<Map<String, Object>> consumers = (Collection<Map<String, Object>>) conf.get(Config.STORM_CLUSTER_METRICS_CONSUMER_REGISTER);
    List<ClusterMetricsConsumerExecutor> ret = new ArrayList<>();
    if (consumers != null) {
        for (Map<String, Object> consumer : consumers) {
            ret.add(new ClusterMetricsConsumerExecutor((String) consumer.get("class"), consumer.get("argument")));
        }
    }
    return ret;
}
Also used : ArrayList(java.util.ArrayList) Collection(java.util.Collection) ClusterMetricsConsumerExecutor(org.apache.storm.metric.ClusterMetricsConsumerExecutor) Map(java.util.Map) TimeCacheMap(org.apache.storm.utils.TimeCacheMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap)
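
The method expects the value under Config.STORM_CLUSTER_METRICS_CONSUMER_REGISTER to be a Collection of maps, each carrying a "class" entry and an optional "argument". A hedged sketch of building that structure programmatically (the consumer class and argument below are hypothetical):

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.storm.Config;

public class ClusterMetricsConsumerRegistration {

    public static Map<String, Object> buildConf() {
        Map<String, Object> consumer = new HashMap<>();
        // Hypothetical consumer class and argument, only to show the expected shape.
        consumer.put("class", "com.example.metrics.MyClusterMetricsConsumer");
        consumer.put("argument", "https://metrics.example.com/ingest");

        Collection<Map<String, Object>> consumers = Arrays.asList(consumer);

        Map<String, Object> conf = new HashMap<>();
        conf.put(Config.STORM_CLUSTER_METRICS_CONSUMER_REGISTER, consumers);
        return conf;
    }
}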

Example 80 with Collection

Use of java.util.Collection in the Apache Kafka project.

From the class DistributedHerderTest, method testAccessors.

@Test
public void testAccessors() throws Exception {
    EasyMock.expect(member.memberId()).andStubReturn("leader");
    expectRebalance(1, Collections.<String>emptyList(), Collections.<ConnectorTaskId>emptyList());
    expectPostRebalanceCatchup(SNAPSHOT);
    member.wakeup();
    PowerMock.expectLastCall().anyTimes();
    // list connectors, get connector info, get connector config, get task configs
    member.poll(EasyMock.anyInt());
    PowerMock.expectLastCall();
    PowerMock.replayAll();
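    // Each accessor answers asynchronously through a FutureCallback; tick() runs the herder loop that completes them.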
    FutureCallback<Collection<String>> listConnectorsCb = new FutureCallback<>();
    herder.connectors(listConnectorsCb);
    FutureCallback<ConnectorInfo> connectorInfoCb = new FutureCallback<>();
    herder.connectorInfo(CONN1, connectorInfoCb);
    FutureCallback<Map<String, String>> connectorConfigCb = new FutureCallback<>();
    herder.connectorConfig(CONN1, connectorConfigCb);
    FutureCallback<List<TaskInfo>> taskConfigsCb = new FutureCallback<>();
    herder.taskConfigs(CONN1, taskConfigsCb);
    herder.tick();
    assertTrue(listConnectorsCb.isDone());
    assertEquals(Collections.singleton(CONN1), listConnectorsCb.get());
    assertTrue(connectorInfoCb.isDone());
    ConnectorInfo info = new ConnectorInfo(CONN1, CONN1_CONFIG, Arrays.asList(TASK0, TASK1, TASK2));
    assertEquals(info, connectorInfoCb.get());
    assertTrue(connectorConfigCb.isDone());
    assertEquals(CONN1_CONFIG, connectorConfigCb.get());
    assertTrue(taskConfigsCb.isDone());
    assertEquals(Arrays.asList(new TaskInfo(TASK0, TASK_CONFIG), new TaskInfo(TASK1, TASK_CONFIG), new TaskInfo(TASK2, TASK_CONFIG)), taskConfigsCb.get());
    PowerMock.verifyAll();
}
Also used : TaskInfo(org.apache.kafka.connect.runtime.rest.entities.TaskInfo) ConnectorInfo(org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo) Collection(java.util.Collection) Collections.singletonList(java.util.Collections.singletonList) List(java.util.List) ArrayList(java.util.ArrayList) Map(java.util.Map) HashMap(java.util.HashMap) FutureCallback(org.apache.kafka.connect.util.FutureCallback) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
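
The Collection shows up as the result type of herder.connectors, delivered through a FutureCallback<Collection<String>>, which acts as both a callback and a Future. A minimal sketch of that bridge in isolation (the completing call and connector names are hypothetical):

import java.util.Arrays;
import java.util.Collection;

import org.apache.kafka.connect.util.FutureCallback;

public class FutureCallbackSketch {

    public static void main(String[] args) throws Exception {
        FutureCallback<Collection<String>> cb = new FutureCallback<>();

        // Whoever services the request completes the callback with either an error or a result...
        cb.onCompletion(null, Arrays.asList("connector-1", "connector-2"));

        // ...and the requester blocks on the same object as a Future, as the test does via get().
        Collection<String> connectors = cb.get();
        System.out.println(connectors);
    }
}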

Aggregations

Collection (java.util.Collection): 2848
ArrayList (java.util.ArrayList): 801
Map (java.util.Map): 581
Test (org.junit.Test): 537
HashMap (java.util.HashMap): 479
List (java.util.List): 387
Iterator (java.util.Iterator): 325
HashSet (java.util.HashSet): 279
IOException (java.io.IOException): 258
Set (java.util.Set): 250
File (java.io.File): 114
Collectors (java.util.stream.Collectors): 95
LinkedHashMap (java.util.LinkedHashMap): 90
LinkedList (java.util.LinkedList): 82
Test (org.testng.annotations.Test): 78
NotNull (org.jetbrains.annotations.NotNull): 75
Region (org.apache.geode.cache.Region): 71
Collections (java.util.Collections): 67
Field (java.lang.reflect.Field): 65
Logger (org.slf4j.Logger): 63