
Example 16 with Set

Use of java.util.Set in project flink by apache.

From class InternalWindowFunctionTest, method testInternalAggregateProcessWindowFunction:

@SuppressWarnings("unchecked")
@Test
public void testInternalAggregateProcessWindowFunction() throws Exception {
    AggregateProcessWindowFunctionMock mock = mock(AggregateProcessWindowFunctionMock.class);
    InternalAggregateProcessWindowFunction<Long, Set<Long>, Map<Long, Long>, String, Long, TimeWindow> windowFunction = new InternalAggregateProcessWindowFunction<>(new AggregateFunction<Long, Set<Long>, Map<Long, Long>>() {

        private static final long serialVersionUID = 1L;

        @Override
        public Set<Long> createAccumulator() {
            return new HashSet<>();
        }

        @Override
        public void add(Long value, Set<Long> accumulator) {
            accumulator.add(value);
        }

        @Override
        public Map<Long, Long> getResult(Set<Long> accumulator) {
            Map<Long, Long> result = new HashMap<>();
            for (Long in : accumulator) {
                result.put(in, in);
            }
            return result;
        }

        @Override
        public Set<Long> merge(Set<Long> a, Set<Long> b) {
            a.addAll(b);
            return a;
        }
    }, mock);
    // check setOutputType
    TypeInformation<String> stringType = BasicTypeInfo.STRING_TYPE_INFO;
    ExecutionConfig execConf = new ExecutionConfig();
    execConf.setParallelism(42);
    StreamingFunctionUtils.setOutputType(windowFunction, stringType, execConf);
    verify(mock).setOutputType(stringType, execConf);
    // check open
    Configuration config = new Configuration();
    windowFunction.open(config);
    verify(mock).open(config);
    // check setRuntimeContext
    RuntimeContext rCtx = mock(RuntimeContext.class);
    windowFunction.setRuntimeContext(rCtx);
    verify(mock).setRuntimeContext(rCtx);
    // check apply
    TimeWindow w = mock(TimeWindow.class);
    Collector<String> c = (Collector<String>) mock(Collector.class);
    List<Long> args = new LinkedList<>();
    args.add(23L);
    args.add(24L);
    windowFunction.apply(42L, w, args, c);
    verify(mock).process(eq(42L), (AggregateProcessWindowFunctionMock.Context) anyObject(), (Iterable) argThat(containsInAnyOrder(allOf(hasEntry(is(23L), is(23L)), hasEntry(is(24L), is(24L))))), eq(c));
    // check close
    windowFunction.close();
    verify(mock).close();
}
Also used: HashSet(java.util.HashSet) Set(java.util.Set) Configuration(org.apache.flink.configuration.Configuration) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) InternalAggregateProcessWindowFunction(org.apache.flink.streaming.runtime.operators.windowing.functions.InternalAggregateProcessWindowFunction) TimeWindow(org.apache.flink.streaming.api.windowing.windows.TimeWindow) LinkedList(java.util.LinkedList) Collector(org.apache.flink.util.Collector) RuntimeContext(org.apache.flink.api.common.functions.RuntimeContext) HashMap(java.util.HashMap) Map(java.util.Map) Test(org.junit.Test)
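The interesting part of this test is the Set-backed accumulator: add() relies on HashSet semantics to deduplicate values, merge() is a set union, and getResult() converts the distinct values into a Map. Below is a minimal standalone sketch of that contract, with the hypothetical class name SetAccumulatorDemo and no Flink or Mockito dependencies:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Hypothetical standalone demo (not part of the Flink test): the same
// Set-as-accumulator pattern, exercised directly.
public class SetAccumulatorDemo {
    public static void main(String[] args) {
        Set<Long> acc = new HashSet<>();
        // add(): the Set silently deduplicates repeated values
        acc.add(23L);
        acc.add(24L);
        acc.add(24L);
        // merge(): union of two accumulators
        Set<Long> other = new HashSet<>();
        other.add(25L);
        acc.addAll(other);
        // getResult(): map every distinct element to itself
        Map<Long, Long> result = new HashMap<>();
        for (Long in : acc) {
            result.put(in, in);
        }
        System.out.println(result); // {23=23, 24=24, 25=25}
    }
}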

Example 17 with Set

Use of java.util.Set in project flink by apache.

From class HeapInternalTimerServiceTest, method testTimerAssignmentToKeyGroups:

@Test
public void testTimerAssignmentToKeyGroups() {
    int totalNoOfTimers = 100;
    int totalNoOfKeyGroups = 100;
    int startKeyGroupIdx = 0;
    // we have 0 to 99
    int endKeyGroupIdx = totalNoOfKeyGroups - 1;
    @SuppressWarnings("unchecked") Set<InternalTimer<Integer, String>>[] expectedNonEmptyTimerSets = new HashSet[totalNoOfKeyGroups];
    TestKeyContext keyContext = new TestKeyContext();
    HeapInternalTimerService<Integer, String> timerService = new HeapInternalTimerService<>(totalNoOfKeyGroups, new KeyGroupRange(startKeyGroupIdx, endKeyGroupIdx), keyContext, new TestProcessingTimeService());
    timerService.startTimerService(IntSerializer.INSTANCE, StringSerializer.INSTANCE, mock(Triggerable.class));
    for (int i = 0; i < totalNoOfTimers; i++) {
        // create the timer to be registered
        InternalTimer<Integer, String> timer = new InternalTimer<>(10 + i, i, "hello_world_" + i);
        int keyGroupIdx = KeyGroupRangeAssignment.assignToKeyGroup(timer.getKey(), totalNoOfKeyGroups);
        // add it in the adequate expected set of timers per keygroup
        Set<InternalTimer<Integer, String>> timerSet = expectedNonEmptyTimerSets[keyGroupIdx];
        if (timerSet == null) {
            timerSet = new HashSet<>();
            expectedNonEmptyTimerSets[keyGroupIdx] = timerSet;
        }
        timerSet.add(timer);
        // register the timer as both processing and event time one
        keyContext.setCurrentKey(timer.getKey());
        timerService.registerEventTimeTimer(timer.getNamespace(), timer.getTimestamp());
        timerService.registerProcessingTimeTimer(timer.getNamespace(), timer.getTimestamp());
    }
    Set<InternalTimer<Integer, String>>[] eventTimeTimers = timerService.getEventTimeTimersPerKeyGroup();
    Set<InternalTimer<Integer, String>>[] processingTimeTimers = timerService.getProcessingTimeTimersPerKeyGroup();
    // finally verify that the actual timers per key group sets are the expected ones.
    for (int i = 0; i < expectedNonEmptyTimerSets.length; i++) {
        Set<InternalTimer<Integer, String>> expected = expectedNonEmptyTimerSets[i];
        Set<InternalTimer<Integer, String>> actualEvent = eventTimeTimers[i];
        Set<InternalTimer<Integer, String>> actualProcessing = processingTimeTimers[i];
        if (expected == null) {
            Assert.assertNull(actualEvent);
            Assert.assertNull(actualProcessing);
        } else {
            Assert.assertArrayEquals(expected.toArray(), actualEvent.toArray());
            Assert.assertArrayEquals(expected.toArray(), actualProcessing.toArray());
        }
    }
}
Also used: HashSet(java.util.HashSet) Set(java.util.Set) KeyGroupRange(org.apache.flink.runtime.state.KeyGroupRange) TestProcessingTimeService(org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService) Test(org.junit.Test)
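What the test exercises is the bucketing invariant: every timer lands in exactly one key-group set, determined by hashing the timer's key. Here is a simplified, self-contained sketch of that idea; note that Flink's KeyGroupRangeAssignment applies a murmur hash before the modulo, so the plain hashCode-modulo used here is an assumption made only to keep the demo dependency-free:

import java.util.HashSet;
import java.util.Set;

// Simplified sketch of key-group bucketing: each key hashes into exactly
// one of a fixed number of group sets, mirroring the array of
// Set<InternalTimer<...>> in the test above.
public class KeyGroupBucketingDemo {
    public static void main(String[] args) {
        int totalKeyGroups = 100;
        @SuppressWarnings("unchecked")
        Set<Integer>[] keysPerGroup = new HashSet[totalKeyGroups];
        for (int key = 0; key < 100; key++) {
            // assumption: plain hash-modulo instead of Flink's murmur hash
            int idx = Math.floorMod(Integer.hashCode(key), totalKeyGroups);
            if (keysPerGroup[idx] == null) {
                keysPerGroup[idx] = new HashSet<>();
            }
            keysPerGroup[idx].add(key);
        }
        // every key landed in exactly one group
        int total = 0;
        for (Set<Integer> group : keysPerGroup) {
            if (group != null) {
                total += group.size();
            }
        }
        System.out.println(total); // 100
    }
}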

Example 18 with Set

Use of java.util.Set in project flink by apache.

From class SingleInputNode, method getAlternativePlans:

@Override
public List<PlanNode> getAlternativePlans(CostEstimator estimator) {
    // check if we have a cached version
    if (this.cachedPlans != null) {
        return this.cachedPlans;
    }
    boolean childrenSkippedDueToReplicatedInput = false;
    // calculate alternative sub-plans for predecessor
    final List<? extends PlanNode> subPlans = getPredecessorNode().getAlternativePlans(estimator);
    final Set<RequestedGlobalProperties> intGlobal = this.inConn.getInterestingProperties().getGlobalProperties();
    // calculate alternative sub-plans for broadcast inputs
    final List<Set<? extends NamedChannel>> broadcastPlanChannels = new ArrayList<Set<? extends NamedChannel>>();
    List<DagConnection> broadcastConnections = getBroadcastConnections();
    List<String> broadcastConnectionNames = getBroadcastConnectionNames();
    for (int i = 0; i < broadcastConnections.size(); i++) {
        DagConnection broadcastConnection = broadcastConnections.get(i);
        String broadcastConnectionName = broadcastConnectionNames.get(i);
        List<PlanNode> broadcastPlanCandidates = broadcastConnection.getSource().getAlternativePlans(estimator);
        // wrap the plan candidates in named channels
        HashSet<NamedChannel> broadcastChannels = new HashSet<NamedChannel>(broadcastPlanCandidates.size());
        for (PlanNode plan : broadcastPlanCandidates) {
            NamedChannel c = new NamedChannel(broadcastConnectionName, plan);
            DataExchangeMode exMode = DataExchangeMode.select(broadcastConnection.getDataExchangeMode(), ShipStrategyType.BROADCAST, broadcastConnection.isBreakingPipeline());
            c.setShipStrategy(ShipStrategyType.BROADCAST, exMode);
            broadcastChannels.add(c);
        }
        broadcastPlanChannels.add(broadcastChannels);
    }
    final RequestedGlobalProperties[] allValidGlobals;
    {
        Set<RequestedGlobalProperties> pairs = new HashSet<RequestedGlobalProperties>();
        for (OperatorDescriptorSingle ods : getPossibleProperties()) {
            pairs.addAll(ods.getPossibleGlobalProperties());
        }
        allValidGlobals = pairs.toArray(new RequestedGlobalProperties[pairs.size()]);
    }
    final ArrayList<PlanNode> outputPlans = new ArrayList<PlanNode>();
    final ExecutionMode executionMode = this.inConn.getDataExchangeMode();
    final int parallelism = getParallelism();
    final int inParallelism = getPredecessorNode().getParallelism();
    final boolean parallelismChange = inParallelism != parallelism;
    final boolean breaksPipeline = this.inConn.isBreakingPipeline();
    // create all candidates
    for (PlanNode child : subPlans) {
        if (child.getGlobalProperties().isFullyReplicated()) {
            // fully replicated input is always locally forwarded if the parallelism is not changed
            if (parallelismChange) {
                // can not continue with this child
                childrenSkippedDueToReplicatedInput = true;
                continue;
            } else {
                this.inConn.setShipStrategy(ShipStrategyType.FORWARD);
            }
        }
        if (this.inConn.getShipStrategy() == null) {
            // pick the strategy ourselves
            for (RequestedGlobalProperties igps : intGlobal) {
                final Channel c = new Channel(child, this.inConn.getMaterializationMode());
                igps.parameterizeChannel(c, parallelismChange, executionMode, breaksPipeline);
// if the parallelism changed, cancel out the global properties, unless the
// ship strategy preserves/establishes them even under changing parallelisms
                if (parallelismChange && !c.getShipStrategy().isNetworkStrategy()) {
                    c.getGlobalProperties().reset();
                }
// check whether we meet any of the requested properties
                for (RequestedGlobalProperties rgps : allValidGlobals) {
                    if (rgps.isMetBy(c.getGlobalProperties())) {
                        c.setRequiredGlobalProps(rgps);
                        addLocalCandidates(c, broadcastPlanChannels, igps, outputPlans, estimator);
                        break;
                    }
                }
            }
        } else {
            // hint fixed the strategy
            final Channel c = new Channel(child, this.inConn.getMaterializationMode());
            final ShipStrategyType shipStrategy = this.inConn.getShipStrategy();
            final DataExchangeMode exMode = DataExchangeMode.select(executionMode, shipStrategy, breaksPipeline);
            if (this.keys != null) {
                c.setShipStrategy(shipStrategy, this.keys.toFieldList(), exMode);
            } else {
                c.setShipStrategy(shipStrategy, exMode);
            }
            if (parallelismChange) {
                c.adjustGlobalPropertiesForFullParallelismChange();
            }
            // check whether we meet any of the accepted properties
            for (RequestedGlobalProperties rgps : allValidGlobals) {
                if (rgps.isMetBy(c.getGlobalProperties())) {
                    addLocalCandidates(c, broadcastPlanChannels, rgps, outputPlans, estimator);
                    break;
                }
            }
        }
    }
    if (outputPlans.isEmpty()) {
        if (childrenSkippedDueToReplicatedInput) {
            throw new CompilerException("No plan meeting the requirements could be created @ " + this + ". Most likely reason: Invalid use of replicated input.");
        } else {
            throw new CompilerException("No plan meeting the requirements could be created @ " + this + ". Most likely reason: Too restrictive plan hints.");
        }
    }
    // cost and prune the plans
    for (PlanNode node : outputPlans) {
        estimator.costOperator(node);
    }
    prunePlanAlternatives(outputPlans);
    outputPlans.trimToSize();
    this.cachedPlans = outputPlans;
    return outputPlans;
}
Also used: HashSet(java.util.HashSet) Set(java.util.Set) FieldSet(org.apache.flink.api.common.operators.util.FieldSet) ArrayList(java.util.ArrayList) ShipStrategyType(org.apache.flink.runtime.operators.shipping.ShipStrategyType) OperatorDescriptorSingle(org.apache.flink.optimizer.operators.OperatorDescriptorSingle) PlanNode(org.apache.flink.optimizer.plan.PlanNode) SingleInputPlanNode(org.apache.flink.optimizer.plan.SingleInputPlanNode) DataExchangeMode(org.apache.flink.runtime.io.network.DataExchangeMode) CompilerException(org.apache.flink.optimizer.CompilerException) RequestedGlobalProperties(org.apache.flink.optimizer.dataproperties.RequestedGlobalProperties) Channel(org.apache.flink.optimizer.plan.Channel) NamedChannel(org.apache.flink.optimizer.plan.NamedChannel) ExecutionMode(org.apache.flink.api.common.ExecutionMode)
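Note the idiom used above to build allValidGlobals: candidates from all operator descriptors are pooled into a HashSet, which collapses equal RequestedGlobalProperties via equals()/hashCode(), and the set is then frozen into an array for the candidate-matching loops. A minimal sketch of the same dedup-then-array pattern, with Strings standing in for the property objects:

import java.util.HashSet;
import java.util.Set;

// Dedup-then-array: pool possibly-duplicated candidates into a Set,
// then convert to an array for repeated iteration.
public class DedupToArrayDemo {
    public static void main(String[] args) {
        Set<String> pairs = new HashSet<>();
        pairs.add("HASH_PARTITIONED");
        pairs.add("ANY_PARTITIONING");
        pairs.add("HASH_PARTITIONED"); // duplicate collapses in the Set
        String[] allValidGlobals = pairs.toArray(new String[pairs.size()]);
        System.out.println(allValidGlobals.length); // 2
    }
}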

Example 19 with Set

Use of java.util.Set in project hadoop by apache.

From class JMXJsonServlet, method writeObject:

private void writeObject(JsonGenerator jg, Object value) throws IOException {
    if (value == null) {
        jg.writeNull();
    } else {
        Class<?> c = value.getClass();
        if (c.isArray()) {
            // arrays are serialized element by element, recursively
            jg.writeStartArray();
            int len = Array.getLength(value);
            for (int j = 0; j < len; j++) {
                Object item = Array.get(value, j);
                writeObject(jg, item);
            }
            jg.writeEndArray();
        } else if (value instanceof Number) {
            // numbers are written via toString to preserve precision
            Number n = (Number) value;
            jg.writeNumber(n.toString());
        } else if (value instanceof Boolean) {
            Boolean b = (Boolean) value;
            jg.writeBoolean(b);
        } else if (value instanceof CompositeData) {
            // a CompositeData becomes a JSON object keyed by the
            // item names of its CompositeType
            CompositeData cds = (CompositeData) value;
            CompositeType comp = cds.getCompositeType();
            Set<String> keys = comp.keySet();
            jg.writeStartObject();
            for (String key : keys) {
                writeAttribute(jg, key, cds.get(key));
            }
            jg.writeEndObject();
        } else if (value instanceof TabularData) {
            // a TabularData becomes a JSON array of its rows
            TabularData tds = (TabularData) value;
            jg.writeStartArray();
            for (Object entry : tds.values()) {
                writeObject(jg, entry);
            }
            jg.writeEndArray();
        } else {
            // fall back to the string representation
            jg.writeString(value.toString());
        }
    }
}
Also used: Set(java.util.Set) CompositeData(javax.management.openmbean.CompositeData) CompositeType(javax.management.openmbean.CompositeType) TabularData(javax.management.openmbean.TabularData)
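The CompositeData branch is the one that uses java.util.Set: the item names of the value's CompositeType come back as a Set<String>, which drives the JSON object serialization. Below is a standalone sketch that builds a small CompositeData by hand and walks its keys the same way; the HeapUsage type and its items are made up for the demo:

import java.util.Set;
import javax.management.openmbean.CompositeData;
import javax.management.openmbean.CompositeDataSupport;
import javax.management.openmbean.CompositeType;
import javax.management.openmbean.OpenType;
import javax.management.openmbean.SimpleType;

// Build a tiny CompositeData and iterate its CompositeType's key Set,
// mirroring the servlet's serialization of an MBean attribute.
public class CompositeDataDemo {
    public static void main(String[] args) throws Exception {
        CompositeType comp = new CompositeType(
                "HeapUsage", "heap memory usage",
                new String[] {"used", "max"},
                new String[] {"bytes used", "bytes max"},
                new OpenType<?>[] {SimpleType.LONG, SimpleType.LONG});
        CompositeData cds = new CompositeDataSupport(
                comp,
                new String[] {"used", "max"},
                new Object[] {128L, 512L});
        Set<String> keys = comp.keySet(); // item names of the type
        for (String key : keys) {
            System.out.println(key + " = " + cds.get(key));
        }
    }
}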

Example 20 with Set

Use of java.util.Set in project flink by apache.

From class ExecutionGraphConstructionTest, method testCoLocationConstraintCreation:

@Test
public void testCoLocationConstraintCreation() {
    try {
        final JobID jobId = new JobID();
        final String jobName = "Co-Location Constraint Sample Job";
        final Configuration cfg = new Configuration();
        // simple group of two, cyclic
        JobVertex v1 = new JobVertex("vertex1");
        JobVertex v2 = new JobVertex("vertex2");
        v1.setParallelism(6);
        v2.setParallelism(4);
        v1.setInvokableClass(AbstractInvokable.class);
        v2.setInvokableClass(AbstractInvokable.class);
        SlotSharingGroup sl1 = new SlotSharingGroup();
        v1.setSlotSharingGroup(sl1);
        v2.setSlotSharingGroup(sl1);
        v2.setStrictlyCoLocatedWith(v1);
        v1.setStrictlyCoLocatedWith(v2);
        // complex forked dependency pattern
        JobVertex v3 = new JobVertex("vertex3");
        JobVertex v4 = new JobVertex("vertex4");
        JobVertex v5 = new JobVertex("vertex5");
        JobVertex v6 = new JobVertex("vertex6");
        JobVertex v7 = new JobVertex("vertex7");
        v3.setParallelism(3);
        v4.setParallelism(3);
        v5.setParallelism(3);
        v6.setParallelism(3);
        v7.setParallelism(3);
        v3.setInvokableClass(AbstractInvokable.class);
        v4.setInvokableClass(AbstractInvokable.class);
        v5.setInvokableClass(AbstractInvokable.class);
        v6.setInvokableClass(AbstractInvokable.class);
        v7.setInvokableClass(AbstractInvokable.class);
        SlotSharingGroup sl2 = new SlotSharingGroup();
        v3.setSlotSharingGroup(sl2);
        v4.setSlotSharingGroup(sl2);
        v5.setSlotSharingGroup(sl2);
        v6.setSlotSharingGroup(sl2);
        v7.setSlotSharingGroup(sl2);
        v4.setStrictlyCoLocatedWith(v3);
        v5.setStrictlyCoLocatedWith(v4);
        v6.setStrictlyCoLocatedWith(v3);
        v3.setStrictlyCoLocatedWith(v7);
        // isolated vertex
        JobVertex v8 = new JobVertex("vertex8");
        v8.setParallelism(2);
        v8.setInvokableClass(AbstractInvokable.class);
        JobGraph jg = new JobGraph(jobId, jobName, v1, v2, v3, v4, v5, v6, v7, v8);
        ExecutionGraph eg = new ExecutionGraph(TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(), jobId, jobName, cfg, new SerializedValue<>(new ExecutionConfig()), AkkaUtils.getDefaultTimeout(), new NoRestartStrategy(), new Scheduler(TestingUtils.defaultExecutionContext()));
        eg.attachJobGraph(jg.getVerticesSortedTopologicallyFromSources());
        // check the v1 / v2 co location hints ( assumes parallelism(v1) >= parallelism(v2) )
        {
            ExecutionVertex[] v1s = eg.getJobVertex(v1.getID()).getTaskVertices();
            ExecutionVertex[] v2s = eg.getJobVertex(v2.getID()).getTaskVertices();
            Set<CoLocationConstraint> all = new HashSet<CoLocationConstraint>();
            for (int i = 0; i < v2.getParallelism(); i++) {
                assertNotNull(v1s[i].getLocationConstraint());
                assertNotNull(v2s[i].getLocationConstraint());
                assertTrue(v1s[i].getLocationConstraint() == v2s[i].getLocationConstraint());
                all.add(v1s[i].getLocationConstraint());
            }
            for (int i = v2.getParallelism(); i < v1.getParallelism(); i++) {
                assertNotNull(v1s[i].getLocationConstraint());
                all.add(v1s[i].getLocationConstraint());
            }
            assertEquals("not all co location constraints are distinct", v1.getParallelism(), all.size());
        }
        // check the v3 / v4 / v5 / v6 / v7 co location hints ( all share the same parallelism )
        {
            ExecutionVertex[] v3s = eg.getJobVertex(v3.getID()).getTaskVertices();
            ExecutionVertex[] v4s = eg.getJobVertex(v4.getID()).getTaskVertices();
            ExecutionVertex[] v5s = eg.getJobVertex(v5.getID()).getTaskVertices();
            ExecutionVertex[] v6s = eg.getJobVertex(v6.getID()).getTaskVertices();
            ExecutionVertex[] v7s = eg.getJobVertex(v7.getID()).getTaskVertices();
            Set<CoLocationConstraint> all = new HashSet<CoLocationConstraint>();
            for (int i = 0; i < v3.getParallelism(); i++) {
                assertNotNull(v3s[i].getLocationConstraint());
                assertTrue(v3s[i].getLocationConstraint() == v4s[i].getLocationConstraint());
                assertTrue(v4s[i].getLocationConstraint() == v5s[i].getLocationConstraint());
                assertTrue(v5s[i].getLocationConstraint() == v6s[i].getLocationConstraint());
                assertTrue(v6s[i].getLocationConstraint() == v7s[i].getLocationConstraint());
                all.add(v3s[i].getLocationConstraint());
            }
            assertEquals("not all co location constraints are distinct", v3.getParallelism(), all.size());
        }
        // check the v8 has no co location hints
        {
            ExecutionVertex[] v8s = eg.getJobVertex(v8.getID()).getTaskVertices();
            for (int i = 0; i < v8.getParallelism(); i++) {
                assertNull(v8s[i].getLocationConstraint());
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used: CoLocationConstraint(org.apache.flink.runtime.jobmanager.scheduler.CoLocationConstraint) HashSet(java.util.HashSet) Set(java.util.Set) IntermediateDataSet(org.apache.flink.runtime.jobgraph.IntermediateDataSet) Configuration(org.apache.flink.configuration.Configuration) Scheduler(org.apache.flink.runtime.jobmanager.scheduler.Scheduler) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) NoRestartStrategy(org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy) JobException(org.apache.flink.runtime.JobException) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) SlotSharingGroup(org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)
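The Set here does the actual verification work: because a HashSet collapses equal elements, adding every co-location constraint and then comparing all.size() to the parallelism proves the constraints are pairwise distinct. A minimal sketch of that distinctness check, with plain Objects standing in for CoLocationConstraint:

import java.util.HashSet;
import java.util.Set;

// Distinctness via Set: n elements added, size() == n iff all distinct.
public class DistinctnessCheckDemo {
    public static void main(String[] args) {
        int parallelism = 6;
        Object[] constraints = new Object[parallelism];
        for (int i = 0; i < parallelism; i++) {
            constraints[i] = new Object(); // one constraint per subtask
        }
        Set<Object> all = new HashSet<>();
        for (Object c : constraints) {
            all.add(c);
        }
        // mirrors: assertEquals(parallelism, all.size())
        System.out.println(all.size() == parallelism); // true
    }
}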

Aggregations

Set (java.util.Set): 6789
HashSet (java.util.HashSet): 4372
HashMap (java.util.HashMap): 2090
Map (java.util.Map): 1865
Iterator (java.util.Iterator): 1774
ArrayList (java.util.ArrayList): 1113
List (java.util.List): 980
Test (org.junit.Test): 920
TreeSet (java.util.TreeSet): 536
IOException (java.io.IOException): 501
SSOException (com.iplanet.sso.SSOException): 467
LinkedHashSet (java.util.LinkedHashSet): 418
SMSException (com.sun.identity.sm.SMSException): 347
IdRepoException (com.sun.identity.idm.IdRepoException): 268
Collection (java.util.Collection): 259
ImmutableSet (com.google.common.collect.ImmutableSet): 256
File (java.io.File): 245
SSOToken (com.iplanet.sso.SSOToken): 226
Collectors (java.util.stream.Collectors): 219
Test (org.testng.annotations.Test): 209