
Example 11 with Entry

Use of java.util.Map.Entry in project hadoop by apache.

The class StageAllocatorGreedyRLE, method computeStageAllocation.

@Override
public Map<ReservationInterval, Resource> computeStageAllocation(Plan plan, Map<Long, Resource> planLoads, RLESparseResourceAllocation planModifications, ReservationRequest rr, long stageEarliestStart, long stageDeadline, String user, ReservationId oldId) throws PlanningException {
    // abort early if the interval is not satisfiable
    if (stageEarliestStart + rr.getDuration() > stageDeadline) {
        return null;
    }
    Map<ReservationInterval, Resource> allocationRequests = new HashMap<ReservationInterval, Resource>();
    Resource totalCapacity = plan.getTotalCapacity();
    // compute the gang as a resource and get the duration
    Resource sizeOfGang = Resources.multiply(rr.getCapability(), rr.getConcurrency());
    long dur = rr.getDuration();
    long step = plan.getStep();
    // ceil the duration to the next multiple of the plan step
    if (dur % step != 0) {
        dur += (step - (dur % step));
    }
    // we know for sure that this division has no remainder (part of contract
    // with user, validated beforehand)
    int gangsToPlace = rr.getNumContainers() / rr.getConcurrency();
    // get available resources from plan
    RLESparseResourceAllocation netRLERes = plan.getAvailableResourceOverTime(user, oldId, stageEarliestStart, stageDeadline);
    // remove plan modifications
    netRLERes = RLESparseResourceAllocation.merge(plan.getResourceCalculator(), totalCapacity, netRLERes, planModifications, RLEOperator.subtract, stageEarliestStart, stageDeadline);
    // loop trying to place until we are done, or we are considering
    // an invalid range of times
    while (gangsToPlace > 0 && stageEarliestStart + dur <= stageDeadline) {
        // as we run along we remember how many gangs we can fit, and what
        // was the most constraining moment in time (we will restart just
        // after that to place the next batch)
        int maxGang = gangsToPlace;
        long minPoint = -1;
        // focus our attention on the time-range under consideration
        NavigableMap<Long, Resource> partialMap = netRLERes.getRangeOverlapping(stageEarliestStart, stageDeadline).getCumulative();
        // reverse the map for right-to-left allocation
        if (!allocateLeft) {
            partialMap = partialMap.descendingMap();
        }
        Iterator<Entry<Long, Resource>> netIt = partialMap.entrySet().iterator();
        long oldT = stageDeadline;
        // inner loop: track how many gangs fit in each
        // interval (the outer loop restarts the scan)
        while (maxGang > 0 && netIt.hasNext()) {
            long t;
            Resource curAvailRes;
            Entry<Long, Resource> e = netIt.next();
            if (allocateLeft) {
                t = Math.max(e.getKey(), stageEarliestStart);
                curAvailRes = e.getValue();
            } else {
                t = oldT;
                oldT = e.getKey();
                // attention: higher means lower, because we reversed the map direction
                curAvailRes = partialMap.higherEntry(t).getValue();
            }
            // check exit/skip conditions
            if (curAvailRes == null) {
                // skip undefined regions (should not happen except at the borders)
                continue;
            }
            if (exitCondition(t, stageEarliestStart, stageDeadline, dur)) {
                break;
            }
            // compute maximum number of gangs we could fit
            int curMaxGang = (int) Math.floor(Resources.divide(plan.getResourceCalculator(), totalCapacity, curAvailRes, sizeOfGang));
            curMaxGang = Math.min(gangsToPlace, curMaxGang);
            // keep the lowest count seen so far and where it occurs;
            // the minimum is useful for next attempts
            if (curMaxGang <= maxGang) {
                maxGang = curMaxGang;
                minPoint = t;
            }
        }
        // update data structures that retain the progress made so far
        gangsToPlace = trackProgress(planModifications, rr, stageEarliestStart, stageDeadline, allocationRequests, dur, gangsToPlace, maxGang);
        // reset the next range of time-intervals to deal with
        if (allocateLeft) {
            // end of this allocation
            if (partialMap.higherKey(minPoint) == null) {
                stageEarliestStart = stageEarliestStart + dur;
            } else {
                stageEarliestStart = Math.min(partialMap.higherKey(minPoint), stageEarliestStart + dur);
            }
        } else {
            // same as above moving right-to-left
            if (partialMap.higherKey(minPoint) == null) {
                stageDeadline = stageDeadline - dur;
            } else {
                stageDeadline = Math.max(partialMap.higherKey(minPoint), stageDeadline - dur);
            }
        }
    }
    // if no gangs are left to place we succeed and return the allocation
    if (gangsToPlace == 0) {
        return allocationRequests;
    } else {
        // we did not manage to satisfy this request, so remove the
        // unwanted side-effects from planModifications (needed for ANY)
        for (Map.Entry<ReservationInterval, Resource> tempAllocation : allocationRequests.entrySet()) {
            planModifications.removeInterval(tempAllocation.getKey(), tempAllocation.getValue());
        }
        // and return null to signal failure in this allocation
        return null;
    }
}
Also used : HashMap(java.util.HashMap) Resource(org.apache.hadoop.yarn.api.records.Resource) ReservationInterval(org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval) Entry(java.util.Map.Entry) RLESparseResourceAllocation(org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation) HashMap(java.util.HashMap) NavigableMap(java.util.NavigableMap) Map(java.util.Map)
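
The direction-flip idiom above can be shown in isolation. A minimal sketch, assuming nothing beyond the JDK: a run-length-encoded time-to-capacity map is scanned left-to-right as-is, or right-to-left through NavigableMap.descendingMap(), which is how computeStageAllocation picks its scan direction.

import java.util.Map.Entry;
import java.util.NavigableMap;
import java.util.TreeMap;

public class ScanDirectionSketch {
    public static void main(String[] args) {
        // RLE "change points": from each key onward, the value applies
        NavigableMap<Long, Integer> capacity = new TreeMap<>();
        capacity.put(100L, 5);   // 5 units available from t=100
        capacity.put(103L, 10);  // 10 units available from t=103
        capacity.put(106L, 0);   // nothing available from t=106

        boolean allocateLeft = false;  // false: allocate right-to-left
        NavigableMap<Long, Integer> view =
            allocateLeft ? capacity : capacity.descendingMap();
        for (Entry<Long, Integer> e : view.entrySet()) {
            System.out.println("t=" + e.getKey() + " avail=" + e.getValue());
        }
    }
}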

Example 12 with Entry

Use of java.util.Map.Entry in project hadoop by apache.

The class RLESparseResourceAllocation, method merge.

private static NavigableMap<Long, Resource> merge(ResourceCalculator resCalc, Resource clusterResource, NavigableMap<Long, Resource> a, NavigableMap<Long, Resource> b, long start, long end, RLEOperator operator) throws PlanningException {
    // handle special cases of empty input
    if (a == null || a.isEmpty()) {
        if (operator == RLEOperator.subtract || operator == RLEOperator.subtractTestNonNegative) {
            return negate(operator, b);
        } else {
            return b;
        }
    }
    if (b == null || b.isEmpty()) {
        return a;
    }
    // define iterators and support variables
    Iterator<Entry<Long, Resource>> aIt = a.entrySet().iterator();
    Iterator<Entry<Long, Resource>> bIt = b.entrySet().iterator();
    Entry<Long, Resource> curA = aIt.next();
    Entry<Long, Resource> curB = bIt.next();
    Entry<Long, Resource> lastA = null;
    Entry<Long, Resource> lastB = null;
    boolean aIsDone = false;
    boolean bIsDone = false;
    TreeMap<Long, Resource> out = new TreeMap<Long, Resource>();
    while (!(curA.equals(lastA) && curB.equals(lastB))) {
        Resource outRes;
        long time = -1;
        // curA is smaller than curB
        if (bIsDone || (curA.getKey() < curB.getKey() && !aIsDone)) {
            outRes = combineValue(operator, resCalc, clusterResource, curA, lastB);
            time = (curA.getKey() < start) ? start : curA.getKey();
            lastA = curA;
            if (aIt.hasNext()) {
                curA = aIt.next();
            } else {
                aIsDone = true;
            }
        } else {
            // curB is smaller than curA
            if (aIsDone || (curA.getKey() > curB.getKey() && !bIsDone)) {
                outRes = combineValue(operator, resCalc, clusterResource, lastA, curB);
                time = (curB.getKey() < start) ? start : curB.getKey();
                lastB = curB;
                if (bIt.hasNext()) {
                    curB = bIt.next();
                } else {
                    bIsDone = true;
                }
            } else {
                // curA is equal to curB
                outRes = combineValue(operator, resCalc, clusterResource, curA, curB);
                time = (curA.getKey() < start) ? start : curA.getKey();
                lastA = curA;
                if (aIt.hasNext()) {
                    curA = aIt.next();
                } else {
                    aIsDone = true;
                }
                lastB = curB;
                if (bIt.hasNext()) {
                    curB = bIt.next();
                } else {
                    bIsDone = true;
                }
            }
        }
        // add to out if not redundant
        addIfNeeded(out, time, outRes);
    }
    addIfNeeded(out, end, null);
    return out;
}
Also used : Entry(java.util.Map.Entry) Resource(org.apache.hadoop.yarn.api.records.Resource) TreeMap(java.util.TreeMap)
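
The same change-point walk can be sketched without the Hadoop types. A minimal sketch, assuming integer-valued JDK maps and swapping merge's dual-iterator state (curA/lastA, curB/lastB) for floorEntry() lookups at the union of change points; the operator is fixed to addition here rather than parameterized.

import java.util.Map.Entry;
import java.util.NavigableMap;
import java.util.TreeMap;

public class RleMergeSketch {
    // Combine two RLE maps point-wise: at every change point of either
    // input, look up the value currently in force on each side and add.
    static NavigableMap<Long, Integer> add(NavigableMap<Long, Integer> a,
                                           NavigableMap<Long, Integer> b) {
        NavigableMap<Long, Integer> out = new TreeMap<>();
        TreeMap<Long, Integer> points = new TreeMap<>(a);
        points.putAll(b);  // union of change points from both inputs
        for (Long t : points.keySet()) {
            Entry<Long, Integer> ea = a.floorEntry(t);  // a's value in force at t
            Entry<Long, Integer> eb = b.floorEntry(t);
            int va = (ea == null) ? 0 : ea.getValue();
            int vb = (eb == null) ? 0 : eb.getValue();
            out.put(t, va + vb);
        }
        return out;
    }
}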

Example 13 with Entry

Use of java.util.Map.Entry in project hadoop by apache.

The class TestRLESparseResourceAllocation, method testSkyline.

@Test
public void testSkyline() {
    ResourceCalculator resCalc = new DefaultResourceCalculator();
    RLESparseResourceAllocation rleSparseVector = new RLESparseResourceAllocation(resCalc);
    int[] alloc = { 0, 5, 10, 10, 5, 0 };
    int start = 100;
    Set<Entry<ReservationInterval, Resource>> inputs = generateAllocation(start, alloc, true).entrySet();
    for (Entry<ReservationInterval, Resource> ip : inputs) {
        rleSparseVector.addInterval(ip.getKey(), ip.getValue());
    }
    LOG.info(rleSparseVector.toString());
    Assert.assertFalse(rleSparseVector.isEmpty());
    Assert.assertEquals(Resource.newInstance(0, 0), rleSparseVector.getCapacityAtTime(99));
    Assert.assertEquals(Resource.newInstance(0, 0), rleSparseVector.getCapacityAtTime(start + alloc.length + 1));
    for (int i = 0; i < alloc.length; i++) {
        Assert.assertEquals(Resource.newInstance(1024 * (alloc[i] + i), (alloc[i] + i)), rleSparseVector.getCapacityAtTime(start + i));
    }
    Assert.assertEquals(Resource.newInstance(0, 0), rleSparseVector.getCapacityAtTime(start + alloc.length + 2));
    for (Entry<ReservationInterval, Resource> ip : inputs) {
        rleSparseVector.removeInterval(ip.getKey(), ip.getValue());
    }
    LOG.info(rleSparseVector.toString());
    for (int i = 0; i < alloc.length; i++) {
        Assert.assertEquals(Resource.newInstance(0, 0), rleSparseVector.getCapacityAtTime(start + i));
    }
    Assert.assertTrue(rleSparseVector.isEmpty());
}
Also used : DefaultResourceCalculator(org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator) ResourceCalculator(org.apache.hadoop.yarn.util.resource.ResourceCalculator) Entry(java.util.Map.Entry) DefaultResourceCalculator(org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator) Resource(org.apache.hadoop.yarn.api.records.Resource) Test(org.junit.Test)
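
A minimal sketch of the skyline behavior this test asserts, assuming nothing beyond the JDK and plain integer deltas in place of the Hadoop Resource type: adding an interval raises capacity at its start and lowers it at its end, so the capacity at any time is the running sum of deltas at or before that time, and zero outside every interval.

import java.util.Map.Entry;
import java.util.NavigableMap;
import java.util.TreeMap;

public class SkylineSketch {
    private final NavigableMap<Long, Integer> deltas = new TreeMap<>();

    void addInterval(long start, long end, int amount) {
        deltas.merge(start, amount, Integer::sum);  // capacity rises at start
        deltas.merge(end, -amount, Integer::sum);   // and falls back at end
    }

    int capacityAt(long t) {
        int cap = 0;
        for (Entry<Long, Integer> e : deltas.headMap(t, true).entrySet()) {
            cap += e.getValue();  // sum all deltas at or before t
        }
        return cap;
    }
}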

Example 14 with Entry

Use of java.util.Map.Entry in project hadoop by apache.

The class TestRLESparseResourceAllocation, method testToIntervalMap.

@Test
public void testToIntervalMap() {
    ResourceCalculator resCalc = new DefaultResourceCalculator();
    RLESparseResourceAllocation rleSparseVector = new RLESparseResourceAllocation(resCalc);
    Map<ReservationInterval, Resource> mapAllocations;
    // Check empty
    mapAllocations = rleSparseVector.toIntervalMap();
    Assert.assertTrue(mapAllocations.isEmpty());
    // Check full
    int[] alloc = { 0, 5, 10, 10, 5, 0, 5, 0 };
    int start = 100;
    Set<Entry<ReservationInterval, Resource>> inputs = generateAllocation(start, alloc, false).entrySet();
    for (Entry<ReservationInterval, Resource> ip : inputs) {
        rleSparseVector.addInterval(ip.getKey(), ip.getValue());
    }
    mapAllocations = rleSparseVector.toIntervalMap();
    Assert.assertTrue(mapAllocations.size() == 5);
    for (Entry<ReservationInterval, Resource> entry : mapAllocations.entrySet()) {
        ReservationInterval interval = entry.getKey();
        Resource resource = entry.getValue();
        if (interval.getStartTime() == 101L) {
            Assert.assertTrue(interval.getEndTime() == 102L);
            Assert.assertEquals(resource, Resource.newInstance(5 * 1024, 5));
        } else if (interval.getStartTime() == 102L) {
            Assert.assertTrue(interval.getEndTime() == 104L);
            Assert.assertEquals(resource, Resource.newInstance(10 * 1024, 10));
        } else if (interval.getStartTime() == 104L) {
            Assert.assertTrue(interval.getEndTime() == 105L);
            Assert.assertEquals(resource, Resource.newInstance(5 * 1024, 5));
        } else if (interval.getStartTime() == 105L) {
            Assert.assertTrue(interval.getEndTime() == 106L);
            Assert.assertEquals(resource, Resource.newInstance(0 * 1024, 0));
        } else if (interval.getStartTime() == 106L) {
            Assert.assertTrue(interval.getEndTime() == 107L);
            Assert.assertEquals(resource, Resource.newInstance(5 * 1024, 5));
        } else {
            Assert.fail();
        }
    }
}
Also used : DefaultResourceCalculator(org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator) ResourceCalculator(org.apache.hadoop.yarn.util.resource.ResourceCalculator) Entry(java.util.Map.Entry) DefaultResourceCalculator(org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator) Resource(org.apache.hadoop.yarn.api.records.Resource) Test(org.junit.Test)
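
A minimal sketch of the interval-map conversion this test exercises, assuming plain JDK types (Map.entry needs Java 9+) in place of ReservationInterval and Resource: each pair of adjacent change points becomes one half-open interval carrying the value at its left endpoint.

import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NavigableMap;

public class ToIntervalMapSketch {
    // Adjacent change points (t1 -> v, t2 -> ...) become the interval
    // [t1, t2) with value v, so n points yield n - 1 intervals.
    static Map<Entry<Long, Long>, Integer> toIntervals(
            NavigableMap<Long, Integer> rle) {
        Map<Entry<Long, Long>, Integer> out = new HashMap<>();
        Entry<Long, Integer> prev = null;
        for (Entry<Long, Integer> cur : rle.entrySet()) {
            if (prev != null) {
                // Map.entry gives an immutable (start, end) key
                out.put(Map.entry(prev.getKey(), cur.getKey()), prev.getValue());
            }
            prev = cur;
        }
        return out;
    }
}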

Example 15 with Entry

Use of java.util.Map.Entry in project storm by apache.

The class Testing, method completeTopology.

/**
     * Run a topology to completion, capturing all of the messages that are emitted. This only works when all of the spouts
     * are instances of {@link org.apache.storm.testing.CompletableSpout} or are overridden by MockedSources in param.
     * @param cluster the cluster to submit the topology to
     * @param topology the topology itself
     * @param param parameters describing how to complete the topology.
     * @return a map from each component to the list of tuples it emitted.
     * @throws InterruptedException if interrupted while waiting for the topology to complete.
     * @throws TException on any error from nimbus.
     */
public static Map<String, List<FixedTuple>> completeTopology(ILocalCluster cluster, StormTopology topology, CompleteTopologyParam param) throws TException, InterruptedException {
    Map<String, List<FixedTuple>> ret = null;
    IStormClusterState state = cluster.getClusterState();
    CapturedTopology<StormTopology> capTopo = captureTopology(topology);
    topology = capTopo.topology;
    String topoName = param.getTopologyName();
    if (topoName == null) {
        topoName = "topologytest-" + Utils.uuid();
    }
    Map<String, SpoutSpec> spouts = topology.get_spouts();
    MockedSources ms = param.getMockedSources();
    if (ms != null) {
        for (Entry<String, List<FixedTuple>> mocked : ms.getData().entrySet()) {
            FixedTupleSpout newSpout = new FixedTupleSpout(mocked.getValue());
            spouts.get(mocked.getKey()).set_spout_object(Thrift.serializeComponentObject(newSpout));
        }
    }
    List<Object> spoutObjects = spouts.values().stream().map((spec) -> Thrift.deserializeComponentObject(spec.get_spout_object())).collect(Collectors.toList());
    for (Object o : spoutObjects) {
        if (!(o instanceof CompletableSpout)) {
            throw new RuntimeException("Cannot complete topology unless every spout is a CompletableSpout (or mocked to be); failed by " + o);
        }
    }
    for (Object spout : spoutObjects) {
        ((CompletableSpout) spout).startup();
    }
    cluster.submitTopology(topoName, param.getStormConf(), topology);
    if (Time.isSimulating()) {
        cluster.advanceClusterTime(11);
    }
    String topoId = state.getTopoId(topoName).get();
    //Give the topology time to come up without using it to wait for the spouts to complete
    simulateWait(cluster);
    Integer timeoutMs = param.getTimeoutMs();
    if (timeoutMs == null) {
        timeoutMs = TEST_TIMEOUT_MS;
    }
    whileTimeout(timeoutMs, () -> !isEvery(spoutObjects, (o) -> ((CompletableSpout) o).isExhausted()), () -> {
        try {
            simulateWait(cluster);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    });
    KillOptions killOpts = new KillOptions();
    killOpts.set_wait_secs(0);
    cluster.killTopologyWithOpts(topoName, killOpts);
    whileTimeout(timeoutMs, () -> state.assignmentInfo(topoId, null) != null, () -> {
        try {
            simulateWait(cluster);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    });
    if (param.getCleanupState()) {
        for (Object o : spoutObjects) {
            ((CompletableSpout) o).clean();
        }
        ret = capTopo.capturer.getAndRemoveResults();
    } else {
        ret = capTopo.capturer.getAndClearResults();
    }
    return ret;
}
Also used : SimulatedTime(org.apache.storm.utils.Time.SimulatedTime) TopologyContext(org.apache.storm.task.TopologyContext) LoggerFactory(org.slf4j.LoggerFactory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) INimbus(org.apache.storm.scheduler.INimbus) ArrayList(java.util.ArrayList) Bolt(org.apache.storm.generated.Bolt) Tuple(org.apache.storm.tuple.Tuple) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) StormTopology(org.apache.storm.generated.StormTopology) Map(java.util.Map) CompleteTopologyParam(org.apache.storm.testing.CompleteTopologyParam) MkTupleParam(org.apache.storm.testing.MkTupleParam) FixedTupleSpout(org.apache.storm.testing.FixedTupleSpout) KillOptions(org.apache.storm.generated.KillOptions) MockedSources(org.apache.storm.testing.MockedSources) TestJob(org.apache.storm.testing.TestJob) MkClusterParam(org.apache.storm.testing.MkClusterParam) Logger(org.slf4j.Logger) Predicate(java.util.function.Predicate) Collection(java.util.Collection) CompletableSpout(org.apache.storm.testing.CompletableSpout) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Grouping(org.apache.storm.generated.Grouping) StreamInfo(org.apache.storm.generated.StreamInfo) FixedTuple(org.apache.storm.testing.FixedTuple) TException(org.apache.thrift.TException) Fields(org.apache.storm.tuple.Fields) IStormClusterState(org.apache.storm.cluster.IStormClusterState) Utils(org.apache.storm.utils.Utils) Collectors(java.util.stream.Collectors) GlobalStreamId(org.apache.storm.generated.GlobalStreamId) Time(org.apache.storm.utils.Time) TupleCaptureBolt(org.apache.storm.testing.TupleCaptureBolt) List(java.util.List) RegisteredGlobalState(org.apache.storm.utils.RegisteredGlobalState) ConfigUtils(org.apache.storm.utils.ConfigUtils) TupleImpl(org.apache.storm.tuple.TupleImpl) SpoutSpec(org.apache.storm.generated.SpoutSpec) Entry(java.util.Map.Entry) TrackedTopology(org.apache.storm.testing.TrackedTopology) StormTopology(org.apache.storm.generated.StormTopology) CompletableSpout(org.apache.storm.testing.CompletableSpout) TException(org.apache.thrift.TException) FixedTupleSpout(org.apache.storm.testing.FixedTupleSpout) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MockedSources(org.apache.storm.testing.MockedSources) SpoutSpec(org.apache.storm.generated.SpoutSpec) ArrayList(java.util.ArrayList) List(java.util.List) IStormClusterState(org.apache.storm.cluster.IStormClusterState) KillOptions(org.apache.storm.generated.KillOptions)
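
A hedged usage sketch for the method above. The spout id "spout1", the mock data, and the caller-supplied cluster and topology are illustrative assumptions, not taken from the Storm sources; the calls shown (MockedSources.addMockData, CompleteTopologyParam setters, Testing.completeTopology) match the API used in the example.

import java.util.List;
import java.util.Map;
import org.apache.storm.ILocalCluster;
import org.apache.storm.Testing;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.testing.CompleteTopologyParam;
import org.apache.storm.testing.FixedTuple;
import org.apache.storm.testing.MockedSources;
import org.apache.storm.tuple.Values;

public class CompleteTopologySketch {
    // Feed a spout (hypothetical id "spout1") with fixed tuples, run the
    // topology to completion, and return the captured emissions per component.
    static Map<String, List<FixedTuple>> run(ILocalCluster cluster,
                                             StormTopology topology) throws Exception {
        MockedSources mocked = new MockedSources();
        mocked.addMockData("spout1", new Values("a"), new Values("b"));
        CompleteTopologyParam param = new CompleteTopologyParam();
        param.setMockedSources(mocked);
        param.setTopologyName("completeness-demo");
        return Testing.completeTopology(cluster, topology, param);
    }
}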

Aggregations

Entry (java.util.Map.Entry) 1041
HashMap (java.util.HashMap) 295
Map (java.util.Map) 288
ArrayList (java.util.ArrayList) 258
List (java.util.List) 177
Iterator (java.util.Iterator) 113
IOException (java.io.IOException) 109
Test (org.junit.Test) 77
Set (java.util.Set) 68
LinkedHashMap (java.util.LinkedHashMap) 64
HashSet (java.util.HashSet) 62
File (java.io.File) 56
Collection (java.util.Collection) 42
TreeMap (java.util.TreeMap) 36
Properties (java.util.Properties) 35
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap) 35
TestSuite (junit.framework.TestSuite) 33
LinkedList (java.util.LinkedList) 31
NamedIcon (jmri.jmrit.catalog.NamedIcon) 28
Collectors (java.util.stream.Collectors) 27