
Example 46 with Entry

use of java.util.Map.Entry in project hadoop by apache.

the class StageAllocatorGreedyRLE method computeStageAllocation.

@Override
public Map<ReservationInterval, Resource> computeStageAllocation(Plan plan, Map<Long, Resource> planLoads, RLESparseResourceAllocation planModifications, ReservationRequest rr, long stageEarliestStart, long stageDeadline, String user, ReservationId oldId) throws PlanningException {
    // abort early if the interval is not satisfiable
    if (stageEarliestStart + rr.getDuration() > stageDeadline) {
        return null;
    }
    Map<ReservationInterval, Resource> allocationRequests = new HashMap<ReservationInterval, Resource>();
    Resource totalCapacity = plan.getTotalCapacity();
    // compute the gang as a resource and get the duration
    Resource sizeOfGang = Resources.multiply(rr.getCapability(), rr.getConcurrency());
    long dur = rr.getDuration();
    long step = plan.getStep();
    // ceil the duration to the next multiple of the plan step
    if (dur % step != 0) {
        dur += (step - (dur % step));
    }
    // we know for sure that this division has no remainder (part of the
    // contract with the user, validated beforehand)
    int gangsToPlace = rr.getNumContainers() / rr.getConcurrency();
    // get available resources from plan
    RLESparseResourceAllocation netRLERes = plan.getAvailableResourceOverTime(user, oldId, stageEarliestStart, stageDeadline);
    // remove plan modifications
    netRLERes = RLESparseResourceAllocation.merge(plan.getResourceCalculator(), totalCapacity, netRLERes, planModifications, RLEOperator.subtract, stageEarliestStart, stageDeadline);
    // loop trying to place gangs until we are done, or we are considering
    // an invalid range of times
    while (gangsToPlace > 0 && stageEarliestStart + dur <= stageDeadline) {
        // as we run along we remember how many gangs we can fit, and what
        // was the most constraining moment in time (we will restart just
        // after that to place the next batch)
        int maxGang = gangsToPlace;
        long minPoint = -1;
        // focus our attention to a time-range under consideration
        NavigableMap<Long, Resource> partialMap = netRLERes.getRangeOverlapping(stageEarliestStart, stageDeadline).getCumulative();
        // revert the map for right-to-left allocation
        if (!allocateLeft) {
            partialMap = partialMap.descendingMap();
        }
        Iterator<Entry<Long, Resource>> netIt = partialMap.entrySet().iterator();
        long oldT = stageDeadline;
        // scan the intervals in this range and compute how many gangs fit in
        // each interval (the outer loop restarts after the most constrained point)
        while (maxGang > 0 && netIt.hasNext()) {
            long t;
            Resource curAvailRes;
            Entry<Long, Resource> e = netIt.next();
            if (allocateLeft) {
                t = Math.max(e.getKey(), stageEarliestStart);
                curAvailRes = e.getValue();
            } else {
                t = oldT;
                oldT = e.getKey();
                //attention: higher means lower, because we reversed the map direction
                curAvailRes = partialMap.higherEntry(t).getValue();
            }
            // check exit/skip conditions
            if (curAvailRes == null) {
                // skip undefined regions (should not happen except at the borders)
                continue;
            }
            if (exitCondition(t, stageEarliestStart, stageDeadline, dur)) {
                break;
            }
            // compute maximum number of gangs we could fit
            int curMaxGang = (int) Math.floor(Resources.divide(plan.getResourceCalculator(), totalCapacity, curAvailRes, sizeOfGang));
            curMaxGang = Math.min(gangsToPlace, curMaxGang);
            // keep the most constrained (minimum) gang count seen so far, and
            // remember where we found the minimum (useful for the next attempts)
            if (curMaxGang <= maxGang) {
                maxGang = curMaxGang;
                minPoint = t;
            }
        }
        // update data structures that retain the progress made so far
        gangsToPlace = trackProgress(planModifications, rr, stageEarliestStart, stageDeadline, allocationRequests, dur, gangsToPlace, maxGang);
        // reset the next range of time-intervals to deal with
        if (allocateLeft) {
            // set the earliest start to the min of the next constraining point
            // and the end of this allocation
            if (partialMap.higherKey(minPoint) == null) {
                stageEarliestStart = stageEarliestStart + dur;
            } else {
                stageEarliestStart = Math.min(partialMap.higherKey(minPoint), stageEarliestStart + dur);
            }
        } else {
            // same as above moving right-to-left
            if (partialMap.higherKey(minPoint) == null) {
                stageDeadline = stageDeadline - dur;
            } else {
                stageDeadline = Math.max(partialMap.higherKey(minPoint), stageDeadline - dur);
            }
        }
    }
    // if no gangs are left to place we succeed and return the allocation
    if (gangsToPlace == 0) {
        return allocationRequests;
    } else {
        // we failed to place all gangs, so undo the temporary plan
        // modifications recorded so far (needed for ANY)
        for (Map.Entry<ReservationInterval, Resource> tempAllocation : allocationRequests.entrySet()) {
            planModifications.removeInterval(tempAllocation.getKey(), tempAllocation.getValue());
        }
        // and return null to signal failure in this allocation
        return null;
    }
}
Also used : HashMap(java.util.HashMap) Resource(org.apache.hadoop.yarn.api.records.Resource) ReservationInterval(org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval) Entry(java.util.Map.Entry) RLESparseResourceAllocation(org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation) NavigableMap(java.util.NavigableMap) Map(java.util.Map)
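
The right-to-left branch above leans on a subtlety of NavigableMap: once the map is reversed with descendingMap(), higherEntry(t) returns the next entry in descending order, i.e. the entry with the greatest key strictly less than t in natural order (hence the "higher means lower" comment). A minimal, self-contained sketch of that behavior; the map contents are made up for illustration:

import java.util.Map.Entry;
import java.util.NavigableMap;
import java.util.TreeMap;

public class DescendingHigherEntryDemo {
    public static void main(String[] args) {
        // hypothetical cumulative allocation: time -> "resource" (Integer stand-in)
        NavigableMap<Long, Integer> cumulative = new TreeMap<>();
        cumulative.put(0L, 10);
        cumulative.put(5L, 20);
        cumulative.put(10L, 15);

        // reversed view for right-to-left traversal, as in the allocator above
        NavigableMap<Long, Integer> reversed = cumulative.descendingMap();

        // on the descending view, "higher" means the next key in descending
        // order, i.e. the greatest key strictly LESS than 10 in natural order
        Entry<Long, Integer> e = reversed.higherEntry(10L);
        System.out.println(e); // prints 5=20
    }
}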

Example 47 with Entry

use of java.util.Map.Entry in project hadoop by apache.

the class RLESparseResourceAllocation method merge.

private static NavigableMap<Long, Resource> merge(ResourceCalculator resCalc, Resource clusterResource, NavigableMap<Long, Resource> a, NavigableMap<Long, Resource> b, long start, long end, RLEOperator operator) throws PlanningException {
    // handle special cases of empty input
    if (a == null || a.isEmpty()) {
        if (operator == RLEOperator.subtract || operator == RLEOperator.subtractTestNonNegative) {
            return negate(operator, b);
        } else {
            return b;
        }
    }
    if (b == null || b.isEmpty()) {
        return a;
    }
    // define iterators and support variables
    Iterator<Entry<Long, Resource>> aIt = a.entrySet().iterator();
    Iterator<Entry<Long, Resource>> bIt = b.entrySet().iterator();
    Entry<Long, Resource> curA = aIt.next();
    Entry<Long, Resource> curB = bIt.next();
    Entry<Long, Resource> lastA = null;
    Entry<Long, Resource> lastB = null;
    boolean aIsDone = false;
    boolean bIsDone = false;
    TreeMap<Long, Resource> out = new TreeMap<Long, Resource>();
    while (!(curA.equals(lastA) && curB.equals(lastB))) {
        Resource outRes;
        long time = -1;
        // curA is smaller than curB
        if (bIsDone || (curA.getKey() < curB.getKey() && !aIsDone)) {
            outRes = combineValue(operator, resCalc, clusterResource, curA, lastB);
            time = (curA.getKey() < start) ? start : curA.getKey();
            lastA = curA;
            if (aIt.hasNext()) {
                curA = aIt.next();
            } else {
                aIsDone = true;
            }
        } else {
            // curB is smaller than curA
            if (aIsDone || (curA.getKey() > curB.getKey() && !bIsDone)) {
                outRes = combineValue(operator, resCalc, clusterResource, lastA, curB);
                time = (curB.getKey() < start) ? start : curB.getKey();
                lastB = curB;
                if (bIt.hasNext()) {
                    curB = bIt.next();
                } else {
                    bIsDone = true;
                }
            } else {
                // curA is equal to curB
                outRes = combineValue(operator, resCalc, clusterResource, curA, curB);
                time = (curA.getKey() < start) ? start : curA.getKey();
                lastA = curA;
                if (aIt.hasNext()) {
                    curA = aIt.next();
                } else {
                    aIsDone = true;
                }
                lastB = curB;
                if (bIt.hasNext()) {
                    curB = bIt.next();
                } else {
                    bIsDone = true;
                }
            }
        }
        // add to out if not redundant
        addIfNeeded(out, time, outRes);
    }
    addIfNeeded(out, end, null);
    return out;
}
Also used : Entry(java.util.Map.Entry) Resource(org.apache.hadoop.yarn.api.records.Resource) TreeMap(java.util.TreeMap)
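
The merge above advances two iterators over run-length-encoded maps and combines values at every breakpoint of either input. A stripped-down sketch of the same idea, adding two Long-to-Integer step functions by taking the union of their breakpoints (a simpler pass than the dual-iterator walk; the class and method names are illustrative, not part of Hadoop):

import java.util.NavigableMap;
import java.util.TreeMap;

public class StepFunctionMerge {
    /**
     * Adds two step functions represented as breakpoint -> value maps.
     * A key's value holds from that key until the next breakpoint.
     */
    static NavigableMap<Long, Integer> add(NavigableMap<Long, Integer> a,
                                           NavigableMap<Long, Integer> b) {
        TreeMap<Long, Integer> out = new TreeMap<>();
        // union of all breakpoints from both inputs
        TreeMap<Long, Integer> breakpoints = new TreeMap<>();
        breakpoints.putAll(a);
        breakpoints.putAll(b);
        for (Long t : breakpoints.keySet()) {
            // value of each step function at time t (0 if not yet defined)
            int va = (a.floorEntry(t) == null) ? 0 : a.floorEntry(t).getValue();
            int vb = (b.floorEntry(t) == null) ? 0 : b.floorEntry(t).getValue();
            out.put(t, va + vb);
        }
        return out;
    }

    public static void main(String[] args) {
        NavigableMap<Long, Integer> a = new TreeMap<>();
        a.put(0L, 2);
        a.put(10L, 0);
        NavigableMap<Long, Integer> b = new TreeMap<>();
        b.put(5L, 3);
        System.out.println(add(a, b)); // {0=2, 5=5, 10=3}
    }
}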

Example 48 with Entry

use of java.util.Map.Entry in project hadoop by apache.

the class FSImageTestUtil method assertPropertiesFilesSame.

/**
   * Assert that a set of properties files all contain the same data.
   *
   * @param propFiles the files to compare.
   * @param ignoredProperties the property names to be ignored during
   *                          comparison.
   * @throws IOException if the files cannot be opened or read
   * @throws AssertionError if the files differ
   */
public static void assertPropertiesFilesSame(File[] propFiles, Set<String> ignoredProperties) throws IOException {
    Set<Map.Entry<Object, Object>> prevProps = null;
    for (File f : propFiles) {
        Properties props;
        FileInputStream is = new FileInputStream(f);
        try {
            props = new Properties();
            props.load(is);
        } finally {
            IOUtils.closeStream(is);
        }
        if (prevProps == null) {
            prevProps = props.entrySet();
        } else {
            Set<Entry<Object, Object>> diff = Sets.symmetricDifference(prevProps, props.entrySet());
            Iterator<Entry<Object, Object>> it = diff.iterator();
            while (it.hasNext()) {
                Entry<Object, Object> entry = it.next();
                if (ignoredProperties != null && ignoredProperties.contains(entry.getKey())) {
                    continue;
                }
                fail("Properties file " + f + " differs from " + propFiles[0]);
            }
        }
    }
}
Also used : Entry(java.util.Map.Entry) Properties(java.util.Properties) RandomAccessFile(java.io.RandomAccessFile) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) FSImageFile(org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile) File(java.io.File) FileInputStream(java.io.FileInputStream)
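
Sets.symmetricDifference in this test comes from Guava. A rough plain-JDK sketch of the same check, comparing over the union of keys instead of diffing entry sets (the helper name is made up for illustration):

import java.util.HashSet;
import java.util.Objects;
import java.util.Properties;
import java.util.Set;

public class PropsCompare {
    /** True if the two Properties agree on every key outside ignoredKeys. */
    static boolean sameIgnoring(Properties p1, Properties p2, Set<String> ignoredKeys) {
        // compare over the union of keys so extra keys on either side are caught
        Set<String> keys = new HashSet<>(p1.stringPropertyNames());
        keys.addAll(p2.stringPropertyNames());
        for (String key : keys) {
            if (ignoredKeys != null && ignoredKeys.contains(key)) {
                continue; // ignored during comparison, as in the test above
            }
            if (!Objects.equals(p1.getProperty(key), p2.getProperty(key))) {
                return false; // a non-ignored property differs
            }
        }
        return true;
    }
}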

Example 49 with Entry

use of java.util.Map.Entry in project hadoop by apache.

the class TestRpcCallCache method testCacheFunctionality.

@Test
public void testCacheFunctionality() throws UnknownHostException {
    RpcCallCache cache = new RpcCallCache("Test", 10);
    // Add 20 entries to the cache and only last 10 should be retained
    int size = 0;
    for (int clientId = 0; clientId < 20; clientId++) {
        InetAddress clientIp = InetAddress.getByName("1.1.1." + clientId);
        System.out.println("Adding " + clientIp);
        cache.checkOrAddToCache(clientIp, 0);
        size = Math.min(++size, 10);
        System.out.println("Cache size " + cache.size());
        // Ensure the cache size is correct
        assertEquals(size, cache.size());
        // Ensure the cache entries are correct
        int startEntry = Math.max(clientId - 10 + 1, 0);
        Iterator<Entry<ClientRequest, CacheEntry>> iterator = cache.iterator();
        for (int i = 0; i < size; i++) {
            ClientRequest key = iterator.next().getKey();
            System.out.println("Entry " + key.getClientId());
            assertEquals(InetAddress.getByName("1.1.1." + (startEntry + i)), key.getClientId());
        }
        // Ensure cache entries are returned as in progress.
        for (int i = 0; i < size; i++) {
            CacheEntry e = cache.checkOrAddToCache(InetAddress.getByName("1.1.1." + (startEntry + i)), 0);
            assertNotNull(e);
            assertTrue(e.isInProgress());
            assertFalse(e.isCompleted());
        }
    }
}
Also used : Entry(java.util.Map.Entry) CacheEntry(org.apache.hadoop.oncrpc.RpcCallCache.CacheEntry) InetAddress(java.net.InetAddress) ClientRequest(org.apache.hadoop.oncrpc.RpcCallCache.ClientRequest) Test(org.junit.Test)
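
The keep-only-the-last-N behavior exercised by this test is the classic bounded-cache pattern. A generic sketch with LinkedHashMap and removeEldestEntry (an illustration of the pattern, not RpcCallCache's actual implementation):

import java.util.LinkedHashMap;
import java.util.Map;

public class BoundedCache<K, V> extends LinkedHashMap<K, V> {
    private final int maxEntries;

    public BoundedCache(int maxEntries) {
        // accessOrder=false keeps insertion order, matching a FIFO-style cap
        super(16, 0.75f, false);
        this.maxEntries = maxEntries;
    }

    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        // evict the oldest entry once the cap is exceeded
        return size() > maxEntries;
    }

    public static void main(String[] args) {
        BoundedCache<Integer, String> cache = new BoundedCache<>(10);
        for (int i = 0; i < 20; i++) {
            cache.put(i, "client-" + i);
        }
        System.out.println(cache.size());   // 10
        System.out.println(cache.keySet()); // [10, 11, ..., 19]
    }
}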

Example 50 with Entry

use of java.util.Map.Entry in project MVCHelper by LuckyJayce.

the class ABSTestCaseFragment method updateRight.

private void updateRight() {
    TestCaseData data = datas.get(selectPosition);
    // paramsRecyclerView.setText(param(data));
    resultTextView.setText(data.result);
    switch(data.status) {
        case -1:
            resultStateTextView.setText("ERROR");
            break;
        case 0:
            resultStateTextView.setText("");
            break;
        case 1:
            resultStateTextView.setText("RUNNING");
            break;
        case 2:
            resultStateTextView.setText("SUCCESS");
            break;
    }
    // paramsAdapter.notifyDataSetChanged();
    tasksAdapter.notifyDataSetChangedHF();
    String json;
    json = gson.toJson(data.task);
    map2 = gson.fromJson(json, map2.getClass());
    Object task = data.task;
    Object object = task;
    for (ParamLine line : lines.values()) {
        line.cancel();
    }
    lines.clear();
    paramsRecyclerView.removeAllViews();
    for (int i = 0; i < map2.size(); i++) {
        ParamLine paramLine = new ParamLine();
        View itemView = inflater.inflate(R.layout.testcase_param_item, paramsRecyclerView, false);
        paramsRecyclerView.addView(itemView);
        paramLine.itemView = itemView;
        paramLine.keyTextView = (TextView) itemView.findViewById(R.id.textView1);
        paramLine.valueEditText = (EditText) itemView.findViewById(R.id.editText1);
        paramLine.valueGetButton = (Button) itemView.findViewById(R.id.testcase_param_item_paramGet_button);
        Object value = map2.valueAt(i);
        String key = map2.keyAt(i);
        paramLine.key = key;
        paramLine.keyTextView.setText(key);
        paramLine.valueEditText.removeTextChangedListener(paramLine.textWatcher);
        paramLine.valueEditText.setText(String.valueOf(value));
        paramLine.valueEditText.addTextChangedListener(paramLine.textWatcher);
        paramLine.valueGetButton.setOnClickListener(paramLine.onClickListener);
        boolean has = false;
        if (data.paramGets.containsKey(key)) {
            has = true;
            paramLine.paramGetTask = data.paramGets.get(key);
        }
        if (!has) {
            out: for (Entry<String[], IAsyncTask<Map<String, String>>> entry : data.paramGetsMap.entrySet()) {
                for (String param : entry.getKey()) {
                    if (param.equals(key)) {
                        has = true;
                        paramLine.paramGetTaskMap = entry.getValue();
                        break out;
                    }
                }
            }
        }
        if (has) {
            paramLine.valueGetButton.setVisibility(View.VISIBLE);
        } else {
            paramLine.valueGetButton.setVisibility(View.GONE);
        }
        try {
            Field field = object.getClass().getDeclaredField(key);
            field.setAccessible(true);
            paramLine.field = field;
        } catch (Exception e) {
            e.printStackTrace();
        }
        lines.put(key, paramLine);
    }
}
Also used : Field(java.lang.reflect.Field) Entry(java.util.Map.Entry) View(android.view.View) TextView(android.widget.TextView) RecyclerView(android.support.v7.widget.RecyclerView) Map(java.util.Map) ArrayListMap(com.shizhefei.utils.ArrayListMap)
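
The out: labeled break in updateRight() is the standard way to escape a nested loop over an entrySet whose keys are arrays. A self-contained sketch of that lookup pattern; the map contents are made up:

import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;

public class LabeledBreakLookup {
    /** Finds the value whose String[] key contains the given parameter name. */
    static String findByParam(Map<String[], String> byParams, String key) {
        String found = null;
        out:
        for (Entry<String[], String> entry : byParams.entrySet()) {
            for (String param : entry.getKey()) {
                if (param.equals(key)) {
                    found = entry.getValue();
                    break out; // stop scanning both loops on the first match
                }
            }
        }
        return found;
    }

    public static void main(String[] args) {
        Map<String[], String> byParams = new HashMap<>();
        byParams.put(new String[] { "userId", "token" }, "loginTask");
        byParams.put(new String[] { "city" }, "weatherTask");
        System.out.println(findByParam(byParams, "token")); // loginTask
    }
}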

Aggregations

Entry (java.util.Map.Entry): 2862
Map (java.util.Map): 804
HashMap (java.util.HashMap): 786
ArrayList (java.util.ArrayList): 749
List (java.util.List): 579
IOException (java.io.IOException): 314
Iterator (java.util.Iterator): 311
Test (org.junit.Test): 308
Set (java.util.Set): 294
HashSet (java.util.HashSet): 271
LinkedHashMap (java.util.LinkedHashMap): 194
Collection (java.util.Collection): 186
Collectors (java.util.stream.Collectors): 179
File (java.io.File): 146
TreeMap (java.util.TreeMap): 125
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 114
Key (org.apache.accumulo.core.data.Key): 112
Value (org.apache.accumulo.core.data.Value): 111
Collections (java.util.Collections): 104
LinkedList (java.util.LinkedList): 103