
Example 46 with TreeSet

Use of java.util.TreeSet in project hadoop by apache.

The class FifoCandidatesSelector, method selectCandidates.

@Override
public Map<ApplicationAttemptId, Set<RMContainer>> selectCandidates(Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates, Resource clusterResource, Resource totalPreemptionAllowed) {
    // Calculate how much resources we need to preempt
    preemptableAmountCalculator.computeIdealAllocation(clusterResource, totalPreemptionAllowed);
    // Previous selectors (with higher priority) could have already
    // selected containers. We need to deduct preemptable resources
    // based on already selected candidates.
    CapacitySchedulerPreemptionUtils.deductPreemptableResourcesBasedSelectedCandidates(preemptionContext, selectedCandidates);
    List<RMContainer> skippedAMContainerlist = new ArrayList<>();
    // Loop all leaf queues
    for (String queueName : preemptionContext.getLeafQueueNames()) {
        // check if preemption disabled for the queue
        if (preemptionContext.getQueueByPartition(queueName, RMNodeLabelsManager.NO_LABEL).preemptionDisabled) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("skipping from queue=" + queueName + " because it's a non-preemptable queue");
            }
            continue;
        }
        // compute resToObtainByPartition, considering inter-queue preemption
        LeafQueue leafQueue = preemptionContext.getQueueByPartition(queueName, RMNodeLabelsManager.NO_LABEL).leafQueue;
        Map<String, Resource> resToObtainByPartition = CapacitySchedulerPreemptionUtils.getResToObtainByPartitionForLeafQueue(preemptionContext, queueName, clusterResource);
        try {
            leafQueue.getReadLock().lock();
            // go through all ignore-partition-exclusivity containers first to make
            // sure such containers become preemption candidates first
            Map<String, TreeSet<RMContainer>> ignorePartitionExclusivityContainers = leafQueue.getIgnoreExclusivityRMContainers();
            for (String partition : resToObtainByPartition.keySet()) {
                if (ignorePartitionExclusivityContainers.containsKey(partition)) {
                    TreeSet<RMContainer> rmContainers = ignorePartitionExclusivityContainers.get(partition);
                    // Check containers in reverse order so the later-submitted
                    // application's containers become preemption candidates first.
                    for (RMContainer c : rmContainers.descendingSet()) {
                        if (CapacitySchedulerPreemptionUtils.isContainerAlreadySelected(c, selectedCandidates)) {
                            // Skip already selected containers
                            continue;
                        }
                        boolean preempted = CapacitySchedulerPreemptionUtils.tryPreemptContainerAndDeductResToObtain(rc, preemptionContext, resToObtainByPartition, c, clusterResource, selectedCandidates, totalPreemptionAllowed);
                        if (!preempted) {
                            continue;
                        }
                    }
                }
            }
            // preempt other containers
            Resource skippedAMSize = Resource.newInstance(0, 0);
            Iterator<FiCaSchedulerApp> desc = leafQueue.getOrderingPolicy().getPreemptionIterator();
            while (desc.hasNext()) {
                FiCaSchedulerApp fc = desc.next();
                // stop when no more preemption is needed
                if (resToObtainByPartition.isEmpty()) {
                    break;
                }
                preemptFrom(fc, clusterResource, resToObtainByPartition, skippedAMContainerlist, skippedAMSize, selectedCandidates, totalPreemptionAllowed);
            }
            // Can try preempting AMContainers (still saving at most
            // maxAMCapacityForThisQueue AMResources) if more resources still
            // need to be preempted from this queue.
            Resource maxAMCapacityForThisQueue = Resources.multiply(Resources.multiply(clusterResource, leafQueue.getAbsoluteCapacity()), leafQueue.getMaxAMResourcePerQueuePercent());
            preemptAMContainers(clusterResource, selectedCandidates, skippedAMContainerlist, resToObtainByPartition, skippedAMSize, maxAMCapacityForThisQueue, totalPreemptionAllowed);
        } finally {
            leafQueue.getReadLock().unlock();
        }
    }
    return selectedCandidates;
}
Also used : ArrayList(java.util.ArrayList) Resource(org.apache.hadoop.yarn.api.records.Resource) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) LeafQueue(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue) TreeSet(java.util.TreeSet) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp)
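
The TreeSet-specific idiom in this selector is descendingSet(): the ignore-partition-exclusivity containers sit in a sorted set, and iterating the descending view visits them from the largest element down, so the highest-ordered containers become preemption candidates first. A minimal, self-contained sketch of that pattern follows; the Container record and its priority-based comparator are illustrative stand-ins, not taken from the Hadoop code.

import java.util.Comparator;
import java.util.TreeSet;

public class DescendingSetSketch {

    // Hypothetical stand-in for RMContainer: an id plus a priority used for ordering.
    record Container(int id, int priority) { }

    public static void main(String[] args) {
        // Order containers by priority, breaking ties by id so the TreeSet
        // never silently drops elements it considers "equal".
        TreeSet<Container> containers = new TreeSet<>(
                Comparator.comparingInt(Container::priority)
                          .thenComparingInt(Container::id));

        containers.add(new Container(1, 5));
        containers.add(new Container(2, 1));
        containers.add(new Container(3, 9));

        // descendingSet() is a live, reversed view of the same set: iterate from
        // the highest priority down, the way the selector visits candidates.
        for (Container c : containers.descendingSet()) {
            System.out.println("considering container " + c.id()
                    + " with priority " + c.priority());
        }
    }
}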

Example 47 with TreeSet

Use of java.util.TreeSet in project hadoop by apache.

The class TestHBaseStorageFlowRunCompaction, method testProcessSummationMoreCellsSumFinal.

@Test
public void testProcessSummationMoreCellsSumFinal() throws IOException {
    FlowScanner fs = getFlowScannerForTestingCompaction();
    // note down the current timestamp
    long currentTimestamp = System.currentTimeMillis();
    long cellValue1 = 1236L;
    long cellValue2 = 28L;
    List<Tag> tags = new ArrayList<>();
    Tag t = new Tag(AggregationOperation.SUM_FINAL.getTagType(), "application_1234588888_999888");
    tags.add(t);
    byte[] tagByteArray = Tag.fromList(tags);
    SortedSet<Cell> currentColumnCells = new TreeSet<Cell>(KeyValue.COMPARATOR);
    // create a cell with a VERY old timestamp and attribute SUM_FINAL
    Cell c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily, aQualifier, 120L, Bytes.toBytes(cellValue1), tagByteArray);
    currentColumnCells.add(c1);
    tags = new ArrayList<>();
    t = new Tag(AggregationOperation.SUM.getTagType(), "application_100000000001_119101");
    tags.add(t);
    tagByteArray = Tag.fromList(tags);
    // create a cell with a VERY old timestamp but with attribute SUM
    Cell c2 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily, aQualifier, 130L, Bytes.toBytes(cellValue2), tagByteArray);
    currentColumnCells.add(c2);
    List<Cell> cells = fs.processSummationMajorCompaction(currentColumnCells, new LongConverter(), currentTimestamp);
    assertNotNull(cells);
    // we should be getting back two cells
    // one is the flow sum cell
    // another is the cell with SUM attribute
    assertEquals(2, cells.size());
    Cell returnedCell = cells.get(0);
    assertNotNull(returnedCell);
    long inputTs1 = c1.getTimestamp();
    long inputTs2 = c2.getTimestamp();
    long returnTs = returnedCell.getTimestamp();
    long returnValue = Bytes.toLong(CellUtil.cloneValue(returnedCell));
    // the SUM cell keeps its original timestamp; the SUM_FINAL cell is folded
    // into a new flow sum cell stamped at or after the noted current timestamp
    if (returnValue == cellValue2) {
        assertTrue(returnTs == inputTs2);
    } else if (returnValue == cellValue1) {
        assertTrue(returnTs >= currentTimestamp);
        assertTrue(returnTs != inputTs1);
    } else {
        // raise a failure since we expect only these two values back
        Assert.fail();
    }
}
Also used : LongConverter(org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) Tag(org.apache.hadoop.hbase.Tag) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)
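
What TreeSet contributes in this test is the constructor that takes an explicit Comparator: KeyValue.COMPARATOR keeps the cells in HBase key order (for the same coordinates, newer timestamps sort first), so processSummationMajorCompaction sees them in a deterministic order regardless of insertion order. A rough plain-Java analogue of that pattern is sketched below; the CellLike record and its ordering are illustrative, not the real HBase comparator.

import java.util.Comparator;
import java.util.SortedSet;
import java.util.TreeSet;

public class ComparatorOrderedSetSketch {

    // Hypothetical, simplified stand-in for an HBase Cell.
    record CellLike(String qualifier, long timestamp, long value) { }

    public static void main(String[] args) {
        // Cells with the same qualifier sort together; within a qualifier,
        // newer timestamps come first, loosely mimicking HBase's KeyValue order.
        Comparator<CellLike> order = Comparator
                .comparing(CellLike::qualifier)
                .thenComparing(Comparator.comparingLong(CellLike::timestamp).reversed());

        SortedSet<CellLike> cells = new TreeSet<>(order);
        cells.add(new CellLike("m", 120L, 1236L));
        cells.add(new CellLike("m", 130L, 28L));

        // Iteration order is fixed by the comparator, not by insertion order.
        cells.forEach(c -> System.out.println(c.qualifier() + " @ " + c.timestamp()));
    }
}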

Example 48 with TreeSet

Use of java.util.TreeSet in project hadoop by apache.

The class TestHBaseStorageFlowRunCompaction, method testProcessSummationOneCellSumFinal.

@Test
public void testProcessSummationOneCellSumFinal() throws IOException {
    FlowScanner fs = getFlowScannerForTestingCompaction();
    // note down the current timestamp
    long currentTimestamp = System.currentTimeMillis();
    List<Tag> tags = new ArrayList<>();
    Tag t = new Tag(AggregationOperation.SUM_FINAL.getTagType(), "application_123458888888_999888");
    tags.add(t);
    byte[] tagByteArray = Tag.fromList(tags);
    SortedSet<Cell> currentColumnCells = new TreeSet<Cell>(KeyValue.COMPARATOR);
    // create a cell with a VERY old timestamp
    Cell c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily, aQualifier, 120L, Bytes.toBytes(1110L), tagByteArray);
    currentColumnCells.add(c1);
    List<Cell> cells = fs.processSummationMajorCompaction(currentColumnCells, new LongConverter(), currentTimestamp);
    assertNotNull(cells);
    // we should not get the same cell back
    // but we get back the flow cell
    assertEquals(1, cells.size());
    Cell returnedCell = cells.get(0);
    // it's NOT the same cell
    assertNotEquals(c1, returnedCell);
    long inputTs = c1.getTimestamp();
    long returnTs = returnedCell.getTimestamp();
    // the returned Ts will be far greater than input ts as well as the noted
    // current timestamp
    assertTrue(returnTs > inputTs);
    assertTrue(returnTs >= currentTimestamp);
}
Also used : LongConverter(org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) Tag(org.apache.hadoop.hbase.Tag) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)

Example 49 with TreeSet

Use of java.util.TreeSet in project hadoop by apache.

The class TimelineEntityReader, method readEntities.

/**
   * Reads and deserializes a set of timeline entities from the HBase storage.
   * It goes through all the available results and returns at most the number of
   * entries specified by the limit, in the entities' natural sort order.
   *
   * @param hbaseConf HBase Configuration.
   * @param conn HBase Connection.
   * @return a set of <cite>TimelineEntity</cite> objects.
   * @throws IOException if any exception is encountered while reading entities.
   */
public Set<TimelineEntity> readEntities(Configuration hbaseConf, Connection conn) throws IOException {
    validateParams();
    augmentParams(hbaseConf, conn);
    NavigableSet<TimelineEntity> entities = new TreeSet<>();
    FilterList filterList = createFilterList();
    if (LOG.isDebugEnabled() && filterList != null) {
        LOG.debug("FilterList created for scan is - " + filterList);
    }
    ResultScanner results = getResults(hbaseConf, conn, filterList);
    try {
        for (Result result : results) {
            TimelineEntity entity = parseEntity(result);
            if (entity == null) {
                continue;
            }
            entities.add(entity);
            if (!sortedKeys) {
                if (entities.size() > filters.getLimit()) {
                    entities.pollLast();
                }
            } else {
                if (entities.size() == filters.getLimit()) {
                    break;
                }
            }
        }
        return entities;
    } finally {
        results.close();
    }
}
Also used : ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) TreeSet(java.util.TreeSet) FilterList(org.apache.hadoop.hbase.filter.FilterList) TimelineEntity(org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity) Result(org.apache.hadoop.hbase.client.Result)
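
Here the TreeSet doubles as a bounded, ordered buffer: when the underlying results are not already sorted, each parsed entity is added and pollLast() evicts the largest element once the set exceeds the limit, so only the first limit entities in natural order remain. A minimal sketch of that keep-the-smallest-N pattern with plain integers (the limit value and the input values are illustrative):

import java.util.NavigableSet;
import java.util.TreeSet;

public class BoundedTreeSetSketch {

    public static void main(String[] args) {
        int limit = 3; // illustrative limit, like filters.getLimit()
        NavigableSet<Integer> smallest = new TreeSet<>();

        for (int value : new int[] { 42, 7, 19, 3, 88, 11 }) {
            smallest.add(value);
            // Once over the limit, evict the largest element so the set
            // always holds the "limit" smallest values seen so far.
            if (smallest.size() > limit) {
                smallest.pollLast();
            }
        }

        System.out.println(smallest); // prints [3, 7, 11]
    }
}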

Example 50 with TreeSet

Use of java.util.TreeSet in project hbase by apache.

The class FirstKeyValueMatchingQualifiersFilter, method parseFrom.

/**
   * @param pbBytes A pb serialized {@link FirstKeyValueMatchingQualifiersFilter} instance
   * @return An instance of {@link FirstKeyValueMatchingQualifiersFilter} made from <code>pbBytes</code>
   * @throws DeserializationException if the protobuf bytes cannot be parsed
   * @see #toByteArray
   */
public static FirstKeyValueMatchingQualifiersFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
    FilterProtos.FirstKeyValueMatchingQualifiersFilter proto;
    try {
        proto = FilterProtos.FirstKeyValueMatchingQualifiersFilter.parseFrom(pbBytes);
    } catch (InvalidProtocolBufferException e) {
        throw new DeserializationException(e);
    }
    TreeSet<byte[]> qualifiers = new TreeSet<>(Bytes.BYTES_COMPARATOR);
    for (ByteString qualifier : proto.getQualifiersList()) {
        qualifiers.add(qualifier.toByteArray());
    }
    return new FirstKeyValueMatchingQualifiersFilter(qualifiers);
}
Also used : TreeSet(java.util.TreeSet) ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) InvalidProtocolBufferException(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException) FilterProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
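
A TreeSet<byte[]> only works because an explicit comparator is supplied: Java arrays compare by identity and do not implement Comparable, so Bytes.BYTES_COMPARATOR is what gives the qualifiers their lexicographic ordering and content-based de-duplication. A plain-JDK sketch of the same idea, using Arrays::compareUnsigned (Java 9+) in place of the HBase utility:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.TreeSet;

public class ByteArrayTreeSetSketch {

    public static void main(String[] args) {
        // Without a comparator, adding a second byte[] would throw
        // ClassCastException, since byte[] is not Comparable. Arrays::compareUnsigned
        // gives an unsigned lexicographic order similar to Bytes.BYTES_COMPARATOR.
        TreeSet<byte[]> qualifiers = new TreeSet<>(Arrays::compareUnsigned);

        qualifiers.add("colB".getBytes(StandardCharsets.UTF_8));
        qualifiers.add("colA".getBytes(StandardCharsets.UTF_8));
        qualifiers.add("colA".getBytes(StandardCharsets.UTF_8)); // same content, dropped

        // The set deduplicates and sorts by content, not by array identity.
        for (byte[] q : qualifiers) {
            System.out.println(new String(q, StandardCharsets.UTF_8)); // colA, colB
        }
    }
}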

Aggregations

TreeSet (java.util.TreeSet): 3785
ArrayList (java.util.ArrayList): 833
Test (org.junit.Test): 544
HashMap (java.util.HashMap): 500
HashSet (java.util.HashSet): 428
Set (java.util.Set): 422
Map (java.util.Map): 401
IOException (java.io.IOException): 374
File (java.io.File): 339
List (java.util.List): 320
TreeMap (java.util.TreeMap): 229
Iterator (java.util.Iterator): 189
SortedSet (java.util.SortedSet): 186
LinkedList (java.util.LinkedList): 110
LinkedHashSet (java.util.LinkedHashSet): 106
Date (java.util.Date): 94
Collection (java.util.Collection): 90
Comparator (java.util.Comparator): 85
Test (org.testng.annotations.Test): 81
Text (org.apache.hadoop.io.Text): 79