Use of java.util.NavigableMap in project flink by apache.
The class FlinkRelDecorrelator, method decorrelateRel.
/**
* Rewrites a {@link LogicalAggregate}.
*
* @param rel Aggregate to rewrite
*/
public Frame decorrelateRel(LogicalAggregate rel) {
if (rel.getGroupType() != Aggregate.Group.SIMPLE) {
throw new AssertionError(Bug.CALCITE_461_FIXED);
}
// Aggregate itself should not reference cor vars.
assert !cm.mapRefRelToCorVar.containsKey(rel);
final RelNode oldInput = rel.getInput();
final Frame frame = getInvoke(oldInput, rel);
if (frame == null) {
// If input has not been rewritten, do not rewrite this rel.
return null;
}
final RelNode newInput = frame.r;
// map from newInput field positions to output positions of the new Project
Map<Integer, Integer> mapNewInputToProjOutputPos = Maps.newHashMap();
final int oldGroupKeyCount = rel.getGroupSet().cardinality();
// Project projects the original expressions,
// plus any correlated variables the input wants to pass along.
final List<Pair<RexNode, String>> projects = Lists.newArrayList();
List<RelDataTypeField> newInputOutput = newInput.getRowType().getFieldList();
int newPos = 0;
// oldInput has the original group by keys in the front.
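// The NavigableMap keeps the omitted constant positions sorted, so the
// post-projection at the end of this method can re-insert them in
// descending position order.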
final NavigableMap<Integer, RexLiteral> omittedConstants = new TreeMap<>();
for (int i = 0; i < oldGroupKeyCount; i++) {
final RexLiteral constant = projectedLiteral(newInput, i);
if (constant != null) {
// Exclude constants. Aggregate({true}) occurs because Aggregate({})
// would generate 1 row even when applied to an empty table.
omittedConstants.put(i, constant);
continue;
}
int newInputPos = frame.oldToNewOutputPos.get(i);
projects.add(RexInputRef.of2(newInputPos, newInputOutput));
mapNewInputToProjOutputPos.put(newInputPos, newPos);
newPos++;
}
final SortedMap<Correlation, Integer> mapCorVarToOutputPos = new TreeMap<>();
if (!frame.corVarOutputPos.isEmpty()) {
// If the input produces correlated variables, project them out right after
// the group-by keys; in the new projection list they start at
// position oldGroupKeyCount.
for (Map.Entry<Correlation, Integer> entry : frame.corVarOutputPos.entrySet()) {
projects.add(RexInputRef.of2(entry.getValue(), newInputOutput));
mapCorVarToOutputPos.put(entry.getKey(), newPos);
mapNewInputToProjOutputPos.put(entry.getValue(), newPos);
newPos++;
}
}
// add the remaining fields
final int newGroupKeyCount = newPos;
for (int i = 0; i < newInputOutput.size(); i++) {
if (!mapNewInputToProjOutputPos.containsKey(i)) {
projects.add(RexInputRef.of2(i, newInputOutput));
mapNewInputToProjOutputPos.put(i, newPos);
newPos++;
}
}
assert newPos == newInputOutput.size();
// This Project will be what the old input maps to,
// replacing any previous mapping from the old input.
RelNode newProject = RelOptUtil.createProject(newInput, projects, false);
// update mappings:
// oldInput ----> newInput
//
// newProject
// |
// oldInput ----> newInput
//
// is transformed to
//
// oldInput ----> newProject
// |
// newInput
Map<Integer, Integer> combinedMap = Maps.newHashMap();
for (Integer oldInputPos : frame.oldToNewOutputPos.keySet()) {
combinedMap.put(oldInputPos, mapNewInputToProjOutputPos.get(frame.oldToNewOutputPos.get(oldInputPos)));
}
register(oldInput, newProject, combinedMap, mapCorVarToOutputPos);
// now it's time to rewrite the Aggregate
final ImmutableBitSet newGroupSet = ImmutableBitSet.range(newGroupKeyCount);
List<AggregateCall> newAggCalls = Lists.newArrayList();
List<AggregateCall> oldAggCalls = rel.getAggCallList();
int oldInputOutputFieldCount = rel.getGroupSet().cardinality();
int newInputOutputFieldCount = newGroupSet.cardinality();
int i = -1;
for (AggregateCall oldAggCall : oldAggCalls) {
++i;
List<Integer> oldAggArgs = oldAggCall.getArgList();
List<Integer> aggArgs = Lists.newArrayList();
// Adjust the aggregate argument positions. The Aggregate does not change
// input ordering, so the input-to-output position mapping can be used to
// derive the new positions for the arguments.
for (int oldPos : oldAggArgs) {
aggArgs.add(combinedMap.get(oldPos));
}
final int filterArg = oldAggCall.filterArg < 0 ? oldAggCall.filterArg : combinedMap.get(oldAggCall.filterArg);
newAggCalls.add(oldAggCall.adaptTo(newProject, aggArgs, filterArg, oldGroupKeyCount, newGroupKeyCount));
// The old to new output position mapping will be the same as that
// of newProject, plus any aggregates that the oldAgg produces.
combinedMap.put(oldInputOutputFieldCount + i, newInputOutputFieldCount + i);
}
relBuilder.push(LogicalAggregate.create(newProject, false, newGroupSet, null, newAggCalls));
if (!omittedConstants.isEmpty()) {
final List<RexNode> postProjects = new ArrayList<>(relBuilder.fields());
for (Map.Entry<Integer, RexLiteral> entry : omittedConstants.descendingMap().entrySet()) {
postProjects.add(entry.getKey() + frame.corVarOutputPos.size(), entry.getValue());
}
relBuilder.project(postProjects);
}
// Aggregate does not change input ordering, so the correlated variables
// remain located at the same position as in the input newProject.
return register(rel, relBuilder.build(), combinedMap, mapCorVarToOutputPos);
}
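The NavigableMap earns its keep at the end of this method: omittedConstants keeps the positions of the constant group keys sorted, and iterating descendingMap() re-inserts them from the highest position down, so each List#add(index, element) call only shifts elements at or above that index and leaves the pending lower insertions untouched. Below is a minimal, self-contained sketch of that idiom, with plain Strings standing in for the RexLiteral expressions (names and positions are invented for illustration):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class DescendingReinsertSketch {
    public static void main(String[] args) {
        // positions of the omitted constants, kept sorted by the TreeMap
        NavigableMap<Integer, String> omittedConstants = new TreeMap<>();
        omittedConstants.put(0, "literal#0");
        omittedConstants.put(2, "literal#2");

        // fields produced by the rewritten Aggregate (constants stripped out)
        List<String> postProjects = new ArrayList<>(List.of("groupKey", "agg"));

        // Insert highest position first: List#add(i, e) only shifts elements
        // at index >= i, so the pending lower insertions are unaffected.
        for (Map.Entry<Integer, String> e : omittedConstants.descendingMap().entrySet()) {
            postProjects.add(e.getKey(), e.getValue());
        }
        System.out.println(postProjects);  // [literal#0, groupKey, agg, literal#2]
    }
}

With ascending iteration each index would be measured against a partially-updated list; descending iteration lets every key refer to the list as it was before any insertion.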
Use of java.util.NavigableMap in project hadoop by apache.
The class StageAllocatorGreedyRLE, method computeStageAllocation.
@Override
public Map<ReservationInterval, Resource> computeStageAllocation(Plan plan, Map<Long, Resource> planLoads, RLESparseResourceAllocation planModifications, ReservationRequest rr, long stageEarliestStart, long stageDeadline, String user, ReservationId oldId) throws PlanningException {
// abort early if the interval is not satisfiable
if (stageEarliestStart + rr.getDuration() > stageDeadline) {
return null;
}
Map<ReservationInterval, Resource> allocationRequests = new HashMap<ReservationInterval, Resource>();
Resource totalCapacity = plan.getTotalCapacity();
// compute the gang as a resource and get the duration
Resource sizeOfGang = Resources.multiply(rr.getCapability(), rr.getConcurrency());
long dur = rr.getDuration();
long step = plan.getStep();
// ceil the duration to the next multiple of the plan step
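// e.g., dur = 2500 with step = 1000 (both in ms) is rounded up to dur = 3000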
if (dur % step != 0) {
dur += (step - (dur % step));
}
// we know for sure that this division has no remainder (part of the
// contract with the user, validated beforehand)
int gangsToPlace = rr.getNumContainers() / rr.getConcurrency();
// get available resources from plan
RLESparseResourceAllocation netRLERes = plan.getAvailableResourceOverTime(user, oldId, stageEarliestStart, stageDeadline);
// remove plan modifications
netRLERes = RLESparseResourceAllocation.merge(plan.getResourceCalculator(), totalCapacity, netRLERes, planModifications, RLEOperator.subtract, stageEarliestStart, stageDeadline);
// loop trying to place gangs until we are done, or we are left considering
// an invalid range of times
while (gangsToPlace > 0 && stageEarliestStart + dur <= stageDeadline) {
// as we run along we remember how many gangs we can fit, and what
// was the most constraining moment in time (we will restart just
// after that to place the next batch)
int maxGang = gangsToPlace;
long minPoint = -1;
// focus our attention on the time range under consideration
NavigableMap<Long, Resource> partialMap = netRLERes.getRangeOverlapping(stageEarliestStart, stageDeadline).getCumulative();
// reverse the map for right-to-left allocation
if (!allocateLeft) {
partialMap = partialMap.descendingMap();
}
Iterator<Entry<Long, Resource>> netIt = partialMap.entrySet().iterator();
long oldT = stageDeadline;
// inner loop: scan the time range and track how many gangs fit in this
// interval (coordinating with the outer loop)
while (maxGang > 0 && netIt.hasNext()) {
long t;
Resource curAvailRes;
Entry<Long, Resource> e = netIt.next();
if (allocateLeft) {
t = Math.max(e.getKey(), stageEarliestStart);
curAvailRes = e.getValue();
} else {
t = oldT;
oldT = e.getKey();
// attention: higher means lower, because we reversed the map direction
curAvailRes = partialMap.higherEntry(t).getValue();
}
// check exit/skip conditions
if (curAvailRes == null) {
// skip undefined regions (should not happen except at the borders)
continue;
}
if (exitCondition(t, stageEarliestStart, stageDeadline, dur)) {
break;
}
// compute maximum number of gangs we could fit
int curMaxGang = (int) Math.floor(Resources.divide(plan.getResourceCalculator(), totalCapacity, curAvailRes, sizeOfGang));
curMaxGang = Math.min(gangsToPlace, curMaxGang);
// compare with the previous max and keep the smaller; also remember *where*
// we found the minimum (useful for next attempts)
if (curMaxGang <= maxGang) {
maxGang = curMaxGang;
minPoint = t;
}
}
// update data structures that retain the progress made so far
gangsToPlace = trackProgress(planModifications, rr, stageEarliestStart, stageDeadline, allocationRequests, dur, gangsToPlace, maxGang);
// reset the next range of time-intervals to deal with
if (allocateLeft) {
// set the earliest start to the min of the next constraining point or the
// end of this allocation
if (partialMap.higherKey(minPoint) == null) {
stageEarliestStart = stageEarliestStart + dur;
} else {
stageEarliestStart = Math.min(partialMap.higherKey(minPoint), stageEarliestStart + dur);
}
} else {
// same as above moving right-to-left
if (partialMap.higherKey(minPoint) == null) {
stageDeadline = stageDeadline - dur;
} else {
stageDeadline = Math.max(partialMap.higherKey(minPoint), stageDeadline - dur);
}
}
}
// if no gangs are left to place we succeed and return the allocation
if (gangsToPlace == 0) {
return allocationRequests;
} else {
// we did not manage to satisfy this request, so undo the unwanted
// side-effects recorded in planModifications (which are needed
// for ANY).
for (Map.Entry<ReservationInterval, Resource> tempAllocation : allocationRequests.entrySet()) {
planModifications.removeInterval(tempAllocation.getKey(), tempAllocation.getValue());
}
// and return null to signal failure in this allocation
return null;
}
}
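The reversed traversal is the subtle part here: descendingMap() returns a live view (no copying) in which all directional operations flip, so higherEntry(t) on the view returns the entry with the greatest key strictly below t in the original order, exactly what the "higher means lower" comment warns about. A small self-contained sketch of that behavior, with invented times and integer stand-ins for Resource:

import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class ReversedViewSketch {
    public static void main(String[] args) {
        // cumulative availability: value in force from each time onward
        NavigableMap<Long, Integer> cumulative = new TreeMap<>();
        cumulative.put(0L, 10);
        cumulative.put(50L, 4);
        cumulative.put(100L, 8);

        // a live reversed view, as used for right-to-left allocation
        NavigableMap<Long, Integer> reversed = cumulative.descendingMap();

        // on the view, "higher" walks toward smaller keys: the entry in
        // force just before t=100 is the one starting at t=50
        Map.Entry<Long, Integer> e = reversed.higherEntry(100L);
        System.out.println(e);  // prints 50=4
    }
}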
Use of java.util.NavigableMap in project hadoop by apache.
The class CapacityOverTimePolicy, method validate.
/**
* The validation algorithm walks over the RLE-encoded allocation and, at
* every transition point (where the start or end of the checking window
* encounters a value in the RLE), checks whether the computed integral
* exceeds the quota limit. Note that this might not find the exact time of
* a violation, but if a violation exists it will be found. The advantage is
* a much lower number of checks compared to time-slot by time-slot checks.
*
* @param plan the plan to validate against
* @param reservation the reservation allocation to test.
* @throws PlanningException if the validation fails.
*/
@Override
public void validate(Plan plan, ReservationAllocation reservation) throws PlanningException {
// rely on NoOverCommitPolicy (super) to check: 1) user-match, 2) physical
// cluster limits, and 3) maxInst (via override of available)
try {
super.validate(plan, reservation);
} catch (PlanningException p) {
//wrap it in proper quota exception
throw new PlanningQuotaException(p);
}
//---- check for integral violations of capacity --------
// Gather a view of what to check (curr allocation of user, minus old
// version of this reservation, plus new version)
RLESparseResourceAllocation consumptionForUserOverTime = plan.getConsumptionForUserOverTime(reservation.getUser(), reservation.getStartTime() - validWindow, reservation.getEndTime() + validWindow);
ReservationAllocation old = plan.getReservationById(reservation.getReservationId());
if (old != null) {
consumptionForUserOverTime = RLESparseResourceAllocation.merge(plan.getResourceCalculator(), plan.getTotalCapacity(), consumptionForUserOverTime, old.getResourcesOverTime(), RLEOperator.add, reservation.getStartTime() - validWindow, reservation.getEndTime() + validWindow);
}
RLESparseResourceAllocation resRLE = reservation.getResourcesOverTime();
RLESparseResourceAllocation toCheck = RLESparseResourceAllocation.merge(plan.getResourceCalculator(), plan.getTotalCapacity(), consumptionForUserOverTime, resRLE, RLEOperator.add, Long.MIN_VALUE, Long.MAX_VALUE);
NavigableMap<Long, Resource> integralUp = new TreeMap<>();
NavigableMap<Long, Resource> integralDown = new TreeMap<>();
long prevTime = toCheck.getEarliestStartTime();
IntegralResource prevResource = new IntegralResource(0L, 0L);
IntegralResource runningTot = new IntegralResource(0L, 0L);
// add intermediate check points, one per validWindow, between consecutive
// transitions of the RLE curve
Map<Long, Resource> temp = new TreeMap<>();
for (Map.Entry<Long, Resource> pointToCheck : toCheck.getCumulative().entrySet()) {
Long timeToCheck = pointToCheck.getKey();
Resource resourceToCheck = pointToCheck.getValue();
Long nextPoint = toCheck.getCumulative().higherKey(timeToCheck);
if (nextPoint == null || toCheck.getCumulative().get(nextPoint) == null) {
continue;
}
for (int i = 1; i <= (nextPoint - timeToCheck) / validWindow; i++) {
temp.put(timeToCheck + (i * validWindow), resourceToCheck);
}
}
temp.putAll(toCheck.getCumulative());
// compute point-wise integral for the up-fronts and down-fronts
for (Map.Entry<Long, Resource> currPoint : temp.entrySet()) {
Long currTime = currPoint.getKey();
Resource currResource = currPoint.getValue();
//add to running total current contribution
prevResource.multiplyBy(currTime - prevTime);
runningTot.add(prevResource);
integralUp.put(currTime, normalizeToResource(runningTot, validWindow));
integralDown.put(currTime + validWindow, normalizeToResource(runningTot, validWindow));
if (currResource != null) {
prevResource.memory = currResource.getMemorySize();
prevResource.vcores = currResource.getVirtualCores();
} else {
prevResource.memory = 0L;
prevResource.vcores = 0L;
}
prevTime = currTime;
}
// compute final integral as delta of up minus down transitions
RLESparseResourceAllocation intUp = new RLESparseResourceAllocation(integralUp, plan.getResourceCalculator());
RLESparseResourceAllocation intDown = new RLESparseResourceAllocation(integralDown, plan.getResourceCalculator());
RLESparseResourceAllocation integral = RLESparseResourceAllocation.merge(plan.getResourceCalculator(), plan.getTotalCapacity(), intUp, intDown, RLEOperator.subtract, Long.MIN_VALUE, Long.MAX_VALUE);
// define over-time integral limit
// note: this is aligned with the normalization done above
NavigableMap<Long, Resource> tlimit = new TreeMap<>();
Resource maxAvgRes = Resources.multiply(plan.getTotalCapacity(), maxAvg);
tlimit.put(toCheck.getEarliestStartTime() - validWindow, maxAvgRes);
RLESparseResourceAllocation targetLimit = new RLESparseResourceAllocation(tlimit, plan.getResourceCalculator());
// compare the limit with the integral via merge(); subtractTestNonNegative
// throws if the integral ever exceeds the limit
try {
RLESparseResourceAllocation.merge(plan.getResourceCalculator(), plan.getTotalCapacity(), targetLimit, integral, RLEOperator.subtractTestNonNegative, reservation.getStartTime() - validWindow, reservation.getEndTime() + validWindow);
} catch (PlanningException p) {
throw new PlanningQuotaException("Integral (avg over time) quota capacity " + maxAvg + " over a window of " + validWindow / 1000 + " seconds would be exceeded by accepting reservation: " + reservation.getReservationId(), p);
}
}
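The up/down bookkeeping above is a standard trick for turning a sliding-window check into two cumulative curves: every running total is recorded once when a contribution enters the window (at currTime, in integralUp) and once when it leaves (at currTime + validWindow, in integralDown), and their difference is the integral over the trailing window. A hedged scalar sketch, with plain longs standing in for Resource and the merge machinery replaced by floorEntry lookups at the transition points (as the Javadoc notes, sampling only at transitions can miss the exact time of a violation, but not its existence):

import java.util.Map;
import java.util.NavigableMap;
import java.util.NavigableSet;
import java.util.TreeMap;
import java.util.TreeSet;

public class WindowedIntegralSketch {
    public static void main(String[] args) {
        final long window = 10L;

        // piecewise-constant allocation: 5 units from t=0 until t=4, then 0
        NavigableMap<Long, Long> alloc = new TreeMap<>();
        alloc.put(0L, 5L);
        alloc.put(4L, 0L);

        // record the running integral when it enters the window (up) and
        // again, shifted by the window length, when it leaves it (down)
        NavigableMap<Long, Long> up = new TreeMap<>();
        NavigableMap<Long, Long> down = new TreeMap<>();
        long prevTime = alloc.firstKey();
        long prevVal = 0L;
        long runningTot = 0L;
        for (Map.Entry<Long, Long> point : alloc.entrySet()) {
            runningTot += prevVal * (point.getKey() - prevTime);
            up.put(point.getKey(), runningTot);
            down.put(point.getKey() + window, runningTot);
            prevVal = point.getValue();
            prevTime = point.getKey();
        }

        // the trailing-window integral at t is up(t) - down(t), sampling
        // each step curve with floorEntry at every transition point
        NavigableSet<Long> checkPoints = new TreeSet<>(up.keySet());
        checkPoints.addAll(down.keySet());
        for (long t : checkPoints) {
            long in = up.floorEntry(t) == null ? 0L : up.floorEntry(t).getValue();
            long out = down.floorEntry(t) == null ? 0L : down.floorEntry(t).getValue();
            // prints 0, 20, 20, 0 for t = 0, 4, 10, 14
            System.out.println("t=" + t + " -> " + (in - out));
        }
    }
}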
Use of java.util.NavigableMap in project hbase by apache.
The class HelloHBase, method getAndPrintRowContents.
/**
* Invokes Table#get and prints out the contents of the retrieved row.
*
* @param table Standard Table object
* @throws IOException If IO problem encountered
*/
static void getAndPrintRowContents(final Table table) throws IOException {
Result row = table.get(new Get(MY_ROW_ID));
System.out.println("Row [" + Bytes.toString(row.getRow()) + "] was retrieved from Table [" + table.getName().getNameAsString() + "] in HBase, with the following content:");
for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry : row.getNoVersionMap().entrySet()) {
String columnFamilyName = Bytes.toString(colFamilyEntry.getKey());
System.out.println(" Columns in Column Family [" + columnFamilyName + "]:");
for (Entry<byte[], byte[]> columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) {
System.out.println(" Value of Column [" + columnFamilyName + ":" + Bytes.toString(columnNameAndValueMap.getKey()) + "] == " + Bytes.toString(columnNameAndValueMap.getValue()));
}
}
}
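Note that both levels of the map returned by getNoVersionMap() are keyed by byte[], which has no natural ordering; HBase builds them with the unsigned-lexicographic Bytes.BYTES_COMPARATOR. A minimal sketch of constructing and reading the same family -> (qualifier -> value) shape with plain TreeMaps; the family, column, and value names are invented:

import java.util.NavigableMap;
import java.util.TreeMap;
import org.apache.hadoop.hbase.util.Bytes;

public class RowMapShapeSketch {
    public static void main(String[] args) {
        // family -> (qualifier -> value); byte[] has no natural ordering,
        // so both levels need an explicit comparator
        NavigableMap<byte[], NavigableMap<byte[], byte[]>> row =
                new TreeMap<>(Bytes.BYTES_COMPARATOR);
        NavigableMap<byte[], byte[]> family = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        family.put(Bytes.toBytes("myColumn"), Bytes.toBytes("myValue"));
        row.put(Bytes.toBytes("myCF"), family);

        byte[] value = row.get(Bytes.toBytes("myCF")).get(Bytes.toBytes("myColumn"));
        System.out.println(Bytes.toString(value));  // myValue
    }
}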
Use of java.util.NavigableMap in project hbase by apache.
The class HBaseTestCase, method assertResultEquals.
protected void assertResultEquals(final HRegion region, final byte[] row, final byte[] family, final byte[] qualifier, final long timestamp, final byte[] value) throws IOException {
Get get = new Get(row);
get.setTimeStamp(timestamp);
Result res = region.get(get);
NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = res.getMap();
byte[] res_value = map.get(family).get(qualifier).get(timestamp);
if (value == null) {
assertEquals(Bytes.toString(family) + " " + Bytes.toString(qualifier) + " at timestamp " + timestamp, null, res_value);
} else {
if (res_value == null) {
fail(Bytes.toString(family) + " " + Bytes.toString(qualifier) + " at timestamp " + timestamp + " was expected to be \"" + Bytes.toStringBinary(value) + "\" but was null");
}
if (res_value != null) {
assertEquals(Bytes.toString(family) + " " + Bytes.toString(qualifier) + " at timestamp " + timestamp, Bytes.toString(value), Bytes.toString(res_value));
}
}
}
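Result#getMap() used here adds a third level, timestamp -> value, and the innermost map is typically reverse-ordered so the newest version comes first. A plain-Java sketch that mimics that layout with Collections.reverseOrder(); the timestamps and values are invented:

import java.util.Collections;
import java.util.NavigableMap;
import java.util.TreeMap;

public class VersionMapSketch {
    public static void main(String[] args) {
        // timestamp -> value for one column; the reverse ordering mimics
        // the newest-first layout of the innermost map from Result#getMap()
        NavigableMap<Long, byte[]> versions =
                new TreeMap<>(Collections.reverseOrder());
        versions.put(1000L, "v1".getBytes());
        versions.put(2000L, "v2".getBytes());

        System.out.println(new String(versions.firstEntry().getValue()));  // v2 (newest)
        System.out.println(new String(versions.get(1000L)));               // v1, exact-timestamp lookup
    }
}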