Use of org.apache.flink.shaded.guava30.com.google.common.collect.Iterators in project alluxio by Alluxio.
The class DefaultBlockIterator, method getIteratorInternal.
/**
 * Internal utility to get a sorted block iterator for a given location and order.
 */
private Iterator<Pair<Long, BlockSortedField>> getIteratorInternal(BlockStoreLocation location, BlockOrder order) {
    // Gather each directory location that falls under the requested location.
    List<BlockStoreLocation> locations = mPerDirOrderedSets.keySet().stream()
        .filter((dirLocation) -> dirLocation.belongsTo(location))
        .collect(Collectors.toList());
    // For offline order providers, update the total order for each dirty location.
    if (!mBlockAnnotator.isOnlineSorter()) {
        if (mUnorderedLocations.stream().anyMatch((dirtyLocation) -> dirtyLocation.belongsTo(location))) {
            LOG.debug("Updating total order for directories that belong to {}", location);
            updateTotalOrder(locations);
        }
    }
    // Gather an iterator for each directory based on the given order.
    List<Iterator<Pair<Long, BlockSortedField>>> iteratorList = new ArrayList<>(mPerDirOrderedSets.size());
    for (BlockStoreLocation dirLocation : locations) {
        switch (order) {
            case NATURAL:
                iteratorList.add(mPerDirOrderedSets.get(dirLocation).getAscendingIterator());
                break;
            case REVERSE:
                iteratorList.add(mPerDirOrderedSets.get(dirLocation).getDescendingIterator());
                break;
            default:
                throw new IllegalArgumentException(String.format("Unsupported sort order: %s", order.name()));
        }
    }
    // Return a single merge-sorted iterator over the per-directory iterators.
    return Iterators.mergeSorted(iteratorList, Comparator.comparing(Pair::getSecond, order.comparator()));
}
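For reference, Iterators.mergeSorted lazily interleaves a collection of already-sorted iterators using the supplied comparator; it does not copy or re-sort the inputs. A minimal, self-contained sketch of the same call against plain (unshaded) Guava, with toy integer inputs standing in for the per-directory block iterators above:

import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;

public class MergeSortedSketch {
    public static void main(String[] args) {
        // Two inputs that are each already sorted ascending.
        List<Integer> first = Arrays.asList(1, 4, 9);
        List<Integer> second = Arrays.asList(2, 3, 10);
        // mergeSorted pulls lazily from whichever iterator has the smallest head element.
        Iterator<Integer> merged = Iterators.mergeSorted(
            Arrays.asList(first.iterator(), second.iterator()),
            Comparator.naturalOrder());
        merged.forEachRemaining(System.out::println); // 1 2 3 4 9 10
    }
}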
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Iterators in project druid by druid-io.
The class VersionedIntervalTimeline, method isOvershadowed.
public boolean isOvershadowed(Interval interval, VersionType version, ObjectType object) {
    lock.readLock().lock();
    try {
        TimelineEntry entry = completePartitionsTimeline.get(interval);
        if (entry != null) {
            final int majorVersionCompare = versionComparator.compare(version, entry.getVersion());
            if (majorVersionCompare == 0) {
                for (PartitionChunk<ObjectType> chunk : entry.partitionHolder) {
                    if (chunk.getObject().overshadows(object)) {
                        return true;
                    }
                }
                return false;
            } else {
                return majorVersionCompare < 0;
            }
        }
        Interval lower = completePartitionsTimeline.floorKey(new Interval(interval.getStart(), DateTimes.MAX));
        if (lower == null || !lower.overlaps(interval)) {
            return false;
        }
        Interval prev = null;
        Interval curr = lower;
        do {
            // Stop if there are no further keys, or if we hit a discontinuity.
            if (curr == null || (prev != null && curr.getStartMillis() > prev.getEndMillis())) {
                return false;
            }
            final TimelineEntry timelineEntry = completePartitionsTimeline.get(curr);
            final int versionCompare = versionComparator.compare(version, timelineEntry.getVersion());
            // Only a lower or equal version can be overshadowed by this entry.
            if (versionCompare > 0) {
                return false;
            } else if (versionCompare == 0) {
                // Intentionally use the Iterators API instead of the stream API for performance.
                // noinspection ConstantConditions
                final boolean nonOvershadowedObject = Iterators.all(
                    timelineEntry.partitionHolder.iterator(),
                    chunk -> !chunk.getObject().overshadows(object));
                if (nonOvershadowedObject) {
                    return false;
                }
            }
            prev = curr;
            curr = completePartitionsTimeline.higherKey(curr);
        } while (interval.getEndMillis() > prev.getEndMillis());
        return true;
    } finally {
        lock.readLock().unlock();
    }
}
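Iterators.all evaluates the predicate element by element and short-circuits to false on the first failure, so the partition holder is never fully traversed once an overshadowing chunk is found. A minimal sketch against plain Guava, with integers standing in for Druid's PartitionChunk objects:

import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.List;

public class IteratorsAllSketch {
    public static void main(String[] args) {
        List<Integer> versions = Arrays.asList(10, 20, 30);
        // True only if every element satisfies the predicate; stops at the first failure.
        boolean allBelowLimit = Iterators.all(versions.iterator(), v -> v < 100);
        System.out.println(allBelowLimit); // true
    }
}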
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Iterators in project CorfuDB by CorfuDB.
The class CheckpointWriter, method appendObjectState.
/** Append zero or more CONTINUATION records to this
* object's stream. Each will contain a fraction of
* the state of the object that we're checkpointing
* (up to batchSize items at a time).
*
* Corfu client transaction management, if desired, is the
* caller's responsibility.
*
* The Iterators class appears to preserve the laziness
* of Stream processing; we don't wish to use more
* memory than strictly necessary to generate the
* checkpoint. NOTE: It would be even more useful if
* the map had a lazy iterator: the eagerness of
* map.keySet().stream() is not ideal, but at least
* it should be much smaller than the entire map.
*
* NOTE: The postAppendFunc lambda is executed in the
* current thread context, i.e., inside of a Corfu
* transaction, and that transaction will be *aborted*
* at the end of this function. Any Corfu data
* modifying ops will be undone by the TXAbort().
*
* @return Stream of global log addresses of the
* CONTINUATION records written.
*/
public List<Long> appendObjectState() {
    ImmutableMap<CheckpointEntry.CheckpointDictKey, String> mdKV = ImmutableMap.copyOf(this.mdKV);
    List<Long> continuationAddresses = new ArrayList<>();
    Iterators.partition(
        map.keySet().stream()
            .map(k -> new SMREntry("put",
                new Object[] { keyMutator.apply(k), valueMutator.apply(map.get(k)) }, serializer))
            .iterator(),
        batchSize)
        .forEachRemaining(entries -> {
            MultiSMREntry smrEntries = new MultiSMREntry();
            for (SMREntry entry : entries) {
                smrEntries.addTo(entry);
            }
            CheckpointEntry cp = new CheckpointEntry(CheckpointEntry.CheckpointEntryType.CONTINUATION,
                author, checkpointID, mdKV, smrEntries);
            long pos = sv.append(Collections.singleton(streamID), cp, null);
            postAppendFunc.accept(cp, pos);
            continuationAddresses.add(pos);
            numEntries++;
            numBytes += cp.getSmrEntriesBytes();
        });
    return continuationAddresses;
}
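Iterators.partition is what keeps this pipeline's memory footprint bounded, as the javadoc above notes: it wraps a source iterator and yields fixed-size List batches (the final batch may be smaller), pulling from the source only as each batch is requested. A small sketch against plain Guava:

import com.google.common.collect.Iterators;

import java.util.Iterator;
import java.util.List;
import java.util.stream.IntStream;

public class PartitionSketch {
    public static void main(String[] args) {
        // A lazy source; partition() materializes at most one batch at a time.
        Iterator<Integer> source = IntStream.range(0, 10).iterator();
        Iterator<List<Integer>> batches = Iterators.partition(source, 4);
        // Prints [0, 1, 2, 3], then [4, 5, 6, 7], then the short final batch [8, 9].
        batches.forEachRemaining(System.out::println);
    }
}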
Use of org.apache.flink.shaded.guava30.com.google.common.collect.Iterators in project accumulo by apache.
The class ReplicationIT, method verifyReplicationTableConfig.
@Test
public void verifyReplicationTableConfig() throws AccumuloException, TableNotFoundException, AccumuloSecurityException {
    TableOperations tops = getConnector().tableOperations();
    Map<String, EnumSet<IteratorScope>> iterators = tops.listIterators(ReplicationTable.NAME);
    // Verify that the combiner is the only configured iterator (no versioning iterator).
    Assert.assertEquals(1, iterators.size());
    // Look for the combiner on every scope.
    Assert.assertTrue(iterators.containsKey(ReplicationTable.COMBINER_NAME));
    Assert.assertTrue(iterators.get(ReplicationTable.COMBINER_NAME).containsAll(EnumSet.allOf(IteratorScope.class)));
    for (IteratorScope scope : EnumSet.allOf(IteratorScope.class)) {
        IteratorSetting is = tops.getIteratorSetting(ReplicationTable.NAME, ReplicationTable.COMBINER_NAME, scope);
        Assert.assertEquals(30, is.getPriority());
        Assert.assertEquals(StatusCombiner.class.getName(), is.getIteratorClass());
        Assert.assertEquals(1, is.getOptions().size());
        Assert.assertTrue(is.getOptions().containsKey("columns"));
        String cols = is.getOptions().get("columns");
        Column statusSectionCol = new Column(StatusSection.NAME);
        Column workSectionCol = new Column(WorkSection.NAME);
        Assert.assertEquals(
            ColumnSet.encodeColumns(statusSectionCol.getColumnFamily(), statusSectionCol.getColumnQualifier())
                + "," + ColumnSet.encodeColumns(workSectionCol.getColumnFamily(), workSectionCol.getColumnQualifier()),
            cols);
    }
    boolean foundLocalityGroups = false;
    boolean foundLocalityGroupDef1 = false;
    boolean foundLocalityGroupDef2 = false;
    boolean foundFormatter = false;
    Joiner j = Joiner.on(",");
    for (Entry<String, String> p : tops.getProperties(ReplicationTable.NAME)) {
        String key = p.getKey();
        String val = p.getValue();
        // STATUS_LG_NAME, STATUS_LG_COLFAMS, WORK_LG_NAME, WORK_LG_COLFAMS
        if (key.equals(Property.TABLE_FORMATTER_CLASS.getKey()) && val.equals(StatusFormatter.class.getName())) {
            // look for formatter
            foundFormatter = true;
        } else if (key.equals(Property.TABLE_LOCALITY_GROUPS.getKey()) && val.equals(j.join(ReplicationTable.LOCALITY_GROUPS.keySet()))) {
            // look for locality groups enabled
            foundLocalityGroups = true;
        } else if (key.startsWith(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey())) {
            // look for locality group column family definitions
            if (key.equals(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + ReplicationTable.STATUS_LG_NAME)
                && val.equals(j.join(Iterables.transform(ReplicationTable.STATUS_LG_COLFAMS, text -> text.toString())))) {
                foundLocalityGroupDef1 = true;
            } else if (key.equals(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + ReplicationTable.WORK_LG_NAME)
                && val.equals(j.join(Iterables.transform(ReplicationTable.WORK_LG_COLFAMS, text -> text.toString())))) {
                foundLocalityGroupDef2 = true;
            }
        }
    }
    Assert.assertTrue(foundLocalityGroups);
    Assert.assertTrue(foundLocalityGroupDef1);
    Assert.assertTrue(foundLocalityGroupDef2);
    Assert.assertTrue(foundFormatter);
}
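The locality-group assertions above lean on Iterables.transform, the Iterable counterpart of the Iterators API, to lazily stringify column families before joining them. A hedged sketch of that Joiner-plus-transform pattern with plain Guava, using byte[] values as illustrative stand-ins for the Hadoop Text column families:

import com.google.common.base.Joiner;
import com.google.common.collect.Iterables;

import java.util.Arrays;
import java.util.List;

public class JoinTransformSketch {
    public static void main(String[] args) {
        List<byte[]> colfams = Arrays.asList("repl".getBytes(), "work".getBytes());
        // transform() is lazy: each element is converted only as the Joiner consumes it.
        String joined = Joiner.on(",").join(Iterables.transform(colfams, String::new));
        System.out.println(joined); // repl,work
    }
}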