
Example 1 with Iterators

use of com.google.common.collect.Iterators in project alluxio by Alluxio.

the class DefaultBlockIterator, method getIteratorInternal.

/**
 * Internal utility to get sorted block iterator for a given location and order.
 */
private Iterator<Pair<Long, BlockSortedField>> getIteratorInternal(BlockStoreLocation location, BlockOrder order) {
    // Gather each directory location that falls under the requested location.
    List<BlockStoreLocation> locations = mPerDirOrderedSets.keySet().stream().filter((dirLocation) -> dirLocation.belongsTo(location)).collect(Collectors.toList());
    // For offline order providers, update total order for each dirty location.
    if (!mBlockAnnotator.isOnlineSorter()) {
        if (mUnorderedLocations.stream().anyMatch((dirtyLocation) -> dirtyLocation.belongsTo(location))) {
            LOG.debug("Updating total order for directories that belong to {}", location);
            updateTotalOrder(locations);
        }
    }
    // Gather an iterator for each directory, based on the given order.
    List<Iterator<Pair<Long, BlockSortedField>>> iteratorList = new ArrayList<>(mPerDirOrderedSets.size());
    for (BlockStoreLocation dirLocation : locations) {
        switch(order) {
            case NATURAL:
                iteratorList.add(mPerDirOrderedSets.get(dirLocation).getAscendingIterator());
                break;
            case REVERSE:
                iteratorList.add(mPerDirOrderedSets.get(dirLocation).getDescendingIterator());
                break;
            default:
                throw new IllegalArgumentException(String.format("Unsupported sort order: %s", order.name()));
        }
    }
    // Return a merge-sorted iterator for gathered iterators.
    return Iterators.mergeSorted(iteratorList, Comparator.comparing(Pair::getSecond, order.comparator()));
}
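For context, Guava's Iterators.mergeSorted lazily interleaves several already-sorted iterators into a single sorted view, which is what the return statement above relies on. Below is a minimal standalone sketch of the same call on plain integers (the class name and data are invented, not Alluxio code):

import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;

public class MergeSortedDemo {
    public static void main(String[] args) {
        // Each input iterator must already be sorted by the comparator used below.
        List<Iterator<Integer>> sortedInputs = Arrays.asList(
                Arrays.asList(1, 4, 9).iterator(),
                Arrays.asList(2, 3, 10).iterator(),
                Arrays.asList(5, 6, 7).iterator());

        // mergeSorted lazily interleaves the inputs into one sorted iterator;
        // no input is fully consumed up front.
        Iterator<Integer> merged =
                Iterators.mergeSorted(sortedInputs, Comparator.naturalOrder());

        merged.forEachRemaining(System.out::println); // prints 1 2 3 4 5 6 7 9 10
    }
}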
Also used : Arrays(java.util.Arrays) BlockStoreLocation(alluxio.worker.block.BlockStoreLocation) Logger(org.slf4j.Logger) BlockMetadataManager(alluxio.worker.block.BlockMetadataManager) Iterator(java.util.Iterator) ConcurrentHashSet(alluxio.collections.ConcurrentHashSet) LoggerFactory(org.slf4j.LoggerFactory) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) Pair(alluxio.collections.Pair) Function(java.util.function.Function) Collectors(java.util.stream.Collectors) Iterators(com.google.common.collect.Iterators) ArrayList(java.util.ArrayList) StorageTier(alluxio.worker.block.meta.StorageTier) List(java.util.List) StorageDir(alluxio.worker.block.meta.StorageDir) Map(java.util.Map) AbstractBlockStoreEventListener(alluxio.worker.block.AbstractBlockStoreEventListener) BlockStoreEventListener(alluxio.worker.block.BlockStoreEventListener) Comparator(java.util.Comparator)

Example 2 with Iterators

use of com.google.common.collect.Iterators in project druid by druid-io.

the class VersionedIntervalTimeline, method isOvershadowed.

public boolean isOvershadowed(Interval interval, VersionType version, ObjectType object) {
    lock.readLock().lock();
    try {
        TimelineEntry entry = completePartitionsTimeline.get(interval);
        if (entry != null) {
            final int majorVersionCompare = versionComparator.compare(version, entry.getVersion());
            if (majorVersionCompare == 0) {
                for (PartitionChunk<ObjectType> chunk : entry.partitionHolder) {
                    if (chunk.getObject().overshadows(object)) {
                        return true;
                    }
                }
                return false;
            } else {
                return majorVersionCompare < 0;
            }
        }
        Interval lower = completePartitionsTimeline.floorKey(new Interval(interval.getStart(), DateTimes.MAX));
        if (lower == null || !lower.overlaps(interval)) {
            return false;
        }
        Interval prev = null;
        Interval curr = lower;
        do {
            // no further keys, or a discontinuity
            if (curr == null || (prev != null && curr.getStartMillis() > prev.getEndMillis())) {
                return false;
            }
            final TimelineEntry timelineEntry = completePartitionsTimeline.get(curr);
            final int versionCompare = versionComparator.compare(version, timelineEntry.getVersion());
            // a strictly higher version cannot be overshadowed; continue only for a lower or same version
            if (versionCompare > 0) {
                return false;
            } else if (versionCompare == 0) {
                // Intentionally use the Iterators API instead of the stream API for performance.
                // noinspection ConstantConditions
                final boolean nonOvershadowedObject = Iterators.all(timelineEntry.partitionHolder.iterator(), chunk -> !chunk.getObject().overshadows(object));
                if (nonOvershadowedObject) {
                    return false;
                }
            }
            prev = curr;
            curr = completePartitionsTimeline.higherKey(curr);
        } while (interval.getEndMillis() > prev.getEndMillis());
        return true;
    } finally {
        lock.readLock().unlock();
    }
}
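For context, Iterators.all returns true only if every element satisfies the predicate, short-circuiting on the first failure and avoiding the allocation overhead of a Stream pipeline, which is why the comment in the code above prefers it. A minimal standalone sketch with invented data (not Druid code):

import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.List;

public class IteratorsAllDemo {
    public static void main(String[] args) {
        List<Integer> chunks = Arrays.asList(2, 4, 6);

        // Short-circuits on the first element that fails the predicate.
        boolean allEven = Iterators.all(chunks.iterator(), n -> n % 2 == 0);

        System.out.println(allEven); // prints true
    }
}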
Also used : Comparators(org.apache.druid.java.util.common.guava.Comparators) CollectionUtils(org.apache.druid.utils.CollectionUtils) HashMap(java.util.HashMap) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock) Iterators(com.google.common.collect.Iterators) ArrayList(java.util.ArrayList) PartitionChunk(org.apache.druid.timeline.partition.PartitionChunk) Interval(org.joda.time.Interval) FluentIterable(com.google.common.collect.FluentIterable) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Map(java.util.Map) UOE(org.apache.druid.java.util.common.UOE) StreamSupport(java.util.stream.StreamSupport) Nullable(javax.annotation.Nullable) DateTimes(org.apache.druid.java.util.common.DateTimes) IdentityHashMap(java.util.IdentityHashMap) Iterator(java.util.Iterator) GuardedBy(com.google.errorprone.annotations.concurrent.GuardedBy) Collection(java.util.Collection) Set(java.util.Set) NavigableMap(java.util.NavigableMap) Collectors(java.util.stream.Collectors) Objects(java.util.Objects) PartitionHolder(org.apache.druid.timeline.partition.PartitionHolder) List(java.util.List) TreeMap(java.util.TreeMap) Entry(java.util.Map.Entry) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Comparator(java.util.Comparator)

Example 3 with Iterators

use of com.google.common.collect.Iterators in project CorfuDB by CorfuDB.

the class CheckpointWriter, method appendObjectState.

/** Append zero or more CONTINUATION records to this
     *  object's stream.  Each will contain a fraction of
     *  the state of the object that we're checkpointing
     *  (up to batchSize items at a time).
     *
     *  Corfu client transaction management, if desired, is the
     *  caller's responsibility.
     *
     *  The Iterators class appears to preserve the laziness
     *  of Stream processing; we don't wish to use more
     *  memory than strictly necessary to generate the
     *  checkpoint.  NOTE: It would be even more useful if
     *  the map had a lazy iterator: the eagerness of
     *  map.keySet().stream() is not ideal, but at least
     *  it should be much smaller than the entire map.
     *
     *  NOTE: The postAppendFunc lambda is executed in the
     *  current thread context, i.e., inside of a Corfu
     *  transaction, and that transaction will be *aborted*
     *  at the end of this function.  Any Corfu data
     *  modifying ops will be undone by the TXAbort().
     *
     * @return List of global log addresses of the
     * CONTINUATION records written.
     */
public List<Long> appendObjectState() {
    ImmutableMap<CheckpointEntry.CheckpointDictKey, String> mdKV = ImmutableMap.copyOf(this.mdKV);
    List<Long> continuationAddresses = new ArrayList<>();
    // Lazily map each key to a "put" SMREntry, then emit batches of up to batchSize.
    Iterators.partition(map.keySet().stream().map(k -> new SMREntry("put", new Object[] { keyMutator.apply(k), valueMutator.apply(map.get(k)) }, serializer)).iterator(), batchSize).forEachRemaining(entries -> {
        MultiSMREntry smrEntries = new MultiSMREntry();
        for (SMREntry entry : entries) {
            smrEntries.addTo(entry);
        }
        CheckpointEntry cp = new CheckpointEntry(CheckpointEntry.CheckpointEntryType.CONTINUATION, author, checkpointID, mdKV, smrEntries);
        long pos = sv.append(Collections.singleton(streamID), cp, null);
        postAppendFunc.accept(cp, pos);
        continuationAddresses.add(pos);
        numEntries++;
        numBytes += cp.getSmrEntriesBytes();
    });
    return continuationAddresses;
}
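For context, Iterators.partition wraps a (possibly lazy) iterator and hands out successive lists of at most batchSize elements, so only one batch is materialized at a time; this is the memory property the javadoc above depends on. A minimal standalone sketch with invented data (not CorfuDB code):

import com.google.common.collect.Iterators;

import java.util.Iterator;
import java.util.List;
import java.util.stream.IntStream;

public class PartitionDemo {
    public static void main(String[] args) {
        // A lazy source of items, standing in for map.keySet().stream().
        Iterator<Integer> items = IntStream.range(0, 10).boxed().iterator();

        // Pulls at most batchSize elements per batch; prints
        // [0, 1, 2, 3] then [4, 5, 6, 7] then [8, 9].
        int batchSize = 4;
        Iterators.partition(items, batchSize)
                .forEachRemaining((List<Integer> batch) -> System.out.println(batch));
    }
}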
Also used : Setter(lombok.Setter) java.util(java.util) TransactionalContext(org.corfudb.runtime.object.transactions.TransactionalContext) Getter(lombok.Getter) ImmutableMap(com.google.common.collect.ImmutableMap) LocalDateTime(java.time.LocalDateTime) AbstractTransactionalContext(org.corfudb.runtime.object.transactions.AbstractTransactionalContext) SMRMap(org.corfudb.runtime.collections.SMRMap) StreamsView(org.corfudb.runtime.view.StreamsView) Function(java.util.function.Function) Iterators(com.google.common.collect.Iterators) CheckpointEntry(org.corfudb.protocols.logprotocol.CheckpointEntry) MultiSMREntry(org.corfudb.protocols.logprotocol.MultiSMREntry) Consumer(java.util.function.Consumer) SMREntry(org.corfudb.protocols.logprotocol.SMREntry) TokenResponse(org.corfudb.protocols.wireprotocol.TokenResponse) BiConsumer(java.util.function.BiConsumer) TransactionType(org.corfudb.runtime.object.transactions.TransactionType) ISerializer(org.corfudb.util.serializer.ISerializer) Serializers(org.corfudb.util.serializer.Serializers)

Example 4 with Iterators

use of com.google.common.collect.Iterators in project accumulo by apache.

the class ReplicationIT, method verifyReplicationTableConfig.

@Test
public void verifyReplicationTableConfig() throws AccumuloException, TableNotFoundException, AccumuloSecurityException {
    TableOperations tops = getConnector().tableOperations();
    Map<String, EnumSet<IteratorScope>> iterators = tops.listIterators(ReplicationTable.NAME);
    // verify the combiner is the only iterator configured (no versioning iterator)
    Assert.assertEquals(1, iterators.size());
    // look for combiner
    Assert.assertTrue(iterators.containsKey(ReplicationTable.COMBINER_NAME));
    Assert.assertTrue(iterators.get(ReplicationTable.COMBINER_NAME).containsAll(EnumSet.allOf(IteratorScope.class)));
    for (IteratorScope scope : EnumSet.allOf(IteratorScope.class)) {
        IteratorSetting is = tops.getIteratorSetting(ReplicationTable.NAME, ReplicationTable.COMBINER_NAME, scope);
        Assert.assertEquals(30, is.getPriority());
        Assert.assertEquals(StatusCombiner.class.getName(), is.getIteratorClass());
        Assert.assertEquals(1, is.getOptions().size());
        Assert.assertTrue(is.getOptions().containsKey("columns"));
        String cols = is.getOptions().get("columns");
        Column statusSectionCol = new Column(StatusSection.NAME);
        Column workSectionCol = new Column(WorkSection.NAME);
        Assert.assertEquals(ColumnSet.encodeColumns(statusSectionCol.getColumnFamily(), statusSectionCol.getColumnQualifier()) + "," + ColumnSet.encodeColumns(workSectionCol.getColumnFamily(), workSectionCol.getColumnQualifier()), cols);
    }
    boolean foundLocalityGroups = false;
    boolean foundLocalityGroupDef1 = false;
    boolean foundLocalityGroupDef2 = false;
    boolean foundFormatter = false;
    Joiner j = Joiner.on(",");
    for (Entry<String, String> p : tops.getProperties(ReplicationTable.NAME)) {
        String key = p.getKey();
        String val = p.getValue();
        // STATUS_LG_NAME, STATUS_LG_COLFAMS, WORK_LG_NAME, WORK_LG_COLFAMS
        if (key.equals(Property.TABLE_FORMATTER_CLASS.getKey()) && val.equals(StatusFormatter.class.getName())) {
            // look for formatter
            foundFormatter = true;
        } else if (key.equals(Property.TABLE_LOCALITY_GROUPS.getKey()) && val.equals(j.join(ReplicationTable.LOCALITY_GROUPS.keySet()))) {
            // look for locality groups enabled
            foundLocalityGroups = true;
        } else if (key.startsWith(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey())) {
            // look for locality group column family definitions
            if (key.equals(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + ReplicationTable.STATUS_LG_NAME) && val.equals(j.join(Iterables.transform(ReplicationTable.STATUS_LG_COLFAMS, text -> text.toString())))) {
                foundLocalityGroupDef1 = true;
            } else if (key.equals(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + ReplicationTable.WORK_LG_NAME) && val.equals(j.join(Iterables.transform(ReplicationTable.WORK_LG_COLFAMS, text -> text.toString())))) {
                foundLocalityGroupDef2 = true;
            }
        }
    }
    Assert.assertTrue(foundLocalityGroups);
    Assert.assertTrue(foundLocalityGroupDef1);
    Assert.assertTrue(foundLocalityGroupDef2);
    Assert.assertTrue(foundFormatter);
}
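For context, the locality-group assertions above build the expected property value by joining transformed column-family names. A minimal standalone sketch of that Joiner/Iterables.transform pattern (StringBuilder stands in for Hadoop's Text here; this is not Accumulo code):

import com.google.common.base.Joiner;
import com.google.common.collect.Iterables;

import java.util.Arrays;
import java.util.List;

public class JoinColfamsDemo {
    public static void main(String[] args) {
        // Non-String values, like the Text column families in the test.
        List<StringBuilder> colfams =
                Arrays.asList(new StringBuilder("repl"), new StringBuilder("work"));

        // Lazily convert each element to a String, then join with commas.
        String joined = Joiner.on(",").join(
                Iterables.transform(colfams, text -> text.toString()));

        System.out.println(joined); // prints repl,work
    }
}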
Also used : StatusCombiner(org.apache.accumulo.server.replication.StatusCombiner) Arrays(java.util.Arrays) TableOfflineException(org.apache.accumulo.core.client.TableOfflineException) ZooCache(org.apache.accumulo.fate.zookeeper.ZooCache) FileSystem(org.apache.hadoop.fs.FileSystem) URISyntaxException(java.net.URISyntaxException) LoggerFactory(org.slf4j.LoggerFactory) ReplicaSystemFactory(org.apache.accumulo.server.replication.ReplicaSystemFactory) Status(org.apache.accumulo.server.replication.proto.Replication.Status) Text(org.apache.hadoop.io.Text) Instance(org.apache.accumulo.core.client.Instance) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Mutation(org.apache.accumulo.core.data.Mutation) ColumnSet(org.apache.accumulo.core.iterators.conf.ColumnSet) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) HashMultimap(com.google.common.collect.HashMultimap) MiniAccumuloConfigImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) Path(org.apache.hadoop.fs.Path) Value(org.apache.accumulo.core.data.Value) URI(java.net.URI) ReplicationSection(org.apache.accumulo.core.metadata.schema.MetadataSchema.ReplicationSection) TextFormat(com.google.protobuf.TextFormat) EnumSet(java.util.EnumSet) Property(org.apache.accumulo.core.conf.Property) ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget) ServerType(org.apache.accumulo.minicluster.ServerType) StatusUtil(org.apache.accumulo.server.replication.StatusUtil) Table(org.apache.accumulo.core.client.impl.Table) ZooUtil(org.apache.accumulo.core.zookeeper.ZooUtil) ZooLock(org.apache.accumulo.fate.zookeeper.ZooLock) ZooReaderWriter(org.apache.accumulo.server.zookeeper.ZooReaderWriter) ZooCacheFactory(org.apache.accumulo.fate.zookeeper.ZooCacheFactory) Set(java.util.Set) UUID(java.util.UUID) IteratorScope(org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope) TabletsSection(org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection) Sets(com.google.common.collect.Sets) WalStateManager(org.apache.accumulo.server.log.WalStateManager) List(java.util.List) MetadataSchema(org.apache.accumulo.core.metadata.schema.MetadataSchema) Pair(org.apache.accumulo.core.util.Pair) Entry(java.util.Map.Entry) TServerInstance(org.apache.accumulo.server.master.state.TServerInstance) UtilWaitThread.sleepUninterruptibly(org.apache.accumulo.fate.util.UtilWaitThread.sleepUninterruptibly) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Scanner(org.apache.accumulo.core.client.Scanner) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance) Joiner(com.google.common.base.Joiner) StatusFormatter(org.apache.accumulo.server.replication.StatusFormatter) Iterables(com.google.common.collect.Iterables) ConfigurableMacBase(org.apache.accumulo.test.functional.ConfigurableMacBase) Column(org.apache.accumulo.core.client.IteratorSetting.Column) ProtobufUtil(org.apache.accumulo.core.protobuf.ProtobufUtil) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) WalState(org.apache.accumulo.server.log.WalStateManager.WalState) ReplicationTableUtil(org.apache.accumulo.server.util.ReplicationTableUtil) Multimap(com.google.common.collect.Multimap) ReplicationTableOfflineException(org.apache.accumulo.core.replication.ReplicationTableOfflineException) Connector(org.apache.accumulo.core.client.Connector) StatusSection(org.apache.accumulo.core.replication.ReplicationSchema.StatusSection) Iterators(com.google.common.collect.Iterators) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) TablePermission(org.apache.accumulo.core.security.TablePermission) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) Key(org.apache.accumulo.core.data.Key) NoSuchElementException(java.util.NoSuchElementException) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) RawLocalFileSystem(org.apache.hadoop.fs.RawLocalFileSystem) UTF_8(java.nio.charset.StandardCharsets.UTF_8) LogColumnFamily(org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily) Test(org.junit.Test) Constants(org.apache.accumulo.core.Constants) WorkSection(org.apache.accumulo.core.replication.ReplicationSchema.WorkSection) Authorizations(org.apache.accumulo.core.security.Authorizations) AccumuloException(org.apache.accumulo.core.client.AccumuloException) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) SimpleGarbageCollector(org.apache.accumulo.gc.SimpleGarbageCollector) TableOperations(org.apache.accumulo.core.client.admin.TableOperations) Range(org.apache.accumulo.core.data.Range) TimeUnit(java.util.concurrent.TimeUnit) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Assert(org.junit.Assert)

Aggregations

Iterators (com.google.common.collect.Iterators) 4
ArrayList (java.util.ArrayList) 3
Iterator (java.util.Iterator) 3
List (java.util.List) 3
Map (java.util.Map) 3
Set (java.util.Set) 3
Arrays (java.util.Arrays) 2
Comparator (java.util.Comparator) 2
Entry (java.util.Map.Entry) 2
Function (java.util.function.Function) 2
Collectors (java.util.stream.Collectors) 2
ConcurrentHashSet (alluxio.collections.ConcurrentHashSet) 1
Pair (alluxio.collections.Pair) 1
AbstractBlockStoreEventListener (alluxio.worker.block.AbstractBlockStoreEventListener) 1
BlockMetadataManager (alluxio.worker.block.BlockMetadataManager) 1
BlockStoreEventListener (alluxio.worker.block.BlockStoreEventListener) 1
BlockStoreLocation (alluxio.worker.block.BlockStoreLocation) 1
StorageDir (alluxio.worker.block.meta.StorageDir) 1
StorageTier (alluxio.worker.block.meta.StorageTier) 1
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 1