
Example 66 with Range

use of org.apache.commons.lang3.Range in project cassandra by apache.

the class SelectStatement method loggableTokens.

private String loggableTokens(QueryOptions options) {
    // Range queries and secondary-index queries are logged as a token range, with
    // '[' / ']' marking inclusive bounds and '(' / ')' marking exclusive bounds
    if (restrictions.isKeyRange() || restrictions.usesSecondaryIndexing()) {
        AbstractBounds<PartitionPosition> bounds = restrictions.getPartitionKeyBounds(options);
        return "token range: " + (bounds.inclusiveLeft() ? '[' : '(')
               + bounds.left.getToken().toString() + ", " + bounds.right.getToken().toString()
               + (bounds.inclusiveRight() ? ']' : ')');
    } else {
        // Otherwise log the token of each restricted partition key
        Collection<ByteBuffer> keys = restrictions.getPartitionKeys(options);
        if (keys.size() == 1) {
            return "token: " + table.partitioner.getToken(Iterables.getOnlyElement(keys)).toString();
        } else {
            StringBuilder sb = new StringBuilder("tokens: [");
            boolean isFirst = true;
            for (ByteBuffer key : keys) {
                if (!isFirst)
                    sb.append(", ");
                sb.append(table.partitioner.getToken(key).toString());
                isFirst = false;
            }
            return sb.append(']').toString();
        }
    }
}
Also used : ToStringBuilder(org.apache.commons.lang3.builder.ToStringBuilder) ByteBuffer(java.nio.ByteBuffer)
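
For reference, below is a minimal, hypothetical sketch (not part of the Cassandra source) of the org.apache.commons.lang3.Range API this page indexes. Note that Range.between is inclusive on both ends, whereas the AbstractBounds formatting above distinguishes inclusive from exclusive bounds.

import org.apache.commons.lang3.Range;

public class TokenRangeSketch {
    public static void main(String[] args) {
        // A closed interval of illustrative token values; both ends are inclusive
        Range<Long> tokenRange = Range.between(-1_000_000L, 1_000_000L);
        System.out.println("token range: [" + tokenRange.getMinimum() + ", " + tokenRange.getMaximum() + "]");
        System.out.println(tokenRange.contains(0L));          // true
        System.out.println(tokenRange.contains(2_000_000L));  // false
    }
}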

Example 67 with Range

use of org.apache.accumulo.core.data.Range in project Gaffer by gchq.

the class RFileReaderIterator method init.

private void init() throws IOException {
    final AccumuloTablet accumuloTablet = (AccumuloTablet) partition;
    LOGGER.info("Initialising RFileReaderIterator for files {}", StringUtils.join(accumuloTablet.getFiles(), ','));
    final AccumuloConfiguration accumuloConfiguration = SiteConfiguration.getInstance();
    // Required column families according to the configuration
    final Set<ByteSequence> requiredColumnFamilies = InputConfigurator.getFetchedColumns(AccumuloInputFormat.class, configuration).stream().map(Pair::getFirst).map(c -> new ArrayByteSequence(c.toString())).collect(Collectors.toSet());
    LOGGER.info("RFileReaderIterator will read column families of {}", StringUtils.join(requiredColumnFamilies, ','));
    // Open an RFile reader for each of the tablet's files
    final List<SortedKeyValueIterator<Key, Value>> iterators = new ArrayList<>();
    for (final String filename : accumuloTablet.getFiles()) {
        final Path path = new Path(filename);
        final FileSystem fs = path.getFileSystem(configuration);
        final RFile.Reader rFileReader = new RFile.Reader(new CachableBlockFile.Reader(fs, path, configuration, null, null, accumuloConfiguration));
        iterators.add(rFileReader);
    }
    mergedIterator = new MultiIterator(iterators, true);
    // Apply visibility filtering iterator
    if (null != auths) {
        final Authorizations authorizations = new Authorizations(auths.toArray(new String[auths.size()]));
        final SortedKeyValueIterator<Key, Value> visibilityFilter = VisibilityFilter.wrap(mergedIterator, authorizations, new byte[] {});
        final IteratorSetting visibilityIteratorSetting = new IteratorSetting(1, "auth", VisibilityFilter.class);
        visibilityFilter.init(mergedIterator, visibilityIteratorSetting.getOptions(), null);
        iteratorAfterIterators = visibilityFilter;
        LOGGER.info("Set authorizations to {}", authorizations);
    } else {
        iteratorAfterIterators = mergedIterator;
    }
    // Apply iterator stack
    final List<IteratorSetting> iteratorSettings = getIteratorSettings();
    iteratorSettings.sort(Comparator.comparingInt(IteratorSetting::getPriority));
    for (final IteratorSetting is : iteratorSettings) {
        iteratorAfterIterators = applyIterator(iteratorAfterIterators, is);
    }
    taskContext.addTaskCompletionListener(context -> close());
    final Range range = new Range(accumuloTablet.getStartRow(), true, accumuloTablet.getEndRow(), false);
    iteratorAfterIterators.seek(range, requiredColumnFamilies, true);
    LOGGER.info("Initialised iterator");
}
Also used : ByteSequence(org.apache.accumulo.core.data.ByteSequence) Partition(org.apache.spark.Partition) CachableBlockFile(org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile) FileSystem(org.apache.hadoop.fs.FileSystem) LoggerFactory(org.slf4j.LoggerFactory) ArrayByteSequence(org.apache.accumulo.core.data.ArrayByteSequence) SortedKeyValueIterator(org.apache.accumulo.core.iterators.SortedKeyValueIterator) StringUtils(org.apache.commons.lang3.StringUtils) ArrayList(java.util.ArrayList) Key(org.apache.accumulo.core.data.Key) Configuration(org.apache.hadoop.conf.Configuration) Map(java.util.Map) Path(org.apache.hadoop.fs.Path) Value(org.apache.accumulo.core.data.Value) InputConfigurator(org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator) IteratorUtil(org.apache.accumulo.core.iterators.IteratorUtil) SamplerConfiguration(org.apache.accumulo.core.client.sample.SamplerConfiguration) MultiIterator(org.apache.accumulo.core.iterators.system.MultiIterator) Logger(org.slf4j.Logger) AccumuloInputFormat(org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat) TaskContext(org.apache.spark.TaskContext) Set(java.util.Set) IOException(java.io.IOException) Authorizations(org.apache.accumulo.core.security.Authorizations) Collectors(java.util.stream.Collectors) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration) Range(org.apache.accumulo.core.data.Range) SiteConfiguration(org.apache.accumulo.core.conf.SiteConfiguration) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) VisibilityFilter(org.apache.accumulo.core.iterators.system.VisibilityFilter) AbstractMap(java.util.AbstractMap) List(java.util.List) RFile(org.apache.accumulo.core.file.rfile.RFile) Pair(org.apache.accumulo.core.util.Pair) Comparator(java.util.Comparator) IteratorEnvironment(org.apache.accumulo.core.iterators.IteratorEnvironment)
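
The seek above is driven by an Accumulo Range whose start row is inclusive and whose end row is exclusive. Below is a minimal, standalone sketch (with made-up row keys, not Gaffer code) of those bound semantics.

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.hadoop.io.Text;

public class TabletRangeSketch {
    public static void main(String[] args) {
        // Same shape as: new Range(accumuloTablet.getStartRow(), true, accumuloTablet.getEndRow(), false)
        Range range = new Range(new Text("row-a"), true, new Text("row-f"), false);
        System.out.println(range.contains(new Key(new Text("row-a")))); // true, start row is inclusive
        System.out.println(range.contains(new Key(new Text("row-f")))); // false, end row is exclusive
    }
}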

Example 68 with Range

use of org.apache.commons.lang3.Range in project neo4j by neo4j.

the class SimpleRandomizedIndexAccessorCompatibility method testRangeMatchInOrderOnRandomValues.

@Test
public void testRangeMatchInOrderOnRandomValues() throws Exception {
    Assume.assumeTrue("Assume support for granular composite queries", testSuite.supportsGranularCompositeQueries());
    // given
    ValueType[] types = randomSetOfSupportedAndSortableTypes();
    Set<Value> uniqueValues = new HashSet<>();
    TreeSet<ValueAndId> sortedValues = new TreeSet<>((v1, v2) -> Values.COMPARATOR.compare(v1.value, v2.value));
    MutableLong nextId = new MutableLong();
    // A couple of rounds of updates followed by lots of range verifications
    for (int i = 0; i < 5; i++) {
        List<ValueIndexEntryUpdate<?>> updates = new ArrayList<>();
        if (i == 0) {
            // The initial batch of data can simply be additions
            updates = generateUpdatesFromValues(generateValuesFromType(types, uniqueValues, 20_000), nextId);
            sortedValues.addAll(updates.stream().map(u -> new ValueAndId(u.values()[0], u.getEntityId())).collect(Collectors.toList()));
        } else {
            // Then do all sorts of updates
            for (int j = 0; j < 1_000; j++) {
                int type = random.intBetween(0, 2);
                if (type == 0) {
                    // add
                    Value value = generateUniqueRandomValue(types, uniqueValues);
                    long id = nextId.getAndIncrement();
                    sortedValues.add(new ValueAndId(value, id));
                    updates.add(add(id, descriptor.schema(), value));
                } else if (type == 1) {
                    // update
                    ValueAndId existing = random.among(sortedValues.toArray(new ValueAndId[0]));
                    sortedValues.remove(existing);
                    Value newValue = generateUniqueRandomValue(types, uniqueValues);
                    uniqueValues.remove(existing.value);
                    sortedValues.add(new ValueAndId(newValue, existing.id));
                    updates.add(change(existing.id, descriptor.schema(), existing.value, newValue));
                } else {
                    // remove
                    ValueAndId existing = random.among(sortedValues.toArray(new ValueAndId[0]));
                    sortedValues.remove(existing);
                    uniqueValues.remove(existing.value);
                    updates.add(remove(existing.id, descriptor.schema(), existing.value));
                }
            }
        }
        updateAndCommit(updates);
        verifyRandomRanges(types, sortedValues);
    }
}
Also used : ValueType(org.neo4j.values.storable.ValueType) ValueIndexEntryUpdate(org.neo4j.storageengine.api.ValueIndexEntryUpdate) ArrayList(java.util.ArrayList) MutableLong(org.apache.commons.lang3.mutable.MutableLong) TreeSet(java.util.TreeSet) Value(org.neo4j.values.storable.Value) HashSet(java.util.HashSet) Test(org.junit.Test)
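
The test's bookkeeping rests on two small pieces: a commons-lang3 MutableLong that hands out entity ids and a TreeSet ordered by value that mirrors what the index should contain. Below is a minimal, self-contained sketch of that pattern (plain longs in place of neo4j Values, and a hypothetical ValueAndId record).

import java.util.TreeSet;
import org.apache.commons.lang3.mutable.MutableLong;

public class SortedModelSketch {
    // Hypothetical stand-in for the test's ValueAndId pair
    record ValueAndId(long value, long id) {}

    public static void main(String[] args) {
        MutableLong nextId = new MutableLong();
        // Ordered by value, so range verification can walk entries in index order
        TreeSet<ValueAndId> sorted = new TreeSet<>((a, b) -> Long.compare(a.value(), b.value()));
        for (long v : new long[] {42, 7, 19}) {
            sorted.add(new ValueAndId(v, nextId.getAndIncrement()));
        }
        sorted.forEach(System.out::println); // printed in ascending value order
    }
}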

Example 69 with Range

use of org.apache.commons.lang3.Range in project neo4j by neo4j.

the class SimpleRandomizedIndexAccessorCompatibility method verifyRandomRanges.

private void verifyRandomRanges(ValueType[] types, TreeSet<ValueAndId> sortedValues) throws Exception {
    for (int i = 0; i < 100; i++) {
        // Construct a random range query of random value type
        ValueType type = random.among(types);
        Value from = random.randomValues().nextValueOfType(type);
        Value to = random.randomValues().nextValueOfType(type);
        if (Values.COMPARATOR.compare(from, to) > 0) {
            Value tmp = from;
            from = to;
            to = tmp;
        }
        boolean fromInclusive = random.nextBoolean();
        boolean toInclusive = random.nextBoolean();
        // Expected result based on query
        PropertyIndexQuery.RangePredicate<?> predicate = PropertyIndexQuery.range(0, from, fromInclusive, to, toInclusive);
        List<Long> expectedIds = expectedIds(sortedValues, from, to, fromInclusive, toInclusive);
        // Depending on order capabilities we verify ids or order and ids.
        IndexOrderCapability indexOrders = descriptor.getCapability().orderCapability(predicate.valueGroup().category());
        if (indexOrders.supportsAsc()) {
            List<Long> actualIds = assertInOrder(IndexOrder.ASCENDING, predicate);
            actualIds.sort(Long::compare);
            // then
            assertThat(actualIds).isEqualTo(expectedIds);
        }
        if (indexOrders.supportsDesc()) {
            List<Long> actualIds = assertInOrder(IndexOrder.DESCENDING, predicate);
            actualIds.sort(Long::compare);
            // then
            assertThat(actualIds).isEqualTo(expectedIds);
        }
    }
}
Also used : PropertyIndexQuery(org.neo4j.internal.kernel.api.PropertyIndexQuery) ValueType(org.neo4j.values.storable.ValueType) Value(org.neo4j.values.storable.Value) MutableLong(org.apache.commons.lang3.mutable.MutableLong) IndexOrderCapability(org.neo4j.internal.schema.IndexOrderCapability)
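
The expectedIds helper itself is not shown on this page. A hypothetical sketch of how the expected result of a range query could be derived from the sorted model, honoring the fromInclusive and toInclusive flags (plain longs in place of neo4j Values), follows.

import java.util.ArrayList;
import java.util.List;
import java.util.TreeSet;

public class ExpectedIdsSketch {
    record ValueAndId(long value, long id) {}

    static List<Long> expectedIds(TreeSet<ValueAndId> sorted, long from, long to,
                                  boolean fromInclusive, boolean toInclusive) {
        List<Long> ids = new ArrayList<>();
        for (ValueAndId v : sorted) {
            boolean aboveFrom = fromInclusive ? v.value() >= from : v.value() > from;
            boolean belowTo = toInclusive ? v.value() <= to : v.value() < to;
            if (aboveFrom && belowTo) {
                ids.add(v.id());
            }
        }
        ids.sort(Long::compare); // the test compares id-sorted lists
        return ids;
    }
}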

Example 70 with Range

use of org.apache.commons.lang3.Range in project neo4j by neo4j.

the class GBPTreeConsistencyCheckerTestBase method assertReportAnyStructuralInconsistency.

private static <KEY, VALUE> void assertReportAnyStructuralInconsistency(GBPTree<KEY, VALUE> index) throws IOException {
    MutableBoolean called = new MutableBoolean();
    index.consistencyCheck(new GBPTreeConsistencyCheckVisitor.Adaptor<>() {

        @Override
        public void rightmostNodeHasRightSibling(long rightSiblingPointer, long rightmostNode, Path file) {
            called.setTrue();
        }

        @Override
        public void siblingsDontPointToEachOther(long leftNode, long leftNodeGeneration, long leftRightSiblingPointerGeneration, long leftRightSiblingPointer, long rightLeftSiblingPointer, long rightLeftSiblingPointerGeneration, long rightNode, long rightNodeGeneration, Path file) {
            called.setTrue();
        }

        @Override
        public void keysLocatedInWrongNode(KeyRange<KEY> range, KEY key, int pos, int keyCount, long pageId, Path file) {
            called.setTrue();
        }

        @Override
        public void pageIdSeenMultipleTimes(long pageId, Path file) {
            called.setTrue();
        }

        @Override
        public void childNodeFoundAmongParentNodes(KeyRange<KEY> superRange, int level, long pageId, Path file) {
            called.setTrue();
        }
    }, NULL);
    assertCalled(called);
}
Also used : Path(java.nio.file.Path) MutableBoolean(org.apache.commons.lang3.mutable.MutableBoolean)
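
The commons-lang3 MutableBoolean here works around the rule that an anonymous visitor can only capture effectively final locals: the visitor flips a final, mutable flag instead of assigning a plain boolean. A minimal sketch of that pattern (with a hypothetical runCallback stand-in for index.consistencyCheck) is below.

import org.apache.commons.lang3.mutable.MutableBoolean;

public class CalledFlagSketch {
    // Hypothetical stand-in for a method that invokes a callback, e.g. a consistency check visitor
    static void runCallback(Runnable callback) {
        callback.run();
    }

    public static void main(String[] args) {
        MutableBoolean called = new MutableBoolean();
        runCallback(called::setTrue);        // the callback flips the flag
        System.out.println(called.isTrue()); // true
    }
}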

Aggregations

List (java.util.List) 26
Map (java.util.Map) 21
HashMap (java.util.HashMap) 20
ArrayList (java.util.ArrayList) 19
Collectors (java.util.stream.Collectors) 19
StringUtils (org.apache.commons.lang3.StringUtils) 16
Logger (org.slf4j.Logger) 15
LoggerFactory (org.slf4j.LoggerFactory) 15
Set (java.util.Set) 14
Pair (org.apache.commons.lang3.tuple.Pair) 14
Test (org.junit.Test) 14
IOException (java.io.IOException) 12
Optional (java.util.Optional) 11
Range (org.apache.commons.lang3.Range) 11
Date (java.util.Date) 10
HashSet (java.util.HashSet) 9
ExecutorService (java.util.concurrent.ExecutorService) 9
Collection (java.util.Collection) 8
Stream (java.util.stream.Stream) 8
Lists (com.google.common.collect.Lists) 7