Search in sources :

Example 81 with ArrayDeque

Use of java.util.ArrayDeque in the Apache Ignite project.

The class GridCircularQueueTest, method testQueue.

/**
 * Exercises {@code GridCacheQueryManager.CircularQueue} by mirroring every
 * operation in a plain {@link ArrayDeque} and comparing the two structures
 * after each step via {@code check()}.
 */
public void testQueue() {
    GridCacheQueryManager.CircularQueue<Integer> queue = new GridCacheQueryManager.CircularQueue<>(4);
    ArrayDeque<Integer> mirror = new ArrayDeque<>();

    // Fill both structures with identical elements (forces internal growth past capacity 4).
    for (int i = 0; i < 10; i++) {
        queue.add(i);
        mirror.add(i);
    }

    check(queue, mirror);

    // Partial removal from the head.
    queue.remove(4);
    remove(mirror, 4);

    check(queue, mirror);

    for (int i = 100; i < 110; i++) {
        queue.add(i);
        mirror.add(i);
    }

    check(queue, mirror);

    // Drain completely.
    int size = queue.size();

    queue.remove(size);
    remove(mirror, size);

    check(queue, mirror);

    assertEquals(0, queue.size());

    // Randomized add/remove workload, validating after every iteration.
    GridRandom rnd = new GridRandom();

    for (int i = 0; i < 15000; i++) {
        // Remove only when the coin flip says so AND there is something to remove;
        // otherwise fall back to adding (mirrors the original switch fall-through).
        if (rnd.nextInt(2) == 1 && queue.size() > 0) {
            int cnt = 1;

            if (queue.size() > 1)
                cnt += rnd.nextInt(queue.size() - 1);

            queue.remove(cnt);
            remove(mirror, cnt);
        }
        else {
            int cnt = rnd.nextInt(50);

            for (int j = 0; j < cnt; j++) {
                int x = rnd.nextInt();

                queue.add(x);
                mirror.add(x);
            }
        }

        check(queue, mirror);
    }
}
Also used : GridRandom(org.apache.ignite.internal.util.GridRandom) ArrayDeque(java.util.ArrayDeque)

Example 82 with ArrayDeque

Use of java.util.ArrayDeque in the Apache Ignite project.

The class HadoopIgfs20FileSystemAbstractSelfTest, method testMultithreadedDelete.

/**
 * Test concurrent deletion of the same directory with advanced structure.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("TooBroadScope")
public void testMultithreadedDelete() throws Exception {
    final Path dir = new Path(new Path(primaryFsUri), "/dir");

    fs.mkdir(dir, FsPermission.getDefault(), true);

    int depth = 3;
    int entryCnt = 5;

    // Breadth-first construction of a directory tree: directories down to
    // 'depth' levels, plain files at the leaves.
    Deque<IgniteBiTuple<Integer, Path>> pending = new ArrayDeque<>();

    pending.add(F.t(0, dir));

    while (!pending.isEmpty()) {
        IgniteBiTuple<Integer, Path> next = pending.pollFirst();

        int lvl = next.getKey();
        Path parent = next.getValue();

        if (lvl >= depth) {
            // Leaf level: create files.
            for (int i = 0; i < entryCnt; i++) {
                Path file = new Path(parent, "file " + i);

                fs.create(file, EnumSet.noneOf(CreateFlag.class),
                    Options.CreateOpts.perms(FsPermission.getDefault())).close();
            }
        }
        else {
            // Intermediate level: create sub-directories and queue them for expansion.
            int childLvl = lvl + 1;

            for (int i = 0; i < entryCnt; i++) {
                Path subDir = new Path(parent, "dir-" + childLvl + "-" + i);

                fs.mkdir(subDir, FsPermission.getDefault(), true);

                pending.addLast(F.t(childLvl, subDir));
            }
        }
    }

    final AtomicBoolean err = new AtomicBoolean();

    // All threads race to delete the same tree recursively.
    multithreaded(new Runnable() {
        @Override public void run() {
            try {
                U.awaitQuiet(barrier);

                fs.delete(dir, true);
            }
            catch (FileNotFoundException ignore) {
                // Expected when another thread already removed the tree.
            }
            catch (IOException ignore) {
                err.set(true);
            }
        }
    }, THREAD_CNT);

    // Ensure there were no errors.
    assert !err.get();

    // Ensure the directory was actually deleted.
    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override public Object call() throws Exception {
            fs.getFileStatus(dir);

            return null;
        }
    }, FileNotFoundException.class, null);
}
Also used : Path(org.apache.hadoop.fs.Path) IgfsPath(org.apache.ignite.igfs.IgfsPath) IgniteBiTuple(org.apache.ignite.lang.IgniteBiTuple) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) ArrayDeque(java.util.ArrayDeque) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) FileNotFoundException(java.io.FileNotFoundException) PathExistsException(org.apache.hadoop.fs.PathExistsException) FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) IOException(java.io.IOException) PathIsNotEmptyDirectoryException(org.apache.hadoop.fs.PathIsNotEmptyDirectoryException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean)

Example 83 with ArrayDeque

Use of java.util.ArrayDeque in the Apache Ignite project.

The class HadoopIgfs20FileSystemAbstractSelfTest, method testMultithreadedMkdirs.

/**
 * Test concurrent creation of multiple directories.
 *
 * @throws Exception If failed.
 */
public void testMultithreadedMkdirs() throws Exception {
    final Path dir = new Path(new Path("igfs:///"), "/dir");

    fs.mkdir(dir, FsPermission.getDefault(), true);

    final int depth = 3;
    final int entryCnt = 5;

    final AtomicBoolean err = new AtomicBoolean();

    // Every thread builds the same directory tree breadth-first; re-creating
    // an already-existing directory must not raise an error.
    multithreaded(new Runnable() {
        @Override public void run() {
            Deque<IgniteBiTuple<Integer, Path>> pending = new ArrayDeque<>();

            pending.add(F.t(0, dir));

            U.awaitQuiet(barrier);

            while (!pending.isEmpty()) {
                IgniteBiTuple<Integer, Path> next = pending.pollFirst();

                int lvl = next.getKey();
                Path parent = next.getValue();

                // Guard clause: stop expanding below the configured depth.
                if (lvl > depth)
                    continue;

                int childLvl = lvl + 1;

                // Create directories.
                for (int i = 0; i < entryCnt; i++) {
                    Path subDir = new Path(parent, "dir-" + childLvl + "-" + i);

                    try {
                        fs.mkdir(subDir, FsPermission.getDefault(), true);
                    }
                    catch (IOException ignore) {
                        err.set(true);
                    }

                    pending.addLast(F.t(childLvl, subDir));
                }
            }
        }
    }, THREAD_CNT);

    // Ensure there were no errors.
    assert !err.get();

    // Ensure correct folders structure by walking the tree the same way.
    Deque<IgniteBiTuple<Integer, Path>> pending = new ArrayDeque<>();

    pending.add(F.t(0, dir));

    while (!pending.isEmpty()) {
        IgniteBiTuple<Integer, Path> next = pending.pollFirst();

        int lvl = next.getKey();
        Path parent = next.getValue();

        if (lvl > depth)
            continue;

        int childLvl = lvl + 1;

        for (int i = 0; i < entryCnt; i++) {
            Path subDir = new Path(parent, "dir-" + childLvl + "-" + i);

            assertNotNull(fs.getFileStatus(subDir));

            pending.add(F.t(childLvl, subDir));
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) IgfsPath(org.apache.ignite.igfs.IgfsPath) IgniteBiTuple(org.apache.ignite.lang.IgniteBiTuple) IOException(java.io.IOException) Deque(java.util.Deque) ArrayDeque(java.util.ArrayDeque) ArrayDeque(java.util.ArrayDeque) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean)

Example 84 with ArrayDeque

Use of java.util.ArrayDeque in the Apache Jackrabbit Oak project.

The class MarkSweepGarbageCollector, method sweep.

/**
     * Sweep phase of gc candidate deletion.
     * <p>
     * Performs the following steps depending upon the type of the blob store refer
     * {@link org.apache.jackrabbit.oak.plugins.blob.SharedDataStore.Type}:
     *
     * <ul>
     *     <li>Shared</li>
     *     <li>
     *     <ul>
     *      <li> Merge all marked references (from the mark phase run independently) available in the data store meta
     *          store (from all configured independent repositories).
     *      <li> Retrieve all blob ids available.
     *      <li> Diffs the 2 sets above to retrieve list of blob ids not used.
     *      <li> Deletes only blobs created after
     *          (earliest time stamp of the marked references - #maxLastModifiedInterval) from the above set.
     *     </ul>
     *     </li>
     *
     *     <li>Default</li>
     *     <li>
     *     <ul>
     *      <li> Mark phase already run.
     *      <li> Retrieve all blob ids available.
     *      <li> Diffs the 2 sets above to retrieve list of blob ids not used.
     *      <li> Deletes only blobs created after
     *          (time stamp of the marked references - #maxLastModifiedInterval).
     *     </ul>
     *     </li>
     * </ul>
     *
     * @return the number of blobs deleted
     * @throws Exception the exception
     * @param fs the garbage collector file state
     * @param markStart the start time of mark to take as reference for deletion
     * @param forceBlobRetrieve
     */
protected long sweep(GarbageCollectorFileState fs, long markStart, boolean forceBlobRetrieve) throws Exception {
    long earliestRefAvailTime;
    // Only go ahead if merge succeeded
    try {
        earliestRefAvailTime = GarbageCollectionType.get(blobStore).mergeAllMarkedReferences(blobStore, fs);
        LOG.debug("Earliest reference available for timestamp [{}]", earliestRefAvailTime);
        // The deletion cutoff is based on the older of the merged reference time and the mark start.
        earliestRefAvailTime = (earliestRefAvailTime < markStart ? earliestRefAvailTime : markStart);
    } catch (Exception e) {
        // NOTE(review): a merge failure silently aborts the sweep (returns 0 deleted) and
        // drops 'e' — consider logging the cause here so failed sweeps are diagnosable.
        return 0;
    }
    // Find all blob references after iterating over the whole repository
    (new BlobIdRetriever(fs, forceBlobRetrieve)).call();
    // Calculate the references not used
    difference(fs);
    long count = 0;
    long deleted = 0;
    long lastMaxModifiedTime = getLastMaxModifiedTime(earliestRefAvailTime);
    LOG.debug("Starting sweep phase of the garbage collector");
    LOG.debug("Sweeping blobs with modified time > than the configured max deleted time ({}). ", timestampToString(lastMaxModifiedTime));
    BufferedWriter removesWriter = null;
    LineIterator iterator = null;
    try {
        // Ids actually removed are persisted to the 'garbage' file, batch by batch.
        removesWriter = Files.newWriter(fs.getGarbage(), Charsets.UTF_8);
        ArrayDeque<String> removesQueue = new ArrayDeque<String>();
        iterator = FileUtils.lineIterator(fs.getGcCandidates(), Charsets.UTF_8.name());
        // Stream candidates in fixed-size batches to bound memory use.
        Iterator<List<String>> partitions = Iterators.partition(iterator, getBatchCount());
        while (partitions.hasNext()) {
            List<String> ids = partitions.next();
            count += ids.size();
            deleted += BlobCollectionType.get(blobStore).sweepInternal(blobStore, ids, removesQueue, lastMaxModifiedTime);
            saveBatchToFile(newArrayList(removesQueue), removesWriter);
            // The queue is reused across batches; clear after persisting this batch.
            removesQueue.clear();
        }
    } finally {
        LineIterator.closeQuietly(iterator);
        closeQuietly(removesWriter);
    }
    BlobCollectionType.get(blobStore).handleRemoves(blobStore, fs.getGarbage());
    if (count != deleted) {
        LOG.warn("Deleted only [{}] blobs entries from the [{}] candidates identified. This may happen if blob " + "modified time is > " + "than the max deleted time ({})", deleted, count, timestampToString(lastMaxModifiedTime));
    }
    // Remove all the merged marked references
    GarbageCollectionType.get(blobStore).removeAllMarkedReferences(blobStore);
    LOG.debug("Ending sweep phase of the garbage collector");
    return deleted;
}
Also used : List(java.util.List) Lists.newArrayList(com.google.common.collect.Lists.newArrayList) LineIterator(org.apache.commons.io.LineIterator) DataStoreException(org.apache.jackrabbit.core.data.DataStoreException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) ArrayDeque(java.util.ArrayDeque) BufferedWriter(java.io.BufferedWriter)

Example 85 with ArrayDeque

Use of java.util.ArrayDeque in the Apache Jackrabbit Oak project.

The class FunctionIndexProcessor, method tryCalculateValue.

/**
 * Try to calculate the value for the given function code.
 *
 * @param path the path of the node
 * @param state the node state
 * @param functionCode the tokens, for example ["function", "lower", "@name"]
 * @return null, or the calculated value
 */
public static PropertyState tryCalculateValue(String path, NodeState state, String[] functionCode) {
    Deque<PropertyState> operands = new ArrayDeque<PropertyState>();
    // Evaluate tokens right-to-left; index 0 holds the "function" marker and is skipped.
    for (int i = functionCode.length - 1; i >= 1; i--) {
        String token = functionCode[i];
        // Tokens starting with '@' are property lookups; anything else is a
        // function applied to the operands accumulated so far.
        PropertyState result = token.startsWith("@")
                ? getProperty(path, state, token.substring(1))
                : calculateFunction(token, operands);
        if (result == null) {
            // currently, all operations involving null return null
            return null;
        }
        operands.push(result);
    }
    return operands.pop();
}
Also used : ArrayDeque(java.util.ArrayDeque) PropertyState(org.apache.jackrabbit.oak.api.PropertyState)

Aggregations

ArrayDeque (java.util.ArrayDeque)195 Test (org.junit.Test)35 ArrayList (java.util.ArrayList)28 IOException (java.io.IOException)25 HashMap (java.util.HashMap)17 HashSet (java.util.HashSet)17 List (java.util.List)15 Map (java.util.Map)13 Iterator (java.util.Iterator)10 NoSuchElementException (java.util.NoSuchElementException)8 Random (java.util.Random)7 Deque (java.util.Deque)6 BooleanSupplier (io.reactivex.functions.BooleanSupplier)5 BooleanSubscription (io.reactivex.internal.subscriptions.BooleanSubscription)5 ByteBuffer (java.nio.ByteBuffer)5 AtomicLong (java.util.concurrent.atomic.AtomicLong)5 AtomicReference (java.util.concurrent.atomic.AtomicReference)5 HttpFields (org.eclipse.jetty.http.HttpFields)5 Name (com.github.anba.es6draft.ast.scope.Name)4 ExecutionContext (com.github.anba.es6draft.runtime.ExecutionContext)4