Use of java.util.ArrayDeque in project ignite by apache.
The class GridCircularQueueTest, method testQueue.
/**
 * Exercises CircularQueue against java.util.ArrayDeque used as a reference model.
 */
public void testQueue() {
    GridCacheQueryManager.CircularQueue<Integer> q = new GridCacheQueryManager.CircularQueue<>(4);

    ArrayDeque<Integer> d = new ArrayDeque<>();

    for (int i = 0; i < 10; i++) {
        q.add(i);
        d.add(i);
    }

    check(q, d);

    q.remove(4);
    remove(d, 4);

    check(q, d);

    for (int i = 100; i < 110; i++) {
        q.add(i);
        d.add(i);
    }

    check(q, d);

    int size = q.size();

    q.remove(size);
    remove(d, size);

    check(q, d);

    assertEquals(0, q.size());

    GridRandom rnd = new GridRandom();

    for (int i = 0; i < 15000; i++) {
        switch (rnd.nextInt(2)) {
            case 1:
                if (q.size() > 0) {
                    int cnt = 1;

                    if (q.size() > 1)
                        cnt += rnd.nextInt(q.size() - 1);

                    q.remove(cnt);
                    remove(d, cnt);

                    break;
                }
                // Intentional fall-through: if the queue is empty, add elements instead.

            case 0:
                int cnt = rnd.nextInt(50);

                for (int j = 0; j < cnt; j++) {
                    int x = rnd.nextInt();

                    q.add(x);
                    d.add(x);
                }

                break;
        }

        check(q, d);
    }
}
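The check(q, d) and remove(d, n) calls above are helpers defined elsewhere in the test class. A minimal sketch of what such helpers could look like, assuming remove(d, n) drops n elements from the head of the ArrayDeque and check(...) compares the two structures element by element (names and signatures here are illustrative, not the Ignite originals):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Iterator;

// Hypothetical shapes of the helpers used by testQueue(); the real ones live in
// GridCircularQueueTest and may differ.
final class QueueTestHelpers {
    /** Removes {@code cnt} elements from the head of the deque, mirroring CircularQueue.remove(cnt). */
    static void remove(Deque<Integer> d, int cnt) {
        for (int i = 0; i < cnt; i++)
            d.pollFirst();
    }

    /** Compares some iterable view of the queue against the ArrayDeque reference model. */
    static void check(Iterable<Integer> actual, Deque<Integer> expected) {
        Iterator<Integer> exp = expected.iterator();
        for (Integer val : actual) {
            if (!exp.hasNext() || !exp.next().equals(val))
                throw new AssertionError("Queue content diverges from the ArrayDeque reference model");
        }
        if (exp.hasNext())
            throw new AssertionError("Reference model has more elements than the queue");
    }
}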
Use of java.util.ArrayDeque in project ignite by apache.
The class HadoopIgfs20FileSystemAbstractSelfTest, method testMultithreadedDelete.
/**
 * Test concurrent deletion of the same directory with advanced structure.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("TooBroadScope")
public void testMultithreadedDelete() throws Exception {
    final Path dir = new Path(new Path(primaryFsUri), "/dir");

    fs.mkdir(dir, FsPermission.getDefault(), true);

    int depth = 3;
    int entryCnt = 5;

    Deque<IgniteBiTuple<Integer, Path>> queue = new ArrayDeque<>();

    queue.add(F.t(0, dir));

    while (!queue.isEmpty()) {
        IgniteBiTuple<Integer, Path> t = queue.pollFirst();

        int curDepth = t.getKey();
        Path curPath = t.getValue();

        if (curDepth < depth) {
            int newDepth = curDepth + 1;

            // Create directories.
            for (int i = 0; i < entryCnt; i++) {
                Path subDir = new Path(curPath, "dir-" + newDepth + "-" + i);

                fs.mkdir(subDir, FsPermission.getDefault(), true);

                queue.addLast(F.t(newDepth, subDir));
            }
        }
        else {
            // Create files.
            for (int i = 0; i < entryCnt; i++) {
                Path file = new Path(curPath, "file " + i);

                fs.create(file, EnumSet.noneOf(CreateFlag.class),
                    Options.CreateOpts.perms(FsPermission.getDefault())).close();
            }
        }
    }

    final AtomicBoolean err = new AtomicBoolean();

    multithreaded(new Runnable() {
        @Override public void run() {
            try {
                U.awaitQuiet(barrier);

                fs.delete(dir, true);
            }
            catch (FileNotFoundException ignore) {
                // No-op.
            }
            catch (IOException ignore) {
                err.set(true);
            }
        }
    }, THREAD_CNT);

    // Ensure there were no errors.
    assert !err.get();

    // Ensure the directory was actually deleted.
    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override public Object call() throws Exception {
            fs.getFileStatus(dir);

            return null;
        }
    }, FileNotFoundException.class, null);
}
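The directory tree above is built breadth-first: an ArrayDeque serves as a FIFO work queue of (depth, path) pairs, with addLast(...) enqueuing newly created directories and pollFirst() dequeuing the next one to expand. A self-contained sketch of the same traversal using only JDK classes (java.nio.file in place of the Hadoop FileContext API, and Map.entry in place of IgniteBiTuple; all names below are illustrative):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Map;

// Builds a directory tree of fixed depth and fan-out, level by level.
public class TreeBuilder {
    public static void main(String[] args) throws IOException {
        Path root = Files.createTempDirectory("dir");
        int depth = 3, entryCnt = 5;

        // Each queue entry pairs a directory with the depth at which it was created.
        Deque<Map.Entry<Integer, Path>> queue = new ArrayDeque<>();
        queue.add(Map.entry(0, root));

        while (!queue.isEmpty()) {
            Map.Entry<Integer, Path> t = queue.pollFirst();   // FIFO: breadth-first order
            int curDepth = t.getKey();
            Path curPath = t.getValue();

            if (curDepth < depth) {
                // Create sub-directories and enqueue them for further expansion.
                for (int i = 0; i < entryCnt; i++) {
                    Path subDir = Files.createDirectory(curPath.resolve("dir-" + (curDepth + 1) + "-" + i));
                    queue.addLast(Map.entry(curDepth + 1, subDir));
                }
            } else {
                // Leaf level: create plain files.
                for (int i = 0; i < entryCnt; i++)
                    Files.createFile(curPath.resolve("file-" + i));
            }
        }
        System.out.println("Created tree under " + root);
    }
}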
Use of java.util.ArrayDeque in project ignite by apache.
The class HadoopIgfs20FileSystemAbstractSelfTest, method testMultithreadedMkdirs.
/**
 * Test concurrent creation of multiple directories.
 *
 * @throws Exception If failed.
 */
public void testMultithreadedMkdirs() throws Exception {
    final Path dir = new Path(new Path("igfs:///"), "/dir");

    fs.mkdir(dir, FsPermission.getDefault(), true);

    final int depth = 3;
    final int entryCnt = 5;

    final AtomicBoolean err = new AtomicBoolean();

    multithreaded(new Runnable() {
        @Override public void run() {
            Deque<IgniteBiTuple<Integer, Path>> queue = new ArrayDeque<>();

            queue.add(F.t(0, dir));

            U.awaitQuiet(barrier);

            while (!queue.isEmpty()) {
                IgniteBiTuple<Integer, Path> t = queue.pollFirst();

                int curDepth = t.getKey();
                Path curPath = t.getValue();

                if (curDepth <= depth) {
                    int newDepth = curDepth + 1;

                    // Create directories.
                    for (int i = 0; i < entryCnt; i++) {
                        Path subDir = new Path(curPath, "dir-" + newDepth + "-" + i);

                        try {
                            fs.mkdir(subDir, FsPermission.getDefault(), true);
                        }
                        catch (IOException ignore) {
                            err.set(true);
                        }

                        queue.addLast(F.t(newDepth, subDir));
                    }
                }
            }
        }
    }, THREAD_CNT);

    // Ensure there were no errors.
    assert !err.get();

    // Ensure correct folder structure.
    Deque<IgniteBiTuple<Integer, Path>> queue = new ArrayDeque<>();

    queue.add(F.t(0, dir));

    while (!queue.isEmpty()) {
        IgniteBiTuple<Integer, Path> t = queue.pollFirst();

        int curDepth = t.getKey();
        Path curPath = t.getValue();

        if (curDepth <= depth) {
            int newDepth = curDepth + 1;

            // Check that the directories created by the workers exist.
            for (int i = 0; i < entryCnt; i++) {
                Path subDir = new Path(curPath, "dir-" + newDepth + "-" + i);

                assertNotNull(fs.getFileStatus(subDir));

                queue.add(F.t(newDepth, subDir));
            }
        }
    }
}
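Both multithreaded tests rely on a barrier so that all workers start at the same instant and on an AtomicBoolean to record failures; multithreaded(...), barrier and U.awaitQuiet(...) belong to the Ignite test framework. A rough JDK-only sketch of that coordination pattern using CyclicBarrier and plain threads (all names below are illustrative, not the framework's):

import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicBoolean;

// N workers wait at a barrier, do their concurrent work, and record failures in a shared flag;
// the caller joins all workers and asserts that nothing went wrong.
public class BarrierRunner {
    public static void main(String[] args) throws InterruptedException {
        final int threadCnt = 8;
        final CyclicBarrier barrier = new CyclicBarrier(threadCnt);
        final AtomicBoolean err = new AtomicBoolean();

        Thread[] workers = new Thread[threadCnt];
        for (int i = 0; i < threadCnt; i++) {
            workers[i] = new Thread(() -> {
                try {
                    barrier.await();          // all threads start their work at the same time
                    // ... perform concurrent mkdir-like work here ...
                } catch (Exception e) {
                    err.set(true);            // remember that some worker failed
                }
            });
            workers[i].start();
        }
        for (Thread w : workers)
            w.join();

        if (err.get())
            throw new AssertionError("At least one worker failed");
    }
}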
Use of java.util.ArrayDeque in project jackrabbit-oak by apache.
The class MarkSweepGarbageCollector, method sweep.
/**
 * Sweep phase of GC candidate deletion.
 * <p>
 * Performs the following steps depending upon the type of the blob store, refer
 * {@link org.apache.jackrabbit.oak.plugins.blob.SharedDataStore.Type}:
 *
 * <ul>
 * <li>Shared
 * <ul>
 * <li> Merge all marked references (from the mark phase run independently) available in the data store meta
 * store (from all configured independent repositories).
 * <li> Retrieve all available blob ids.
 * <li> Diff the two sets above to retrieve the list of blob ids not in use.
 * <li> Deletes only blobs created after
 * (earliest timestamp of the marked references - #maxLastModifiedInterval) from the above set.
 * </ul>
 * </li>
 *
 * <li>Default
 * <ul>
 * <li> Mark phase already run.
 * <li> Retrieve all available blob ids.
 * <li> Diff the two sets above to retrieve the list of blob ids not in use.
 * <li> Deletes only blobs created after
 * (timestamp of the marked references - #maxLastModifiedInterval).
 * </ul>
 * </li>
 * </ul>
 *
 * @param fs the garbage collector file state
 * @param markStart the start time of mark to take as reference for deletion
 * @param forceBlobRetrieve
 * @return the number of blobs deleted
 * @throws Exception the exception
 */
protected long sweep(GarbageCollectorFileState fs, long markStart, boolean forceBlobRetrieve) throws Exception {
    long earliestRefAvailTime;
    // Only go ahead if merge succeeded
    try {
        earliestRefAvailTime = GarbageCollectionType.get(blobStore).mergeAllMarkedReferences(blobStore, fs);
        LOG.debug("Earliest reference available for timestamp [{}]", earliestRefAvailTime);
        earliestRefAvailTime = (earliestRefAvailTime < markStart ? earliestRefAvailTime : markStart);
    } catch (Exception e) {
        return 0;
    }

    // Find all blob references after iterating over the whole repository
    (new BlobIdRetriever(fs, forceBlobRetrieve)).call();

    // Calculate the references not used
    difference(fs);

    long count = 0;
    long deleted = 0;

    long lastMaxModifiedTime = getLastMaxModifiedTime(earliestRefAvailTime);
    LOG.debug("Starting sweep phase of the garbage collector");
    LOG.debug("Sweeping blobs with modified time > than the configured max deleted time ({}). ",
        timestampToString(lastMaxModifiedTime));

    BufferedWriter removesWriter = null;
    LineIterator iterator = null;
    try {
        removesWriter = Files.newWriter(fs.getGarbage(), Charsets.UTF_8);
        ArrayDeque<String> removesQueue = new ArrayDeque<String>();
        iterator = FileUtils.lineIterator(fs.getGcCandidates(), Charsets.UTF_8.name());

        // Process the candidates in fixed-size batches.
        Iterator<List<String>> partitions = Iterators.partition(iterator, getBatchCount());
        while (partitions.hasNext()) {
            List<String> ids = partitions.next();

            count += ids.size();
            deleted += BlobCollectionType.get(blobStore).sweepInternal(blobStore, ids, removesQueue, lastMaxModifiedTime);

            saveBatchToFile(newArrayList(removesQueue), removesWriter);
            removesQueue.clear();
        }
    } finally {
        LineIterator.closeQuietly(iterator);
        closeQuietly(removesWriter);
    }

    BlobCollectionType.get(blobStore).handleRemoves(blobStore, fs.getGarbage());

    if (count != deleted) {
        LOG.warn("Deleted only [{}] blobs entries from the [{}] candidates identified. This may happen if blob "
            + "modified time is > " + "than the max deleted time ({})",
            deleted, count, timestampToString(lastMaxModifiedTime));
    }

    // Remove all the merged marked references
    GarbageCollectionType.get(blobStore).removeAllMarkedReferences(blobStore);
    LOG.debug("Ending sweep phase of the garbage collector");
    return deleted;
}
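Here the ArrayDeque acts as a reusable per-batch buffer: the candidate ids are read in partitions of getBatchCount() entries (via Guava's Iterators.partition), sweepInternal(...) appends the ids it actually removed to the deque, the batch is flushed to the garbage file, and the deque is cleared for the next partition. A stripped-down sketch of that batching pattern with only JDK classes (the sweepInternal and saveBatchToFile stand-ins below are hypothetical):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

// Consume candidates in fixed-size partitions, collecting each batch's removed ids
// in a reusable ArrayDeque that is flushed and cleared after every batch.
public class BatchSweep {
    // Hand-rolled replacement for Guava's Iterators.partition so the example stays JDK-only.
    static List<String> nextBatch(Iterator<String> it, int batchSize) {
        List<String> batch = new ArrayList<>(batchSize);
        while (it.hasNext() && batch.size() < batchSize)
            batch.add(it.next());
        return batch;
    }

    public static void main(String[] args) {
        Iterator<String> candidates = Arrays.asList("a", "b", "c", "d", "e").iterator();
        ArrayDeque<String> removesQueue = new ArrayDeque<>();
        long count = 0, deleted = 0;

        while (candidates.hasNext()) {
            List<String> ids = nextBatch(candidates, 2);
            count += ids.size();

            // Stand-in for sweepInternal(...): pretend every candidate in the batch was removed.
            removesQueue.addAll(ids);
            deleted += removesQueue.size();

            // Stand-in for saveBatchToFile(...): flush the batch, then reuse the deque.
            System.out.println("flushing batch: " + removesQueue);
            removesQueue.clear();
        }
        System.out.println("deleted " + deleted + " of " + count + " candidates");
    }
}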
Use of java.util.ArrayDeque in project jackrabbit-oak by apache.
The class FunctionIndexProcessor, method tryCalculateValue.
/**
 * Try to calculate the value for the given function code.
 *
 * @param path the path of the node
 * @param state the node state
 * @param functionCode the tokens, for example ["function", "lower", "@name"]
 * @return null, or the calculated value
 */
public static PropertyState tryCalculateValue(String path, NodeState state, String[] functionCode) {
    Deque<PropertyState> stack = new ArrayDeque<PropertyState>();
    for (int i = functionCode.length - 1; i > 0; i--) {
        String token = functionCode[i];
        PropertyState ps;
        if (token.startsWith("@")) {
            String propertyName = token.substring(1);
            ps = getProperty(path, state, propertyName);
        } else {
            ps = calculateFunction(token, stack);
        }
        if (ps == null) {
            // currently, all operations involving null return null
            return null;
        }
        stack.push(ps);
    }
    return stack.pop();
}
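tryCalculateValue evaluates the function code as a prefix expression: tokens are scanned right to left, property references are resolved and pushed onto an ArrayDeque used as a stack, and function tokens consume the value on top of the stack. A small self-contained sketch of the same strategy on plain strings instead of Oak PropertyState values (the "lower" and "upper" token names mirror the Javadoc example; everything else is illustrative):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Map;

// Right-to-left evaluation of prefix function code over plain strings, with an
// ArrayDeque acting as the operand stack.
public class PrefixEval {
    static String eval(String[] functionCode, Map<String, String> properties) {
        Deque<String> stack = new ArrayDeque<>();
        for (int i = functionCode.length - 1; i > 0; i--) {   // index 0 is the "function" marker
            String token = functionCode[i];
            String value;
            if (token.startsWith("@")) {
                value = properties.get(token.substring(1));    // property reference, e.g. "@name"
            } else if (stack.isEmpty()) {
                return null;                                   // malformed expression
            } else if (token.equals("lower")) {
                value = stack.pop().toLowerCase();             // apply function to top of stack
            } else if (token.equals("upper")) {
                value = stack.pop().toUpperCase();
            } else {
                return null;                                   // unknown function token
            }
            if (value == null)
                return null;                                   // as in the original: null propagates
            stack.push(value);
        }
        return stack.isEmpty() ? null : stack.pop();
    }

    public static void main(String[] args) {
        // Mirrors the Javadoc example: ["function", "lower", "@name"].
        System.out.println(eval(new String[] {"function", "lower", "@name"},
            Map.of("name", "Hello World")));   // prints "hello world"
    }
}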