Use of org.apache.ignite.lang.IgniteBiTuple in the Apache Ignite project:
class IgfsAbstractSelfTest, method checkDeadlocks.
/**
* Check deadlocks by creating a complex directory structure and then executing chaotic operations on it. A lot of
* exceptions are expected here. We are not interested in them. Instead, we want to ensure that no deadlocks occur
* during execution.
*
* @param lvlCnt Total levels in folder hierarchy.
* @param childrenDirPerLvl How many child directories to create per level.
* @param childrenFilePerLvl How many child files to create per level.
* @param primaryLvlCnt How many levels will exist in the primary file system before the check starts.
* @param renCnt How many renames to perform.
* @param delCnt How many deletes to perform.
* @param updateCnt How many updates to perform.
* @param mkdirsCnt How many directory creations to perform.
* @param createCnt How many file creations to perform.
* @throws Exception If failed.
*/
@SuppressWarnings("ConstantConditions")
private void checkDeadlocks(final int lvlCnt, final int childrenDirPerLvl, final int childrenFilePerLvl,
    int primaryLvlCnt, int renCnt, int delCnt, int updateCnt, int mkdirsCnt, int createCnt) throws Exception {
assert childrenDirPerLvl > 0;
// First define file system structure.
final Map<Integer, List<IgfsPath>> dirPaths = new HashMap<>();
final Map<Integer, List<IgfsPath>> filePaths = new HashMap<>();
Queue<IgniteBiTuple<Integer, IgfsPath>> queue = new ArrayDeque<>();
// Add root directory.
queue.add(F.t(0, IgfsPath.ROOT));
while (!queue.isEmpty()) {
IgniteBiTuple<Integer, IgfsPath> entry = queue.poll();
int lvl = entry.getKey();
if (lvl < lvlCnt) {
int newLvl = lvl + 1;
for (int i = 0; i < childrenDirPerLvl; i++) {
IgfsPath path = new IgfsPath(entry.getValue(), "dir-" + newLvl + "-" + i);
queue.add(F.t(newLvl, path));
if (!dirPaths.containsKey(newLvl))
dirPaths.put(newLvl, new ArrayList<IgfsPath>());
dirPaths.get(newLvl).add(path);
}
for (int i = 0; i < childrenFilePerLvl; i++) {
IgfsPath path = new IgfsPath(entry.getValue(), "file-" + newLvl + "-" + i);
if (!filePaths.containsKey(newLvl))
filePaths.put(newLvl, new ArrayList<IgfsPath>());
filePaths.get(newLvl).add(path);
}
}
}
// Now that all paths are defined, plan operations on them.
final Random rand = new Random(SEED);
final int totalOpCnt = renCnt + delCnt + updateCnt + mkdirsCnt + createCnt;
if (totalOpCnt == 0)
throw new RuntimeException("Operations count is zero.");
final CyclicBarrier barrier = new CyclicBarrier(totalOpCnt);
Collection<Thread> threads = new ArrayList<>(totalOpCnt);
// Renames.
for (int i = 0; i < renCnt; i++) {
Runnable r = new Runnable() {
@Override
public void run() {
try {
int fromLvl = rand.nextInt(lvlCnt) + 1;
int toLvl = rand.nextInt(lvlCnt) + 1;
List<IgfsPath> fromPaths;
List<IgfsPath> toPaths;
if (rand.nextInt(childrenDirPerLvl + childrenFilePerLvl) < childrenDirPerLvl) {
// Rename directories.
fromPaths = dirPaths.get(fromLvl);
toPaths = dirPaths.get(toLvl);
} else {
// Rename files.
fromPaths = filePaths.get(fromLvl);
toPaths = filePaths.get(toLvl);
}
IgfsPath fromPath = fromPaths.get(rand.nextInt(fromPaths.size()));
IgfsPath toPath = toPaths.get(rand.nextInt(toPaths.size()));
U.awaitQuiet(barrier);
igfs.rename(fromPath, toPath);
} catch (IgniteException ignore) {
// No-op.
}
}
};
threads.add(new Thread(r));
}
// Deletes.
for (int i = 0; i < delCnt; i++) {
Runnable r = new Runnable() {
@Override
public void run() {
try {
int lvl = rand.nextInt(lvlCnt) + 1;
IgfsPath path = rand.nextInt(childrenDirPerLvl + childrenFilePerLvl) < childrenDirPerLvl ?
    dirPaths.get(lvl).get(rand.nextInt(dirPaths.get(lvl).size())) :
    filePaths.get(lvl).get(rand.nextInt(filePaths.get(lvl).size()));
U.awaitQuiet(barrier);
igfs.delete(path, true);
} catch (IgniteException ignore) {
// No-op.
}
}
};
threads.add(new Thread(r));
}
// Updates.
for (int i = 0; i < updateCnt; i++) {
Runnable r = new Runnable() {
@Override
public void run() {
try {
int lvl = rand.nextInt(lvlCnt) + 1;
IgfsPath path = rand.nextInt(childrenDirPerLvl + childrenFilePerLvl) < childrenDirPerLvl ?
    dirPaths.get(lvl).get(rand.nextInt(dirPaths.get(lvl).size())) :
    filePaths.get(lvl).get(rand.nextInt(filePaths.get(lvl).size()));
U.awaitQuiet(barrier);
igfs.update(path, properties("owner", "group", null));
} catch (IgniteException ignore) {
// No-op.
}
}
};
threads.add(new Thread(r));
}
// Directory creations.
final AtomicInteger dirCtr = new AtomicInteger();
for (int i = 0; i < mkdirsCnt; i++) {
Runnable r = new Runnable() {
@Override
public void run() {
try {
int lvl = rand.nextInt(lvlCnt) + 1;
IgfsPath parentPath = dirPaths.get(lvl).get(rand.nextInt(dirPaths.get(lvl).size()));
IgfsPath path = new IgfsPath(parentPath, "newDir-" + dirCtr.incrementAndGet());
U.awaitQuiet(barrier);
igfs.mkdirs(path);
} catch (IgniteException ignore) {
// No-op.
}
}
};
threads.add(new Thread(r));
}
// File creations.
final AtomicInteger fileCtr = new AtomicInteger();
for (int i = 0; i < createCnt; i++) {
Runnable r = new Runnable() {
@Override
public void run() {
try {
int lvl = rand.nextInt(lvlCnt) + 1;
IgfsPath parentPath = dirPaths.get(lvl).get(rand.nextInt(dirPaths.get(lvl).size()));
IgfsPath path = new IgfsPath(parentPath, "newFile-" + fileCtr.incrementAndGet());
U.awaitQuiet(barrier);
IgfsOutputStream os = null;
try {
os = igfs.create(path, true);
os.write(chunk);
} finally {
U.closeQuiet(os);
}
} catch (IOException | IgniteException ignore) {
// No-op.
}
}
};
threads.add(new Thread(r));
}
// Create file/directory structure.
for (int i = 0; i < lvlCnt; i++) {
int lvl = i + 1;
boolean targetToPrimary = !dual || lvl <= primaryLvlCnt;
IgfsPath[] dirs = dirPaths.get(lvl).toArray(new IgfsPath[dirPaths.get(lvl).size()]);
IgfsPath[] files = filePaths.get(lvl).toArray(new IgfsPath[filePaths.get(lvl).size()]);
if (targetToPrimary)
create(igfs, dirs, files);
else
create(igfsSecondary, dirs, files);
}
// Start all threads and wait for them to finish.
for (Thread thread : threads)
    thread.start();
U.joinThreads(threads, null);
}
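The part of checkDeadlocks that actually relies on IgniteBiTuple is the level-tagged breadth-first walk: each queue entry pairs a depth with a path, so paths can be grouped per level before the chaotic operations start. Below is a minimal standalone sketch of the same pattern; the class name LevelWalk, the buildLevels helper, and the use of plain String paths instead of IgfsPath are assumptions made so the snippet runs without an IGFS instance.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Queue;

import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.lang.IgniteBiTuple;

public class LevelWalk {
    /** Builds a level -> paths map by tagging every queued path with its depth (hypothetical helper). */
    static Map<Integer, List<String>> buildLevels(int lvlCnt, int childrenDirPerLvl) {
        Map<Integer, List<String>> dirPaths = new HashMap<>();
        Queue<IgniteBiTuple<Integer, String>> queue = new ArrayDeque<>();
        queue.add(F.t(0, "/"));
        while (!queue.isEmpty()) {
            IgniteBiTuple<Integer, String> entry = queue.poll();
            // getKey()/getValue() read the same fields as get1()/get2(), as in the test above.
            int lvl = entry.getKey();
            if (lvl < lvlCnt) {
                int newLvl = lvl + 1;
                for (int i = 0; i < childrenDirPerLvl; i++) {
                    String path = entry.getValue() + "dir-" + newLvl + "-" + i + "/";
                    queue.add(F.t(newLvl, path));
                    dirPaths.computeIfAbsent(newLvl, k -> new ArrayList<>()).add(path);
                }
            }
        }
        return dirPaths;
    }

    public static void main(String[] args) {
        // Two levels with three directories each: 3 + 9 = 12 paths in total.
        System.out.println(buildLevels(2, 3));
    }
}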
Use of org.apache.ignite.lang.IgniteBiTuple in the Apache Ignite project:
class IgfsNearOnlyMultiNodeSelfTest, method testContentsConsistency.
/**
* @throws Exception If failed.
*/
public void testContentsConsistency() throws Exception {
try (FileSystem fs = FileSystem.get(getFileSystemURI(0), getFileSystemConfig())) {
Collection<IgniteBiTuple<String, Long>> files = F.asList(
    F.t("/dir1/dir2/file1", 1024L),
    F.t("/dir1/dir2/file2", 8 * 1024L),
    F.t("/dir1/file1", 1024 * 1024L),
    F.t("/dir1/file2", 5 * 1024 * 1024L),
    F.t("/file1", 64 * 1024L + 13),
    F.t("/file2", 13L),
    F.t("/file3", 123764L));
for (IgniteBiTuple<String, Long> file : files) {
info("Writing file: " + file.get1());
try (OutputStream os = fs.create(new Path(file.get1()), (short) 3)) {
byte[] data = new byte[file.get2().intValue()];
data[0] = 25;
data[data.length - 1] = 26;
os.write(data);
}
info("Finished writing file: " + file.get1());
}
for (int i = 1; i < nodeCount(); i++) {
try (FileSystem ignored = FileSystem.get(getFileSystemURI(i), getFileSystemConfig())) {
for (IgniteBiTuple<String, Long> file : files) {
Path path = new Path(file.get1());
FileStatus fileStatus = fs.getFileStatus(path);
assertEquals(file.get2(), (Long) fileStatus.getLen());
byte[] read = new byte[file.get2().intValue()];
info("Reading file: " + path);
try (FSDataInputStream in = fs.open(path)) {
in.readFully(read);
assert read[0] == 25;
assert read[read.length - 1] == 26;
}
info("Finished reading file: " + path);
}
}
}
}
}
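The fixture pattern here is a list of (path, length) pairs built with F.t(...) and read back through get1()/get2(), with the first and last bytes of each file acting as content markers. The sketch below reproduces the same idea against java.nio instead of the Hadoop FileSystem API; the MarkerFiles class name, the temp-directory layout, and the particular file sizes are assumptions.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collection;

import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.lang.IgniteBiTuple;

public class MarkerFiles {
    public static void main(String[] args) throws IOException {
        Path root = Files.createTempDirectory("marker-files");
        // Each tuple pairs a relative file name with the desired file length in bytes.
        Collection<IgniteBiTuple<String, Long>> files =
            F.asList(F.t("file1", 1024L), F.t("file2", 64 * 1024L + 13), F.t("file3", 13L));
        for (IgniteBiTuple<String, Long> file : files) {
            byte[] data = new byte[file.get2().intValue()];
            // Mark the first and last bytes so content (not just length) can be verified later.
            data[0] = 25;
            data[data.length - 1] = 26;
            Files.write(root.resolve(file.get1()), data);
        }
        for (IgniteBiTuple<String, Long> file : files) {
            byte[] read = Files.readAllBytes(root.resolve(file.get1()));
            if (read.length != file.get2().intValue() || read[0] != 25 || read[read.length - 1] != 26)
                throw new AssertionError("Content mismatch: " + file.get1());
        }
        System.out.println("Verified " + files.size() + " files under " + root);
    }
}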
Use of org.apache.ignite.lang.IgniteBiTuple in the Apache Ignite project:
class IgniteHadoopFileSystemAbstractSelfTest, method testMultithreadedDelete.
/**
* Test concurrent deletion of the same directory with a deeply nested structure.
*
* @throws Exception If failed.
*/
@SuppressWarnings("TooBroadScope")
public void testMultithreadedDelete() throws Exception {
final Path dir = new Path(new Path(PRIMARY_URI), "/dir");
assert fs.mkdirs(dir);
int depth = 3;
int entryCnt = 5;
Deque<IgniteBiTuple<Integer, Path>> queue = new ArrayDeque<>();
queue.add(F.t(0, dir));
while (!queue.isEmpty()) {
IgniteBiTuple<Integer, Path> t = queue.pollFirst();
int curDepth = t.getKey();
Path curPath = t.getValue();
if (curDepth < depth) {
int newDepth = curDepth + 1;
// Create directories.
for (int i = 0; i < entryCnt; i++) {
Path subDir = new Path(curPath, "dir-" + newDepth + "-" + i);
fs.mkdirs(subDir);
queue.addLast(F.t(newDepth, subDir));
}
} else {
// Create files.
for (int i = 0; i < entryCnt; i++) {
Path file = new Path(curPath, "file " + i);
fs.create(file).close();
}
}
}
final AtomicBoolean err = new AtomicBoolean();
multithreaded(new Runnable() {
@Override
public void run() {
try {
U.awaitQuiet(barrier);
fs.delete(dir, true);
} catch (IOException ignore) {
err.set(true);
}
}
}, THREAD_CNT);
// Ensure there were no errors.
assert !err.get();
assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
@Override
public boolean apply() {
try {
return !fs.exists(dir);
} catch (IOException e) {
throw new AssertionError(e);
}
}
}, 5000L);
}
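The essential mechanism of the test is several threads meeting at a barrier and issuing the same recursive delete at the same instant; losing the race must surface as a benign exception or a false return value, never as a hang. A stripped-down sketch of that coordination with plain JDK types follows; the ConcurrentDelete class, the thread count, and the use of java.nio instead of the Hadoop FileSystem are assumptions.

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicBoolean;

public class ConcurrentDelete {
    public static void main(String[] args) throws Exception {
        Path dir = Files.createTempDirectory("concurrent-delete");
        for (int i = 0; i < 5; i++)
            Files.createFile(dir.resolve("file-" + i));
        int threadCnt = 4;
        CyclicBarrier barrier = new CyclicBarrier(threadCnt);
        AtomicBoolean err = new AtomicBoolean();
        List<Thread> threads = new ArrayList<>();
        for (int i = 0; i < threadCnt; i++) {
            Thread t = new Thread(() -> {
                try {
                    // All threads start the recursive delete at the same moment.
                    barrier.await();
                    File[] children = dir.toFile().listFiles();
                    if (children != null)
                        for (File child : children)
                            child.delete(); // Returns false (no exception) if another thread won the race.
                    Files.deleteIfExists(dir);
                }
                catch (IOException ignore) {
                    // Losing the race on the directory itself is expected and benign.
                }
                catch (Exception e) {
                    // A broken barrier or an interrupt would be a real failure.
                    err.set(true);
                }
            });
            threads.add(t);
            t.start();
        }
        for (Thread t : threads)
            t.join();
        System.out.println("dir removed=" + Files.notExists(dir) + ", unexpected errors=" + err.get());
    }
}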
Use of org.apache.ignite.lang.IgniteBiTuple in the Apache Ignite project:
class IgniteHadoopFileSystemAbstractSelfTest, method testMultithreadedMkdirs.
/**
* Test concurrent creation of multiple directories.
*
* @throws Exception If failed.
*/
public void testMultithreadedMkdirs() throws Exception {
final Path dir = new Path(new Path(PRIMARY_URI), "/dir");
assert fs.mkdirs(dir);
final int depth = 3;
final int entryCnt = 5;
final AtomicReference<IOException> err = new AtomicReference<>();
multithreaded(new Runnable() {
@Override
public void run() {
Deque<IgniteBiTuple<Integer, Path>> queue = new ArrayDeque<>();
queue.add(F.t(0, dir));
U.awaitQuiet(barrier);
while (!queue.isEmpty()) {
IgniteBiTuple<Integer, Path> t = queue.pollFirst();
int curDepth = t.getKey();
Path curPath = t.getValue();
if (curDepth <= depth) {
int newDepth = curDepth + 1;
// Create directories.
for (int i = 0; i < entryCnt; i++) {
Path subDir = new Path(curPath, "dir-" + newDepth + "-" + i);
try {
if (fs.mkdirs(subDir))
queue.addLast(F.t(newDepth, subDir));
} catch (IOException e) {
err.compareAndSet(null, e);
}
}
}
}
}
}, THREAD_CNT);
// Ensure there were no errors.
assert err.get() == null : err.get();
// Ensure correct folders structure.
Deque<IgniteBiTuple<Integer, Path>> queue = new ArrayDeque<>();
queue.add(F.t(0, dir));
while (!queue.isEmpty()) {
IgniteBiTuple<Integer, Path> t = queue.pollFirst();
int curDepth = t.getKey();
Path curPath = t.getValue();
if (curDepth <= depth) {
int newDepth = curDepth + 1;
// Create directories.
for (int i = 0; i < entryCnt; i++) {
Path subDir = new Path(curPath, "dir-" + newDepth + "-" + i);
assert fs.exists(subDir) : "Expected directory doesn't exist: " + subDir;
queue.add(F.t(newDepth, subDir));
}
}
}
}
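All of the tests above create their pairs through the F.t(...) shortcut and read them back either as get1()/get2() or as the Map.Entry-style getKey()/getValue(). Below is a tiny standalone sketch of just that tuple API, with the direct constructor shown for comparison; the BiTupleBasics class and its sample values are made up for illustration.

import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.lang.IgniteBiTuple;

public class BiTupleBasics {
    public static void main(String[] args) {
        // F.t(...) is the factory shorthand used throughout the tests above.
        IgniteBiTuple<Integer, String> viaFactory = F.t(1, "dir-1-0");

        // The same pair can also be built with the public constructor.
        IgniteBiTuple<Integer, String> viaCtor = new IgniteBiTuple<>(1, "dir-1-0");

        // get1()/get2() and getKey()/getValue() read the same two fields.
        System.out.println(viaFactory.get1() + " -> " + viaFactory.get2());
        System.out.println(viaCtor.getKey() + " -> " + viaCtor.getValue());
    }
}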
Use of org.apache.ignite.lang.IgniteBiTuple in the Apache Ignite project:
class HadoopExternalTaskExecutor, method startProcess.
/**
* Starts a process template that will be ready to execute Hadoop tasks.
*
* @param job Job instance.
* @param plan Map reduce plan.
* @return Started process.
*/
private HadoopProcess startProcess(final HadoopJobEx job, final HadoopMapReducePlan plan) {
final UUID childProcId = UUID.randomUUID();
HadoopJobId jobId = job.id();
final HadoopProcessFuture fut = new HadoopProcessFuture(childProcId, jobId);
final HadoopProcess proc = new HadoopProcess(jobId, fut, plan.reducers(ctx.localNodeId()));
HadoopProcess old = runningProcsByJobId.put(jobId, proc);
assert old == null;
old = runningProcsByProcId.put(childProcId, proc);
assert old == null;
ctx.kernalContext().closure().runLocalSafe(new Runnable() {
@Override
public void run() {
if (!busyLock.tryReadLock()) {
fut.onDone(new IgniteCheckedException("Failed to start external process (grid is stopping)."));
return;
}
try {
HadoopExternalTaskMetadata startMeta = buildTaskMeta();
if (log.isDebugEnabled())
log.debug("Created hadoop child process metadata for job [job=" + job + ", childProcId=" + childProcId + ", taskMeta=" + startMeta + ']');
Process proc = startJavaProcess(childProcId, startMeta, job, ctx.kernalContext().config().getWorkDirectory());
BufferedReader rdr = new BufferedReader(new InputStreamReader(proc.getInputStream()));
String line;
// Read all of the process output.
while ((line = rdr.readLine()) != null) {
if (log.isDebugEnabled())
log.debug("Tracing process output: " + line);
if ("Started".equals(line)) {
// Process started successfully; it should not write anything more to the output stream.
if (log.isDebugEnabled())
log.debug("Successfully started child process [childProcId=" + childProcId + ", meta=" + job + ']');
fut.onProcessStarted(proc);
break;
} else if ("Failed".equals(line)) {
StringBuilder sb = new StringBuilder("Failed to start child process: " + job + "\n");
while ((line = rdr.readLine()) != null)
    sb.append(" ").append(line).append("\n");
// Cut the trailing newline.
sb.setLength(sb.length() - 1);
log.warning(sb.toString());
fut.onDone(new IgniteCheckedException(sb.toString()));
break;
}
}
} catch (Throwable e) {
fut.onDone(new IgniteCheckedException("Failed to initialize child process: " + job, e));
if (e instanceof Error)
throw (Error) e;
} finally {
busyLock.readUnlock();
}
}
}, true);
fut.listen(new CI1<IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>>>() {
@Override
public void apply(IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>> f) {
try {
// Make sure there were no exceptions.
f.get();
prepareForJob(proc, job, plan);
} catch (IgniteCheckedException ignore) {
// Exception is printed in future's onDone() method.
}
}
});
return proc;
}
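Here IgniteBiTuple appears in the future's type: HadoopProcessFuture is typed as IgniteBiTuple<Process, HadoopProcessDescriptor>, and the listener only checks that it completed without error. The readiness handshake itself, reading the child's stdout until a "Started" or "Failed" line appears, can be sketched with plain JDK types as below; the ProcessHandshake class and the sh -c stand-in child are assumptions, not part of Ignite.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.concurrent.CompletableFuture;

public class ProcessHandshake {
    /** Starts the child and completes the future once it prints "Started", or fails it on "Failed". */
    static CompletableFuture<Process> start(ProcessBuilder builder) {
        CompletableFuture<Process> fut = new CompletableFuture<>();
        new Thread(() -> {
            try {
                Process proc = builder.redirectErrorStream(true).start();
                try (BufferedReader rdr = new BufferedReader(new InputStreamReader(proc.getInputStream()))) {
                    String line;
                    while ((line = rdr.readLine()) != null) {
                        if ("Started".equals(line)) {
                            // The child should not write anything more once it has reported readiness.
                            fut.complete(proc);
                            return;
                        }
                        else if ("Failed".equals(line)) {
                            fut.completeExceptionally(new IOException("Child process failed to start."));
                            return;
                        }
                    }
                    fut.completeExceptionally(new IOException("Child process exited before handshake."));
                }
            }
            catch (IOException e) {
                fut.completeExceptionally(e);
            }
        }).start();
        return fut;
    }

    public static void main(String[] args) throws Exception {
        // A stand-in child that simply prints the handshake line (assumes a POSIX shell is available).
        Process proc = start(new ProcessBuilder("sh", "-c", "echo Started; sleep 1")).get();
        System.out.println("Child started, alive=" + proc.isAlive());
    }
}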