Use of org.apache.ignite.internal.processors.hadoop.HadoopJobInfo in project ignite by apache.
From the class HadoopConcurrentHashMultimapSelftest, method testMapSimple.
/**
 * @throws Exception If failed.
 */
public void testMapSimple() throws Exception {
    GridUnsafeMemory mem = new GridUnsafeMemory(0);

    // mem.listen(new GridOffHeapEventListener() {
    //     @Override public void onEvent(GridOffHeapEvent evt) {
    //         if (evt == GridOffHeapEvent.ALLOCATE)
    //             U.dumpStack();
    //     }
    // });

    Random rnd = new Random();

    int mapSize = 16 << rnd.nextInt(3);

    HadoopJobInfo job = new JobInfo();

    HadoopTaskContext taskCtx = new TaskContext();

    HadoopConcurrentHashMultimap m = new HadoopConcurrentHashMultimap(job, mem, mapSize);

    HadoopConcurrentHashMultimap.Adder a = m.startAdding(taskCtx);

    // Reference multimaps: 'mm' mirrors every pair written off-heap, 'vis' tracks visited entries.
    Multimap<Integer, Integer> mm = ArrayListMultimap.create();
    Multimap<Integer, Integer> vis = ArrayListMultimap.create();

    for (int i = 0, vals = 4 * mapSize + rnd.nextInt(25); i < vals; i++) {
        int key = rnd.nextInt(mapSize);
        int val = rnd.nextInt();

        a.write(new IntWritable(key), new IntWritable(val));
        mm.put(key, val);

        X.println("k: " + key + " v: " + val);

        // Close the adder and verify the multimap contents after every single write.
        a.close();

        check(m, mm, vis, taskCtx);

        a = m.startAdding(taskCtx);
    }

    // a.add(new IntWritable(10), new IntWritable(2));
    // mm.put(10, 2);
    // check(m, mm);

    a.close();

    X.println("Alloc: " + mem.allocatedSize());

    m.close();

    // All off-heap memory must be released after close.
    assertEquals(0, mem.allocatedSize());
}
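The check(...) helper referenced above is not part of this snippet; only its signature can be inferred from the call site. A minimal sketch of what such a verification pass might look like, assuming it drains the multimap through the same HadoopTaskInput API used by the multithreaded test below (the body, and the ignored 'vis' parameter, are assumptions, not the project's actual code):

/** Hypothetical sketch of the verification helper; not the project's actual implementation. */
private void check(HadoopMultimap m, Multimap<Integer, Integer> mm,
    Multimap<Integer, Integer> vis, HadoopTaskContext taskCtx) throws Exception {
    // Work on a copy so the caller's reference multimap stays intact
    // ('vis' appears in the real signature, but its role is not visible from this snippet).
    Multimap<Integer, Integer> expected = ArrayListMultimap.create(mm);

    HadoopTaskInput in = m.input(taskCtx);

    while (in.next()) {
        int key = ((IntWritable)in.key()).get();

        Iterator<?> valsIter = in.values();

        // Every value coming back must match a previously recorded (key, value) pair.
        while (valsIter.hasNext())
            assertTrue(expected.remove(key, ((IntWritable)valsIter.next()).get()));
    }

    // No recorded pair may be missing from the off-heap multimap.
    assertTrue(expected.isEmpty());

    in.close();
}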
Use of org.apache.ignite.internal.processors.hadoop.HadoopJobInfo in project ignite by apache.
From the class HadoopConcurrentHashMultimapSelftest, method testMultiThreaded.
/**
 * @throws Exception if failed.
 */
public void testMultiThreaded() throws Exception {
    GridUnsafeMemory mem = new GridUnsafeMemory(0);

    X.println("___ Started");

    Random rnd = new GridRandom();

    for (int i = 0; i < 20; i++) {
        HadoopJobInfo job = new JobInfo();

        final HadoopTaskContext taskCtx = new TaskContext();

        final HadoopConcurrentHashMultimap m = new HadoopConcurrentHashMultimap(job, mem, 16);

        final ConcurrentMap<Integer, Collection<Integer>> mm = new ConcurrentHashMap<>();

        X.println("___ MT");

        multithreaded(new Callable<Object>() {
            @Override public Object call() throws Exception {
                X.println("___ TH in");

                Random rnd = new GridRandom();

                IntWritable key = new IntWritable();
                IntWritable val = new IntWritable();

                HadoopMultimap.Adder a = m.startAdding(taskCtx);

                for (int i = 0; i < 50000; i++) {
                    int k = rnd.nextInt(32000);
                    int v = rnd.nextInt();

                    key.set(k);
                    val.set(v);

                    a.write(key, val);

                    Collection<Integer> list = mm.get(k);

                    if (list == null) {
                        list = new ConcurrentLinkedQueue<>();

                        Collection<Integer> old = mm.putIfAbsent(k, list);

                        if (old != null)
                            list = old;
                    }

                    list.add(v);
                }

                a.close();

                X.println("___ TH out");

                return null;
            }
        }, 3 + rnd.nextInt(27));

        X.println("___ Check: " + m.capacity());

        assertEquals(mm.size(), m.keys());

        assertTrue(m.capacity() > 32000);

        HadoopTaskInput in = m.input(taskCtx);

        while (in.next()) {
            IntWritable key = (IntWritable)in.key();

            Iterator<?> valsIter = in.values();

            Collection<Integer> vals = mm.remove(key.get());

            assertNotNull(vals);

            while (valsIter.hasNext()) {
                IntWritable val = (IntWritable)valsIter.next();

                assertTrue(vals.remove(val.get()));
            }

            assertTrue(vals.isEmpty());
        }

        in.close();
        m.close();

        assertEquals(0, mem.allocatedSize());
    }
}
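The get / putIfAbsent sequence inside the worker is the classic pre-Java-8 idiom for atomically installing a per-key collection in a ConcurrentMap: install a fresh queue, and if another thread won the race, fall back to the one already present. On Java 8 and later the same loop body collapses into a single call; a minimal equivalent sketch:

// Equivalent to the get/putIfAbsent block above: atomically install a queue
// for 'k' on first use, then append 'v'. ConcurrentLinkedQueue is used because
// multiple writer threads may append values for the same key concurrently.
mm.computeIfAbsent(k, x -> new ConcurrentLinkedQueue<>()).add(v);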
Use of org.apache.ignite.internal.processors.hadoop.HadoopJobInfo in project ignite by apache.
From the class HadoopSkipListSelfTest, method testMapSimple.
/**
 * @throws Exception On error.
 */
public void testMapSimple() throws Exception {
    GridUnsafeMemory mem = new GridUnsafeMemory(0);

    // mem.listen(new GridOffHeapEventListener() {
    //     @Override public void onEvent(GridOffHeapEvent evt) {
    //         if (evt == GridOffHeapEvent.ALLOCATE)
    //             U.dumpStack();
    //     }
    // });

    Random rnd = new Random();

    int mapSize = 16 << rnd.nextInt(6);

    HadoopJobInfo job = new JobInfo();

    HadoopTaskContext taskCtx = new TaskContext();

    HadoopMultimap m = new HadoopSkipList(job, mem);

    HadoopMultimap.Adder a = m.startAdding(taskCtx);

    Multimap<Integer, Integer> mm = ArrayListMultimap.create();
    Multimap<Integer, Integer> vis = ArrayListMultimap.create();

    for (int i = 0, vals = 4 * mapSize + rnd.nextInt(25); i < vals; i++) {
        int key = rnd.nextInt(mapSize);
        int val = rnd.nextInt();

        a.write(new IntWritable(key), new IntWritable(val));
        mm.put(key, val);

        X.println("k: " + key + " v: " + val);

        a.close();

        check(m, mm, vis, taskCtx);

        a = m.startAdding(taskCtx);
    }

    // a.add(new IntWritable(10), new IntWritable(2));
    // mm.put(10, 2);
    // check(m, mm);

    a.close();

    X.println("Alloc: " + mem.allocatedSize());

    m.close();

    assertEquals(0, mem.allocatedSize());
}
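Note the constructor difference against the hash-based test above: HadoopSkipList takes no capacity hint, since a skip list grows node by node rather than pre-allocating a bucket table. Both types are used through the common HadoopMultimap contract, so the rest of the test body is identical. Side by side:

// Hash-based multimap: sized up front with an initial bucket-table capacity.
HadoopMultimap hash = new HadoopConcurrentHashMultimap(job, mem, mapSize);

// Skip-list multimap: no capacity argument; keys additionally come back in
// ascending order (see the ordering assertion in the multithreaded test below).
HadoopMultimap sorted = new HadoopSkipList(job, mem);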
Use of org.apache.ignite.internal.processors.hadoop.HadoopJobInfo in project ignite by apache.
From the class HadoopSkipListSelfTest, method testMultiThreaded.
/**
 * @throws Exception if failed.
 */
public void testMultiThreaded() throws Exception {
    GridUnsafeMemory mem = new GridUnsafeMemory(0);

    X.println("___ Started");

    Random rnd = new GridRandom();

    for (int i = 0; i < 20; i++) {
        HadoopJobInfo job = new JobInfo();

        final HadoopTaskContext taskCtx = new TaskContext();

        final HadoopMultimap m = new HadoopSkipList(job, mem);

        final ConcurrentMap<Integer, Collection<Integer>> mm = new ConcurrentHashMap<>();

        X.println("___ MT");

        multithreaded(new Callable<Object>() {
            @Override public Object call() throws Exception {
                X.println("___ TH in");

                Random rnd = new GridRandom();

                IntWritable key = new IntWritable();
                IntWritable val = new IntWritable();

                HadoopMultimap.Adder a = m.startAdding(taskCtx);

                for (int i = 0; i < 50000; i++) {
                    int k = rnd.nextInt(32000);
                    int v = rnd.nextInt();

                    key.set(k);
                    val.set(v);

                    a.write(key, val);

                    Collection<Integer> list = mm.get(k);

                    if (list == null) {
                        list = new ConcurrentLinkedQueue<>();

                        Collection<Integer> old = mm.putIfAbsent(k, list);

                        if (old != null)
                            list = old;
                    }

                    list.add(v);
                }

                a.close();

                X.println("___ TH out");

                return null;
            }
        }, 3 + rnd.nextInt(27));

        HadoopTaskInput in = m.input(taskCtx);

        int prevKey = Integer.MIN_VALUE;

        while (in.next()) {
            IntWritable key = (IntWritable)in.key();

            assertTrue(key.get() > prevKey);

            prevKey = key.get();

            Iterator<?> valsIter = in.values();

            Collection<Integer> vals = mm.remove(key.get());

            assertNotNull(vals);

            while (valsIter.hasNext()) {
                IntWritable val = (IntWritable)valsIter.next();

                assertTrue(vals.remove(val.get()));
            }

            assertTrue(vals.isEmpty());
        }

        in.close();
        m.close();

        assertEquals(0, mem.allocatedSize());
    }
}
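The only assertion this test adds over the hash-multimap version is the prevKey check: a skip list must return its keys in ascending order. A hypothetical alternative (an illustration, not the project's code) keeps the reference data in a ConcurrentSkipListMap, whose ascending key view can be compared against the input's key stream one for one:

// Hypothetical ordering check: ConcurrentSkipListMap iterates keys in ascending
// order, so the input's key stream must match its key set exactly.
ConcurrentNavigableMap<Integer, Collection<Integer>> ref = new ConcurrentSkipListMap<>(mm);

HadoopTaskInput in = m.input(taskCtx);

Iterator<Integer> expKeys = ref.keySet().iterator();

while (in.next()) {
    assertEquals(expKeys.next().intValue(), ((IntWritable)in.key()).get());

    // Drain this key's values before advancing to the next key.
    Iterator<?> valsIter = in.values();

    while (valsIter.hasNext())
        valsIter.next();
}

// All reference keys must have been produced.
assertFalse(expKeys.hasNext());

in.close();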
Use of org.apache.ignite.internal.processors.hadoop.HadoopJobInfo in project ignite by apache.
From the class HadoopFileSystemCounterWriterDelegateImpl, method write.
/**
 * {@inheritDoc}
 */
public void write(HadoopJobEx job, HadoopCounters cntrs) throws IgniteCheckedException {
    Configuration hadoopCfg = HadoopUtils.safeCreateConfiguration();

    final HadoopJobInfo jobInfo = job.info();
    final HadoopJobId jobId = job.id();

    for (Map.Entry<String, String> e : ((HadoopDefaultJobInfo)jobInfo).properties().entrySet())
        hadoopCfg.set(e.getKey(), e.getValue());

    String user = jobInfo.user();

    user = IgfsUtils.fixUserName(user);

    String dir = jobInfo.property(IgniteHadoopFileSystemCounterWriter.COUNTER_WRITER_DIR_PROPERTY);

    if (dir == null)
        dir = DEFAULT_COUNTER_WRITER_DIR;

    Path jobStatPath = new Path(new Path(dir.replace(USER_MACRO, user)), jobId.toString());

    HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(cntrs, null);

    try {
        hadoopCfg.set(MRJobConfig.USER_NAME, user);

        FileSystem fs = ((HadoopV2Job)job).fileSystem(jobStatPath.toUri(), hadoopCfg);

        fs.mkdirs(jobStatPath);

        try (PrintStream out = new PrintStream(fs.create(
            new Path(jobStatPath, IgniteHadoopFileSystemCounterWriter.PERFORMANCE_COUNTER_FILE_NAME)))) {
            for (T2<String, Long> evt : perfCntr.evts()) {
                out.print(evt.get1());
                out.print(':');
                out.println(evt.get2().toString());
            }

            out.flush();
        }
    }
    catch (IOException e) {
        throw new IgniteCheckedException(e);
    }
}
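The file produced above holds one performance-counter event per line in name:value form (evt.get1(), a ':' separator, then evt.get2()). A minimal sketch of a reader for that format, reusing the fs and jobStatPath names from the method above; the parsing logic is an assumption based only on the format just written, not an Ignite API:

// Hypothetical reader for the performance counter file written above.
// Each line has the form "<event name>:<numeric value>".
try (BufferedReader rdr = new BufferedReader(new InputStreamReader(fs.open(
    new Path(jobStatPath, IgniteHadoopFileSystemCounterWriter.PERFORMANCE_COUNTER_FILE_NAME))))) {
    for (String line; (line = rdr.readLine()) != null; ) {
        // Split on the last ':' in case an event name itself contains a colon.
        int idx = line.lastIndexOf(':');

        String name = line.substring(0, idx);
        long val = Long.parseLong(line.substring(idx + 1));

        X.println(name + " = " + val);
    }
}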