
Example 1 with GridDataInput

Use of org.apache.ignite.internal.util.io.GridDataInput in the Apache Ignite project.

The class HadoopConcurrentHashMultimapSelftest, method check().

private void check(HadoopConcurrentHashMultimap m, Multimap<Integer, Integer> mm, final Multimap<Integer, Integer> vis, HadoopTaskContext taskCtx) throws Exception {
    final HadoopTaskInput in = m.input(taskCtx);
    Map<Integer, Collection<Integer>> mmm = mm.asMap();
    int keys = 0;
    while (in.next()) {
        keys++;
        IntWritable k = (IntWritable) in.key();
        assertNotNull(k);
        Deque<Integer> vs = new LinkedList<>();
        Iterator<?> it = in.values();
        while (it.hasNext()) vs.addFirst(((IntWritable) it.next()).get());
        Collection<Integer> exp = mmm.get(k.get());
        assertEquals(exp, vs);
    }
    assertEquals(mmm.size(), keys);
    assertEquals(m.keys(), keys);
    X.println("keys: " + keys + " cap: " + m.capacity());
    // Check visitor.
    final byte[] buf = new byte[4];
    final GridDataInput dataInput = new GridUnsafeDataInput();
    m.visit(false, new HadoopConcurrentHashMultimap.Visitor() {

        /** Reusable key holder. */
        IntWritable key = new IntWritable();

        /** Reusable value holder. */
        IntWritable val = new IntWritable();

        @Override
        public void onKey(long keyPtr, int keySize) {
            read(keyPtr, keySize, key);
        }

        @Override
        public void onValue(long valPtr, int valSize) {
            read(valPtr, valSize, val);
            vis.put(key.get(), val.get());
        }

        /** Copies {@code size} bytes from off-heap memory into the shared heap buffer and deserializes them into {@code w}. */
        private void read(long ptr, int size, Writable w) {
            assert size == 4 : size;
            GridUnsafe.copyOffheapHeap(ptr, buf, GridUnsafe.BYTE_ARR_OFF, size);
            dataInput.bytes(buf, size);
            try {
                w.readFields(dataInput);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    });
    // X.println("vis: " + vis);
    assertEquals(mm, vis);
    in.close();
}
Also used: HadoopTaskInput (org.apache.ignite.internal.processors.hadoop.HadoopTaskInput), Writable (org.apache.hadoop.io.Writable), IntWritable (org.apache.hadoop.io.IntWritable), IOException (java.io.IOException), LinkedList (java.util.LinkedList), GridUnsafeDataInput (org.apache.ignite.internal.util.io.GridUnsafeDataInput), Collection (java.util.Collection), GridDataInput (org.apache.ignite.internal.util.io.GridDataInput), HadoopConcurrentHashMultimap (org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopConcurrentHashMultimap)
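Both examples share the same two-step deserialization pattern: copy the raw bytes into a heap buffer, then point a reusable GridUnsafeDataInput at that buffer and let the Hadoop Writable deserialize itself. The sketch below isolates that round trip. It is a minimal illustration, not part of the tests above: the class name is made up for the sketch, and the GridUnsafeDataOutput int-capacity constructor and its array() accessor are assumptions based on the companion output API, whereas bytes(buf, size) and readFields(dataInput) are exactly the calls used in the visitors.

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.ignite.internal.util.io.GridDataInput;
import org.apache.ignite.internal.util.io.GridDataOutput;
import org.apache.ignite.internal.util.io.GridUnsafeDataInput;
import org.apache.ignite.internal.util.io.GridUnsafeDataOutput;

public class GridDataInputRoundTrip {
    public static void main(String[] args) throws IOException {
        // Write an IntWritable through the matching GridUnsafeDataOutput so the
        // byte layout agrees with what GridUnsafeDataInput expects on read.
        GridDataOutput out = new GridUnsafeDataOutput(8);

        new IntWritable(42).write(out);

        byte[] buf = out.array(); // Assumed accessor: copy of the bytes written so far.

        // bytes(buf, size) resets the input to the first `size` bytes of `buf`,
        // which is why the visitors above can reuse a single instance per record.
        GridDataInput in = new GridUnsafeDataInput();

        in.bytes(buf, buf.length);

        // The Writable deserializes itself from the DataInput.
        IntWritable w = new IntWritable();

        w.readFields(in);

        System.out.println("read: " + w.get()); // Expected output: read: 42
    }
}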

Example 2 with GridDataInput

Use of org.apache.ignite.internal.util.io.GridDataInput in the Apache Ignite project.

The class HadoopSkipListSelfTest, method check(). Unlike the hash multimap example above, this test also asserts that keys arrive in strictly ascending order, since the skip list iterates keys sorted.

/**
 * Checks multimap contents, first via the task input iterator and then via the visitor.
 * @param m The multimap under test.
 * @param mm The multimap storing expectations.
 * @param vis The multimap to store visitor results.
 * @param taskCtx The task context.
 * @throws Exception On error.
 */
private void check(HadoopMultimap m, Multimap<Integer, Integer> mm, final Multimap<Integer, Integer> vis, HadoopTaskContext taskCtx) throws Exception {
    final HadoopTaskInput in = m.input(taskCtx);
    Map<Integer, Collection<Integer>> mmm = mm.asMap();
    int keys = 0;
    int prevKey = Integer.MIN_VALUE;
    while (in.next()) {
        keys++;
        IntWritable k = (IntWritable) in.key();
        assertNotNull(k);
        assertTrue(k.get() > prevKey);
        prevKey = k.get();
        Deque<Integer> vs = new LinkedList<>();
        Iterator<?> it = in.values();
        while (it.hasNext()) vs.addFirst(((IntWritable) it.next()).get());
        Collection<Integer> exp = mmm.get(k.get());
        assertEquals(exp, vs);
    }
    assertEquals(mmm.size(), keys);
    // Disabled: assertEquals(m.keys(), keys);
    // Check visitor.
    final byte[] buf = new byte[4];
    final GridDataInput dataInput = new GridUnsafeDataInput();
    m.visit(false, new HadoopMultimap.Visitor() {

        /** Reusable key holder. */
        IntWritable key = new IntWritable();

        /** Reusable value holder. */
        IntWritable val = new IntWritable();

        @Override
        public void onKey(long keyPtr, int keySize) {
            read(keyPtr, keySize, key);
        }

        @Override
        public void onValue(long valPtr, int valSize) {
            read(valPtr, valSize, val);
            vis.put(key.get(), val.get());
        }

        /** Copies {@code size} bytes from off-heap memory into the shared heap buffer and deserializes them into {@code w}. */
        private void read(long ptr, int size, Writable w) {
            assert size == 4 : size;
            GridUnsafe.copyOffheapHeap(ptr, buf, GridUnsafe.BYTE_ARR_OFF, size);
            dataInput.bytes(buf, size);
            try {
                w.readFields(dataInput);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    });
    // X.println("vis: " + vis);
    assertEquals(mm, vis);
    in.close();
}
Also used: HadoopTaskInput (org.apache.ignite.internal.processors.hadoop.HadoopTaskInput), HadoopMultimap (org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopMultimap), Writable (org.apache.hadoop.io.Writable), IntWritable (org.apache.hadoop.io.IntWritable), IOException (java.io.IOException), LinkedList (java.util.LinkedList), GridUnsafeDataInput (org.apache.ignite.internal.util.io.GridUnsafeDataInput), Collection (java.util.Collection), GridDataInput (org.apache.ignite.internal.util.io.GridDataInput)
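The off-heap half of the pattern lives in the read(ptr, size, w) helper: the visitor receives only a raw pointer, so the bytes must first be copied onto the heap with GridUnsafe.copyOffheapHeap before any DataInput can touch them. Below is a minimal standalone sketch of just that copy; copyOffheapHeap and BYTE_ARR_OFF are used exactly as in the tests above, while allocateMemory, putByte, and freeMemory are assumed to behave as in current Ignite sources.

import java.util.Arrays;

import org.apache.ignite.internal.util.GridUnsafe;

public class OffheapCopySketch {
    public static void main(String[] args) {
        int size = 4;

        // Allocate a small off-heap region and fill it byte by byte.
        long ptr = GridUnsafe.allocateMemory(size);

        try {
            for (int i = 0; i < size; i++)
                GridUnsafe.putByte(ptr + i, (byte)(i + 1));

            // Copy the off-heap bytes into a heap buffer, exactly as the read()
            // helper does before wrapping the buffer in a GridUnsafeDataInput.
            byte[] buf = new byte[size];

            GridUnsafe.copyOffheapHeap(ptr, buf, GridUnsafe.BYTE_ARR_OFF, size);

            System.out.println(Arrays.toString(buf)); // Expected output: [1, 2, 3, 4]
        }
        finally {
            // Off-heap memory is manually managed and must be freed explicitly.
            GridUnsafe.freeMemory(ptr);
        }
    }
}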

Aggregations

IOException (java.io.IOException): 2
Collection (java.util.Collection): 2
LinkedList (java.util.LinkedList): 2
IntWritable (org.apache.hadoop.io.IntWritable): 2
Writable (org.apache.hadoop.io.Writable): 2
HadoopTaskInput (org.apache.ignite.internal.processors.hadoop.HadoopTaskInput): 2
GridDataInput (org.apache.ignite.internal.util.io.GridDataInput): 2
GridUnsafeDataInput (org.apache.ignite.internal.util.io.GridUnsafeDataInput): 2
HadoopConcurrentHashMultimap (org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopConcurrentHashMultimap): 1
HadoopMultimap (org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopMultimap): 1