Search in sources :

Example 66 with GridAbsPredicate

use of org.apache.ignite.internal.util.lang.GridAbsPredicate in project ignite by apache.

In the class IgniteCacheAtomicProtocolTest, the method testPutAllNearNodeFailure.

/**
 * Verifies that a putAll routed through a near (client) node completes on the
 * servers and that atomic futures are cleaned up after the client is stopped,
 * even while DHT-to-near responses are blocked.
 *
 * @throws Exception If failed.
 */
public void testPutAllNearNodeFailure() throws Exception {
    final int SRVS = 4;

    startGrids(SRVS);

    client = true;

    Ignite clientNode = startGrid(SRVS);

    final IgniteCache<Integer, Integer> clientCache = clientNode.createCache(cacheConfiguration(1, FULL_SYNC));

    awaitPartitionMapExchange();

    // Block DHT -> near responses so the client never observes update acks.
    for (int srv = 0; srv < SRVS; srv++)
        testSpi(grid(srv)).blockMessages(GridDhtAtomicNearResponse.class, clientNode.name());

    final Map<Integer, Integer> data = new HashMap<>();

    for (int key = 0; key < 100; key++)
        data.put(key, key);

    clientCache.putAllAsync(data);

    // Primaries must still apply the update despite the blocked responses.
    boolean updated = GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            IgniteCache cache = ignite(0).cache(TEST_CACHE);

            for (Integer key : data.keySet()) {
                if (cache.get(key) == null)
                    return false;
            }

            return true;
        }
    }, 5000);

    assertTrue(updated);

    // Stop the near node; servers should then release their atomic futures.
    stopGrid(SRVS);

    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            for (int srv = 0; srv < SRVS; srv++) {
                if (grid(srv).context().cache().context().mvcc().atomicFuturesCount() != 0)
                    return false;
            }

            return true;
        }
    }, 5000);

    for (int srv = 0; srv < SRVS; srv++)
        assertEquals(0, grid(srv).context().cache().context().mvcc().atomicFuturesCount());

    checkData(data);
}
Also used : HashMap(java.util.HashMap) GridAbsPredicate(org.apache.ignite.internal.util.lang.GridAbsPredicate) IgniteCache(org.apache.ignite.IgniteCache) Ignite(org.apache.ignite.Ignite)

Example 67 with GridAbsPredicate

use of org.apache.ignite.internal.util.lang.GridAbsPredicate in project ignite by apache.

In the class GridCacheNearReadersSelfTest, the method testTwoNodesTwoKeysNoBackups.

/**
 * Checks near-cache reader tracking between two nodes with no backups:
 * a node that reads a key owned by another node is registered as a "reader"
 * on the key's primary entry, and is dropped again after the entry is updated.
 *
 * @throws Exception If failed.
 */
public void testTwoNodesTwoKeysNoBackups() throws Exception {
    aff.backups(0);
    grids = 2;
    aff.partitions(grids);
    startGrids();
    // Primary nodes for the partitions of keys 1 and 2 (must be distinct).
    ClusterNode n1 = F.first(aff.nodes(aff.partition(1), grid(0).cluster().nodes()));
    final ClusterNode n2 = F.first(aff.nodes(aff.partition(2), grid(0).cluster().nodes()));
    assertNotNull(n1);
    assertNotNull(n2);
    assertNotSame(n1, n2);
    assertFalse("Nodes cannot be equal: " + n1, n1.equals(n2));
    Ignite g1 = grid(n1.id());
    Ignite g2 = grid(n2.id());
    IgniteCache<Integer, String> cache1 = g1.cache(DEFAULT_CACHE_NAME);
    IgniteCache<Integer, String> cache2 = g2.cache(DEFAULT_CACHE_NAME);
    // Store some values in cache. Both puts go through node1's cache, so
    // key 2 (primary on node2) is written remotely on node1's behalf.
    assertNull(cache1.getAndPut(1, "v1"));
    assertNull(cache1.getAndPut(2, "v2"));
    GridDhtCacheEntry e1 = (GridDhtCacheEntry) dht(cache1).entryEx(1);
    GridDhtCacheEntry e2 = (GridDhtCacheEntry) dht(cache2).entryEx(2);
    assertNotNull(e1.readers());
    assertTrue(cache1.containsKey(1));
    assertTrue(cache1.containsKey(2));
    // Key placement: node1 holds key 1 in DHT, sees key 2 only via near cache;
    // node2 holds key 2 in DHT and has no near copy of key 1 yet.
    assertNotNull(nearPeek(cache1, 1));
    assertNotNull(nearPeek(cache1, 2));
    assertNotNull(dhtPeek(cache1, 1));
    assertNull(dhtPeek(cache1, 2));
    assertNull(nearPeek(cache2, 1));
    assertNotNull(dhtPeek(cache2, 2));
    // Node2 should have node1 in reader's map, since request to
    // put key 2 came from node1.
    assertTrue(e2.readers().contains(n1.id()));
    // Re-read the entry: entryEx() may return a fresh instance.
    e1 = (GridDhtCacheEntry) dht(cache1).entryEx(1);
    // Node1 should not have node2 in readers map yet.
    assertFalse(e1.readers().contains(n2.id()));
    // Get key1 on node2.
    assertEquals("v1", cache2.get(1));
    // Check that key1 is in near cache of cache2.
    assertNotNull(nearPeek(cache2, 1));
    e1 = (GridDhtCacheEntry) dht(cache1).entryEx(1);
    // Now node1 should have node2 in readers map.
    assertTrue(e1.readers().contains(n2.id()));
    // Evict locally from cache2.
    cache2.localEvict(Collections.singleton(1));
    assertNull(nearPeek(cache2, 1));
    assertNull(dhtPeek(cache2, 1));
    // Local eviction alone does not unregister the reader:
    // node 1 still has node2 in readers map.
    assertTrue(e1.readers().contains(n2.id()));
    // Updating key 1 should eventually clear node2 from the readers.
    assertNotNull(cache1.getAndPut(1, "z1"));
    final GridDhtCacheEntry e1f = e1;
    // Wait until node2 is removed from readers; a removed entry also counts
    // as "no longer a reader".
    GridTestUtils.waitForCondition(new GridAbsPredicate() {

        @Override
        public boolean apply() {
            try {
                return !e1f.readers().contains(n2.id());
            } catch (GridCacheEntryRemovedException ignored) {
                return true;
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }, 5000);
    // After the update, node 1 must no longer list node2 as a reader.
    assertFalse(((GridDhtCacheEntry) dht(cache1).entryEx(1)).readers().contains(n2.id()));
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) GridDhtCacheEntry(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry) GridAbsPredicate(org.apache.ignite.internal.util.lang.GridAbsPredicate) Ignite(org.apache.ignite.Ignite) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException)

Example 68 with GridAbsPredicate

use of org.apache.ignite.internal.util.lang.GridAbsPredicate in project ignite by apache.

In the class IgniteCacheEntryListenerAbstractTest, the method afterTest.

/**
 * {@inheritDoc}
 *
 * Additionally waits for all pending continuous-processor sync message
 * futures to drain on every grid and verifies none are left behind.
 */
@Override
protected void afterTest() throws Exception {
    super.afterTest();

    for (int i = 0; i < gridCount(); i++) {
        GridContinuousProcessor proc = grid(i).context().continuous();

        // Private map of in-flight sync message futures, read via reflection.
        final ConcurrentMap<?, ?> syncMsgFuts = GridTestUtils.getFieldValue(proc, "syncMsgFuts");

        // Give in-flight futures up to 5 seconds to complete.
        GridTestUtils.waitForCondition(new GridAbsPredicate() {
            @Override public boolean apply() {
                return syncMsgFuts.isEmpty();
            }
        }, 5000);

        // Fail with the leftover futures in the message for easier diagnosis.
        assertTrue("Pending sync message futures: " + syncMsgFuts, syncMsgFuts.isEmpty());
    }

    serialized.set(false);
}
Also used : GridAbsPredicate(org.apache.ignite.internal.util.lang.GridAbsPredicate) GridContinuousProcessor(org.apache.ignite.internal.processors.continuous.GridContinuousProcessor)

Example 69 with GridAbsPredicate

use of org.apache.ignite.internal.util.lang.GridAbsPredicate in project ignite by apache.

In the class HadoopAbstractMapReduceTest, the method checkJobStatistics.

/**
 * Simple test of job statistics: validates phase ordering of performance
 * counter events and cross-checks the event count against the statistics
 * file written to IGFS.
 *
 * @param jobId Job id.
 * @throws IgniteCheckedException If counters could not be obtained.
 * @throws IOException If the statistics file could not be read.
 */
private void checkJobStatistics(HadoopJobId jobId) throws IgniteCheckedException, IOException {
    HadoopCounters cntrs = grid(0).hadoop().counters(jobId);
    HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(cntrs, null);
    // Task id -> (phase position -> event timestamp).
    Map<String, SortedMap<Integer, Long>> tasks = new TreeMap<>();
    // Expected chronological order of phases; combiner start is "Cstart".
    Map<String, Integer> phaseOrders = new HashMap<>();
    phaseOrders.put("submit", 0);
    phaseOrders.put("prepare", 1);
    phaseOrders.put("start", 2);
    phaseOrders.put("Cstart", 3);
    phaseOrders.put("finish", 4);
    long apiEvtCnt = 0;
    for (T2<String, Long> evt : perfCntr.evts()) {
        // We expect string pattern: COMBINE 1 run 7fa86a14-5a08-40e3-a7cb-98109b52a706
        String[] parsedEvt = evt.get1().split(" ");
        String taskId;
        String taskPhase;
        if ("JOB".equals(parsedEvt[0])) {
            taskId = parsedEvt[0];
            taskPhase = parsedEvt[1];
        } else {
            // Combiner events are accounted to the map task, phase prefixed with 'C'.
            taskId = ("COMBINE".equals(parsedEvt[0]) ? "MAP" : parsedEvt[0].substring(0, 3)) + parsedEvt[1];
            taskPhase = ("COMBINE".equals(parsedEvt[0]) ? "C" : "") + parsedEvt[2];
        }
        // Get-or-create: the original reset the map on every task switch,
        // silently discarding phases when a task's events were non-contiguous.
        SortedMap<Integer, Long> phases = tasks.get(taskId);
        if (phases == null) {
            phases = new TreeMap<>();
            tasks.put(taskId, phases);
        }
        Integer pos = phaseOrders.get(taskPhase);
        assertNotNull("Invalid phase " + taskPhase, pos);
        phases.put(pos, evt.get2());
        apiEvtCnt++;
    }
    // Timestamps must be non-decreasing in phase order for every task.
    for (Map.Entry<String, SortedMap<Integer, Long>> task : tasks.entrySet()) {
        Map<Integer, Long> order = task.getValue();
        long prev = 0;
        for (Map.Entry<Integer, Long> phase : order.entrySet()) {
            assertTrue("Phase order of " + task.getKey() + " is invalid", phase.getValue() >= prev);
            prev = phase.getValue();
        }
    }
    final IgfsPath statPath = new IgfsPath("/xxx/" + USER + "/zzz/" + jobId + "/performance");
    // Wait for the statistics file to appear in IGFS.
    assert GridTestUtils.waitForCondition(new GridAbsPredicate() {

        @Override
        public boolean apply() {
            return igfs.exists(statPath);
        }
    }, 20_000);
    final long apiEvtCnt0 = apiEvtCnt;
    // Wait until the file reports the expected number of API events.
    boolean res = GridTestUtils.waitForCondition(new GridAbsPredicate() {

        @Override
        public boolean apply() {
            try {
                try (BufferedReader reader = new BufferedReader(new InputStreamReader(igfs.open(statPath)))) {
                    return apiEvtCnt0 == HadoopTestUtils.simpleCheckJobStatFile(reader);
                }
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    }, 10000);
    if (!res) {
        // try-with-resources: the original leaked this reader on failure.
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(igfs.open(statPath)))) {
            assert false : "Invalid API events count [exp=" + apiEvtCnt0 + ", actual=" + HadoopTestUtils.simpleCheckJobStatFile(reader) + ']';
        }
    }
}
Also used : HashMap(java.util.HashMap) HadoopCounters(org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters) InputStreamReader(java.io.InputStreamReader) GridAbsPredicate(org.apache.ignite.internal.util.lang.GridAbsPredicate) HadoopPerformanceCounter(org.apache.ignite.internal.processors.hadoop.counter.HadoopPerformanceCounter) IOException(java.io.IOException) TreeMap(java.util.TreeMap) IgfsPath(org.apache.ignite.igfs.IgfsPath) SortedMap(java.util.SortedMap) BufferedReader(java.io.BufferedReader) Map(java.util.Map) SortedMap(java.util.SortedMap) HashMap(java.util.HashMap) TreeMap(java.util.TreeMap)

Example 70 with GridAbsPredicate

use of org.apache.ignite.internal.util.lang.GridAbsPredicate in project ignite by apache.

In the class HadoopClientProtocolSelfTest, the method checkJobSubmit.

/**
 * Test job submission. Drives a word-count style job through its setup, map
 * and (optionally) reduce phases, asserting that each phase's progress starts
 * below 1.0, strictly increases over time, and reaches 1.0 before the next
 * phase begins.
 *
 * @param noCombiners Whether there are no combiners.
 * @param noReducers Whether there are no reducers.
 * @throws Exception If failed.
 */
public void checkJobSubmit(boolean noCombiners, boolean noReducers) throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);
    igfs.mkdirs(new IgfsPath(PATH_INPUT));
    // Single-word input file for the job.
    try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
        bw.write("word");
    }
    Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);
    final Job job = Job.getInstance(conf);
    try {
        job.setJobName(JOB_NAME);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setMapperClass(TestMapper.class);
        job.setReducerClass(TestReducer.class);
        if (!noCombiners)
            job.setCombinerClass(TestCombiner.class);
        if (noReducers)
            job.setNumReduceTasks(0);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TestOutputFormat.class);
        FileInputFormat.setInputPaths(job, new Path(PATH_INPUT));
        FileOutputFormat.setOutputPath(job, new Path(PATH_OUTPUT));
        job.submit();
        JobID jobId = job.getJobID();
        // Setup phase.
        JobStatus jobStatus = job.getStatus();
        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
        assert jobStatus.getSetupProgress() >= 0.0f && jobStatus.getSetupProgress() < 1.0f;
        assert jobStatus.getMapProgress() == 0.0f;
        assert jobStatus.getReduceProgress() == 0.0f;
        // Let time pass so setup progress can be compared between two reads.
        U.sleep(2100);
        JobStatus recentJobStatus = job.getStatus();
        assert recentJobStatus.getSetupProgress() > jobStatus.getSetupProgress() : "Old=" + jobStatus.getSetupProgress() + ", new=" + recentJobStatus.getSetupProgress();
        // Transferring to map phase.
        // NOTE(review): the lock files appear to stall each phase until
        // deleted — confirm against where setupLockFile is created.
        setupLockFile.delete();
        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {

            @Override
            public boolean apply() {
                try {
                    return F.eq(1.0f, job.getStatus().getSetupProgress());
                } catch (Exception e) {
                    throw new RuntimeException("Unexpected exception.", e);
                }
            }
        }, 5000L);
        // Map phase.
        jobStatus = job.getStatus();
        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
        assert jobStatus.getSetupProgress() == 1.0f;
        assert jobStatus.getMapProgress() >= 0.0f && jobStatus.getMapProgress() < 1.0f;
        assert jobStatus.getReduceProgress() == 0.0f;
        // Ensure map progress increases over time.
        U.sleep(2100);
        recentJobStatus = job.getStatus();
        assert recentJobStatus.getMapProgress() > jobStatus.getMapProgress() : "Old=" + jobStatus.getMapProgress() + ", new=" + recentJobStatus.getMapProgress();
        // Transferring to reduce phase.
        mapLockFile.delete();
        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {

            @Override
            public boolean apply() {
                try {
                    return F.eq(1.0f, job.getStatus().getMapProgress());
                } catch (Exception e) {
                    throw new RuntimeException("Unexpected exception.", e);
                }
            }
        }, 5000L);
        if (!noReducers) {
            // Reduce phase.
            jobStatus = job.getStatus();
            checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
            assert jobStatus.getSetupProgress() == 1.0f;
            assert jobStatus.getMapProgress() == 1.0f;
            assert jobStatus.getReduceProgress() >= 0.0f && jobStatus.getReduceProgress() < 1.0f;
            // Ensure that reduces progress increases.
            U.sleep(2100);
            recentJobStatus = job.getStatus();
            assert recentJobStatus.getReduceProgress() > jobStatus.getReduceProgress() : "Old=" + jobStatus.getReduceProgress() + ", new=" + recentJobStatus.getReduceProgress();
            reduceLockFile.delete();
        }
        job.waitForCompletion(false);
        jobStatus = job.getStatus();
        // Final state: job succeeded with all phases at 100%.
        checkJobStatus(job.getStatus(), jobId, JOB_NAME, JobStatus.State.SUCCEEDED, 1.0f);
        assert jobStatus.getSetupProgress() == 1.0f;
        assert jobStatus.getMapProgress() == 1.0f;
        assert jobStatus.getReduceProgress() == 1.0f;
        dumpIgfs(igfs, new IgfsPath(PATH_OUTPUT));
    } finally {
        job.getCluster().close();
    }
}
Also used : IgfsPath(org.apache.ignite.igfs.IgfsPath) Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) GridAbsPredicate(org.apache.ignite.internal.util.lang.GridAbsPredicate) IgniteFileSystem(org.apache.ignite.IgniteFileSystem) IOException(java.io.IOException) BufferedWriter(java.io.BufferedWriter) IgfsPath(org.apache.ignite.igfs.IgfsPath) JobStatus(org.apache.hadoop.mapreduce.JobStatus) OutputStreamWriter(java.io.OutputStreamWriter) Job(org.apache.hadoop.mapreduce.Job) JobID(org.apache.hadoop.mapreduce.JobID)

Aggregations

GridAbsPredicate (org.apache.ignite.internal.util.lang.GridAbsPredicate)175 Ignite (org.apache.ignite.Ignite)81 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)42 ClusterNode (org.apache.ignite.cluster.ClusterNode)32 IgniteCheckedException (org.apache.ignite.IgniteCheckedException)27 IgniteException (org.apache.ignite.IgniteException)22 CountDownLatch (java.util.concurrent.CountDownLatch)20 IgniteKernal (org.apache.ignite.internal.IgniteKernal)19 ArrayList (java.util.ArrayList)18 IgniteCache (org.apache.ignite.IgniteCache)16 Map (java.util.Map)15 Transaction (org.apache.ignite.transactions.Transaction)15 IgniteEx (org.apache.ignite.internal.IgniteEx)14 Duration (javax.cache.expiry.Duration)13 CacheConfiguration (org.apache.ignite.configuration.CacheConfiguration)13 IgniteSpiException (org.apache.ignite.spi.IgniteSpiException)12 HashMap (java.util.HashMap)11 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)11 TouchedExpiryPolicy (javax.cache.expiry.TouchedExpiryPolicy)11 Event (org.apache.ignite.events.Event)11