Use of org.apache.ignite.cache.CacheMode.PARTITIONED in project ignite by apache.
The class OpenCensusSqlNativeTracingTest, method testDistributedJoin.
/**
 * Tests tracing of a distributed join query, including all communication between the reducer
 * and map nodes as well as index range requests.
 *
 * @throws Exception If failed.
 */
@Test
public void testDistributedJoin() throws Exception {
    String prsnTable = createTableAndPopulate(Person.class, PARTITIONED, 1);
    String orgTable = createTableAndPopulate(Organization.class, PARTITIONED, 1);

    SpanId rootSpan = executeAndCheckRootSpan(
        "SELECT * FROM " + prsnTable + " AS p JOIN " + orgTable + " AS o ON o.orgId = p.prsnId",
        TEST_SCHEMA, false, true, true);

    // The query ID has the form <originating node ID>_<local query counter>.
    String qryId = getAttribute(rootSpan, SQL_QRY_ID);

    assertTrue(Long.parseLong(qryId.substring(qryId.indexOf('_') + 1)) > 0);

    UUID.fromString(qryId.substring(0, qryId.indexOf('_')));

    checkChildSpan(SQL_QRY_PARSE, rootSpan);
    checkChildSpan(SQL_CURSOR_OPEN, rootSpan);

    SpanId iterSpan = checkChildSpan(SQL_ITER_OPEN, rootSpan);

    // Each of the GRID_CNT map nodes must receive a query execution request.
    List<SpanId> execReqSpans = checkSpan(SQL_QRY_EXEC_REQ, iterSpan, GRID_CNT, null);

    int idxRangeReqRows = 0;
    int preparedRows = 0;
    int fetchedRows = 0;

    for (int i = 0; i < GRID_CNT; i++) {
        SpanId execReqSpan = execReqSpans.get(i);

        Ignite ignite = Ignition.ignite(UUID.fromString(getAttribute(execReqSpan, NODE_ID)));

        SpanId partsReserveSpan = checkChildSpan(SQL_PARTITIONS_RESERVE, execReqSpan);

        List<String> partsReserveLogs = handler().spanById(partsReserveSpan)
            .getAnnotations().getEvents().stream()
            .map(e -> e.getEvent().getDescription())
            .collect(Collectors.toList());

        assertEquals(2, partsReserveLogs.size());

        // Partitions logged as reserved on a map node must match its primary partitions.
        Pattern ptrn = compile("Cache partitions were reserved \\[cache=(.+), partitions=\\[(.+)], topology=(.+)]");

        partsReserveLogs.forEach(l -> {
            Matcher matcher = ptrn.matcher(l);

            assertTrue(matcher.matches());

            Set<Integer> expParts = Arrays.stream(
                ignite.affinity(matcher.group(1)).primaryPartitions(ignite.cluster().localNode()))
                .boxed().collect(Collectors.toSet());

            Set<Integer> parts = Arrays.stream(matcher.group(2).split(","))
                .map(s -> parseInt(s.trim()))
                .collect(Collectors.toSet());

            assertEquals(expParts, parts);
        });

        SpanId execSpan = checkChildSpan(SQL_QRY_EXECUTE, execReqSpan);

        List<SpanId> distrLookupReqSpans = findChildSpans(SQL_IDX_RANGE_REQ, execSpan);

        for (SpanId span : distrLookupReqSpans) {
            idxRangeReqRows += parseInt(getAttribute(span, SQL_IDX_RANGE_ROWS));

            checkChildSpan(SQL_IDX_RANGE_RESP, span);
        }

        preparedRows += parseInt(getAttribute(checkChildSpan(SQL_PAGE_PREPARE, execReqSpan), SQL_PAGE_ROWS));

        checkChildSpan(SQL_PAGE_RESP, execReqSpan);
    }

    // The first page is fetched while the iterator is being opened.
    SpanId pageFetchSpan = checkChildSpan(SQL_PAGE_FETCH, iterSpan);

    fetchedRows += parseInt(getAttribute(pageFetchSpan, SQL_PAGE_ROWS));

    checkChildSpan(SQL_PAGE_WAIT, pageFetchSpan);

    SpanId nextPageSpan = checkChildSpan(SQL_NEXT_PAGE_REQ, pageFetchSpan);

    preparedRows += parseInt(getAttribute(checkChildSpan(SQL_PAGE_PREPARE, nextPageSpan), SQL_PAGE_ROWS));

    checkChildSpan(SQL_PAGE_RESP, nextPageSpan);

    // Remaining pages are fetched as the cursor is iterated.
    List<SpanId> pageFetchSpans = findChildSpans(SQL_PAGE_FETCH, rootSpan);

    for (SpanId span : pageFetchSpans) {
        fetchedRows += parseInt(getAttribute(span, SQL_PAGE_ROWS));

        checkChildSpan(SQL_PAGE_WAIT, span);

        List<SpanId> nextPageSpans = findChildSpans(SQL_NEXT_PAGE_REQ, span);

        if (!nextPageSpans.isEmpty()) {
            assertEquals(1, nextPageSpans.size());

            SpanId nextPage = nextPageSpans.get(0);

            preparedRows += parseInt(getAttribute(checkChildSpan(SQL_PAGE_PREPARE, nextPage), SQL_PAGE_ROWS));

            checkChildSpan(SQL_PAGE_RESP, nextPage);
        }
    }

    // Every row of the test table must be accounted for by the prepare, fetch
    // and index range statistics.
    assertEquals(TEST_TABLE_POPULATION, fetchedRows);
    assertEquals(TEST_TABLE_POPULATION, preparedRows);
    assertEquals(TEST_TABLE_POPULATION, idxRangeReqRows);

    checkSpan(SQL_QRY_CANCEL_REQ, rootSpan, mapNodesCount(), null);

    assertFalse(findChildSpans(SQL_CURSOR_CLOSE, rootSpan).isEmpty());
}
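None of the spans above are recorded unless tracing is enabled on the nodes first. As context, here is a minimal sketch of switching on SQL tracing through the public API (OpenCensusTracingSpi plus the tracing configuration manager, available in Ignite 2.9/2.10 and later); the class name SqlTracingBootstrap is invented for this illustration, and the actual setup in OpenCensusSqlNativeTracingTest may differ.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.spi.tracing.Scope;
import org.apache.ignite.spi.tracing.TracingConfigurationCoordinates;
import org.apache.ignite.spi.tracing.TracingConfigurationParameters;
import org.apache.ignite.spi.tracing.opencensus.OpenCensusTracingSpi;

public class SqlTracingBootstrap {
    public static void main(String[] args) {
        // Plug the OpenCensus-backed tracing SPI into the node configuration.
        IgniteConfiguration cfg = new IgniteConfiguration()
            .setTracingSpi(new OpenCensusTracingSpi());

        try (Ignite ignite = Ignition.start(cfg)) {
            // Sample every SQL query so that spans such as SQL_QRY_PARSE,
            // SQL_ITER_OPEN and SQL_PAGE_FETCH are always recorded.
            ignite.tracingConfiguration().set(
                new TracingConfigurationCoordinates.Builder(Scope.SQL).build(),
                new TracingConfigurationParameters.Builder()
                    .withSamplingRate(TracingConfigurationParameters.SAMPLING_RATE_ALWAYS)
                    .build());
        }
    }
}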
Use of org.apache.ignite.cache.CacheMode.PARTITIONED in project ignite by apache.
The class BinaryMetadataRegistrationInsideEntryProcessorTest, method testContinuousQueryAndBinaryObjectBuilder.
/**
 * Continuously executes multiple EntryProcessors while continuous queries run in parallel.
 * This used to lead to several deadlocks.
 *
 * @throws Exception If failed.
 */
@Test
public void testContinuousQueryAndBinaryObjectBuilder() throws Exception {
    startGrids(3).cluster().active(true);

    grid(0).createCache(new CacheConfiguration<>()
        .setName(CACHE_NAME)
        .setAtomicityMode(ATOMIC)
        .setBackups(2)
        .setCacheMode(PARTITIONED)
        .setWriteSynchronizationMode(FULL_SYNC)
        .setPartitionLossPolicy(READ_WRITE_SAFE));

    IgniteEx client1 = startClientGrid(getConfiguration().setIgniteInstanceName("client1"));
    IgniteEx client2 = startClientGrid(getConfiguration().setIgniteInstanceName("client2"));

    AtomicBoolean stop = new AtomicBoolean();
    AtomicInteger keyCntr = new AtomicInteger();
    AtomicInteger binaryTypeCntr = new AtomicInteger();

    /** */
    class MyEntryProcessor implements CacheEntryProcessor<Object, Object, Object> {
        /** Cached int value retrieved from the {@code binaryTypeCntr} variable. */
        private int i;

        /** */
        public MyEntryProcessor(int i) {
            this.i = i;
        }

        /** */
        @IgniteInstanceResource
        Ignite ignite;

        /** {@inheritDoc} */
        @Override public Object process(MutableEntry<Object, Object> entry, Object... arguments) throws EntryProcessorException {
            // Each invocation adds a field that "my_type" has never had before,
            // forcing a new binary metadata registration from inside the processor.
            BinaryObjectBuilder builder = ignite.binary().builder("my_type");

            builder.setField("new_field" + i, i);

            entry.setValue(builder.build());

            return null;
        }
    }

    // Writer threads: put a key, then mutate it via the entry processor.
    IgniteInternalFuture fut1 = GridTestUtils.runMultiThreadedAsync(() -> {
        IgniteCache<Object, Object> cache = client1.cache(CACHE_NAME).withKeepBinary();

        while (!stop.get()) {
            Integer key = keyCntr.getAndIncrement();

            cache.put(key, key);
            cache.invoke(key, new MyEntryProcessor(binaryTypeCntr.get()));

            binaryTypeCntr.incrementAndGet();
        }
    }, 8, "writer-thread");

    // Reader thread: repeatedly open and close continuous queries with an initial scan.
    IgniteInternalFuture fut2 = GridTestUtils.runAsync(() -> {
        IgniteCache<Object, Object> cache = client2.cache(CACHE_NAME).withKeepBinary();

        while (!stop.get()) {
            ContinuousQuery<Object, Object> qry = new ContinuousQuery<>();

            qry.setInitialQuery(new ScanQuery<>((key, val) -> true));
            qry.setLocalListener(evts -> {
                // No-op.
            });

            //noinspection EmptyTryBlock
            try (QueryCursor<Cache.Entry<Object, Object>> cursor = cache.query(qry)) {
                // No-op.
            }
        }
    });

    doSleep(10_000);

    stop.set(true);

    fut1.get(10, TimeUnit.SECONDS);
    fut2.get(10, TimeUnit.SECONDS);
}
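The deadlock-prone ingredient here is that builder.build() may have to register new binary metadata cluster-wide while the entry is still locked inside the processor. Below is a stripped-down sketch of that same pattern outside the test harness; it assumes a plain single-node start, and the names AddFieldProcessor and metaCache are invented for the illustration.

import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.MutableEntry;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.binary.BinaryObjectBuilder;
import org.apache.ignite.cache.CacheEntryProcessor;
import org.apache.ignite.resources.IgniteInstanceResource;

public class MetadataInsideProcessorExample {
    /** Gives "my_type" a never-before-seen field on each call, forcing a metadata update. */
    static class AddFieldProcessor implements CacheEntryProcessor<Integer, Object, Object> {
        private final int n;

        /** Node instance auto-injected by Ignite, as in the test's MyEntryProcessor. */
        @IgniteInstanceResource
        private transient Ignite ignite;

        AddFieldProcessor(int n) {
            this.n = n;
        }

        @Override public Object process(MutableEntry<Integer, Object> entry, Object... args)
            throws EntryProcessorException {
            BinaryObjectBuilder builder = ignite.binary().builder("my_type");

            // A fresh field name per invocation triggers binary metadata
            // registration from inside the cache operation.
            builder.setField("field_" + n, n);

            entry.setValue(builder.build());

            return null;
        }
    }

    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, Object> cache =
                ignite.<Integer, Object>getOrCreateCache("metaCache").withKeepBinary();

            for (int i = 0; i < 10; i++) {
                cache.put(i, i);
                cache.invoke(i, new AddFieldProcessor(i));
            }
        }
    }
}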
Use of org.apache.ignite.cache.CacheMode.PARTITIONED in project ignite by apache.
The class CacheBaselineTopologyTest, method testBaselineTopologyChanges.
/**
 * @throws Exception If failed.
 */
private void testBaselineTopologyChanges(boolean fromClient) throws Exception {
    startGrids(NODE_COUNT);

    IgniteEx ignite;

    if (fromClient)
        ignite = startClientGrid(NODE_COUNT + 10);
    else
        ignite = grid(0);

    ignite.cluster().baselineAutoAdjustEnabled(false);
    ignite.cluster().active(true);

    awaitPartitionMapExchange();

    Map<ClusterNode, Ignite> nodes = new HashMap<>();

    for (int i = 0; i < NODE_COUNT; i++) {
        Ignite ig = grid(i);

        nodes.put(ig.cluster().localNode(), ig);
    }

    ignite.createCache(new CacheConfiguration<Integer, Integer>()
        .setName(CACHE_NAME)
        .setCacheMode(PARTITIONED)
        .setBackups(1)
        .setPartitionLossPolicy(READ_ONLY_SAFE));

    manualCacheRebalancing(ignite, CACHE_NAME);

    // Find a key for which the local node is neither primary nor backup.
    int key = -1;

    for (int k = 0; k < 100_000; k++) {
        if (!ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(k).contains(ignite.localNode())) {
            key = k;

            break;
        }
    }

    assert key >= 0;

    Collection<ClusterNode> initialMapping = ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);

    assert initialMapping.size() == 2 : initialMapping;

    ignite.cluster().setBaselineTopology(baselineNodes(nodes.keySet()));

    Set<String> stoppedNodeNames = new HashSet<>();

    ClusterNode node = initialMapping.iterator().next();

    stoppedNodeNames.add(nodes.get(node).name());

    nodes.get(node).close();
    nodes.remove(node);

    awaitPartitionMapExchange();

    // The stopped node is still in the baseline, so its partitions are not reassigned.
    Collection<ClusterNode> mapping = ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);

    assert mapping.size() == 1 : mapping;
    assert initialMapping.containsAll(mapping);

    // Shrink the baseline to the surviving server nodes.
    Set<ClusterNode> blt2 = new HashSet<>(ignite.cluster().nodes());

    ignite.cluster().setBaselineTopology(baselineNodes(blt2.stream().filter(n -> !n.isClient()).collect(Collectors.toSet())));

    awaitPartitionMapExchange();

    Collection<ClusterNode> initialMapping2 = ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);

    assert initialMapping2.size() == 2 : initialMapping2;

    Ignite newIgnite = startGrid(NODE_COUNT);

    awaitPartitionMapExchange();

    mapping = ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);

    assert mapping.size() == initialMapping2.size() : mapping;
    assert mapping.containsAll(initialMapping2);

    // A node outside the baseline owns no primary partitions.
    assert ignite.affinity(CACHE_NAME).primaryPartitions(newIgnite.cluster().localNode()).length == 0;

    Set<ClusterNode> blt3 = new HashSet<>(ignite.cluster().nodes());

    ignite.cluster().setBaselineTopology(baselineNodes(blt3.stream().filter(n -> !n.isClient()).collect(Collectors.toSet())));

    awaitPartitionMapExchange();

    Collection<ClusterNode> initialMapping3 = ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);

    assert initialMapping3.size() == 2;

    // Once included in the baseline, the new node receives primary partitions.
    assert ignite.affinity(CACHE_NAME).primaryPartitions(newIgnite.cluster().localNode()).length > 0;

    newIgnite = startGrid(NODE_COUNT + 1);

    awaitPartitionMapExchange();

    mapping = ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);

    assert mapping.size() == initialMapping3.size() : mapping;
    assert mapping.containsAll(initialMapping3);

    assert ignite.affinity(CACHE_NAME).primaryPartitions(newIgnite.cluster().localNode()).length == 0;

    ignite.cluster().setBaselineTopology(null);

    awaitPartitionMapExchange();

    // After the baseline is dropped, the node outside the old baseline starts
    // owning primary partitions.
    assert ignite.affinity(CACHE_NAME).primaryPartitions(newIgnite.cluster().localNode()).length > 0;
}
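Helpers such as baselineNodes(...), manualCacheRebalancing(...) and grid(...) belong to the test framework, but the baseline changes themselves go through the public IgniteCluster API. Below is a minimal sketch of the same calls outside a test; the class name BaselineControl is invented for the example and persistence configuration is omitted.

import java.util.ArrayList;
import java.util.Collection;
import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cluster.BaselineNode;

public class BaselineControl {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Make baseline changes explicit, as the test does before activation.
            ignite.cluster().baselineAutoAdjustEnabled(false);
            ignite.cluster().active(true);

            // Re-pin the baseline to its current node set (a no-op here), or pass
            // any other collection of BaselineNode to change it.
            Collection<BaselineNode> blt = new ArrayList<>(ignite.cluster().currentBaselineTopology());
            ignite.cluster().setBaselineTopology(blt);

            // Alternatively, set the baseline from the current topology version.
            ignite.cluster().setBaselineTopology(ignite.cluster().topologyVersion());
        }
    }
}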