Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache.
The class CacheScanPartitionQueryFallbackSelfTest, method remotePartition.
/**
 * @param cctx Cache context.
 * @return Remote partition.
 */
private IgniteBiTuple<Integer, UUID> remotePartition(final GridCacheContext cctx) {
    ClusterNode node = F.first(cctx.kernalContext().grid().cluster().forRemotes().nodes());

    GridCacheAffinityManager affMgr = cctx.affinity();

    AffinityTopologyVersion topVer = affMgr.affinityTopologyVersion();

    Set<Integer> parts = affMgr.primaryPartitions(node.id(), topVer);

    return new IgniteBiTuple<>(F.first(parts), node.id());
}
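IgniteBiTuple implements Map.Entry, so the pair can be read either as get1()/get2() or as getKey()/getValue(). Below is a minimal sketch of how a caller might consume the returned pair to scan exactly that partition; the helper method and its wiring are illustrative assumptions, not code from the original test.

/** Hypothetical caller: scans only the remote primary partition found above. */
private void scanRemotePartition(Ignite ignite, GridCacheContext cctx) {
    IgniteBiTuple<Integer, UUID> tup = remotePartition(cctx);

    Integer part = tup.get1(); // Primary partition id.
    UUID nodeId = tup.get2();  // Id of the remote node that owns it (available for routing assertions).

    // ScanQuery#setPartition(Integer) restricts the scan to a single partition.
    ignite.cache(cctx.name()).query(new ScanQuery<>().setPartition(part)).getAll();
}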
Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache.
The class IgniteCacheClientNodeChangingTopologyTest, method findKeys.
/**
 * Tries to find keys for two partitions: for the first partition, the assignment should not change
 * after the node join; for the second, the primary node should change.
 *
 * @param ignite Ignite.
 * @param nodes Current nodes.
 * @return Found keys.
 */
private IgniteBiTuple<Integer, Integer> findKeys(Ignite ignite, ClusterNode... nodes) {
    ClusterNode newNode = new TcpDiscoveryNode();

    GridTestUtils.setFieldValue(newNode, "consistentId", getTestIgniteInstanceName(4));
    GridTestUtils.setFieldValue(newNode, "id", UUID.randomUUID());

    List<ClusterNode> topNodes = new ArrayList<>();

    Collections.addAll(topNodes, nodes);

    topNodes.add(newNode);

    DiscoveryEvent discoEvt = new DiscoveryEvent(newNode, "", EventType.EVT_NODE_JOINED, newNode);

    final long topVer = ignite.cluster().topologyVersion();

    GridAffinityFunctionContextImpl ctx = new GridAffinityFunctionContextImpl(topNodes, null, discoEvt,
        new AffinityTopologyVersion(topVer + 1), 1);

    AffinityFunction affFunc = ignite.cache(DEFAULT_CACHE_NAME).getConfiguration(CacheConfiguration.class).getAffinity();

    List<List<ClusterNode>> newAff = affFunc.assignPartitions(ctx);

    List<List<ClusterNode>> curAff = ((IgniteKernal)ignite).context().cache().internalCache(DEFAULT_CACHE_NAME)
        .context().affinity().assignments(new AffinityTopologyVersion(topVer));

    Integer key1 = null;
    Integer key2 = null;

    Affinity<Integer> aff = ignite.affinity(DEFAULT_CACHE_NAME);

    for (int i = 0; i < curAff.size(); i++) {
        if (key1 == null) {
            List<ClusterNode> oldNodes = curAff.get(i);
            List<ClusterNode> newNodes = newAff.get(i);

            if (oldNodes.equals(newNodes))
                key1 = findKey(aff, i);
        }

        if (key2 == null) {
            ClusterNode oldPrimary = F.first(curAff.get(i));
            ClusterNode newPrimary = F.first(newAff.get(i));

            if (!oldPrimary.equals(newPrimary))
                key2 = findKey(aff, i);
        }

        if (key1 != null && key2 != null)
            break;
    }

    if (key1 == null || key2 == null)
        fail("Failed to find keys required for test.");

    return new IgniteBiTuple<>(key1, key2);
}
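A sketch of how the two keys might be exercised by a calling test: updates to the first key are never remapped while the new node joins, while updates to the second key cross a primary change. The method below is an illustrative assumption, not code from the original test.

/** Hypothetical usage: write to both a stable and a moving partition during a join. */
private void putBothKeys(Ignite ignite, ClusterNode... nodes) {
    IgniteBiTuple<Integer, Integer> keys = findKeys(ignite, nodes);

    Integer stableKey = keys.get1(); // Partition assignment unchanged after the join.
    Integer movingKey = keys.get2(); // Partition whose primary node changes.

    IgniteCache<Integer, Integer> cache = ignite.cache(DEFAULT_CACHE_NAME);

    cache.put(stableKey, 1);
    cache.put(movingKey, 2);
}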
Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache.
The class DmlStatementsProcessor, method doInsert.
/**
 * Execute INSERT statement plan.
 * @param plan Update plan.
 * @param cursor Cursor to take inserted data from.
 * @param pageSize Batch size for streaming, anything <= 0 for single page operations.
 * @return Number of items affected.
 * @throws IgniteCheckedException if failed, particularly in case of duplicate keys.
 */
@SuppressWarnings({"unchecked", "ConstantConditions"})
private long doInsert(UpdatePlan plan, Iterable<List<?>> cursor, int pageSize) throws IgniteCheckedException {
    GridH2RowDescriptor desc = plan.tbl.rowDescriptor();

    GridCacheContext cctx = desc.context();

    // If we have just one item to put, just do so.
    if (plan.rowsNum == 1) {
        IgniteBiTuple t = rowToKeyValue(cctx, cursor.iterator().next(), plan);

        if (cctx.cache().putIfAbsent(t.getKey(), t.getValue()))
            return 1;
        else
            throw new IgniteSQLException("Duplicate key during INSERT [key=" + t.getKey() + ']',
                IgniteQueryErrorCode.DUPLICATE_KEY);
    }
    else {
        // Rows count is known in advance for local subqueries, so the map can be pre-sized.
        Map<Object, EntryProcessor<Object, Object, Boolean>> rows = plan.isLocSubqry ?
            new LinkedHashMap<Object, EntryProcessor<Object, Object, Boolean>>(plan.rowsNum) :
            new LinkedHashMap<Object, EntryProcessor<Object, Object, Boolean>>();

        // Keys that failed to INSERT due to duplication.
        List<Object> duplicateKeys = new ArrayList<>();

        int resCnt = 0;

        SQLException resEx = null;

        Iterator<List<?>> it = cursor.iterator();

        while (it.hasNext()) {
            List<?> row = it.next();

            final IgniteBiTuple t = rowToKeyValue(cctx, row, plan);

            rows.put(t.getKey(), new InsertEntryProcessor(t.getValue()));

            // Flush the page when it is full or the cursor is exhausted.
            if (!it.hasNext() || (pageSize > 0 && rows.size() == pageSize)) {
                PageProcessingResult pageRes = processPage(cctx, rows);

                resCnt += pageRes.cnt;

                duplicateKeys.addAll(F.asList(pageRes.errKeys));

                if (pageRes.ex != null) {
                    if (resEx == null)
                        resEx = pageRes.ex;
                    else
                        resEx.setNextException(pageRes.ex);
                }

                rows.clear();
            }
        }

        if (!F.isEmpty(duplicateKeys)) {
            String msg = "Failed to INSERT some keys because they are already in cache [keys=" + duplicateKeys + ']';

            SQLException dupEx = new SQLException(msg, null, IgniteQueryErrorCode.DUPLICATE_KEY);

            if (resEx == null)
                resEx = dupEx;
            else
                resEx.setNextException(dupEx);
        }

        if (resEx != null)
            throw new IgniteSQLException(resEx);

        return resCnt;
    }
}
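processPage hands each collected page of entry processors to the cache and reports back the keys whose processor returned false, which doInsert then folds into duplicateKeys. The InsertEntryProcessor referenced above wraps the tuple's value and applies insert-if-absent semantics per entry. A plausible sketch of such a processor follows; the real class is a private member of DmlStatementsProcessor, so this is a reconstruction, not a verbatim copy.

import javax.cache.processor.EntryProcessor;
import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.MutableEntry;

/** Entry processor with insert-if-absent semantics: returns {@code false} for duplicate keys. */
private static final class InsertEntryProcessor implements EntryProcessor<Object, Object, Boolean> {
    /** Value to set. */
    private final Object val;

    /** @param val Value to set. */
    private InsertEntryProcessor(Object val) {
        this.val = val;
    }

    /** {@inheritDoc} */
    @Override public Boolean process(MutableEntry<Object, Object> entry, Object... args)
        throws EntryProcessorException {
        if (entry.exists())
            return false; // Duplicate key; surfaces in PageProcessingResult.errKeys.

        entry.setValue(val);

        return true;
    }
}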
Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache.
The class DmlStatementsProcessor, method streamUpdateQuery.
/**
 * Perform given statement against given data streamer. Only rows based INSERT is supported here;
 * any other statement is rejected with {@code UNSUPPORTED_OPERATION}.
 *
 * @param streamer Streamer to feed data to.
 * @param stmt Statement.
 * @param args Statement arguments.
 * @return Number of rows in given INSERT statement.
 * @throws IgniteCheckedException if failed.
 */
@SuppressWarnings({"unchecked", "ConstantConditions"})
long streamUpdateQuery(IgniteDataStreamer streamer, PreparedStatement stmt, Object[] args) throws IgniteCheckedException {
    args = U.firstNotNull(args, X.EMPTY_OBJECT_ARRAY);

    Prepared p = GridSqlQueryParser.prepared(stmt);

    assert p != null;

    UpdatePlan plan = UpdatePlanBuilder.planForStatement(p, null);

    if (!F.eq(streamer.cacheName(), plan.tbl.rowDescriptor().context().name()))
        throw new IgniteSQLException("Cross cache streaming is not supported, please specify cache explicitly " +
            "in connection options", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);

    if (plan.mode == UpdateMode.INSERT && plan.rowsNum > 0) {
        assert plan.isLocSubqry;

        final GridCacheContext cctx = plan.tbl.rowDescriptor().context();

        QueryCursorImpl<List<?>> cur;

        final ArrayList<List<?>> data = new ArrayList<>(plan.rowsNum);

        final GridQueryFieldsResult res = idx.queryLocalSqlFields(idx.schema(cctx.name()), plan.selectQry,
            F.asList(args), null, false, 0, null);

        QueryCursorImpl<List<?>> stepCur = new QueryCursorImpl<>(new Iterable<List<?>>() {
            @Override public Iterator<List<?>> iterator() {
                try {
                    return new GridQueryCacheObjectsIterator(res.iterator(), idx.objectContext(), cctx.keepBinary());
                }
                catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }
            }
        }, null);

        // Drain the select cursor into memory before feeding the streamer.
        data.addAll(stepCur.getAll());

        cur = new QueryCursorImpl<>(new Iterable<List<?>>() {
            @Override public Iterator<List<?>> iterator() {
                return data.iterator();
            }
        }, null);

        if (plan.rowsNum == 1) {
            IgniteBiTuple t = rowToKeyValue(cctx, cur.iterator().next(), plan);

            streamer.addData(t.getKey(), t.getValue());

            return 1;
        }

        Map<Object, Object> rows = new LinkedHashMap<>(plan.rowsNum);

        for (List<?> row : cur) {
            final IgniteBiTuple t = rowToKeyValue(cctx, row, plan);

            rows.put(t.getKey(), t.getValue());
        }

        streamer.addData(rows);

        return rows.size();
    }
    else
        throw new IgniteSQLException("Only tuple based INSERT statements are supported in streaming mode",
            IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
}
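A sketch of how a caller could drive this path, assuming a prepared INSERT statement stmt and a cache named "personCache" (both names are illustrative, not from the original source). The streamer must be opened on the same cache the statement writes to, or the method above rejects it.

/** Hypothetical driver for the streaming path above. */
private long streamInsert(Ignite ignite, PreparedStatement stmt) throws IgniteCheckedException {
    try (IgniteDataStreamer<Object, Object> streamer = ignite.dataStreamer("personCache")) {
        streamer.allowOverwrite(false); // INSERT semantics: never overwrite existing keys.

        return streamUpdateQuery(streamer, stmt, new Object[] {1, "John"});
    }
}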
Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache.
The class DmlStatementsProcessor, method doMerge.
/**
 * Execute MERGE statement plan.
 * @param plan Update plan.
 * @param cursor Cursor to take inserted data from.
 * @param pageSize Batch size to stream data from {@code cursor}, anything <= 0 for single page operations.
 * @return Number of items affected.
 * @throws IgniteCheckedException if failed.
 */
@SuppressWarnings("unchecked")
private long doMerge(UpdatePlan plan, Iterable<List<?>> cursor, int pageSize) throws IgniteCheckedException {
    GridH2RowDescriptor desc = plan.tbl.rowDescriptor();

    GridCacheContext cctx = desc.context();

    // If we have just one item to put, just do so.
    if (plan.rowsNum == 1) {
        IgniteBiTuple t = rowToKeyValue(cctx, cursor.iterator().next(), plan);

        cctx.cache().put(t.getKey(), t.getValue());

        return 1;
    }
    else {
        int resCnt = 0;

        Map<Object, Object> rows = new LinkedHashMap<>();

        for (Iterator<List<?>> it = cursor.iterator(); it.hasNext(); ) {
            List<?> row = it.next();

            IgniteBiTuple t = rowToKeyValue(cctx, row, plan);

            rows.put(t.getKey(), t.getValue());

            // Flush the page when it is full or the cursor is exhausted.
            if ((pageSize > 0 && rows.size() == pageSize) || !it.hasNext()) {
                cctx.cache().putAll(rows);

                resCnt += rows.size();

                if (it.hasNext())
                    rows.clear();
            }
        }

        return resCnt;
    }
}
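The only semantic difference from doInsert above is the cache primitive applied to each key/value tuple: MERGE writes unconditionally, so duplicate keys are overwritten instead of reported as errors. Condensed to the per-tuple operation, with cctx and t as in the methods above:

// INSERT semantics (doInsert): fail on an existing key.
if (!cctx.cache().putIfAbsent(t.getKey(), t.getValue()))
    throw new IgniteSQLException("Duplicate key during INSERT [key=" + t.getKey() + ']',
        IgniteQueryErrorCode.DUPLICATE_KEY);

// MERGE semantics (doMerge): insert or overwrite unconditionally.
cctx.cache().put(t.getKey(), t.getValue());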