Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache: class GridDhtAtomicCache, method updateAllAsyncInternal0.
/**
* Executes local update after preloader fetched values.
*
* @param node Node.
* @param req Update request.
* @param completionCb Completion callback.
*/
private void updateAllAsyncInternal0(ClusterNode node, GridNearAtomicAbstractUpdateRequest req, UpdateReplyClosure completionCb) {
GridNearAtomicUpdateResponse res = new GridNearAtomicUpdateResponse(ctx.cacheId(), node.id(), req.futureId(), req.partition(), false, ctx.deploymentEnabled());
assert !req.returnValue() || (req.operation() == TRANSFORM || req.size() == 1);
GridDhtAtomicAbstractUpdateFuture dhtFut = null;
IgniteCacheExpiryPolicy expiry = null;
ctx.shared().database().checkpointReadLock();
try {
ctx.shared().database().ensureFreeSpace(ctx.dataRegion());
// If batch store update is enabled, we need to lock all entries.
// First, we need to acquire locks on cache entries, then check the filter.
List<GridDhtCacheEntry> locked = lockEntries(req, req.topologyVersion());
Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = null;
try {
GridDhtPartitionTopology top = topology();
top.readLock();
try {
if (top.stopping()) {
res.addFailedKeys(req.keys(), new CacheStoppedException(name()));
completionCb.apply(req, res);
return;
}
boolean remap = false;
// Do not check the topology version if topology was locked on the near node by an
// external transaction or explicit lock.
if (!req.topologyLocked()) {
// Can not wait for topology future since it will break
// GridNearAtomicCheckUpdateRequest processing.
remap = !top.topologyVersionFuture().exchangeDone() || needRemap(req.topologyVersion(), top.readyTopologyVersion());
}
if (!remap) {
DhtAtomicUpdateResult updRes = update(node, locked, req, res);
dhtFut = updRes.dhtFuture();
deleted = updRes.deleted();
expiry = updRes.expiryPolicy();
} else
// Should remap all keys.
res.remapTopologyVersion(top.lastTopologyChangeVersion());
} finally {
top.readUnlock();
}
} catch (GridCacheEntryRemovedException e) {
assert false : "Entry should not become obsolete while holding lock.";
e.printStackTrace();
} finally {
if (locked != null)
unlockEntries(locked, req.topologyVersion());
// Enqueue if necessary after locks release.
if (deleted != null) {
assert !deleted.isEmpty();
assert ctx.deferredDelete() : this;
for (IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion> e : deleted) ctx.onDeferredDelete(e.get1(), e.get2());
}
// TODO fire events only after successful fsync
if (ctx.shared().wal() != null)
ctx.shared().wal().fsync(null);
}
} catch (GridDhtInvalidPartitionException ignore) {
if (log.isDebugEnabled())
log.debug("Caught invalid partition exception for cache entry (will remap update request): " + req);
res.remapTopologyVersion(ctx.topology().lastTopologyChangeVersion());
} catch (Throwable e) {
// At least RuntimeException can be thrown by the code above when GridCacheContext is cleaned and there is
// an attempt to use cleaned resources.
U.error(log, "Unexpected exception during cache update", e);
res.addFailedKeys(req.keys(), e);
completionCb.apply(req, res);
if (e instanceof Error)
throw (Error) e;
return;
} finally {
ctx.shared().database().checkpointReadUnlock();
}
if (res.remapTopologyVersion() != null) {
assert dhtFut == null;
completionCb.apply(req, res);
} else {
if (dhtFut != null)
dhtFut.map(node, res.returnValue(), res, completionCb);
}
if (req.writeSynchronizationMode() != FULL_ASYNC)
req.cleanup(!node.isLocal());
sendTtlUpdateRequest(expiry);
}
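In this method the tuple pairs a GridDhtCacheEntry with the GridCacheVersion at which it was deleted, so deferred deletes can be enqueued only after the entry locks are released. Below is a minimal, self-contained sketch of that pattern, not Ignite internals; EntryRef and Version are hypothetical placeholder types standing in for the real cache classes.

import java.util.ArrayList;
import java.util.Collection;
import org.apache.ignite.lang.IgniteBiTuple;

public class DeferredDeleteSketch {
    static class EntryRef { final String key; EntryRef(String key) { this.key = key; } }
    static class Version { final long order; Version(long order) { this.order = order; } }

    public static void main(String[] args) {
        // Collected while the (simulated) entry locks are held.
        Collection<IgniteBiTuple<EntryRef, Version>> deleted = new ArrayList<>();
        deleted.add(new IgniteBiTuple<>(new EntryRef("k1"), new Version(42)));

        // Processed only after the locks have been released, mirroring the finally block above.
        for (IgniteBiTuple<EntryRef, Version> e : deleted)
            System.out.println("Deferred delete: " + e.get1().key + " @ version " + e.get2().order);
    }
}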
Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache: class FileWriteAheadLogManager, method readSerializerVersionAndCompactedFlag.
/**
* Reads record serializer version from provided {@code io} along with compacted flag.
* NOTE: Method mutates position of {@code io}.
*
* @param io I/O interface for file.
* @return Serializer version stored in the file.
* @throws IgniteCheckedException If failed to read serializer version.
*/
static IgniteBiTuple<Integer, Boolean> readSerializerVersionAndCompactedFlag(FileIO io) throws IgniteCheckedException, IOException {
try (ByteBufferExpander buf = new ByteBufferExpander(RecordV1Serializer.HEADER_RECORD_SIZE, ByteOrder.nativeOrder())) {
FileInput in = new FileInput(io, buf);
in.ensure(RecordV1Serializer.HEADER_RECORD_SIZE);
int recordType = in.readUnsignedByte();
if (recordType == WALRecord.RecordType.STOP_ITERATION_RECORD_TYPE)
throw new SegmentEofException("Reached logical end of the segment", null);
WALRecord.RecordType type = WALRecord.RecordType.fromOrdinal(recordType - 1);
if (type != WALRecord.RecordType.HEADER_RECORD)
throw new IOException("Can't read serializer version", null);
// Read file pointer.
FileWALPointer ptr = RecordV1Serializer.readPosition(in);
assert ptr.fileOffset() == 0 : "Header record should be placed at the beginning of file " + ptr;
long hdrMagicNum = in.readLong();
boolean compacted;
if (hdrMagicNum == HeaderRecord.REGULAR_MAGIC)
compacted = false;
else if (hdrMagicNum == HeaderRecord.COMPACTED_MAGIC)
compacted = true;
else {
throw new IOException("Magic is corrupted [exp=" + U.hexLong(HeaderRecord.REGULAR_MAGIC) + ", actual=" + U.hexLong(hdrMagicNum) + ']');
}
// Read serializer version.
int ver = in.readInt();
// Read and skip CRC.
in.readInt();
return new IgniteBiTuple<>(ver, compacted);
}
}
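The method packs two header facts into one return value: the record serializer version and whether the segment was written by WAL compaction. A hedged sketch of how a caller might unpack such a tuple is shown below; the tuple value is hard-coded rather than read from a real WAL segment.

import org.apache.ignite.lang.IgniteBiTuple;

public class WalHeaderTupleSketch {
    public static void main(String[] args) {
        // Stand-in for the value returned by readSerializerVersionAndCompactedFlag(io).
        IgniteBiTuple<Integer, Boolean> hdr = new IgniteBiTuple<>(2, true);

        int serVer = hdr.get1();        // record serializer version
        boolean compacted = hdr.get2(); // true if the segment was produced by WAL compaction

        System.out.println("Serializer version: " + serVer + ", compacted: " + compacted);
    }
}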
Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache: class GridCacheQueryManager, method setIterator.
/**
* @param qry Query.
* @return Cache set items iterator.
*/
private GridCloseableIterator<IgniteBiTuple<K, V>> setIterator(GridCacheQueryAdapter<?> qry) {
final GridSetQueryPredicate filter = (GridSetQueryPredicate) qry.scanFilter();
filter.init(cctx);
IgniteUuid id = filter.setId();
Collection<SetItemKey> data = cctx.dataStructures().setData(id);
if (data == null)
data = Collections.emptyList();
final GridIterator<IgniteBiTuple<K, V>> it = F.iterator(data, new C1<SetItemKey, IgniteBiTuple<K, V>>() {
@Override
public IgniteBiTuple<K, V> apply(SetItemKey e) {
return new IgniteBiTuple<>((K) e.item(), (V) Boolean.TRUE);
}
}, true, new P1<SetItemKey>() {
@Override
public boolean apply(SetItemKey e) {
return filter.apply(e, null);
}
});
return new GridCloseableIteratorAdapter<IgniteBiTuple<K, V>>() {
@Override
protected boolean onHasNext() {
return it.hasNext();
}
@Override
protected IgniteBiTuple<K, V> onNext() {
return it.next();
}
@Override
protected void onRemove() {
it.remove();
}
@Override
protected void onClose() {
// No-op.
}
};
}
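The iterator adapts set items to (item, Boolean.TRUE) pairs so that set queries can reuse the cache's key/value iterator contract. Below is a minimal, self-contained sketch of the same mapping using plain JDK streams instead of Ignite's F.iterator utilities; the String set data is made up for illustration.

import java.util.Iterator;
import java.util.List;
import org.apache.ignite.lang.IgniteBiTuple;

public class SetAsPairsSketch {
    public static void main(String[] args) {
        List<String> setData = List.of("a", "b", "c");

        // Expose each set item as an (item, TRUE) key/value pair.
        Iterator<IgniteBiTuple<String, Boolean>> it = setData.stream()
            .map(item -> new IgniteBiTuple<>(item, Boolean.TRUE))
            .iterator();

        while (it.hasNext()) {
            IgniteBiTuple<String, Boolean> t = it.next();
            System.out.println(t.get1() + " -> " + t.get2());
        }
    }
}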
Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache: class CacheObjectBinaryProcessorImpl, method marshalToBinary.
/**
* {@inheritDoc}
*/
@SuppressWarnings("unchecked")
@Override
public Object marshalToBinary(@Nullable Object obj) throws BinaryObjectException {
if (obj == null)
return null;
if (BinaryUtils.isBinaryType(obj.getClass()))
return obj;
if (obj instanceof Object[]) {
Object[] arr = (Object[]) obj;
Object[] pArr = new Object[arr.length];
for (int i = 0; i < arr.length; i++) pArr[i] = marshalToBinary(arr[i]);
return pArr;
}
if (obj instanceof IgniteBiTuple) {
IgniteBiTuple tup = (IgniteBiTuple) obj;
if (obj instanceof T2)
return new T2<>(marshalToBinary(tup.get1()), marshalToBinary(tup.get2()));
return new IgniteBiTuple<>(marshalToBinary(tup.get1()), marshalToBinary(tup.get2()));
}
{
Collection<Object> pCol = BinaryUtils.newKnownCollection(obj);
if (pCol != null) {
Collection<?> col = (Collection<?>) obj;
for (Object item : col) pCol.add(marshalToBinary(item));
return (pCol instanceof MutableSingletonList) ? U.convertToSingletonList(pCol) : pCol;
}
}
{
Map<Object, Object> pMap = BinaryUtils.newKnownMap(obj);
if (pMap != null) {
Map<?, ?> map = (Map<?, ?>) obj;
for (Map.Entry<?, ?> e : map.entrySet()) pMap.put(marshalToBinary(e.getKey()), marshalToBinary(e.getValue()));
return pMap;
}
}
if (obj instanceof Map.Entry) {
Map.Entry<?, ?> e = (Map.Entry<?, ?>) obj;
return new GridMapEntry<>(marshalToBinary(e.getKey()), marshalToBinary(e.getValue()));
}
if (binaryMarsh.mustDeserialize(obj))
// No need to go through marshal-unmarshal because result will be the same as initial object.
return obj;
byte[] arr = binaryMarsh.marshal(obj);
assert arr.length > 0;
Object obj0 = binaryMarsh.unmarshal(arr, null);
// Possible if a class has writeObject method.
if (obj0 instanceof BinaryObjectImpl)
((BinaryObjectImpl) obj0).detachAllowed(true);
return obj0;
}
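For IgniteBiTuple, marshalToBinary preserves the tuple shape (including the T2 subclass) and recursively marshals both components. The toy example below mirrors that branch with a hypothetical wrap transformation instead of the real binary marshaller.

import org.apache.ignite.lang.IgniteBiTuple;

public class TupleTransformSketch {
    /** Recursively transforms both halves of a tuple while keeping the tuple shape. */
    static Object wrap(Object obj) {
        if (obj instanceof IgniteBiTuple) {
            IgniteBiTuple<?, ?> tup = (IgniteBiTuple<?, ?>) obj;

            return new IgniteBiTuple<>(wrap(tup.get1()), wrap(tup.get2()));
        }

        // Placeholder for "convert to binary form".
        return obj == null ? null : "wrapped(" + obj + ")";
    }

    public static void main(String[] args) {
        System.out.println(wrap(new IgniteBiTuple<>("key", 1)));
    }
}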
Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache: class JdbcRequestHandler, method executeBatch.
/**
* @param req Request.
* @return Response.
*/
private ClientListenerResponse executeBatch(JdbcBatchExecuteRequest req) {
String schemaName = req.schemaName();
if (F.isEmpty(schemaName))
schemaName = QueryUtils.DFLT_SCHEMA;
int qryCnt = req.queries().size();
List<Integer> updCntsAcc = new ArrayList<>(qryCnt);
// Send back only the first error. Others will be written to the log.
IgniteBiTuple<Integer, String> firstErr = new IgniteBiTuple<>();
SqlFieldsQueryEx qry = null;
for (JdbcQuery q : req.queries()) {
if (q.sql() != null) {
// If we have a new query string in the batch, then execute the previous sub-batch
// and create a new SqlFieldsQueryEx.
if (qry != null)
executeBatchedQuery(qry, updCntsAcc, firstErr);
qry = new SqlFieldsQueryEx(q.sql(), false);
qry.setDistributedJoins(cliCtx.isDistributedJoins());
qry.setEnforceJoinOrder(cliCtx.isEnforceJoinOrder());
qry.setCollocated(cliCtx.isCollocated());
qry.setReplicatedOnly(cliCtx.isReplicatedOnly());
qry.setLazy(cliCtx.isLazy());
qry.setSchema(schemaName);
}
assert qry != null;
qry.addBatchedArgs(q.args());
}
if (qry != null)
executeBatchedQuery(qry, updCntsAcc, firstErr);
if (req.isLastStreamBatch())
cliCtx.disableStreaming();
int[] updCnts = U.toIntArray(updCntsAcc);
if (firstErr.isEmpty())
return new JdbcResponse(new JdbcBatchExecuteResult(updCnts, ClientListenerResponse.STATUS_SUCCESS, null));
else
return new JdbcResponse(new JdbcBatchExecuteResult(updCnts, firstErr.getKey(), firstErr.getValue()));
}
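Here the tuple acts as a mutable "first error" slot: it starts empty and, once filled, later failures are only logged. A hedged sketch of that pattern follows; the error codes and loop are invented for illustration, while isEmpty(), getKey() and getValue() are the same IgniteBiTuple methods used above.

import org.apache.ignite.lang.IgniteBiTuple;

public class FirstErrorSketch {
    public static void main(String[] args) {
        IgniteBiTuple<Integer, String> firstErr = new IgniteBiTuple<>();

        int[] codes = {0, 1001, 1002};

        for (int code : codes) {
            // Remember only the first failure; subsequent ones would just be logged.
            if (code != 0 && firstErr.isEmpty()) {
                firstErr.set1(code);
                firstErr.set2("Batch entry failed with code " + code);
            }
        }

        if (!firstErr.isEmpty())
            System.out.println("First error: " + firstErr.getKey() + " / " + firstErr.getValue());
    }
}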