Use of org.apache.ignite.internal.UnregisteredBinaryTypeException in project ignite by apache.
From class CacheObjectBinaryProcessorImpl, method failIfUnregistered:
/**
 * Throws a specific exception if the given binary metadata is unregistered.
 *
 * @param typeId Type id.
 * @param newMeta0 Expected binary metadata.
 */
private void failIfUnregistered(int typeId, BinaryMetadata newMeta0) {
    BinaryMetadataHolder metaHolder = metadataLocCache.get(typeId);

    BinaryMetadata oldMeta = metaHolder != null ? metaHolder.metadata() : null;

    BinaryMetadata mergedMeta = mergeMetadata(oldMeta, newMeta0);

    if (mergedMeta != oldMeta)
        throw new UnregisteredBinaryTypeException(typeId, mergedMeta);

    if (metaHolder.pendingVersion() == metaHolder.acceptedVersion())
        return;

    // The local metadata is up-to-date, but a pending update must still be accepted by the entire cluster.
    GridFutureAdapter<MetadataUpdateResult> fut = transport.awaitMetadataUpdate(typeId, metaHolder.pendingVersion());

    if (!fut.isDone())
        throw new UnregisteredBinaryTypeException(typeId, fut);
}
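Callers are expected to treat this exception as a control-flow signal rather than an error: register or await the metadata it carries, then retry the write. The following is a minimal sketch of that contract; writeValue() and binaryCtx are hypothetical placeholders, and the real handling appears in updateAllAsyncInternal0 below.

// Hedged sketch of the retry contract for UnregisteredBinaryTypeException.
// writeValue() stands in for the failing serialization and binaryCtx for the
// node's BinaryContext; both are hypothetical placeholders.
void writeWithRetry() throws IgniteCheckedException {
    while (true) {
        try {
            writeValue(); // May throw UnregisteredBinaryTypeException.

            return;
        }
        catch (UnregisteredBinaryTypeException e) {
            if (e.future() != null)
                e.future().get(); // Registration already in flight: wait, then retry.
            else
                binaryCtx.updateMetadata(e.typeId(), e.binaryMetadata(), false);
        }
    }
}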
Use of org.apache.ignite.internal.UnregisteredBinaryTypeException in project ignite by apache.
From class BinaryClassDescriptor, method write:
/**
 * @param obj Object.
 * @param writer Writer.
 * @throws BinaryObjectException In case of error.
 */
void write(Object obj, BinaryWriterExImpl writer) throws BinaryObjectException {
    try {
        assert obj != null;
        assert writer != null;
        assert mode != BinaryWriteMode.OPTIMIZED : "OptimizedMarshaller should not be used here: " + cls.getName();

        writer.typeId(typeId);

        switch (mode) {
            case P_BYTE:
            case BYTE:
                writer.writeByteFieldPrimitive((byte) obj);
                break;

            case P_SHORT:
            case SHORT:
                writer.writeShortFieldPrimitive((short) obj);
                break;

            case P_INT:
            case INT:
                writer.writeIntFieldPrimitive((int) obj);
                break;

            case P_LONG:
            case LONG:
                writer.writeLongFieldPrimitive((long) obj);
                break;

            case P_FLOAT:
            case FLOAT:
                writer.writeFloatFieldPrimitive((float) obj);
                break;

            case P_DOUBLE:
            case DOUBLE:
                writer.writeDoubleFieldPrimitive((double) obj);
                break;

            case P_CHAR:
            case CHAR:
                writer.writeCharFieldPrimitive((char) obj);
                break;

            case P_BOOLEAN:
            case BOOLEAN:
                writer.writeBooleanFieldPrimitive((boolean) obj);
                break;

            case DECIMAL:
                writer.doWriteDecimal((BigDecimal) obj);
                break;

            case STRING:
                writer.doWriteString((String) obj);
                break;

            case UUID:
                writer.doWriteUuid((UUID) obj);
                break;

            case DATE:
                writer.doWriteDate((Date) obj);
                break;

            case TIMESTAMP:
                writer.doWriteTimestamp((Timestamp) obj);
                break;

            case TIME:
                writer.doWriteTime((Time) obj);
                break;

            case BYTE_ARR:
                writer.doWriteByteArray((byte[]) obj);
                break;

            case SHORT_ARR:
                writer.doWriteShortArray((short[]) obj);
                break;

            case INT_ARR:
                writer.doWriteIntArray((int[]) obj);
                break;

            case LONG_ARR:
                writer.doWriteLongArray((long[]) obj);
                break;

            case FLOAT_ARR:
                writer.doWriteFloatArray((float[]) obj);
                break;

            case DOUBLE_ARR:
                writer.doWriteDoubleArray((double[]) obj);
                break;

            case CHAR_ARR:
                writer.doWriteCharArray((char[]) obj);
                break;

            case BOOLEAN_ARR:
                writer.doWriteBooleanArray((boolean[]) obj);
                break;

            case DECIMAL_ARR:
                writer.doWriteDecimalArray((BigDecimal[]) obj);
                break;

            case STRING_ARR:
                writer.doWriteStringArray((String[]) obj);
                break;

            case UUID_ARR:
                writer.doWriteUuidArray((UUID[]) obj);
                break;

            case DATE_ARR:
                writer.doWriteDateArray((Date[]) obj);
                break;

            case TIMESTAMP_ARR:
                writer.doWriteTimestampArray((Timestamp[]) obj);
                break;

            case TIME_ARR:
                writer.doWriteTimeArray((Time[]) obj);
                break;

            case OBJECT_ARR:
                if (obj instanceof BinaryArray)
                    writer.doWriteBinaryArray((BinaryArray) obj);
                else
                    writer.doWriteObjectArray((Object[]) obj);

                break;

            case COL:
                writer.doWriteCollection((Collection<?>) obj);
                break;

            case MAP:
                writer.doWriteMap((Map<?, ?>) obj);
                break;

            case ENUM:
                writer.doWriteEnum((Enum<?>) obj);
                break;

            case BINARY_ENUM:
                writer.doWriteBinaryEnum((BinaryEnumObjectImpl) obj);
                break;

            case ENUM_ARR:
                if (obj instanceof BinaryArray)
                    writer.doWriteBinaryArray((BinaryArray) obj);
                else
                    writer.doWriteEnumArray((Object[]) obj);

                break;

            case CLASS:
                writer.doWriteClass((Class) obj);
                break;

            case PROXY:
                writer.doWriteProxy((Proxy) obj, intfs);
                break;

            case BINARY_OBJ:
                writer.doWriteBinaryObject((BinaryObjectImpl) obj);
                break;

            case BINARY:
                if (preWrite(writer, obj)) {
                    try {
                        if (serializer != null)
                            serializer.writeBinary(obj, writer);
                        else
                            ((Binarylizable) obj).writeBinary(writer);

                        postWrite(writer);

                        // The reason for this check is described in https://issues.apache.org/jira/browse/IGNITE-7138.
                        if (obj.getClass() != BinaryMetadata.class && obj.getClass() != BinaryTreeMap.class) {
                            int schemaId = writer.schemaId();

                            if (schemaReg.schema(schemaId) == null) {
                                // This is a new schema, so update the metadata.
                                BinaryMetadataCollector collector = new BinaryMetadataCollector(typeId, typeName, mapper);

                                if (serializer != null)
                                    serializer.writeBinary(obj, collector);
                                else
                                    ((Binarylizable) obj).writeBinary(collector);

                                BinarySchema newSchema = collector.schema();

                                schemaReg.addSchema(newSchema.schemaId(), newSchema);

                                if (userType) {
                                    BinaryMetadata meta = new BinaryMetadata(typeId, typeName, collector.meta(), affKeyFieldName,
                                        Collections.singleton(newSchema), false, null);

                                    ctx.updateMetadata(typeId, meta, writer.failIfUnregistered());
                                }
                            }
                        }

                        postWriteHashCode(writer, obj);
                    }
                    finally {
                        writer.popSchema();
                    }
                }

                break;

            case OBJECT:
                if (userType && !stableSchemaPublished) {
                    // Update metadata before writing an object with the new schema.
                    BinaryMetadata meta = new BinaryMetadata(typeId, typeName, stableFieldsMeta, affKeyFieldName,
                        Collections.singleton(stableSchema), false, null);

                    ctx.updateMetadata(typeId, meta, writer.failIfUnregistered());

                    schemaReg.addSchema(stableSchema.schemaId(), stableSchema);

                    stableSchemaPublished = true;
                }

                if (preWrite(writer, obj)) {
                    try {
                        for (BinaryFieldAccessor info : fields)
                            info.write(obj, writer);

                        writer.schemaId(stableSchema.schemaId());

                        postWrite(writer);
                        postWriteHashCode(writer, obj);
                    }
                    finally {
                        writer.popSchema();
                    }
                }

                break;

            default:
                assert false : "Invalid mode: " + mode;
        }
    }
    catch (UnregisteredBinaryTypeException | UnregisteredClassException e) {
        throw e;
    }
    catch (Exception e) {
        String msg;

        if (S.includeSensitive() && !F.isEmpty(typeName))
            msg = "Failed to serialize object [typeName=" + typeName + ']';
        else
            msg = "Failed to serialize object [typeId=" + typeId + ']';

        U.error(ctx.log(), msg, e);

        throw new BinaryObjectException(msg, e);
    }
}
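The BINARY branch above is driven by the user type's own writeBinary() implementation: the first write with a previously unseen field set produces a new schema, which is collected and registered via ctx.updateMetadata(). A minimal example of such a user type follows; the Point class is illustrative, while Binarylizable, BinaryWriter and BinaryReader are the public Ignite API.

import org.apache.ignite.binary.BinaryObjectException;
import org.apache.ignite.binary.BinaryReader;
import org.apache.ignite.binary.BinaryWriter;
import org.apache.ignite.binary.Binarylizable;

// Illustrative user type taking the BINARY branch: each writeBinary() call
// defines the field set (schema) that BinaryClassDescriptor.write() collects
// and, if not seen before, registers as new binary metadata.
public class Point implements Binarylizable {
    private int x;
    private int y;

    public Point() {
        // No-op constructor for deserialization.
    }

    public Point(int x, int y) {
        this.x = x;
        this.y = y;
    }

    @Override public void writeBinary(BinaryWriter writer) throws BinaryObjectException {
        writer.writeInt("x", x);
        writer.writeInt("y", y);
    }

    @Override public void readBinary(BinaryReader reader) throws BinaryObjectException {
        x = reader.readInt("x");
        y = reader.readInt("y");
    }
}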
Use of org.apache.ignite.internal.UnregisteredBinaryTypeException in project ignite by apache.
From class GridDhtAtomicCache, method updateAllAsyncInternal0:
/**
 * Executes the local update after the preloader has fetched values.
 *
 * @param node Node.
 * @param req Update request.
 * @param completionCb Completion callback.
 */
private void updateAllAsyncInternal0(final ClusterNode node, final GridNearAtomicAbstractUpdateRequest req,
    final UpdateReplyClosure completionCb) {
    GridNearAtomicUpdateResponse res = new GridNearAtomicUpdateResponse(ctx.cacheId(), node.id(), req.futureId(),
        req.partition(), false, ctx.deploymentEnabled());

    assert !req.returnValue() || (req.operation() == TRANSFORM || req.size() == 1);

    GridDhtAtomicAbstractUpdateFuture dhtFut = null;

    IgniteCacheExpiryPolicy expiry = null;

    boolean needTaskName = ctx.events().isRecordable(EVT_CACHE_OBJECT_READ) ||
        ctx.events().isRecordable(EVT_CACHE_OBJECT_PUT) ||
        ctx.events().isRecordable(EVT_CACHE_OBJECT_REMOVED);

    String taskName = needTaskName ? ctx.kernalContext().task().resolveTaskName(req.taskNameHash()) : null;

    ctx.shared().database().checkpointReadLock();

    try {
        ctx.shared().database().ensureFreeSpace(ctx.dataRegion());

        // If batch store update is enabled, we need to lock all entries.
        // First we need to acquire locks on cache entries, then check the filter.
        List<GridDhtCacheEntry> locked = lockEntries(req, req.topologyVersion());

        Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = null;

        DhtAtomicUpdateResult updDhtRes = new DhtAtomicUpdateResult();

        try {
            while (true) {
                try {
                    GridDhtPartitionTopology top = topology();

                    top.readLock();

                    try {
                        if (top.stopping()) {
                            if (ctx.shared().cache().isCacheRestarting(name()))
                                res.addFailedKeys(req.keys(), new IgniteCacheRestartingException(name()));
                            else
                                res.addFailedKeys(req.keys(), new CacheStoppedException(name()));

                            completionCb.apply(req, res);

                            return;
                        }

                        boolean remap = false;

                        // Do not check the topology version if it was locked on the near node by an
                        // external transaction or explicit lock.
                        if (!req.topologyLocked()) {
                            AffinityTopologyVersion waitVer = top.topologyVersionFuture().initialVersion();

                            // No need to remap if next future version is compatible.
                            boolean compatible = waitVer.isBetween(req.lastAffinityChangedTopologyVersion(), req.topologyVersion());

                            // Can not wait for topology future since it will break
                            // GridNearAtomicCheckUpdateRequest processing.
                            remap = !compatible && !top.topologyVersionFuture().isDone() ||
                                needRemap(req.topologyVersion(), top.readyTopologyVersion());
                        }

                        if (!remap) {
                            update(node, locked, req, res, updDhtRes, taskName);

                            dhtFut = updDhtRes.dhtFuture();
                            deleted = updDhtRes.deleted();
                            expiry = updDhtRes.expiryPolicy();
                        }
                        else
                            // Should remap all keys.
                            res.remapTopologyVersion(top.lastTopologyChangeVersion());
                    }
                    finally {
                        top.readUnlock();
                    }

                    // Must be done outside topology read lock to avoid deadlocks.
                    if (res.returnValue() != null)
                        res.returnValue().marshalResult(ctx);

                    break;
                }
                catch (UnregisteredClassException ex) {
                    IgniteCacheObjectProcessor cacheObjProc = ctx.cacheObjects();

                    assert cacheObjProc instanceof CacheObjectBinaryProcessorImpl;

                    ((CacheObjectBinaryProcessorImpl) cacheObjProc).binaryContext().registerClass(ex.cls(), true, false);
                }
                catch (UnregisteredBinaryTypeException ex) {
                    if (ex.future() != null) {
                        // Wait for the future that couldn't be processed because of the
                        // IgniteThread#isForbiddenToRequestBinaryMetadata flag being true. Usually this means
                        // that awaiting the future right there would lead to a potential deadlock if
                        // continuous queries are used in parallel with an entry processor.
                        ex.future().get();

                        // Retry and don't update the current binary metadata, because it most likely already exists.
                        continue;
                    }

                    IgniteCacheObjectProcessor cacheObjProc = ctx.cacheObjects();

                    assert cacheObjProc instanceof CacheObjectBinaryProcessorImpl;

                    ((CacheObjectBinaryProcessorImpl) cacheObjProc).binaryContext().updateMetadata(ex.typeId(), ex.binaryMetadata(), false);
                }
            }
        }
        catch (GridCacheEntryRemovedException e) {
            assert false : "Entry should not become obsolete while holding lock.";

            e.printStackTrace();
        }
        finally {
            if (locked != null)
                unlockEntries(locked, req.topologyVersion());

            // Enqueue if necessary after locks release.
            if (deleted != null) {
                assert !deleted.isEmpty();
                assert ctx.deferredDelete() : this;

                for (IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion> e : deleted)
                    ctx.onDeferredDelete(e.get1(), e.get2());
            }

            // TODO fire events only after successful fsync
            if (ctx.shared().wal() != null)
                ctx.shared().wal().flush(null, false);
        }
    }
    catch (GridDhtInvalidPartitionException ignore) {
        if (log.isDebugEnabled())
            log.debug("Caught invalid partition exception for cache entry (will remap update request): " + req);

        res.remapTopologyVersion(ctx.topology().lastTopologyChangeVersion());
    }
    catch (Throwable e) {
        // At least a RuntimeException can be thrown by the code above when GridCacheContext is cleaned and there is
        // an attempt to use the cleaned resources.
        U.error(log, "Unexpected exception during cache update", e);

        res.addFailedKeys(req.keys(), e);

        completionCb.apply(req, res);

        if (e instanceof Error)
            throw (Error) e;

        return;
    }
    finally {
        ctx.shared().database().checkpointReadUnlock();
    }

    if (res.remapTopologyVersion() != null) {
        assert dhtFut == null;

        completionCb.apply(req, res);
    }
    else {
        if (dhtFut != null)
            dhtFut.map(node, res.returnValue(), res, completionCb);
    }

    if (req.writeSynchronizationMode() != FULL_ASYNC)
        req.cleanup(!node.isLocal());

    sendTtlUpdateRequest(expiry);
}
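From the application's point of view this recovery is transparent: the first put of a new binary type simply triggers cluster-wide metadata registration underneath. A minimal sketch using the public API; the cache name is illustrative and Point is the example type defined above.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;

// Sketch: the first put of a new binary type. Inside the atomic update path
// this may surface as UnregisteredBinaryTypeException, which the retry loop
// above resolves by registering the metadata (or waiting for it) and retrying.
public class FirstPutExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, Point> cache = ignite.getOrCreateCache("points");

            cache.put(1, new Point(1, 2)); // Registers Point's binary metadata on first use.
        }
    }
}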
Use of org.apache.ignite.internal.UnregisteredBinaryTypeException in project ignite by apache.
From class GridDhtAtomicCache, method updateWithBatch:
/**
 * Updates locked entries using batched write-through.
 *
 * @param node Sender node.
 * @param hasNear {@code True} if originating node has near cache.
 * @param req Update request.
 * @param res Update response.
 * @param locked Locked entries.
 * @param ver Assigned version.
 * @param replicate Whether replication is enabled.
 * @param taskName Task name.
 * @param expiry Expiry policy.
 * @param sndPrevVal If {@code true}, sends the previous value to backups.
 * @param dhtUpdRes DHT update result.
 * @throws GridCacheEntryRemovedException Should not be thrown.
 */
@SuppressWarnings("unchecked")
private void updateWithBatch(final ClusterNode node, final boolean hasNear, final GridNearAtomicAbstractUpdateRequest req, final GridNearAtomicUpdateResponse res, final List<GridDhtCacheEntry> locked, final GridCacheVersion ver, final boolean replicate, final String taskName, @Nullable final IgniteCacheExpiryPolicy expiry, final boolean sndPrevVal, final DhtAtomicUpdateResult dhtUpdRes) throws GridCacheEntryRemovedException {
// Cannot update in batches during DR due to possible conflicts.
assert !ctx.dr().receiveEnabled();
// Should not request return values for putAll.
assert !req.returnValue() || req.operation() == TRANSFORM;
if (!F.isEmpty(req.filter()) && ctx.loadPreviousValue()) {
try {
reloadIfNeeded(locked);
} catch (IgniteCheckedException e) {
res.addFailedKeys(req.keys(), e);
return;
}
}
int size = req.size();
Map<KeyCacheObject, CacheObject> putMap = null;
Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap = null;
Collection<KeyCacheObject> rmvKeys = null;
List<CacheObject> writeVals = null;
List<GridDhtCacheEntry> filtered = new ArrayList<>(size);
GridCacheOperation op = req.operation();
GridCacheReturn invokeRes = null;
int firstEntryIdx = 0;
boolean intercept = ctx.config().getInterceptor() != null;
for (int i = dhtUpdRes.processedEntriesCount(); i < locked.size(); i++) {
GridDhtCacheEntry entry = locked.get(i);
try {
if (!checkFilter(entry, req, res)) {
if (expiry != null && entry.hasValue()) {
long ttl = expiry.forAccess();
if (ttl != CU.TTL_NOT_CHANGED) {
entry.updateTtl(null, ttl);
expiry.ttlUpdated(entry.key(), entry.version(), entry.readers());
}
}
if (log.isDebugEnabled())
log.debug("Entry did not pass the filter (will skip write) [entry=" + entry + ", filter=" + Arrays.toString(req.filter()) + ", res=" + res + ']');
if (hasNear)
res.addSkippedIndex(i);
firstEntryIdx++;
continue;
}
if (op == TRANSFORM) {
EntryProcessor<Object, Object, Object> entryProcessor = req.entryProcessor(i);
CacheObject old = entry.innerGet(ver, null, /*read through*/
true, /*metrics*/
true, /*event*/
true, entryProcessor, taskName, null, req.keepBinary());
Object oldVal = null;
Object updatedVal = null;
CacheInvokeEntry<Object, Object> invokeEntry = new CacheInvokeEntry(entry.key(), old, entry.version(), req.keepBinary(), entry);
CacheObject updated = null;
if (invokeRes == null)
invokeRes = new GridCacheReturn(node.isLocal());
CacheInvokeResult curInvokeRes = null;
boolean validation = false;
IgniteThread.onEntryProcessorEntered(true);
try {
Object computed = entryProcessor.process(invokeEntry, req.invokeArguments());
if (computed != null) {
computed = ctx.unwrapTemporary(computed);
curInvokeRes = CacheInvokeResult.fromResult(computed);
}
if (!invokeEntry.modified()) {
if (ctx.statisticsEnabled())
ctx.cache().metrics0().onReadOnlyInvoke(old != null);
continue;
} else {
updatedVal = ctx.unwrapTemporary(invokeEntry.getValue());
updated = ctx.toCacheObject(updatedVal);
validation = true;
if (updated != null)
ctx.validateKeyAndValue(entry.key(), updated);
}
} catch (UnregisteredClassException | UnregisteredBinaryTypeException e) {
throw e;
} catch (Exception e) {
curInvokeRes = CacheInvokeResult.fromError(e);
updated = old;
if (validation) {
res.addSkippedIndex(i);
continue;
}
} finally {
IgniteThread.onEntryProcessorLeft();
if (curInvokeRes != null) {
invokeRes.addEntryProcessResult(ctx, entry.key(), invokeEntry.key(), curInvokeRes.result(), curInvokeRes.error(), req.keepBinary());
}
}
if (updated == null) {
if (intercept) {
CacheLazyEntry e = new CacheLazyEntry(ctx, entry.key(), invokeEntry.key(), old, oldVal, req.keepBinary());
IgniteBiTuple<Boolean, ?> interceptorRes = ctx.config().getInterceptor().onBeforeRemove(e);
if (ctx.cancelRemove(interceptorRes))
continue;
}
// Update previous batch.
if (putMap != null) {
updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, writeVals, putMap, null, entryProcessorMap, req, res, replicate, dhtUpdRes, taskName, expiry, sndPrevVal);
firstEntryIdx = i;
putMap = null;
writeVals = null;
entryProcessorMap = null;
filtered = new ArrayList<>();
}
// Start collecting new batch.
if (rmvKeys == null)
rmvKeys = new ArrayList<>(size);
rmvKeys.add(entry.key());
} else {
if (intercept) {
CacheLazyEntry e = new CacheLazyEntry(ctx, entry.key(), invokeEntry.key(), old, oldVal, req.keepBinary());
Object val = ctx.config().getInterceptor().onBeforePut(e, updatedVal);
if (val == null)
continue;
updated = ctx.toCacheObject(ctx.unwrapTemporary(val));
}
// Update previous batch.
if (rmvKeys != null) {
updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, null, null, rmvKeys, entryProcessorMap, req, res, replicate, dhtUpdRes, taskName, expiry, sndPrevVal);
firstEntryIdx = i;
rmvKeys = null;
entryProcessorMap = null;
filtered = new ArrayList<>();
}
if (putMap == null) {
putMap = new LinkedHashMap<>(size, 1.0f);
writeVals = new ArrayList<>(size);
}
putMap.put(entry.key(), updated);
writeVals.add(updated);
}
if (entryProcessorMap == null)
entryProcessorMap = new HashMap<>();
entryProcessorMap.put(entry.key(), entryProcessor);
} else if (op == UPDATE) {
CacheObject updated = req.value(i);
if (intercept) {
CacheObject old = entry.innerGet(null, null, /*read through*/
ctx.loadPreviousValue(), /*metrics*/
true, /*event*/
true, null, taskName, null, req.keepBinary());
Object val = ctx.config().getInterceptor().onBeforePut(new CacheLazyEntry(ctx, entry.key(), old, req.keepBinary()), ctx.unwrapBinaryIfNeeded(updated, req.keepBinary(), false, null));
if (val == null)
continue;
updated = ctx.toCacheObject(ctx.unwrapTemporary(val));
}
assert updated != null;
ctx.validateKeyAndValue(entry.key(), updated);
if (putMap == null) {
putMap = new LinkedHashMap<>(size, 1.0f);
writeVals = new ArrayList<>(size);
}
putMap.put(entry.key(), updated);
writeVals.add(updated);
} else {
assert op == DELETE;
if (intercept) {
CacheObject old = entry.innerGet(null, null, /*read through*/
ctx.loadPreviousValue(), /*metrics*/
true, /*event*/
true, null, taskName, null, req.keepBinary());
IgniteBiTuple<Boolean, ?> interceptorRes = ctx.config().getInterceptor().onBeforeRemove(new CacheLazyEntry(ctx, entry.key(), old, req.keepBinary()));
if (ctx.cancelRemove(interceptorRes))
continue;
}
if (rmvKeys == null)
rmvKeys = new ArrayList<>(size);
rmvKeys.add(entry.key());
}
filtered.add(entry);
} catch (IgniteCheckedException e) {
res.addFailedKey(entry.key(), e);
}
}
// Store final batch.
if (putMap != null || rmvKeys != null) {
updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, writeVals, putMap, rmvKeys, entryProcessorMap, req, res, replicate, dhtUpdRes, taskName, expiry, sndPrevVal);
} else
assert filtered.isEmpty();
dhtUpdRes.returnValue(invokeRes);
}
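Note the UnregisteredClassException | UnregisteredBinaryTypeException catch inside the TRANSFORM branch: an entry processor that produces a value of a not-yet-registered binary type is not treated as a user error; the exception is rethrown so the outer update loop can register the type and re-run the batch. A hedged sketch of such an invocation via the public API; the MovePoint name is illustrative and Point is carried over from the earlier example.

import javax.cache.processor.EntryProcessor;
import javax.cache.processor.MutableEntry;

// Sketch: an entry processor that writes a value of a possibly unregistered
// binary type. If serializing the new value requires unknown metadata, the
// resulting UnregisteredBinaryTypeException propagates out of the TRANSFORM
// branch above and the whole batch is retried after registration.
public class MovePoint implements EntryProcessor<Integer, Point, Void> {
    @Override public Void process(MutableEntry<Integer, Point> entry, Object... args) {
        entry.setValue(new Point(42, 42));

        return null;
    }
}

// Usage, where cache is an IgniteCache<Integer, Point>:
//     cache.invoke(1, new MovePoint());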
Use of org.apache.ignite.internal.UnregisteredBinaryTypeException in project ignite by apache.
From class BPlusTree, method invoke:
/** {@inheritDoc} */
@Override public void invoke(L row, Object z, InvokeClosure<T> c) throws IgniteCheckedException {
    checkDestroyed();

    Invoke x = new Invoke(row, z, c);

    try {
        for (;;) {
            x.init();

            Result res = invokeDown(x, x.rootId, 0L, 0L, x.rootLvl);

            switch (res) {
                case RETRY:
                case RETRY_ROOT:
                    checkInterrupted();

                    continue;

                default:
                    if (!x.isFinished()) {
                        res = x.tryFinish();

                        if (res == RETRY || res == RETRY_ROOT) {
                            checkInterrupted();

                            continue;
                        }

                        assert x.isFinished() : res;
                    }

                    return;
            }
        }
    }
    catch (UnregisteredClassException | UnregisteredBinaryTypeException | CorruptedDataStructureException e) {
        throw e;
    }
    catch (IgniteCheckedException e) {
        throw new IgniteCheckedException("Runtime failure on search row: " + row, e);
    }
    catch (RuntimeException | AssertionError e) {
        throw corruptedTreeException("Runtime failure on search row: " + row, e, grpId, x.pageId);
    }
    finally {
        x.releaseAll();

        checkDestroyed();
    }
}
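The closure passed to invoke() decides, once the search lands on the target row, whether to put a new row, remove it, or leave it untouched; any UnregisteredBinaryTypeException thrown while it serializes a new row is rethrown unchanged above, preserving the register-and-retry contract. The following is a hedged sketch against the internal InvokeClosure interface; its call()/newRow()/operationType() shape, the OperationType enum, and the MyRow type are assumptions for illustration and may differ between Ignite versions.

// Hedged sketch of an InvokeClosure: PUT a replacement row computed from the
// current one. MyRow is a hypothetical row type; InvokeClosure and
// OperationType are internal Ignite APIs, not a stable public contract.
InvokeClosure<MyRow> c = new InvokeClosure<MyRow>() {
    private MyRow newRow;

    @Override public void call(MyRow oldRow) {
        // Invoked with the row currently stored under the search key (or null).
        newRow = oldRow == null ? MyRow.initial() : oldRow.incremented();
    }

    @Override public MyRow newRow() {
        return newRow; // Row to be written when operationType() == PUT.
    }

    @Override public OperationType operationType() {
        return OperationType.PUT;
    }
};

tree.invoke(searchRow, null, c);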