Use of org.apache.ignite.internal.processors.cache.CacheObjectImpl in project ignite by apache.
The class TestStorageUtils, method corruptDataEntry.
/**
 * Corrupts a data entry.
 *
 * @param ctx Cache context.
 * @param key Key.
 * @param breakCntr Whether to break the partition update counter.
 * @param breakData Whether to break the stored value.
 */
public static void corruptDataEntry(GridCacheContext<?, ?> ctx, Object key, boolean breakCntr, boolean breakData) throws IgniteCheckedException {
    assert !ctx.isLocal();

    int partId = ctx.affinity().partition(key);
    GridDhtLocalPartition locPart = ctx.topology().localPartition(partId);
    CacheEntry<Object, Object> e = ctx.cache().keepBinary().getEntry(key);

    KeyCacheObject keyCacheObj = e.getKey() instanceof BinaryObject
        ? (KeyCacheObject)e.getKey()
        : new KeyCacheObjectImpl(e.getKey(), null, partId);

    DataEntry dataEntry = new DataEntry(ctx.cacheId(), keyCacheObj,
        new CacheObjectImpl(breakData ? e.getValue().toString() + "brokenValPostfix" : e.getValue(), null),
        GridCacheOperation.UPDATE, new GridCacheVersion(), new GridCacheVersion(), 0L, partId,
        breakCntr ? locPart.updateCounter() + 1 : locPart.updateCounter(), DataEntry.EMPTY_FLAGS);

    IgniteCacheDatabaseSharedManager db = ctx.shared().database();

    db.checkpointReadLock();

    try {
        assert dataEntry.op() == GridCacheOperation.UPDATE;

        ctx.offheap().update(ctx, dataEntry.key(), dataEntry.value(), dataEntry.writeVersion(), dataEntry.expireTime(), locPart, null);
        ctx.offheap().dataStore(locPart).updateInitialCounter(dataEntry.partitionCounter() - 1, 1);
    }
    finally {
        db.checkpointReadUnlock();
    }
}
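For orientation, a minimal usage sketch follows. The grid index, cache name and surrounding test harness are assumptions for illustration and are not part of the source above.

// Hypothetical test fragment: corrupt one entry's update counter and another entry's value,
// so that a later consistency check (e.g. idle_verify) reports the partition as inconsistent.
IgniteEx node = grid(0);                                          // assumed test grid
GridCacheContext<?, ?> cctx = node.cachex("testCache").context(); // assumed cache name

corruptDataEntry(cctx, 0, true, false);  // break the partition update counter for key 0
corruptDataEntry(cctx, 1, false, true);  // break the stored value for key 1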
Use of org.apache.ignite.internal.processors.cache.CacheObjectImpl in project ignite by apache.
The class UserCacheObjectImpl, method prepareForCache.
/** {@inheritDoc} */
@Override public CacheObject prepareForCache(CacheObjectContext ctx) {
    try {
        IgniteCacheObjectProcessor proc = ctx.kernalContext().cacheObjects();

        if (valBytes == null)
            valBytes = proc.marshal(ctx, val);

        if (ctx.storeValue()) {
            boolean p2pEnabled = ctx.kernalContext().config().isPeerClassLoadingEnabled();

            ClassLoader ldr = p2pEnabled ?
                IgniteUtils.detectClass(this.val).getClassLoader() : val.getClass().getClassLoader();

            Object val = this.val != null && proc.immutable(this.val) ? this.val :
                proc.unmarshal(ctx, valBytes, ldr);

            return new CacheObjectImpl(val, valBytes);
        }

        return new CacheObjectImpl(null, valBytes);
    }
    catch (IgniteCheckedException e) {
        throw new IgniteException("Failed to marshal object: " + val, e);
    }
}
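As a rough sketch of where this conversion fits: a value handed in through the public API is wrapped in UserCacheObjectImpl and converted once via prepareForCache, and the resulting CacheObjectImpl keeps the deserialized value only when ctx.storeValue() is true. The variable names and the CacheObjectContext setup below are assumptions, not taken from the source.

// Minimal sketch, assuming a CacheObjectContext 'coCtx' obtained from a started cache.
Object userVal = new int[] {1, 2, 3};                        // mutable user-supplied value
CacheObject wrapped = new UserCacheObjectImpl(userVal, null); // valBytes not marshalled yet

// Marshals the value (valBytes == null on first call) and returns a plain CacheObjectImpl:
// (val, valBytes) when coCtx.storeValue() is true, otherwise (null, valBytes).
CacheObject stored = wrapped.prepareForCache(coCtx);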
Use of org.apache.ignite.internal.processors.cache.CacheObjectImpl in project ignite by apache.
The class GridCommandHandlerConsistencyTest, method fillCache.
/** Fills the cache, writing a (possibly incremented) value directly to each owner of every key, skipping the filtered node while waiting for cache startup. */
private void fillCache(String name, Ignite filtered, boolean incVal) throws Exception {
    for (Ignite node : G.allGrids()) {
        if (node.equals(filtered))
            continue;

        while (((IgniteEx)node).cachex(name) == null) // Waiting for cache internals to init.
            U.sleep(1);
    }

    GridCacheVersionManager mgr = ((GridCacheAdapter)grid(1).cachex(name).cache()).context().shared().versions();

    for (int key = 0; key < PARTITIONS; key++) {
        List<Ignite> nodes = new ArrayList<>();

        nodes.add(primaryNode(key, name));
        nodes.addAll(backupNodes(key, name));

        Collections.shuffle(nodes);

        int val = key;
        Object obj;

        for (Ignite node : nodes) {
            IgniteInternalCache cache = ((IgniteEx)node).cachex(name);
            GridCacheAdapter adapter = (GridCacheAdapter)cache.cache();
            GridCacheEntryEx entry = adapter.entryEx(key);

            val = incVal ? ++val : val;

            if (binaryCache()) {
                BinaryObjectBuilder builder = node.binary().builder("org.apache.ignite.TestValue");

                builder.setField("val", val);

                obj = builder.build();
            }
            else
                obj = val;

            boolean init = entry.initialValue(
                new CacheObjectImpl(obj, null), // Incremental or same value.
                mgr.next(entry.context().kernalContext().discovery().topologyVersion()), // Incremental version.
                0, 0, false, AffinityTopologyVersion.NONE, GridDrType.DR_NONE, false, false);

            assertTrue("iterableKey " + key + " already inited", init);
        }
    }
}
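A hedged sketch of how such a helper is typically driven from a test body; the cache name below is a placeholder, not from the source.

// Hypothetical: make replicas of "consistencyCache" diverge by writing an incremented
// value to each owner of every key. 'null' means no node is excluded from the
// wait-for-cache-init loop (i.e. the cache is expected to exist on all nodes).
fillCache("consistencyCache", null, true);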
Use of org.apache.ignite.internal.processors.cache.CacheObjectImpl in project ignite by apache.
The class ClientDataStreamerReader, method readCacheObject.
/**
* Read cache object from the stream as raw bytes to avoid marshalling.
*/
private static <T extends CacheObject> T readCacheObject(BinaryReaderExImpl reader, boolean isKey) {
    BinaryInputStream in = reader.in();

    int pos0 = in.position();

    Object obj = reader.readObjectDetached();

    if (obj == null)
        return null;

    if (obj instanceof CacheObject)
        return (T)obj;

    int pos1 = in.position();

    in.position(pos0);

    byte[] objBytes = in.readByteArray(pos1 - pos0);

    return isKey ? (T)new KeyCacheObjectImpl(obj, objBytes, -1) : (T)new CacheObjectImpl(obj, objBytes);
}
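A brief sketch of the calling side; the reader is assumed to be positioned at a key/value pair inside a data streamer request, and the pairing shown is illustrative rather than copied from the source.

// Hypothetical: read a key and its value from the request in raw (non-deserialized) form.
KeyCacheObject key = readCacheObject(reader, true); // isKey = true  -> KeyCacheObjectImpl
CacheObject val = readCacheObject(reader, false);   // isKey = false -> CacheObjectImpl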
Use of org.apache.ignite.internal.processors.cache.CacheObjectImpl in project ignite by apache.
The class InconsistentNodeApplication, method run.
/** {@inheritDoc} */
@Override protected void run(JsonNode jsonNode) throws Exception {
    String cacheName = jsonNode.get("cacheName").asText();
    int amount = jsonNode.get("amount").asInt();
    int parts = jsonNode.get("parts").asInt();
    boolean tx = jsonNode.get("tx").asBoolean();

    markInitialized();

    waitForActivation();

    CacheConfiguration<Integer, Integer> cfg = new CacheConfiguration<>(cacheName);

    cfg.setAtomicityMode(tx ? TRANSACTIONAL : ATOMIC);
    cfg.setCacheMode(CacheMode.REPLICATED);
    cfg.setAffinity(new RendezvousAffinityFunction().setPartitions(parts));

    ignite.getOrCreateCache(cfg);

    GridCacheVersionManager mgr =
        ((GridCacheAdapter)((IgniteEx)ignite).cachex(cacheName).cache()).context().shared().versions();

    int cnt = 0;

    for (int key = 0; key < amount; key += ThreadLocalRandom.current().nextInt(1, 3)) { // Random shift.
        IgniteInternalCache<?, ?> cache = ((IgniteEx)ignite).cachex(cacheName);
        GridCacheAdapter<?, ?> adapter = (GridCacheAdapter)cache.cache();
        GridCacheEntryEx entry = adapter.entryEx(key);

        boolean init = entry.initialValue(
            new CacheObjectImpl(cnt, null), // Incremental value.
            mgr.next(entry.context().kernalContext().discovery().topologyVersion()), // Incremental version.
            0, 0, false, AffinityTopologyVersion.NONE, GridDrType.DR_NONE, false, false);

        assert init : "iterableKey " + key + " already inited";

        if (cnt % 1_000 == 0)
            log.info("APPLICATION_STREAMED [entries=" + cnt + "]");

        cnt++;
    }

    log.info("APPLICATION_STREAMING_FINISHED [entries=" + cnt + "]");

    while (!terminated())
        U.sleep(100); // Keeping node alive.

    markFinished();
}
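For reference, a hedged sketch of the JSON parameters this method expects, built with Jackson's ObjectMapper/ObjectNode (com.fasterxml.jackson.databind); the concrete values are placeholders, not taken from the source.

// Hypothetical configuration; the field names match those read at the top of run(...).
ObjectMapper mapper = new ObjectMapper();
ObjectNode cfg = mapper.createObjectNode();

cfg.put("cacheName", "testCache"); // cache to create and fill
cfg.put("amount", 10_000);         // upper bound of the key range
cfg.put("parts", 32);              // partitions for the RendezvousAffinityFunction
cfg.put("tx", false);              // false -> ATOMIC, true -> TRANSACTIONAL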