Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class TestStorageUtils, method corruptDataEntry.
/**
* Corrupts data entry.
*
* @param ctx Context.
* @param key Key.
* @param breakCntr Break counter.
* @param breakData Break data.
*/
public static void corruptDataEntry(GridCacheContext<?, ?> ctx, Object key, boolean breakCntr, boolean breakData) throws IgniteCheckedException {
    assert !ctx.isLocal();

    int partId = ctx.affinity().partition(key);
    GridDhtLocalPartition locPart = ctx.topology().localPartition(partId);
    CacheEntry<Object, Object> e = ctx.cache().keepBinary().getEntry(key);

    KeyCacheObject keyCacheObj = e.getKey() instanceof BinaryObject
        ? (KeyCacheObject)e.getKey()
        : new KeyCacheObjectImpl(e.getKey(), null, partId);

    DataEntry dataEntry = new DataEntry(ctx.cacheId(), keyCacheObj,
        new CacheObjectImpl(breakData ? e.getValue().toString() + "brokenValPostfix" : e.getValue(), null),
        GridCacheOperation.UPDATE, new GridCacheVersion(), new GridCacheVersion(), 0L, partId,
        breakCntr ? locPart.updateCounter() + 1 : locPart.updateCounter(), DataEntry.EMPTY_FLAGS);

    IgniteCacheDatabaseSharedManager db = ctx.shared().database();

    db.checkpointReadLock();

    try {
        assert dataEntry.op() == GridCacheOperation.UPDATE;

        ctx.offheap().update(ctx, dataEntry.key(), dataEntry.value(), dataEntry.writeVersion(), dataEntry.expireTime(), locPart, null);
        ctx.offheap().dataStore(locPart).updateInitialCounter(dataEntry.partitionCounter() - 1, 1);
    }
    finally {
        db.checkpointReadUnlock();
    }
}
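A short usage sketch of the helper above, assuming a test class extending GridCommonAbstractTest with one started node and a cache registered under DEFAULT_CACHE_NAME; the key value 1 is arbitrary.
// Hypothetical test fragment: corrupt only the update counter of the partition owning key 1,
// after which a consistency check (e.g. idle_verify) is expected to report the broken counter.
IgniteEx ignite = grid(0);
IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(DEFAULT_CACHE_NAME);

cache.put(1, 1);

GridCacheContext<?, ?> cctx = ignite.cachex(DEFAULT_CACHE_NAME).context();

// breakCntr = true, breakData = false: the stored value stays intact, the counter is shifted by one.
TestStorageUtils.corruptDataEntry(cctx, 1, true, false);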
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class IgniteTxConcurrentRemoveObjectsTest, method checkTxLeavesObjectsInLocalPartition.
/**
* Too many deletes in a single transaction may overflow {@link GridDhtLocalPartition#rmvQueue}, causing entries to be
* deleted synchronously in {@link GridDhtLocalPartition#onDeferredDelete(int, KeyCacheObject, GridCacheVersion)}.
* This should not corrupt internal map state in {@link GridDhtLocalPartition}.
*
* @throws Exception If failed.
*/
public void checkTxLeavesObjectsInLocalPartition(CacheConfiguration<Integer, String> ccfg, TransactionConcurrency optimistic, TransactionIsolation isolation) throws Exception {
    IgniteEx igniteEx = grid(0);

    igniteEx.getOrCreateCache(ccfg);

    try (IgniteDataStreamer<Integer, String> dataStreamer = igniteEx.dataStreamer(DEFAULT_CACHE_NAME)) {
        for (int i = 0; i < CACHE_ENTRIES_COUNT; i++)
            dataStreamer.addData(i, UUID.randomUUID().toString());
    }

    IgniteEx client = startClientGrid(getConfiguration().setIgniteInstanceName(UUID.randomUUID().toString()));

    awaitPartitionMapExchange();

    assertEquals(CACHE_ENTRIES_COUNT, client.getOrCreateCache(DEFAULT_CACHE_NAME).size());

    try (Transaction tx = client.transactions().txStart(optimistic, isolation)) {
        IgniteCache<Integer, String> cache = client.getOrCreateCache(cacheConfiguration());

        for (int v = 0; v < CACHE_ENTRIES_COUNT; v++) {
            cache.get(v);
            cache.remove(v);
        }

        tx.commit();
    }

    GridTestUtils.waitForCondition(() -> igniteEx.context().cache().cacheGroups().stream()
        .filter(CacheGroupContext::userCache)
        .flatMap(cgctx -> cgctx.topology().localPartitions().stream())
        .mapToInt(GridDhtLocalPartition::internalSize)
        .max().orElse(-1) == 0, 500L);
}
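A sketch of how the check above might be driven from an individual test method; the test method name is hypothetical, cacheConfiguration() is the same helper used inside the check, and the concurrency/isolation pair is just one of the combinations the check accepts.
@Test
public void testOptimisticSerializableTxRemovesObjects() throws Exception {
    checkTxLeavesObjectsInLocalPartition(
        cacheConfiguration(),
        TransactionConcurrency.OPTIMISTIC,
        TransactionIsolation.SERIALIZABLE);
}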
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class CacheDataLossOnPartitionMoveTest, method testDataLossOnPartitionMove.
/**
* @throws Exception If failed.
*/
@Test
public void testDataLossOnPartitionMove() throws Exception {
    try {
        Ignite ignite = startGridsMultiThreaded(GRIDS_CNT / 2, false);

        ignite.cluster().baselineAutoAdjustEnabled(false);
        ignite.cluster().active(true);

        List<Integer> toCp = movingKeysAfterJoin(ignite, DEFAULT_CACHE_NAME, 1,
            node -> ((GridTestNode)node).setAttribute(GRP_ATTR, ODD_GRP), null);

        int blockPartId = ignite.affinity(DEFAULT_CACHE_NAME).partition(toCp.get(0));

        awaitPartitionMapExchange();

        int c = 0;

        for (int i = 0; i < 1000; i++) {
            if (ignite.affinity(DEFAULT_CACHE_NAME).partition(i) == blockPartId) {
                ignite.cache(DEFAULT_CACHE_NAME).put(i, i);

                c++;
            }
        }

        assertEquals(c, ignite.cache(DEFAULT_CACHE_NAME).size());

        startGridsMultiThreaded(GRIDS_CNT / 2, GRIDS_CNT / 2);

        // Prevent rebalancing to new nodes.
        for (Ignite ig0 : G.allGrids()) {
            TestRecordingCommunicationSpi.spi(ig0).blockMessages((node, message) -> {
                if (message instanceof GridDhtPartitionDemandMessage) {
                    assertTrue(node.order() <= GRIDS_CNT / 2);

                    GridDhtPartitionDemandMessage msg = (GridDhtPartitionDemandMessage)message;

                    return msg.groupId() == CU.cacheId(DEFAULT_CACHE_NAME);
                }

                return false;
            });
        }

        ignite.cluster().setBaselineTopology(GRIDS_CNT);

        for (Ignite ig0 : G.allGrids()) {
            if (ig0.cluster().localNode().order() <= GRIDS_CNT / 2)
                continue;

            TestRecordingCommunicationSpi.spi(ig0).waitForBlocked();
        }

        assertEquals(c, ignite.cache(DEFAULT_CACHE_NAME).size());

        int i = 0;

        while (i < GRIDS_CNT / 2) {
            stopGrid(GRIDS_CNT / 2 + i);

            i++;
        }

        awaitPartitionMapExchange();

        for (Ignite ig : G.allGrids()) {
            GridDhtLocalPartition locPart = dht(ig.cache(DEFAULT_CACHE_NAME)).topology().localPartition(blockPartId);

            assertNotNull(locPart);
            assertEquals("Unexpected state", OWNING, locPart.state());
        }

        startGridsMultiThreaded(GRIDS_CNT / 2, GRIDS_CNT / 2);

        awaitPartitionMapExchange(true, true, null);

        for (Ignite ig : G.allGrids()) {
            GridDhtLocalPartition locPart = dht(ig.cache(DEFAULT_CACHE_NAME)).topology().localPartition(blockPartId);

            assertNotNull(locPart);

            switch ((String)ig.cluster().localNode().attribute(GRP_ATTR)) {
                case EVEN_GRP:
                    assertEquals("Unexpected state", EVICTED, locPart.state());

                    break;

                case ODD_GRP:
                    assertEquals("Unexpected state", OWNING, locPart.state());

                    break;

                default:
                    fail();
            }
        }
    }
    finally {
        stopAllGrids();
    }
}
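The test depends on TestRecordingCommunicationSpi being installed on every node and on the GRP_ATTR node attribute that the cache's affinity function consumes. A hedged configuration sketch of how that could look; the attribute assignment, backup count and affinity function below are illustrative assumptions, not the test's actual configuration.
@Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
    IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);

    // Recording SPI is what makes TestRecordingCommunicationSpi.spi(ig0).blockMessages(...) above possible.
    cfg.setCommunicationSpi(new TestRecordingCommunicationSpi());

    // Alternate nodes between the even and odd groups; the affinity function is expected
    // to place the blocked partition according to this attribute.
    cfg.setUserAttributes(F.asMap(GRP_ATTR,
        getTestIgniteInstanceIndex(igniteInstanceName) % 2 == 0 ? EVEN_GRP : ODD_GRP));

    cfg.setCacheConfiguration(new CacheConfiguration<>(DEFAULT_CACHE_NAME)
        .setBackups(1)
        .setAffinity(new RendezvousAffinityFunction(false, 32)));

    return cfg;
}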
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class GridCachePartitionsStateValidatorSelfTest, method partitionMock.
/**
* @return Partition mock with specified {@code id}, {@code updateCounter} and {@code size}.
*/
private GridDhtLocalPartition partitionMock(int id, long updateCounter, long size) {
    GridDhtLocalPartition partitionMock = Mockito.mock(GridDhtLocalPartition.class);

    Mockito.when(partitionMock.id()).thenReturn(id);
    Mockito.when(partitionMock.updateCounter()).thenReturn(updateCounter);
    Mockito.when(partitionMock.fullSize()).thenReturn(size);
    Mockito.when(partitionMock.state()).thenReturn(GridDhtPartitionState.OWNING);

    return partitionMock;
}
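A minimal sketch of how such mocks might feed a state validation check: the partitions are wrapped into a mocked GridDhtPartitionTopology, assuming the code under test reads partitions through localPartitions()/currentLocalPartitions() and only touches id(), updateCounter(), fullSize() and state(). The ids, counters and sizes below are arbitrary.
List<GridDhtLocalPartition> parts = Arrays.asList(
    partitionMock(0, 100L, 50L),
    partitionMock(1, 100L, 50L));

GridDhtPartitionTopology topologyMock = Mockito.mock(GridDhtPartitionTopology.class);

Mockito.when(topologyMock.localPartitions()).thenReturn(parts);
Mockito.when(topologyMock.currentLocalPartitions()).thenReturn(parts);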
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class GridCachePartitionsStateValidationTest, method testValidationIfPartitionCountersAreInconsistent.
/**
* Test that partition state validation works correctly.
*
* @throws Exception If failed.
*/
@Test
public void testValidationIfPartitionCountersAreInconsistent() throws Exception {
    IgniteEx ignite = (IgniteEx)startGrids(2);

    ignite.cluster().active(true);

    awaitPartitionMapExchange();

    // Populate the cache to increment update counters.
    for (int i = 0; i < 1000; i++)
        ignite.cache(CACHE_NAME).put(i, i);

    // Modify the update counter of one partition.
    for (GridDhtLocalPartition partition : ignite.cachex(CACHE_NAME).context().topology().localPartitions()) {
        partition.updateCounter(100500L);

        break;
    }

    // Trigger exchange.
    startGrid(2);

    awaitPartitionMapExchange();

    // Nothing should happen (only an error message is logged) and we are still able to put data to the corrupted cache.
    ignite.cache(CACHE_NAME).put(0, 0);

    stopAllGrids();
}
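For a tighter check, the shifted counter could be read back before the exchange is triggered. A small sketch using the same APIs as above; it assumes the first element of localPartitions() is the partition modified in the loop.
GridDhtLocalPartition corrupted = ignite.cachex(CACHE_NAME).context().topology().localPartitions().get(0);

assertEquals(100500L, corrupted.updateCounter());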