Use of org.apache.ignite.internal.util.future.GridFinishedFuture in project Ignite by Apache.
The class TcpCommunicationSpi, method dumpNodeStatistics:
/**
* @param nodeId Target node ID.
* @return Future.
*/
public IgniteInternalFuture<String> dumpNodeStatistics(final UUID nodeId) {
    StringBuilder sb = new StringBuilder("Communication SPI statistics [rmtNode=").append(nodeId).append(']').append(U.nl());
    dumpInfo(sb, nodeId);
    GridNioServer<Message> nioSrvr = nioSrvWrapper.nio();
    if (nioSrvr != null) {
        sb.append("NIO sessions statistics:");
        IgnitePredicate<GridNioSession> p = (IgnitePredicate<GridNioSession>) ses -> {
            ConnectionKey connId = ses.meta(CONN_IDX_META);
            return connId != null && nodeId.equals(connId.nodeId());
        };
        return nioSrvr.dumpStats(sb.toString(), p);
    } else {
        sb.append(U.nl()).append("GridNioServer is null.");
        return new GridFinishedFuture<>(sb.toString());
    }
}
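Worth noting in this method: dumpNodeStatistics must always return an IgniteInternalFuture<String>, so when the NIO server is unavailable it wraps the already-built report in a GridFinishedFuture, a future that is completed at construction time. A minimal, self-contained sketch of that idea (not taken from the Ignite sources; the class name and report text below are made up, and only the GridFinishedFuture(T) constructor plus the standard IgniteInternalFuture methods isDone() and get() are assumed):

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.future.GridFinishedFuture;

public class FinishedFutureSketch {
    /** Returns a future that is already completed with the given report text. */
    static IgniteInternalFuture<String> completed(String report) {
        return new GridFinishedFuture<>(report);
    }

    public static void main(String[] args) throws IgniteCheckedException {
        IgniteInternalFuture<String> fut = completed("no NIO server, nothing to dump");

        // A GridFinishedFuture is done as soon as it is constructed,
        // so get() returns immediately instead of blocking.
        System.out.println(fut.isDone()); // true
        System.out.println(fut.get());
    }
}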
Use of org.apache.ignite.internal.util.future.GridFinishedFuture in project Ignite by Apache.
The class IgnitePdsCheckpointSimulationWithRealCpDisabledTest, method runCheckpointing:
/**
 * @param ig Ignite instance.
 * @param mem Memory to use.
 * @param storeMgr Store manager.
 * @param wal Write-ahead log manager.
 * @param cacheId Cache ID.
 * @return Result map of random operations.
 * @throws Exception If failure occurred.
 */
private IgniteBiTuple<Map<FullPageId, Integer>, WALPointer> runCheckpointing(final IgniteEx ig, final PageMemoryImpl mem, final IgnitePageStoreManager storeMgr, final IgniteWriteAheadLogManager wal, final int cacheId) throws Exception {
    final ConcurrentMap<FullPageId, Integer> resMap = new ConcurrentHashMap<>();
    final FullPageId[] pages = new FullPageId[TOTAL_PAGES];
    Set<FullPageId> allocated = new HashSet<>();
    IgniteCacheDatabaseSharedManager db = ig.context().cache().context().database();
    PageIO pageIO = new DummyPageIO();
    for (int i = 0; i < TOTAL_PAGES; i++) {
        FullPageId fullId;
        db.checkpointReadLock();
        try {
            fullId = new FullPageId(mem.allocatePage(cacheId, 0, PageIdAllocator.FLAG_DATA), cacheId);
            initPage(mem, pageIO, fullId);
        } finally {
            db.checkpointReadUnlock();
        }
        resMap.put(fullId, -1);
        pages[i] = fullId;
        allocated.add(fullId);
    }
    final AtomicBoolean run = new AtomicBoolean(true);
    // Simulate transaction lock.
    final ReadWriteLock updLock = new ReentrantReadWriteLock();
    // Mark the start position.
    CheckpointRecord cpRec = new CheckpointRecord(null);
    WALPointer start = wal.log(cpRec);
    wal.flush(start, false);
    IgniteInternalFuture<Long> updFut = GridTestUtils.runMultiThreadedAsync(new Callable<Object>() {
        @Override
        public Object call() throws Exception {
            while (true) {
                FullPageId fullId = pages[ThreadLocalRandom.current().nextInt(TOTAL_PAGES)];
                updLock.readLock().lock();
                try {
                    if (!run.get())
                        return null;
                    ig.context().cache().context().database().checkpointReadLock();
                    try {
                        long page = mem.acquirePage(fullId.groupId(), fullId.pageId());
                        try {
                            long pageAddr = mem.writeLock(fullId.groupId(), fullId.pageId(), page);
                            PageIO.setPageId(pageAddr, fullId.pageId());
                            try {
                                int state = resMap.get(fullId);
                                if (state != -1) {
                                    if (VERBOSE)
                                        info("Verify page [fullId=" + fullId + ", state=" + state + ", buf=" + pageAddr + ", bhc=" + U.hexLong(System.identityHashCode(pageAddr)) + ", page=" + U.hexLong(System.identityHashCode(page)) + ']');
                                    for (int i = PageIO.COMMON_HEADER_END; i < mem.realPageSize(fullId.groupId()); i++) {
                                        assertEquals("Verify page failed [fullId=" + fullId + ", i=" + i + ", state=" + state + ", buf=" + pageAddr + ", bhc=" + U.hexLong(System.identityHashCode(pageAddr)) + ", page=" + U.hexLong(System.identityHashCode(page)) + ']', state & 0xFF, PageUtils.getByte(pageAddr, i) & 0xFF);
                                    }
                                }
                                state = (state + 1) & 0xFF;
                                if (VERBOSE)
                                    info("Write page [fullId=" + fullId + ", state=" + state + ", buf=" + pageAddr + ", bhc=" + U.hexLong(System.identityHashCode(pageAddr)) + ", page=" + U.hexLong(System.identityHashCode(page)) + ']');
                                for (int i = PageIO.COMMON_HEADER_END; i < mem.realPageSize(fullId.groupId()); i++)
                                    PageUtils.putByte(pageAddr, i, (byte) state);
                                resMap.put(fullId, state);
                            } finally {
                                mem.writeUnlock(fullId.groupId(), fullId.pageId(), page, null, true);
                            }
                        } finally {
                            mem.releasePage(fullId.groupId(), fullId.pageId(), page);
                        }
                    } finally {
                        ig.context().cache().context().database().checkpointReadUnlock();
                    }
                } finally {
                    updLock.readLock().unlock();
                }
            }
        }
    }, 8, "update-thread");
    int checkpoints = 20;
    while (checkpoints > 0) {
        Map<FullPageId, Integer> snapshot = null;
        Collection<FullPageId> pageIds;
        updLock.writeLock().lock();
        try {
            snapshot = new HashMap<>(resMap);
            pageIds = mem.beginCheckpoint(new GridFinishedFuture());
            checkpoints--;
            if (checkpoints == 0)
                // No more writes should be done at this point.
                run.set(false);
            info("Acquired pages for checkpoint: " + pageIds.size());
        } finally {
            updLock.writeLock().unlock();
        }
        boolean ok = false;
        try {
            ByteBuffer tmpBuf = ByteBuffer.allocate(mem.pageSize());
            tmpBuf.order(ByteOrder.nativeOrder());
            long begin = System.currentTimeMillis();
            long cp = 0;
            long write = 0;
            for (FullPageId fullId : pageIds) {
                long cpStart = System.nanoTime();
                Integer tag;
                AtomicReference<Integer> tag0 = new AtomicReference<>();
                PageStoreWriter pageStoreWriter = (fullPageId, buf, tagx) -> {
                    tag0.set(tagx);
                };
                while (true) {
                    mem.checkpointWritePage(fullId, tmpBuf, pageStoreWriter, null);
                    tag = tag0.get();
                    if (tag != null && tag == PageMemoryImpl.TRY_AGAIN_TAG)
                        continue;
                    break;
                }
                if (tag == null)
                    continue;
                long cpEnd = System.nanoTime();
                cp += cpEnd - cpStart;
                Integer state = snapshot.get(fullId);
                if (allocated.contains(fullId) && state != -1) {
                    tmpBuf.rewind();
                    Integer first = null;
                    for (int i = PageIO.COMMON_HEADER_END; i < mem.realPageSize(fullId.groupId()); i++) {
                        int val = tmpBuf.get(i) & 0xFF;
                        if (first == null)
                            first = val;
                        // Avoid string concat.
                        if (first != val)
                            assertEquals("Corrupted buffer at position [pageId=" + fullId + ", pos=" + i + ']', (int) first, val);
                        // Avoid string concat.
                        if (state != val)
                            assertEquals("Invalid value at position [pageId=" + fullId + ", pos=" + i + ']', (int) state, val);
                    }
                }
                tmpBuf.rewind();
                long writeStart = System.nanoTime();
                storeMgr.write(cacheId, fullId.pageId(), tmpBuf, tag, true);
                long writeEnd = System.nanoTime();
                write += writeEnd - writeStart;
                tmpBuf.rewind();
            }
            long syncStart = System.currentTimeMillis();
            storeMgr.sync(cacheId, 0);
            long end = System.currentTimeMillis();
            info("Written pages in " + (end - begin) + "ms, copy took " + (cp / 1_000_000) + "ms, " + "write took " + (write / 1_000_000) + "ms, sync took " + (end - syncStart) + "ms");
            ok = true;
        } finally {
            info("Finishing checkpoint...");
            mem.finishCheckpoint();
            info("Finished checkpoint");
            if (!ok) {
                info("Cancelling updates...");
                run.set(false);
                updFut.get();
            }
        }
        if (checkpoints != 0)
            Thread.sleep(2_000);
    }
    info("checkpoints=" + checkpoints + ", done=" + updFut.isDone());
    updFut.get();
    assertEquals(0, mem.activePagesCount());
    for (FullPageId fullId : pages) {
        long page = mem.acquirePage(fullId.groupId(), fullId.pageId());
        try {
            assertFalse("Page has a temp heap copy after the last checkpoint: [cacheId=" + fullId.groupId() + ", pageId=" + fullId.pageId() + "]", mem.hasTempCopy(page));
            assertFalse("Page is dirty after the last checkpoint: [cacheId=" + fullId.groupId() + ", pageId=" + fullId.pageId() + "]", mem.isDirty(fullId.groupId(), fullId.pageId(), page));
        } finally {
            mem.releasePage(fullId.groupId(), fullId.pageId(), page);
        }
    }
    return F.t((Map<FullPageId, Integer>) resMap, start);
}
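In this test, mem.beginCheckpoint(new GridFinishedFuture()) passes an already-completed future where production code would supply a real in-progress future; the test then drives the page write-out itself through checkpointWritePage(...) and finishCheckpoint(). A small sketch of what such a pre-completed placeholder looks like on its own (illustrative only; the class name below is made up, and it assumes the no-argument GridFinishedFuture constructor together with the IgniteInternalFuture methods isDone(), get() and listen()):

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.future.GridFinishedFuture;

public class CompletedPlaceholderSketch {
    public static void main(String[] args) throws IgniteCheckedException {
        // The no-argument constructor yields a future that is born completed with a null result.
        IgniteInternalFuture<Object> placeholder = new GridFinishedFuture<>();

        System.out.println(placeholder.isDone()); // true
        System.out.println(placeholder.get());    // null

        // Listeners attached to an already-finished future are notified right away.
        placeholder.listen(f -> System.out.println("notified immediately, done=" + f.isDone()));
    }
}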
Use of org.apache.ignite.internal.util.future.GridFinishedFuture in project Ignite by Apache.
The class CacheGroupKeyChangeTest, method checkNodeFailsDuringRotation:
/**
 * @param stopCrd {@code True} to stop coordinator.
 * @param prepare {@code True} to stop on the prepare phase. {@code False} to stop on the perform phase.
 * @param discoBlock {@code True} to block discovery, {@code False} to block communication SPI.
 * @throws Exception If failed.
 */
private void checkNodeFailsDuringRotation(boolean stopCrd, boolean prepare, boolean discoBlock) throws Exception {
    cleanPersistenceDir();
    DistributedProcessType type = prepare ? DistributedProcessType.CACHE_GROUP_KEY_CHANGE_PREPARE : DistributedProcessType.CACHE_GROUP_KEY_CHANGE_FINISH;
    InitMessageDiscoveryHook locHook = new InitMessageDiscoveryHook(type);
    if (discoBlock && stopCrd)
        discoveryHook = locHook;
    IgniteEx grid0 = startGrid(GRID_0);
    if (discoBlock && !stopCrd)
        discoveryHook = locHook;
    IgniteEx grid1 = startGrid(GRID_1);
    grid0.cluster().state(ClusterState.ACTIVE);
    createEncryptedCache(grid0, grid1, cacheName(), null);
    int grpId = CU.cacheId(cacheName());
    checkGroupKey(grpId, INITIAL_KEY_ID, MAX_AWAIT_MILLIS);
    TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(grid1);
    if (!discoBlock) {
        AtomicBoolean preparePhase = new AtomicBoolean(true);
        spi.blockMessages((node, msg) -> {
            if (msg instanceof SingleNodeMessage) {
                boolean isPrepare = preparePhase.compareAndSet(true, false);
                return prepare || !isPrepare;
            }
            return false;
        });
    }
    String alive = stopCrd ? GRID_1 : GRID_0;
    String stopped = stopCrd ? GRID_0 : GRID_1;
    IgniteFuture<Void> changeFut = grid(alive).encryption().changeCacheGroupKey(Collections.singleton(cacheName()));
    IgniteInternalFuture<?> stopFut = new GridFinishedFuture<>();
    if (discoBlock) {
        locHook.waitForBlocked(MAX_AWAIT_MILLIS);
        stopGrid(stopped, true);
        locHook.stopBlock();
    } else {
        spi.waitForBlocked();
        stopFut = runAsync(() -> stopGrid(stopped, true));
    }
    changeFut.get(MAX_AWAIT_MILLIS);
    stopFut.get(MAX_AWAIT_MILLIS);
    checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS);
    IgniteEx stoppedNode = startGrid(stopped);
    stoppedNode.resetLostPartitions(Collections.singleton(ENCRYPTED_CACHE));
    awaitPartitionMapExchange();
    checkGroupKey(grpId, INITIAL_KEY_ID + 1, MAX_AWAIT_MILLIS);
    stoppedNode.encryption().changeCacheGroupKey(Collections.singleton(cacheName())).get(MAX_AWAIT_MILLIS);
    checkGroupKey(grpId, INITIAL_KEY_ID + 2, MAX_AWAIT_MILLIS);
}
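The line IgniteInternalFuture<?> stopFut = new GridFinishedFuture<>() is the GridFinishedFuture usage of interest here: stopFut is initialized to a pre-completed future so that the unconditional stopFut.get(MAX_AWAIT_MILLIS) later in the method is a no-op on the discovery-block branch, where the node is stopped synchronously instead of via runAsync. A compact sketch of that idiom (illustrative; the class name and flag below are made up, and it assumes GridTestUtils.runAsync(Runnable) from the Ignite test framework, the same helper the test above relies on):

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.future.GridFinishedFuture;
import org.apache.ignite.testframework.GridTestUtils;

public class ConditionalAsyncSketch {
    public static void main(String[] args) throws IgniteCheckedException {
        boolean async = args.length > 0; // stands in for the discoBlock decision above

        // Default to an already-completed future so the unconditional get() below
        // is a no-op when no asynchronous stop was scheduled.
        IgniteInternalFuture<?> stopFut = new GridFinishedFuture<>();

        if (async)
            stopFut = GridTestUtils.runAsync(() -> System.out.println("stopping node asynchronously"));

        stopFut.get(10_000); // waits only if the asynchronous branch was taken
    }
}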
Use of org.apache.ignite.internal.util.future.GridFinishedFuture in project Ignite by Apache.
The class IgnitePageMemReplaceDelayedWriteUnitTest, method testReplacementWithDelayCausesLockForRead:
/**
 * Tests that page replacement with delayed write causes locking for page reads.
 *
 * @throws IgniteCheckedException If failed.
 */
@Test
public void testReplacementWithDelayCausesLockForRead() throws IgniteCheckedException {
    IgniteConfiguration cfg = getConfiguration(16 * MB);
    AtomicInteger totalEvicted = new AtomicInteger();
    PageStoreWriter pageWriter = (FullPageId fullPageId, ByteBuffer byteBuf, int tag) -> {
        log.info("Evicting " + fullPageId);
        assert getLockedPages(fullPageId).contains(fullPageId);
        assert !getSegment(fullPageId).writeLock().isHeldByCurrentThread();
        totalEvicted.incrementAndGet();
    };
    int pageSize = 4096;
    PageMemoryImpl memory = createPageMemory(cfg, pageWriter, pageSize);
    this.pageMemory = memory;
    long pagesTotal = cfg.getDataStorageConfiguration().getDefaultDataRegionConfiguration().getMaxSize() / pageSize;
    long markDirty = pagesTotal * 2 / 3;
    for (int i = 0; i < markDirty; i++) {
        long pageId = memory.allocatePage(1, 1, PageIdAllocator.FLAG_DATA);
        long ptr = memory.acquirePage(1, pageId);
        memory.releasePage(1, pageId, ptr);
    }
    GridMultiCollectionWrapper<FullPageId> ids = memory.beginCheckpoint(new GridFinishedFuture());
    int cpPages = ids.size();
    log.info("Started CP with [" + cpPages + "] pages in it, created [" + markDirty + "] pages");
    for (int i = 0; i < cpPages; i++) {
        long pageId = memory.allocatePage(1, 1, PageIdAllocator.FLAG_DATA);
        long ptr = memory.acquirePage(1, pageId);
        memory.releasePage(1, pageId, ptr);
    }
    List<Collection<FullPageId>> stripes = getAllLockedPages();
    assert !stripes.isEmpty();
    for (Collection<FullPageId> pageIds : stripes) {
        assert pageIds.isEmpty();
    }
    assert totalEvicted.get() > 0;
    memory.stop(true);
}
Use of org.apache.ignite.internal.util.future.GridFinishedFuture in project Ignite by Apache.
The class CommandProcessor, method runCommandH2:
/**
* Execute DDL statement.
*
* @param sql SQL.
* @param cmdH2 Command.
*/
private void runCommandH2(String sql, GridSqlStatement cmdH2) {
    IgniteInternalFuture fut = null;
    try {
        finishActiveTxIfNecessary();
        if (cmdH2 instanceof GridSqlCreateIndex) {
            GridSqlCreateIndex cmd = (GridSqlCreateIndex) cmdH2;
            isDdlOnSchemaSupported(cmd.schemaName());
            GridH2Table tbl = schemaMgr.dataTable(cmd.schemaName(), cmd.tableName());
            if (tbl == null)
                throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());
            assert tbl.rowDescriptor() != null;
            ensureDdlSupported(tbl);
            QueryIndex newIdx = new QueryIndex();
            newIdx.setName(cmd.index().getName());
            newIdx.setIndexType(cmd.index().getIndexType());
            LinkedHashMap<String, Boolean> flds = new LinkedHashMap<>();
            // Let's replace H2's table and property names by those operated by GridQueryProcessor.
            GridQueryTypeDescriptor typeDesc = tbl.rowDescriptor().type();
            for (Map.Entry<String, Boolean> e : cmd.index().getFields().entrySet()) {
                GridQueryProperty prop = typeDesc.property(e.getKey());
                if (prop == null)
                    throw new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, e.getKey());
                flds.put(prop.name(), e.getValue());
            }
            newIdx.setFields(flds);
            fut = ctx.query().dynamicIndexCreate(tbl.cacheName(), cmd.schemaName(), typeDesc.tableName(), newIdx, cmd.ifNotExists(), 0);
        } else if (cmdH2 instanceof GridSqlDropIndex) {
            GridSqlDropIndex cmd = (GridSqlDropIndex) cmdH2;
            isDdlOnSchemaSupported(cmd.schemaName());
            GridH2Table tbl = schemaMgr.dataTableForIndex(cmd.schemaName(), cmd.indexName());
            if (tbl != null) {
                ensureDdlSupported(tbl);
                fut = ctx.query().dynamicIndexDrop(tbl.cacheName(), cmd.schemaName(), cmd.indexName(), cmd.ifExists());
            } else {
                if (cmd.ifExists())
                    fut = new GridFinishedFuture();
                else
                    throw new SchemaOperationException(SchemaOperationException.CODE_INDEX_NOT_FOUND, cmd.indexName());
            }
        } else if (cmdH2 instanceof GridSqlCreateTable) {
            GridSqlCreateTable cmd = (GridSqlCreateTable) cmdH2;
            ctx.security().authorize(cmd.cacheName(), SecurityPermission.CACHE_CREATE);
            isDdlOnSchemaSupported(cmd.schemaName());
            GridH2Table tbl = schemaMgr.dataTable(cmd.schemaName(), cmd.tableName());
            if (tbl != null) {
                if (!cmd.ifNotExists())
                    throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_EXISTS, cmd.tableName());
            } else {
                QueryEntity e = toQueryEntity(cmd);
                CacheConfiguration<?, ?> ccfg = new CacheConfiguration<>(cmd.tableName());
                ccfg.setQueryEntities(Collections.singleton(e));
                ccfg.setSqlSchema(cmd.schemaName());
                SchemaOperationException err = QueryUtils.checkQueryEntityConflicts(ccfg, ctx.cache().cacheDescriptors().values());
                if (err != null)
                    throw err;
                if (!F.isEmpty(cmd.cacheName()) && ctx.cache().cacheDescriptor(cmd.cacheName()) != null) {
                    ctx.query().dynamicAddQueryEntity(cmd.cacheName(), cmd.schemaName(), e, cmd.parallelism(), true).get();
                } else {
                    ctx.query().dynamicTableCreate(cmd.schemaName(), e, cmd.templateName(), cmd.cacheName(), cmd.cacheGroup(), cmd.dataRegionName(), cmd.affinityKey(), cmd.atomicityMode(), cmd.writeSynchronizationMode(), cmd.backups(), cmd.ifNotExists(), cmd.encrypted(), cmd.parallelism());
                }
            }
        } else if (cmdH2 instanceof GridSqlDropTable) {
            GridSqlDropTable cmd = (GridSqlDropTable) cmdH2;
            isDdlOnSchemaSupported(cmd.schemaName());
            GridH2Table tbl = schemaMgr.dataTable(cmd.schemaName(), cmd.tableName());
            if (tbl == null) {
                if (!cmd.ifExists())
                    throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());
            } else {
                ctx.security().authorize(tbl.cacheName(), SecurityPermission.CACHE_DESTROY);
                ctx.query().dynamicTableDrop(tbl.cacheName(), cmd.tableName(), cmd.ifExists());
            }
        } else if (cmdH2 instanceof GridSqlAlterTableAddColumn) {
            GridSqlAlterTableAddColumn cmd = (GridSqlAlterTableAddColumn) cmdH2;
            isDdlOnSchemaSupported(cmd.schemaName());
            GridH2Table tbl = schemaMgr.dataTable(cmd.schemaName(), cmd.tableName());
            if (tbl == null) {
                if (!cmd.ifTableExists())
                    throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());
            } else {
                if (QueryUtils.isSqlType(tbl.rowDescriptor().type().valueClass()))
                    throw new SchemaOperationException("Cannot add column(s) because table was created " + "with " + PARAM_WRAP_VALUE + "=false option.");
                List<QueryField> cols = new ArrayList<>(cmd.columns().length);
                boolean allFieldsNullable = true;
                for (GridSqlColumn col : cmd.columns()) {
                    if (tbl.doesColumnExist(col.columnName())) {
                        if ((!cmd.ifNotExists() || cmd.columns().length != 1)) {
                            throw new SchemaOperationException(SchemaOperationException.CODE_COLUMN_EXISTS, col.columnName());
                        } else {
                            cols = null;
                            break;
                        }
                    }
                    QueryField field = new QueryField(col.columnName(), getTypeClassName(col), col.column().isNullable(), col.defaultValue(), col.precision(), col.scale());
                    cols.add(field);
                    allFieldsNullable &= field.isNullable();
                }
                if (cols != null) {
                    assert tbl.rowDescriptor() != null;
                    if (!allFieldsNullable)
                        QueryUtils.checkNotNullAllowed(tbl.cacheInfo().config());
                    fut = ctx.query().dynamicColumnAdd(tbl.cacheName(), cmd.schemaName(), tbl.rowDescriptor().type().tableName(), cols, cmd.ifTableExists(), cmd.ifNotExists());
                }
            }
        } else if (cmdH2 instanceof GridSqlAlterTableDropColumn) {
            GridSqlAlterTableDropColumn cmd = (GridSqlAlterTableDropColumn) cmdH2;
            isDdlOnSchemaSupported(cmd.schemaName());
            GridH2Table tbl = schemaMgr.dataTable(cmd.schemaName(), cmd.tableName());
            if (tbl == null) {
                if (!cmd.ifTableExists())
                    throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());
            } else {
                assert tbl.rowDescriptor() != null;
                GridCacheContext cctx = tbl.cacheContext();
                assert cctx != null;
                if (cctx.mvccEnabled())
                    throw new IgniteSQLException("Cannot drop column(s) with enabled MVCC. " + "Operation is unsupported at the moment.", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
                if (QueryUtils.isSqlType(tbl.rowDescriptor().type().valueClass()))
                    throw new SchemaOperationException("Cannot drop column(s) because table was created " + "with " + PARAM_WRAP_VALUE + "=false option.");
                List<String> cols = new ArrayList<>(cmd.columns().length);
                GridQueryTypeDescriptor type = tbl.rowDescriptor().type();
                for (String colName : cmd.columns()) {
                    if (!tbl.doesColumnExist(colName)) {
                        if ((!cmd.ifExists() || cmd.columns().length != 1)) {
                            throw new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, colName);
                        } else {
                            cols = null;
                            break;
                        }
                    }
                    SchemaOperationException err = QueryUtils.validateDropColumn(type, colName);
                    if (err != null)
                        throw err;
                    cols.add(colName);
                }
                if (cols != null) {
                    fut = ctx.query().dynamicColumnRemove(tbl.cacheName(), cmd.schemaName(), type.tableName(), cols, cmd.ifTableExists(), cmd.ifExists());
                }
            }
        } else
            throw new IgniteSQLException("Unsupported DDL operation: " + sql, IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
        if (fut != null)
            fut.get();
    } catch (SchemaOperationException e) {
        U.error(null, "DDL operation failure", e);
        throw convert(e);
    } catch (IgniteSQLException e) {
        throw e;
    } catch (Exception e) {
        throw new IgniteSQLException(e.getMessage(), e);
    }
}
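The DROP INDEX branch shows the typical IF EXISTS pattern: every DDL branch leaves an IgniteInternalFuture in fut, and the no-op case assigns new GridFinishedFuture() so that the single fut.get() at the end of the method returns immediately instead of waiting on a schema operation that was never started. A self-contained sketch of that shape (illustrative only; the class and method names below are made up, and the string result merely stands in for the future that dynamicIndexDrop(...) returns in the real code):

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.future.GridFinishedFuture;

public class IfExistsNoOpSketch {
    /** Stand-in for a branch that either starts real async work or has nothing to do. */
    static IgniteInternalFuture<?> dropIndex(boolean indexExists, boolean ifExists) {
        if (indexExists)
            return new GridFinishedFuture<>("pretend this came from a real schema operation");

        if (ifExists)
            return new GridFinishedFuture<>(); // IF EXISTS: nothing to do, already completed

        throw new IllegalStateException("Index not found");
    }

    public static void main(String[] args) throws IgniteCheckedException {
        // A single get() at the call site works for both the real and the no-op branch.
        dropIndex(false, true).get();
        System.out.println("DROP INDEX IF EXISTS completed without scheduling schema work");
    }
}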