Use of org.apache.ignite.internal.managers.discovery.DiscoCache in project ignite by apache.
The class CacheAffinitySharedManager, method processClientCachesChanges.
/**
 * Process client cache start/close requests, called from exchange thread.
 *
 * @param msg Change request.
 */
void processClientCachesChanges(ClientCacheChangeDummyDiscoveryMessage msg) {
    AffinityTopologyVersion topVer = cctx.exchange().readyAffinityVersion();

    DiscoCache discoCache = cctx.discovery().discoCache(topVer);

    boolean crd = cctx.localNode().equals(discoCache.oldestAliveServerNode());

    Map<Integer, Boolean> startedCaches = processClientCacheStartRequests(msg, crd, topVer, discoCache);

    Set<Integer> closedCaches = processCacheCloseRequests(msg, crd, topVer);

    if (startedCaches != null || closedCaches != null)
        scheduleClientChangeMessage(startedCaches, closedCaches);
}
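The coordinator check above (comparing the local node against DiscoCache.oldestAliveServerNode()) is a recurring pattern when working with DiscoCache. Below is a minimal sketch of the same lookup extracted into a standalone helper, assuming access to a GridCacheSharedContext; the helper name isCoordinatorFor is illustrative, not part of Ignite:

import org.apache.ignite.internal.managers.discovery.DiscoCache;
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;

/** Illustrative sketch (not an Ignite API): true if the local node is coordinator for the given topology version. */
static boolean isCoordinatorFor(GridCacheSharedContext<?, ?> cctx, AffinityTopologyVersion topVer) {
    // Discovery cache snapshot bound to the given topology version.
    DiscoCache discoCache = cctx.discovery().discoCache(topVer);

    // By convention, the oldest alive server node acts as coordinator.
    return cctx.localNode().equals(discoCache.oldestAliveServerNode());
}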
Use of org.apache.ignite.internal.managers.discovery.DiscoCache in project ignite by apache.
The class IgniteAuthenticationProcessor, method start.
/** {@inheritDoc} */
@Override
public void start() throws IgniteCheckedException {
    super.start();

    if (isEnabled && !GridCacheUtils.isPersistenceEnabled(ctx.config())) {
        isEnabled = false;

        throw new IgniteCheckedException("Authentication can be enabled only for a cluster with persistence enabled. " +
            "Check the DataRegionConfiguration.");
    }

    ctx.addNodeAttribute(IgniteNodeAttributes.ATTR_AUTHENTICATION_ENABLED, isEnabled);

    GridDiscoveryManager discoMgr = ctx.discovery();
    GridIoManager ioMgr = ctx.io();

    discoMgr.setCustomEventListener(UserProposedMessage.class, new UserProposedListener());
    discoMgr.setCustomEventListener(UserAcceptedMessage.class, new UserAcceptedListener());

    // Track node joins and failures that affect authentication state.
    discoLsnr = new DiscoveryEventListener() {
        @Override
        public void onEvent(DiscoveryEvent evt, DiscoCache discoCache) {
            if (!isEnabled || ctx.isStopping())
                return;

            switch (evt.type()) {
                case EVT_NODE_LEFT:
                case EVT_NODE_FAILED:
                    onNodeLeft(evt.eventNode().id());
                    break;

                case EVT_NODE_JOINED:
                    onNodeJoin(evt.eventNode());
                    break;
            }
        }
    };

    ctx.event().addDiscoveryEventListener(discoLsnr, DISCO_EVT_TYPES);

    // Handle authentication messages arriving on the dedicated AUTH topic.
    ioLsnr = new GridMessageListener() {
        @Override
        public void onMessage(UUID nodeId, Object msg, byte plc) {
            if (!isEnabled || ctx.isStopping())
                return;

            if (msg instanceof UserManagementOperationFinishedMessage)
                onFinishMessage(nodeId, (UserManagementOperationFinishedMessage) msg);
            else if (msg instanceof UserAuthenticateRequestMessage)
                onAuthenticateRequestMessage(nodeId, (UserAuthenticateRequestMessage) msg);
            else if (msg instanceof UserAuthenticateResponseMessage)
                onAuthenticateResponseMessage((UserAuthenticateResponseMessage) msg);
        }
    };

    ioMgr.addMessageListener(GridTopic.TOPIC_AUTH, ioLsnr);

    // Single-threaded pool for user management operations.
    exec = new IgniteThreadPoolExecutor("auth", ctx.config().getIgniteInstanceName(), 1, 1, 0, new LinkedBlockingQueue<>());
}
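The key DiscoCache usage here is the DiscoveryEventListener, which receives a DiscoCache snapshot along with every discovery event. The following is a minimal sketch of the same registration pattern against the internal GridKernalContext; the method name registerTopologyListener and the println body are illustrative only, and internal APIs like this can change between Ignite versions:

import org.apache.ignite.events.DiscoveryEvent;
import org.apache.ignite.internal.GridKernalContext;
import org.apache.ignite.internal.managers.discovery.DiscoCache;
import org.apache.ignite.internal.managers.discovery.DiscoveryEventListener;

import static org.apache.ignite.events.EventType.EVT_NODE_FAILED;
import static org.apache.ignite.events.EventType.EVT_NODE_JOINED;
import static org.apache.ignite.events.EventType.EVT_NODE_LEFT;

/** Illustrative sketch: register a listener that gets the DiscoCache snapshot with each discovery event. */
static void registerTopologyListener(GridKernalContext ctx) {
    ctx.event().addDiscoveryEventListener(new DiscoveryEventListener() {
        @Override
        public void onEvent(DiscoveryEvent evt, DiscoCache discoCache) {
            // DiscoCache reflects the topology as of this event.
            System.out.println("Topology event " + evt.type() + ", nodes: " + discoCache.allNodes().size());
        }
    }, EVT_NODE_JOINED, EVT_NODE_LEFT, EVT_NODE_FAILED);
}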
Use of org.apache.ignite.internal.managers.discovery.DiscoCache in project ignite by apache.
The class SnapshotRestoreProcess, method prepareContext.
/**
 * @param req Request to prepare cache group restore from the snapshot.
 * @param metas Local snapshot metadata.
 * @return Snapshot restore operation context.
 * @throws IgniteCheckedException If failed.
 */
private SnapshotRestoreContext prepareContext(SnapshotOperationRequest req, Collection<SnapshotMetadata> metas) throws IgniteCheckedException {
    if (opCtx != null) {
        throw new IgniteCheckedException(OP_REJECT_MSG + "The previous snapshot restore operation was not completed.");
    }

    GridCacheSharedContext<?, ?> cctx = ctx.cache().context();

    // Discovery snapshot: the baseline nodes that must stay alive plus the discovery data required for affinity calculation.
    DiscoCache discoCache = ctx.discovery().discoCache();

    if (!F.transform(discoCache.aliveBaselineNodes(), F.node2id()).containsAll(req.nodes()))
        throw new IgniteCheckedException("Restore context cannot be initialized because required baseline nodes are missing: " + discoCache);

    DiscoCache discoCache0 = discoCache.copy(discoCache.version(), null);

    if (F.isEmpty(metas))
        return new SnapshotRestoreContext(req, discoCache0, Collections.emptyMap());

    if (F.first(metas).pageSize() != cctx.database().pageSize()) {
        throw new IgniteCheckedException("Incompatible memory page size " +
            "[snapshotPageSize=" + F.first(metas).pageSize() + ", local=" + cctx.database().pageSize() +
            ", snapshot=" + req.snapshotName() + ", nodeId=" + cctx.localNodeId() + ']');
    }

    Map<String, StoredCacheData> cfgsByName = new HashMap<>();
    FilePageStoreManager pageStore = (FilePageStoreManager) cctx.pageStore();

    // Metastorage can be restored only manually by directly copying files.
    for (SnapshotMetadata meta : metas) {
        for (File snpCacheDir : cctx.snapshotMgr().snapshotCacheDirectories(req.snapshotName(), meta.folderName(),
            name -> !METASTORAGE_CACHE_NAME.equals(name))) {
            String grpName = FilePageStoreManager.cacheGroupName(snpCacheDir);

            if (!F.isEmpty(req.groups()) && !req.groups().contains(grpName))
                continue;

            File cacheDir = pageStore.cacheWorkDir(snpCacheDir.getName().startsWith(CACHE_GRP_DIR_PREFIX), grpName);

            if (cacheDir.exists()) {
                if (!cacheDir.isDirectory()) {
                    throw new IgniteCheckedException("Unable to restore cache group, file with required directory " +
                        "name already exists [group=" + grpName + ", file=" + cacheDir + ']');
                }

                if (cacheDir.list().length > 0) {
                    throw new IgniteCheckedException("Unable to restore cache group - directory is not empty. " +
                        "The cache group should be destroyed manually before performing the restore operation " +
                        "[group=" + grpName + ", dir=" + cacheDir + ']');
                }

                if (!cacheDir.delete()) {
                    throw new IgniteCheckedException("Unable to remove empty cache directory " +
                        "[group=" + grpName + ", dir=" + cacheDir + ']');
                }
            }

            File tmpCacheDir = formatTmpDirName(cacheDir);

            if (tmpCacheDir.exists()) {
                throw new IgniteCheckedException("Unable to restore cache group, temp directory already exists " +
                    "[group=" + grpName + ", dir=" + tmpCacheDir + ']');
            }

            pageStore.readCacheConfigurations(snpCacheDir, cfgsByName);
        }
    }

    Map<Integer, StoredCacheData> cfgsById = cfgsByName.values().stream()
        .collect(Collectors.toMap(v -> CU.cacheId(v.config().getName()), v -> v));

    return new SnapshotRestoreContext(req, discoCache0, cfgsById);
}
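The DiscoCache-specific step above is the baseline liveness check built from aliveBaselineNodes() and F.node2id(). Here is a minimal sketch of that check on its own, assuming a GridKernalContext; the helper name allRequiredBaselineNodesAlive is illustrative, not an Ignite API:

import java.util.Collection;
import java.util.UUID;

import org.apache.ignite.internal.GridKernalContext;
import org.apache.ignite.internal.managers.discovery.DiscoCache;
import org.apache.ignite.internal.util.typedef.F;

/** Illustrative sketch: true if every required node ID is still among the alive baseline nodes. */
static boolean allRequiredBaselineNodesAlive(GridKernalContext ctx, Collection<UUID> requiredNodeIds) {
    DiscoCache discoCache = ctx.discovery().discoCache();

    // Map alive baseline nodes to their IDs and verify every required node is still present.
    return F.transform(discoCache.aliveBaselineNodes(), F.node2id()).containsAll(requiredNodeIds);
}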
Use of org.apache.ignite.internal.managers.discovery.DiscoCache in project ignite by apache.
The class ClusterCachesInfo, method isMergeConfigSupports.
/**
 * @param joiningNode Joining node.
 * @return {@code true} if the grid supports merging of cache configurations, {@code false} otherwise.
 */
public boolean isMergeConfigSupports(ClusterNode joiningNode) {
    DiscoCache discoCache = ctx.discovery().discoCache();

    // Discovery data is not available yet; assume merge is supported.
    if (discoCache == null)
        return true;

    if (joiningNode != null && joiningNode.version().compareToIgnoreTimestamp(V_MERGE_CONFIG_SINCE) < 0)
        return false;

    // Merge is supported only if every node in the topology is at least V_MERGE_CONFIG_SINCE.
    Collection<ClusterNode> nodes = discoCache.allNodes();

    for (ClusterNode node : nodes) {
        IgniteProductVersion version = node.version();

        if (version.compareToIgnoreTimestamp(V_MERGE_CONFIG_SINCE) < 0)
            return false;
    }

    return true;
}
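The same version-gating idea can be expressed as a generic check over DiscoCache.allNodes(). A minimal sketch follows, assuming IgniteProductVersion.compareToIgnoreTimestamp as used above; the helper name allNodesAtLeast is illustrative, and the behavior when no DiscoCache is available is a caller's policy decision rather than something Ignite prescribes:

import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.internal.GridKernalContext;
import org.apache.ignite.internal.managers.discovery.DiscoCache;
import org.apache.ignite.lang.IgniteProductVersion;

/** Illustrative sketch: true if every node in the current topology is at least the given version. */
static boolean allNodesAtLeast(GridKernalContext ctx, IgniteProductVersion minVer) {
    DiscoCache discoCache = ctx.discovery().discoCache();

    // No topology snapshot yet; callers decide how to treat this case.
    if (discoCache == null)
        return false;

    for (ClusterNode node : discoCache.allNodes()) {
        if (node.version().compareToIgnoreTimestamp(minVer) < 0)
            return false;
    }

    return true;
}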
Use of org.apache.ignite.internal.managers.discovery.DiscoCache in project ignite by apache.
The class IgniteTxManager, method start0.
/** {@inheritDoc} */
@Override
protected void start0() throws IgniteCheckedException {
    txHnd = new IgniteTxHandler(cctx);

    deferredAckMsgSnd = new GridDeferredAckMessageSender<GridCacheVersion>(cctx.time(), cctx.kernalContext().closure()) {
        @Override
        public int getTimeout() {
            return DEFERRED_ONE_PHASE_COMMIT_ACK_REQUEST_TIMEOUT;
        }

        @Override
        public int getBufferSize() {
            return DEFERRED_ONE_PHASE_COMMIT_ACK_REQUEST_BUFFER_SIZE;
        }

        @Override
        public void finish(UUID nodeId, Collection<GridCacheVersion> vers) {
            GridDhtTxOnePhaseCommitAckRequest ackReq = new GridDhtTxOnePhaseCommitAckRequest(vers);

            cctx.kernalContext().gateway().readLock();

            try {
                cctx.io().send(nodeId, ackReq, GridIoPolicy.SYSTEM_POOL);
            }
            catch (ClusterTopologyCheckedException ignored) {
                if (log.isDebugEnabled())
                    log.debug("Failed to send one phase commit ack to backup node because it left grid: " + nodeId);
            }
            catch (IgniteCheckedException e) {
                log.error("Failed to send one phase commit ack to backup node [backup=" + nodeId + ']', e);
            }
            finally {
                cctx.kernalContext().gateway().readUnlock();
            }
        }
    };

    cctx.gridEvents().addDiscoveryEventListener(new TransactionRecoveryListener(), EVT_NODE_FAILED, EVT_NODE_LEFT);

    // Clean up per-node transaction state when a node leaves and re-evaluate feature support on every topology change.
    cctx.gridEvents().addDiscoveryEventListener(new DiscoveryEventListener() {
        @Override
        public void onEvent(DiscoveryEvent evt, DiscoCache discoCache) {
            if (evt.type() == EVT_NODE_FAILED || evt.type() == EVT_NODE_LEFT) {
                UUID nodeId = evt.eventNode().id();

                for (TxDeadlockFuture fut : deadlockDetectFuts.values())
                    fut.onNodeLeft(nodeId);

                for (Map.Entry<GridCacheVersion, Object> entry : completedVersHashMap.entrySet()) {
                    Object obj = entry.getValue();

                    if (obj instanceof GridCacheReturnCompletableWrapper &&
                        nodeId.equals(((GridCacheReturnCompletableWrapper) obj).nodeId()))
                        removeTxReturn(entry.getKey());
                }
            }

            suspendResumeForPessimisticSupported = IgniteFeatures.allNodesSupports(
                cctx.discovery().remoteNodes(), IgniteFeatures.SUSPEND_RESUME_PESSIMISTIC_TX);
        }
    }, EVT_NODE_FAILED, EVT_NODE_LEFT, EVT_NODE_JOINED);

    this.txDeadlockDetection = new TxDeadlockDetection(cctx);

    cctx.gridIO().addMessageListener(TOPIC_TX, new DeadlockDetectionListener());

    cctx.txMetrics().onTxManagerStarted();

    keyCollisionsInfo = new KeyCollisionsHolder();

    distributedTransactionConfiguration = new DistributedTransactionConfiguration(cctx.kernalContext(), log,
        (String name, Long oldVal, Long newVal) -> {
            if (!Objects.equals(oldVal, newVal)) {
                scheduleDumpTask(
                    IGNITE_LONG_OPERATIONS_DUMP_TIMEOUT,
                    () -> cctx.kernalContext().closure().runLocalSafe(
                        () -> cctx.kernalContext().cache().context().exchange().dumpLongRunningOperations(newVal)),
                    newVal);
            }
        },
        (String name, Integer oldVal, Integer newVal) -> {
            if (!Objects.equals(oldVal, newVal)) {
                scheduleDumpTask(IGNITE_DUMP_TX_COLLISIONS_INTERVAL, this::collectTxCollisionsInfo, newVal);
            }
        });

    cctx.kernalContext().systemView().registerView(TXS_MON_LIST, TXS_MON_LIST_DESC, new TransactionViewWalker(),
        new ReadOnlyCollectionView2X<>(idMap.values(), nearIdMap.values()), TransactionView::new);
}
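The anonymous DiscoveryEventListener above cleans up per-node state (deadlock futures, completed-version wrappers) when a node leaves or fails. A minimal sketch of that cleanup pattern with a caller-owned map keyed by node ID follows; the method name trackNodeState and the map itself are illustrative, not Ignite APIs:

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.ignite.events.DiscoveryEvent;
import org.apache.ignite.internal.GridKernalContext;
import org.apache.ignite.internal.managers.discovery.DiscoCache;
import org.apache.ignite.internal.managers.discovery.DiscoveryEventListener;

import static org.apache.ignite.events.EventType.EVT_NODE_FAILED;
import static org.apache.ignite.events.EventType.EVT_NODE_LEFT;

/** Illustrative sketch: drop per-node state when a node leaves or fails. */
static void trackNodeState(GridKernalContext ctx) {
    Map<UUID, Object> perNodeState = new ConcurrentHashMap<>();

    ctx.event().addDiscoveryEventListener(new DiscoveryEventListener() {
        @Override
        public void onEvent(DiscoveryEvent evt, DiscoCache discoCache) {
            if (evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED)
                // Drop anything associated with the departed node.
                perNodeState.remove(evt.eventNode().id());
        }
    }, EVT_NODE_LEFT, EVT_NODE_FAILED);
}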