Use of org.apache.ignite.internal.processors.cache.distributed.dht.IgniteClusterReadOnlyException in project ignite by apache.
The class GridCacheAtomicSequenceImpl, method internalUpdate.
/**
* Synchronous sequence update operation. Adds the given amount to the sequence value.
*
* @param l Increment amount.
* @param updateCall Cache call that will update the sequence reservation count in accordance with {@code l}.
* @param updated If {@code true}, returns the sequence value after the update, otherwise returns the sequence value
* prior to the update.
* @return Sequence value.
* @throws IgniteException If update failed.
*/
private long internalUpdate(long l, @Nullable Callable<Long> updateCall, boolean updated) throws IgniteException {
checkRemoved();
assert l > 0;
if (ctx.shared().readOnlyMode()) {
throw U.convertException(new CacheInvalidStateException(new IgniteClusterReadOnlyException(String.format(CLUSTER_READ_ONLY_MODE_ERROR_MSG_FORMAT, "sequence", ctx.group().name(), ctx.name()))));
}
localUpdate.lock();
try {
// If reserved range isn't exhausted.
long locVal0 = locVal;
if (locVal0 + l <= upBound) {
locVal = locVal0 + l;
return updated ? locVal0 + l : locVal0;
}
} finally {
localUpdate.unlock();
}
AffinityTopologyVersion lockedVer = ctx.shared().lockedTopologyVersion(null);
// We need two separate locks here because two independent threads may attempt to update the sequence
// simultaneously, one thread with a locked topology and the other with an unlocked one.
// We cannot use the same lock for both cases because it leads to a deadlock when the free-topology thread
// waits for a topology change, and the locked-topology thread waits to acquire the lock.
// If a thread has locked the topology, it must bypass sync with non-locked threads, but at the same time
// we do not want multiple threads to attempt to run identical cache updates.
ReentrantLock distLock = lockedVer == null ? distUpdateFreeTop : distUpdateLockedTop;
distLock.lock();
try {
if (updateCall == null)
updateCall = internalUpdate(l, updated);
try {
return CU.retryTopologySafe(updateCall);
} catch (Exception e) {
throw checkRemovedAfterFail(e);
}
} finally {
distLock.unlock();
}
}
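Below is a minimal caller-side sketch of how this read-only check surfaces through the public IgniteAtomicSequence API. The node setup, the sequence name, and the use of the internal X helper are illustrative assumptions, not part of the code above.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteAtomicSequence;
import org.apache.ignite.IgniteException;
import org.apache.ignite.Ignition;
import org.apache.ignite.cluster.ClusterState;
import org.apache.ignite.internal.processors.cache.distributed.dht.IgniteClusterReadOnlyException;
import org.apache.ignite.internal.util.typedef.X;

public class SequenceReadOnlyExample {
    public static void main(String[] args) {
        // Assumes a single server node started with defaults.
        try (Ignite ignite = Ignition.start()) {
            // Create the sequence while the cluster is still writable.
            IgniteAtomicSequence seq = ignite.atomicSequence("demo-seq", 0, true);

            ignite.cluster().state(ClusterState.ACTIVE_READ_ONLY);

            try {
                // internalUpdate() above rejects the increment before touching the reserved range.
                seq.incrementAndGet();
            }
            catch (IgniteException e) {
                // The cause chain carries IgniteClusterReadOnlyException.
                System.out.println("Read-only cause present: " + X.hasCause(e, IgniteClusterReadOnlyException.class));
            }
        }
    }
}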
Use of org.apache.ignite.internal.processors.cache.distributed.dht.IgniteClusterReadOnlyException in project ignite by apache.
The class GridCacheProcessor, method checkReadOnlyState.
/**
* Checks whether the cluster is in the {@link ClusterState#ACTIVE_READ_ONLY} state and rejects the operation if it is.
*
* @param opName Operation name.
* @param cacheGrpNameClo Closure for getting the cache group name, if it is needed (optional).
* @param cacheNameClo Closure for getting the cache name, if it is needed (optional).
* @throws CacheException If the cluster is in the {@link ClusterState#ACTIVE_READ_ONLY} state.
*/
private void checkReadOnlyState(String opName, @Nullable IgniteOutClosure<String> cacheGrpNameClo, @Nullable IgniteOutClosure<String> cacheNameClo) {
if (sharedCtx.readOnlyMode()) {
String cacheName = cacheNameClo == null ? null : cacheNameClo.apply();
String cacheGrpName = cacheGrpNameClo == null ? null : cacheGrpNameClo.apply();
String errorMsg = format(CLUSTER_READ_ONLY_MODE_ERROR_MSG_FORMAT, opName, cacheGrpName, cacheName);
throw new CacheException(new IgniteClusterReadOnlyException(errorMsg));
}
}
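The closures let the cache group and cache names be resolved lazily, only when the error message actually has to be built. Below is a standalone sketch of the same guard pattern; the class, field, and constant names are illustrative, not Ignite internals.

import javax.cache.CacheException;
import org.apache.ignite.internal.processors.cache.distributed.dht.IgniteClusterReadOnlyException;
import org.apache.ignite.lang.IgniteOutClosure;

/** Illustrative re-implementation of the lazy-closure read-only guard. */
public class ReadOnlyGuardSketch {
    /** Assumed message format, standing in for CLUSTER_READ_ONLY_MODE_ERROR_MSG_FORMAT. */
    private static final String ERR_FMT =
        "Failed to perform %s (cluster is in read-only mode) [cacheGrp=%s, cache=%s]";

    /** Flag standing in for the shared context's readOnlyMode(). */
    private volatile boolean readOnly;

    void checkReadOnly(String opName, IgniteOutClosure<String> grpNameClo, IgniteOutClosure<String> cacheNameClo) {
        if (readOnly) {
            // Names are computed only on the failure path.
            String grpName = grpNameClo == null ? null : grpNameClo.apply();
            String cacheName = cacheNameClo == null ? null : cacheNameClo.apply();

            throw new CacheException(
                new IgniteClusterReadOnlyException(String.format(ERR_FMT, opName, grpName, cacheName)));
        }
    }
}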
Use of org.apache.ignite.internal.processors.cache.distributed.dht.IgniteClusterReadOnlyException in project ignite by apache.
The class ClusterCachesInfo, method processStartNewCacheRequest.
/**
* @param exchangeActions Exchange actions to update.
* @param topVer Topology version.
* @param persistedCfgs {@code True} if processing the start of persisted caches during cluster activation.
* @param res Accumulator for cache change process results.
* @param req Cache change request.
* @param cacheName Cache name.
* @return {@code True} if there were no errors.
*/
private boolean processStartNewCacheRequest(ExchangeActions exchangeActions, AffinityTopologyVersion topVer, boolean persistedCfgs, CacheChangeProcessResult res, DynamicCacheChangeRequest req, String cacheName) {
assert exchangeActions != null;
CacheConfiguration<?, ?> ccfg = req.startCacheConfiguration();
IgniteCheckedException err = null;
if (ctx.cache().context().readOnlyMode()) {
err = new IgniteClusterReadOnlyException(String.format(CLUSTER_READ_ONLY_MODE_ERROR_MSG_FORMAT, "start cache", ccfg.getGroupName(), cacheName));
}
if (err == null) {
String conflictErr = checkCacheConflict(req.startCacheConfiguration());
if (conflictErr != null) {
U.warn(log, "Ignore cache start request. " + conflictErr);
err = new IgniteCheckedException("Failed to start cache. " + conflictErr);
}
}
if (err == null)
err = QueryUtils.checkQueryEntityConflicts(req.startCacheConfiguration(), registeredCaches.values());
if (err == null) {
GridEncryptionManager encMgr = ctx.encryption();
if (ccfg.isEncryptionEnabled()) {
if (encMgr.isMasterKeyChangeInProgress())
err = new IgniteCheckedException("Cache start failed. Master key change is in progress.");
else if (encMgr.masterKeyDigest() != null && !Arrays.equals(encMgr.masterKeyDigest(), req.masterKeyDigest())) {
err = new IgniteCheckedException("Cache start failed. The request was initiated before " + "the master key change and can't be processed.");
}
if (err != null)
U.warn(log, "Ignore cache start request during the master key change process.", err);
}
}
if (err == null && req.restartId() == null) {
IgniteSnapshotManager snapshotMgr = ctx.cache().context().snapshotMgr();
if (snapshotMgr.isRestoring(ccfg)) {
err = new IgniteCheckedException("Cache start failed. A cache or group with the same name is " + "currently being restored from a snapshot [cache=" + cacheName + (ccfg.getGroupName() == null ? "" : ", group=" + ccfg.getGroupName()) + ']');
}
}
if (err != null) {
if (persistedCfgs)
res.errs.add(err);
else
ctx.cache().completeCacheStartFuture(req, false, err);
return false;
}
assert req.cacheType() != null : req;
assert F.eq(ccfg.getName(), cacheName) : req;
int cacheId = CU.cacheId(cacheName);
CacheGroupDescriptor grpDesc = registerCacheGroup(exchangeActions, topVer, ccfg, cacheId, req.initiatingNodeId(), req.deploymentId(), req.encryptionKey(), req.cacheConfigurationEnrichment());
DynamicCacheDescriptor startDesc = new DynamicCacheDescriptor(ctx, ccfg, req.cacheType(), grpDesc, false, req.initiatingNodeId(), false, req.sql(), req.deploymentId(), req.schema(), req.cacheConfigurationEnrichment());
DynamicCacheDescriptor old = registeredCaches.put(ccfg.getName(), startDesc);
registeredCachesById.put(startDesc.cacheId(), startDesc);
restartingCaches.remove(ccfg.getName());
assert old == null;
ctx.discovery().setCacheFilter(startDesc.cacheId(), grpDesc.groupId(), ccfg.getName(), ccfg.getNearConfiguration() != null);
if (!persistedCfgs) {
ctx.discovery().addClientNode(cacheName, req.initiatingNodeId(), req.nearCacheConfiguration() != null);
}
res.addedDescs.add(startDesc);
exchangeActions.addCacheToStart(req, startDesc);
return true;
}
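A caller-side sketch of the read-only branch above, assuming a single default-configured node: a dynamic cache start issued while the cluster is in ACTIVE_READ_ONLY is expected to surface as a CacheException whose cause chain contains IgniteClusterReadOnlyException. The cache name and the use of the internal X helper are illustrative.

import javax.cache.CacheException;
import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cluster.ClusterState;
import org.apache.ignite.internal.processors.cache.distributed.dht.IgniteClusterReadOnlyException;
import org.apache.ignite.internal.util.typedef.X;

public class StartCacheReadOnlyExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            ignite.cluster().state(ClusterState.ACTIVE_READ_ONLY);

            try {
                // Triggers processStartNewCacheRequest(), which rejects the start in read-only mode.
                ignite.createCache("new-cache");
            }
            catch (CacheException e) {
                System.out.println("Start rejected, read-only cause present: "
                    + X.hasCause(e, IgniteClusterReadOnlyException.class));
            }
        }
    }
}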
Use of org.apache.ignite.internal.processors.cache.distributed.dht.IgniteClusterReadOnlyException in project ignite by apache.
The class DataStreamerImpl, method load0.
/**
* @param entries Entries.
* @param resFut Result future.
* @param activeKeys Active keys.
* @param remaps Remaps count.
* @param remapNode Node for remap. In case an update with {@code allowOverwrite() == false} fails on one node,
* we don't need to send the update request to all affinity nodes again if the topology version has not changed.
* @param remapTopVer Topology version.
*/
private void load0(Collection<? extends DataStreamerEntry> entries, final GridFutureAdapter<Object> resFut, @Nullable final Collection<KeyCacheObjectWrapper> activeKeys, final int remaps, ClusterNode remapNode, AffinityTopologyVersion remapTopVer) {
try {
assert entries != null;
final boolean remap = remaps > 0;
if (!remap) {
// Failed data should be processed prior to new data.
acquireRemapSemaphore();
}
if (!isWarningPrinted) {
synchronized (this) {
if (!allowOverwrite() && !isWarningPrinted) {
U.warn(log, "Data streamer will not overwrite existing cache entries for better performance " + "(to change, set allowOverwrite to true)");
}
isWarningPrinted = true;
}
}
Map<ClusterNode, Collection<DataStreamerEntry>> mappings = new HashMap<>();
boolean initPda = ctx.deploy().enabled() && jobPda == null;
GridCacheAdapter cache = ctx.cache().internalCache(cacheName);
if (cache == null)
throw new IgniteCheckedException("Cache not created or already destroyed.");
GridCacheContext cctx = cache.context();
GridCacheGateway gate = null;
AffinityTopologyVersion topVer;
if (!cctx.isLocal()) {
GridDhtPartitionsExchangeFuture exchFut = ctx.cache().context().exchange().lastTopologyFuture();
if (!exchFut.isDone()) {
ExchangeActions acts = exchFut.exchangeActions();
if (acts != null && acts.cacheStopped(CU.cacheId(cacheName)))
throw new CacheStoppedException(cacheName);
}
// It is safe to block here even if the cache gate is acquired.
topVer = exchFut.get();
} else
topVer = ctx.cache().context().exchange().readyAffinityVersion();
List<List<ClusterNode>> assignments = cctx.affinity().assignments(topVer);
if (!allowOverwrite() && !cctx.isLocal()) {
// Cases where cctx required.
gate = cctx.gate();
gate.enter();
}
try {
for (DataStreamerEntry entry : entries) {
List<ClusterNode> nodes;
try {
KeyCacheObject key = entry.getKey();
assert key != null;
if (initPda) {
if (cacheObjCtx.addDeploymentInfo())
jobPda = new DataStreamerPda(key.value(cacheObjCtx, false), entry.getValue() != null ? entry.getValue().value(cacheObjCtx, false) : null, rcvr);
else if (rcvr != null)
jobPda = new DataStreamerPda(rcvr);
initPda = false;
}
if (key.partition() == -1)
key.partition(cctx.affinity().partition(key, false));
if (!allowOverwrite() && remapNode != null && F.eq(topVer, remapTopVer))
nodes = Collections.singletonList(remapNode);
else
nodes = nodes(key, topVer, cctx);
} catch (IgniteCheckedException e) {
resFut.onDone(e);
return;
}
if (F.isEmpty(nodes)) {
resFut.onDone(new ClusterTopologyException("Failed to map key to node " + "(no nodes with cache found in topology) [infos=" + entries.size() + ", cacheName=" + cacheName + ']'));
return;
}
for (ClusterNode node : nodes) {
Collection<DataStreamerEntry> col = mappings.get(node);
if (col == null)
mappings.put(node, col = new ArrayList<>());
col.add(entry);
}
}
for (final Map.Entry<ClusterNode, Collection<DataStreamerEntry>> e : mappings.entrySet()) {
final ClusterNode node = e.getKey();
final UUID nodeId = e.getKey().id();
Buffer buf = bufMappings.get(nodeId);
if (buf == null) {
Buffer old = bufMappings.putIfAbsent(nodeId, buf = new Buffer(e.getKey()));
if (old != null)
buf = old;
}
final Collection<DataStreamerEntry> entriesForNode = e.getValue();
IgniteInClosure<IgniteInternalFuture<?>> lsnr = new IgniteInClosure<IgniteInternalFuture<?>>() {
@Override
public void apply(IgniteInternalFuture<?> t) {
try {
t.get();
if (activeKeys != null) {
for (DataStreamerEntry e : entriesForNode) activeKeys.remove(new KeyCacheObjectWrapper(e.getKey()));
if (activeKeys.isEmpty())
resFut.onDone();
} else {
assert entriesForNode.size() == 1;
// That was a single key, so complete the result future right away.
resFut.onDone();
}
} catch (IgniteClientDisconnectedCheckedException e1) {
if (log.isDebugEnabled())
log.debug("Future finished with disconnect error [nodeId=" + nodeId + ", err=" + e1 + ']');
resFut.onDone(e1);
} catch (IgniteCheckedException e1) {
if (log.isDebugEnabled())
log.debug("Future finished with error [nodeId=" + nodeId + ", err=" + e1 + ']');
if (cancelled) {
resFut.onDone(new IgniteCheckedException("Data streamer has been cancelled: " + DataStreamerImpl.this, e1));
} else if (remaps + 1 > maxRemapCnt) {
resFut.onDone(new IgniteCheckedException("Failed to finish operation (too many remaps): " + remaps, e1));
} else if (X.hasCause(e1, IgniteClusterReadOnlyException.class)) {
resFut.onDone(new IgniteClusterReadOnlyException("Failed to finish operation. Cluster in read-only mode!", e1));
} else {
try {
remapSem.acquire();
final Runnable r = new Runnable() {
@Override
public void run() {
try {
if (cancelled)
closedException();
load0(entriesForNode, resFut, activeKeys, remaps + 1, node, topVer);
} catch (Throwable ex) {
resFut.onDone(new IgniteCheckedException("DataStreamer remapping failed. ", ex));
} finally {
remapSem.release();
}
}
};
dataToRemap.add(r);
if (!remapOwning.get() && remapOwning.compareAndSet(false, true)) {
ctx.closure().callLocalSafe(new GPC<Boolean>() {
@Override
public Boolean call() {
boolean locked = true;
while (locked || !dataToRemap.isEmpty()) {
if (!locked && !remapOwning.compareAndSet(false, true))
return false;
try {
Runnable r = dataToRemap.poll();
if (r != null)
r.run();
} finally {
if (!dataToRemap.isEmpty())
locked = true;
else {
remapOwning.set(false);
locked = false;
}
}
}
return true;
}
}, true);
}
} catch (InterruptedException e2) {
resFut.onDone(e2);
}
}
}
}
};
GridCompoundFuture opFut = new SilentCompoundFuture();
opFut.listen(lsnr);
final List<GridFutureAdapter<?>> futs;
try {
futs = buf.update(entriesForNode, topVer, assignments, opFut, remap);
opFut.markInitialized();
} catch (IgniteInterruptedCheckedException e1) {
resFut.onDone(e1);
return;
}
if (ctx.discovery().node(nodeId) == null) {
if (bufMappings.remove(nodeId, buf)) {
final Buffer buf0 = buf;
waitAffinityAndRun(new GridPlainRunnable() {
@Override
public void run() {
buf0.onNodeLeft();
if (futs != null) {
Throwable ex = new ClusterTopologyCheckedException("Failed to wait for request completion (node has left): " + nodeId);
for (int i = 0; i < futs.size(); i++) futs.get(i).onDone(ex);
}
}
}, ctx.discovery().topologyVersion(), false);
}
}
}
} finally {
if (gate != null)
gate.leave();
}
} catch (Exception ex) {
resFut.onDone(new IgniteCheckedException("DataStreamer data loading failed.", ex));
}
}
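A caller-side sketch of how this short-circuit surfaces through the public IgniteDataStreamer API, assuming a single default node. The cache name is illustrative, and the exact wrapper exception may vary, but per the listener above the cause chain should contain IgniteClusterReadOnlyException.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteDataStreamer;
import org.apache.ignite.Ignition;
import org.apache.ignite.cluster.ClusterState;
import org.apache.ignite.internal.processors.cache.distributed.dht.IgniteClusterReadOnlyException;
import org.apache.ignite.internal.util.typedef.X;

public class StreamerReadOnlyExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            ignite.getOrCreateCache("stream-cache");

            ignite.cluster().state(ClusterState.ACTIVE_READ_ONLY);

            try (IgniteDataStreamer<Integer, String> streamer = ignite.dataStreamer("stream-cache")) {
                streamer.addData(1, "value");

                // flush() forces the buffered batch out; load0() then fails the batch future
                // with IgniteClusterReadOnlyException instead of remapping.
                streamer.flush();
            }
            catch (Exception e) {
                System.out.println("Streaming rejected, read-only cause present: "
                    + X.hasCause(e, IgniteClusterReadOnlyException.class));
            }
        }
    }
}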
Use of org.apache.ignite.internal.processors.cache.distributed.dht.IgniteClusterReadOnlyException in project ignite by apache.
The class IgniteH2Indexing, method executeDml.
/**
* Executes an already-parsed DML {@link SqlFieldsQuery}.
*
* @param qryDesc Query descriptor.
* @param qryParams Parameters.
* @param dml DML.
* @param cancel Query cancel state holder.
* @return Query result.
*/
private List<? extends FieldsQueryCursor<List<?>>> executeDml(QueryDescriptor qryDesc, QueryParameters qryParams, QueryParserResultDml dml, GridQueryCancel cancel) {
IndexingQueryFilter filter = (qryDesc.local() ? backupFilter(null, qryParams.partitions()) : null);
long qryId = registerRunningQuery(qryDesc, qryParams, cancel, dml.statement());
Exception failReason = null;
try (TraceSurroundings ignored = MTC.support(ctx.tracing().create(SQL_DML_QRY_EXECUTE, MTC.span()))) {
if (!dml.mvccEnabled() && !updateInTxAllowed && ctx.cache().context().tm().inUserTx()) {
throw new IgniteSQLException("DML statements are not allowed inside a transaction over " + "cache(s) with TRANSACTIONAL atomicity mode (change atomicity mode to " + "TRANSACTIONAL_SNAPSHOT or disable this error message with system property " + "\"-DIGNITE_ALLOW_DML_INSIDE_TRANSACTION=true\")");
}
if (!qryDesc.local()) {
return executeUpdateDistributed(qryId, qryDesc, qryParams, dml, cancel);
} else {
UpdateResult updRes = executeUpdate(qryId, qryDesc, qryParams, dml, true, filter, cancel);
return singletonList(new QueryCursorImpl<>(new Iterable<List<?>>() {
@Override
public Iterator<List<?>> iterator() {
return new IgniteSingletonIterator<>(singletonList(updRes.counter()));
}
}, cancel, true, false));
}
} catch (IgniteException e) {
failReason = e;
throw e;
} catch (IgniteCheckedException e) {
failReason = e;
IgniteClusterReadOnlyException roEx = X.cause(e, IgniteClusterReadOnlyException.class);
if (roEx != null) {
throw new IgniteSQLException("Failed to execute DML statement. Cluster in read-only mode [stmt=" + qryDesc.sql() + ", params=" + Arrays.deepToString(qryParams.arguments()) + "]", IgniteQueryErrorCode.CLUSTER_READ_ONLY_MODE_ENABLED, e);
}
throw new IgniteSQLException("Failed to execute DML statement [stmt=" + qryDesc.sql() + ", params=" + Arrays.deepToString(qryParams.arguments()) + "]", e);
} finally {
runningQryMgr.unregister(qryId, failReason);
}
}
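A caller-side sketch of the DML error translation above, assuming a single default node with the SQL (ignite-indexing) module on the classpath. The table and cache names are illustrative; per the catch block above, the read-only failure is expected to surface with IgniteClusterReadOnlyException in the cause chain and the CLUSTER_READ_ONLY_MODE_ENABLED error code.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.cluster.ClusterState;
import org.apache.ignite.internal.processors.cache.distributed.dht.IgniteClusterReadOnlyException;
import org.apache.ignite.internal.util.typedef.X;

public class DmlReadOnlyExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<?, ?> cache = ignite.getOrCreateCache("sql-cache");

            cache.query(new SqlFieldsQuery(
                "CREATE TABLE person (id INT PRIMARY KEY, name VARCHAR)").setSchema("PUBLIC")).getAll();

            ignite.cluster().state(ClusterState.ACTIVE_READ_ONLY);

            try {
                // executeDml() translates the failure into an IgniteSQLException with
                // the CLUSTER_READ_ONLY_MODE_ENABLED error code.
                cache.query(new SqlFieldsQuery(
                    "INSERT INTO person (id, name) VALUES (1, 'a')").setSchema("PUBLIC")).getAll();
            }
            catch (Exception e) {
                System.out.println("DML rejected, read-only cause present: "
                    + X.hasCause(e, IgniteClusterReadOnlyException.class));
            }
        }
    }
}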