Use of org.apache.ignite.internal.IgniteFutureTimeoutCheckedException in project gridgain by gridgain.
The class ConnectionClientPool, method reserveClient.
/**
 * Returns an existing client for the node or creates a new one.
 *
 * @param node Node to which the client should be opened.
 * @param connIdx Connection index.
 * @return The existing or just created client.
 * @throws IgniteCheckedException Thrown if any exception occurs.
 */
public GridCommunicationClient reserveClient(ClusterNode node, int connIdx) throws IgniteCheckedException {
    assert node != null;
    assert (connIdx >= 0 && connIdx < cfg.connectionsPerNode()) ||
        !(cfg.usePairedConnections() && usePairedConnections(node, attrs.pairedConnection())) : connIdx;

    if (locNodeSupplier.get().isClient()) {
        if (node.isClient()) {
            if (DISABLED_CLIENT_PORT.equals(node.attribute(attrs.port())))
                throw new IgniteSpiException("Cannot send message to the client node with no server socket opened.");
        }
    }

    UUID nodeId = node.id();

    if (log.isDebugEnabled())
        log.debug("The node client is going to reserve a connection [nodeId=" + node.id() + ", connIdx=" + connIdx + "]");

    while (true) {
        GridCommunicationClient[] curClients = clients.get(nodeId);

        GridCommunicationClient client = curClients != null && connIdx < curClients.length ? curClients[connIdx] : null;

        if (client == null) {
            if (stopping)
                throw new IgniteSpiException("Node is stopping.");

            // Do not allow concurrent connects.
            GridFutureAdapter<GridCommunicationClient> fut = new ConnectFuture();

            ConnectionKey connKey = new ConnectionKey(nodeId, connIdx, -1);

            GridFutureAdapter<GridCommunicationClient> oldFut = clientFuts.putIfAbsent(connKey, fut);

            if (oldFut == null) {
                try {
                    GridCommunicationClient[] curClients0 = clients.get(nodeId);

                    GridCommunicationClient client0 = curClients0 != null && connIdx < curClients0.length ?
                        curClients0[connIdx] : null;

                    if (client0 == null) {
                        client0 = createCommunicationClient(node, connIdx);

                        if (client0 != null) {
                            addNodeClient(node, connIdx, client0);

                            if (client0 instanceof GridTcpNioCommunicationClient) {
                                GridTcpNioCommunicationClient tcpClient = (GridTcpNioCommunicationClient)client0;

                                if (tcpClient.session().closeTime() > 0 && removeNodeClient(nodeId, client0)) {
                                    if (log.isDebugEnabled())
                                        log.debug("Session was closed after client creation, will retry [node=" + node + ", client=" + client0 + ']');

                                    client0 = null;
                                }
                            }
                        }
                        else {
                            U.sleep(200);

                            if (nodeGetter.apply(node.id()) == null)
                                throw new ClusterTopologyCheckedException("Failed to send message (node left topology): " + node);
                        }
                    }

                    fut.onDone(client0);
                }
                catch (NodeUnreachableException e) {
                    log.warning(e.getMessage());

                    fut = handleUnreachableNodeException(node, connIdx, fut, e);
                }
                catch (Throwable e) {
                    fut.onDone(e);

                    if (e instanceof IgniteTooManyOpenFilesException)
                        throw e;

                    if (e instanceof Error)
                        throw (Error)e;
                }
                finally {
                    clientFuts.remove(connKey, fut);
                }
            }
            else
                fut = oldFut;

            long clientReserveWaitTimeout = registry != null ?
                registry.getSystemWorkerBlockedTimeout() / 3 : cfg.connectionTimeout() / 3;

            long waitingTime = 0;

            // This cycle will eventually quit when the future is completed by a concurrent thread reserving the client.
            while (true) {
                try {
                    client = fut.get(clientReserveWaitTimeout, TimeUnit.MILLISECONDS);

                    break;
                }
                catch (IgniteFutureTimeoutCheckedException ignored) {
                    waitingTime += clientReserveWaitTimeout;

                    if (log.isDebugEnabled())
                        log.debug("Still waiting to reestablish connection to node [nodeId=" + node.id() + ", waitingTime=" + waitingTime + "ms]");

                    if (registry != null) {
                        GridWorker wrkr = registry.worker(Thread.currentThread().getName());

                        if (wrkr != null)
                            wrkr.updateHeartbeat();
                    }
                }
            }

            if (client == null) {
                if (clusterStateProvider.isLocalNodeDisconnected())
                    throw new IgniteCheckedException("Unable to create TCP client due to local node disconnecting.");
                else
                    continue;
            }

            if (nodeGetter.apply(nodeId) == null) {
                if (removeNodeClient(nodeId, client))
                    client.forceClose();

                throw new IgniteSpiException("Destination node is not in topology: " + node.id());
            }
        }

        assert connIdx == client.connectionIndex() : client;

        if (client.reserve())
            return client;
        else
            // The client has just been closed by the idle worker. Help it and try again.
            removeNodeClient(nodeId, client);
    }
}
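The heart of the method is the inner wait loop: instead of blocking on the connect future indefinitely, the thread waits in slices of clientReserveWaitTimeout and treats each IgniteFutureTimeoutCheckedException as a cue to update its GridWorker heartbeat, so the blocked-worker watchdog does not flag it. Below is a minimal standalone sketch of that slice-and-retry pattern; the class name BoundedWaitSketch and the 100 ms slice are illustrative, and it assumes ignite-core on the classpath.

import java.util.concurrent.TimeUnit;

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteFutureTimeoutCheckedException;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

public class BoundedWaitSketch {
    public static void main(String[] args) throws IgniteCheckedException {
        GridFutureAdapter<String> fut = new GridFutureAdapter<>();

        // Simulates the concurrent thread that eventually completes the connect future.
        new Thread(() -> {
            try {
                Thread.sleep(500);
            }
            catch (InterruptedException ignored) {
                // No-op.
            }

            fut.onDone("client-ready");
        }).start();

        // Wait in short slices instead of blocking indefinitely, so the thread
        // can report liveness between attempts, as reserveClient() does.
        while (true) {
            try {
                System.out.println("Reserved: " + fut.get(100, TimeUnit.MILLISECONDS));

                break;
            }
            catch (IgniteFutureTimeoutCheckedException ignored) {
                // Timed out on this slice: a real caller would update its
                // GridWorker heartbeat here before retrying.
                System.out.println("Still waiting...");
            }
        }
    }
}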
Use of org.apache.ignite.internal.IgniteFutureTimeoutCheckedException in project gridgain by gridgain.
The class ClientSlowDiscoveryTransactionRemapTest, method testTransactionRemapWithTimeout.
/**
*/
@Test
public void testTransactionRemapWithTimeout() throws Exception {
    TestTransactionEngine engine = new TestTransactionEngine<>(clnt.cache(CACHE_NAME));

    IgniteInternalFuture<?> txFut = GridTestUtils.runAsync(() -> {
        try (Transaction tx = clnt.transactions().txStart(concurrency, isolation, 1_000, 1_000_000)) {
            operation.apply(engine);

            tx.commit();
        }
    });

    try {
        txFut.get(2, TimeUnit.SECONDS);
    }
    catch (IgniteFutureTimeoutCheckedException te) {
        // Expected.
    }
    finally {
        clientDiscoSpiBlock.countDown();
    }

    // After the second client join resumes, the transaction should time out and roll back.
    if (concurrency == PESSIMISTIC) {
        assertThrowsWithCause((Callable<Object>)txFut::get, TransactionTimeoutException.class);

        // Check that the initial data was not changed by the rolled-back transaction.
        for (int k = 0; k < KEYS_SET; k++)
            Assert.assertEquals("Cache consistency is broken for key: " + k, 0, clnt.cache(CACHE_NAME).get(k));
    }
    else {
        txFut.get();

        engine.consistencyCheck();
    }
}
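The test leans on the third and fourth arguments of txStart: an explicit transaction timeout (1,000 ms) and a size hint. Once the timeout elapses, the transaction is rolled back and the failure surfaces as a TransactionTimeoutException. A hedged standalone sketch of that public API follows; the cache name, the sleep, and the TxTimeoutSketch class are illustrative, and the exact exception wrapping can differ between versions.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;

import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;

public class TxTimeoutSketch {
    public static void main(String[] args) throws Exception {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, Integer> ccfg = new CacheConfiguration<>("tx-cache");

            // Transactions require a TRANSACTIONAL cache.
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(ccfg);

            // 1 second timeout, unlimited tx size (0).
            try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, 1_000, 0)) {
                cache.put(1, 1);

                Thread.sleep(2_000); // Outlive the 1 s timeout.

                tx.commit(); // Expected to fail: the transaction has timed out.
            }
            catch (Exception e) {
                // The timeout surfaces as a TransactionTimeoutException,
                // possibly wrapped in a CacheException.
                System.out.println("Rolled back: " + e);
            }
        }
    }
}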
Use of org.apache.ignite.internal.IgniteFutureTimeoutCheckedException in project gridgain by gridgain.
The class DataStreamerImpl, method doFlush.
/**
* Performs flush.
*
* @throws IgniteCheckedException If failed.
*/
private void doFlush() throws IgniteCheckedException {
    lastFlushTime = U.currentTimeMillis();

    List<IgniteInternalFuture> activeFuts0 = null;

    int doneCnt = 0;

    flushAllThreadsBufs();

    for (IgniteInternalFuture<?> f : activeFuts) {
        if (!f.isDone()) {
            if (activeFuts0 == null)
                activeFuts0 = new ArrayList<>((int)(activeFuts.size() * 1.2));

            activeFuts0.add(f);
        }
        else {
            f.get();

            doneCnt++;
        }
    }

    if (activeFuts0 == null || activeFuts0.isEmpty())
        return;

    while (true) {
        if (disconnectErr != null)
            throw disconnectErr;

        Queue<IgniteInternalFuture<?>> q = null;

        for (Buffer buf : bufMappings.values()) {
            IgniteInternalFuture<?> flushFut = buf.flush();

            if (flushFut != null) {
                if (q == null)
                    q = new ArrayDeque<>(bufMappings.size() * 2);

                q.add(flushFut);
            }
        }

        if (q != null) {
            assert !q.isEmpty();

            boolean err = false;

            long startTimeMillis = U.currentTimeMillis();

            for (IgniteInternalFuture fut = q.poll(); fut != null; fut = q.poll()) {
                try {
                    if (timeout == DFLT_UNLIMIT_TIMEOUT)
                        fut.get();
                    else {
                        long timeRemain = timeout - U.currentTimeMillis() + startTimeMillis;

                        if (timeRemain <= 0)
                            throw new IgniteDataStreamerTimeoutException("Data streamer exceeded timeout on flush.");

                        fut.get(timeRemain);
                    }
                }
                catch (IgniteClientDisconnectedCheckedException e) {
                    if (log.isDebugEnabled())
                        log.debug("Failed to flush buffer: " + e);

                    throw CU.convertToCacheException(e);
                }
                catch (IgniteFutureTimeoutCheckedException e) {
                    if (log.isDebugEnabled())
                        log.debug("Failed to flush buffer: " + e);

                    throw new IgniteDataStreamerTimeoutException("Data streamer exceeded timeout on flush.", e);
                }
                catch (IgniteCheckedException e) {
                    if (log.isDebugEnabled())
                        log.debug("Failed to flush buffer: " + e);

                    err = true;

                    if (X.cause(e, IgniteClusterReadOnlyException.class) != null)
                        throw e;
                }
            }

            if (err)
                // Remaps needed - flush buffers.
                continue;
        }

        doneCnt = 0;

        for (int i = 0; i < activeFuts0.size(); i++) {
            IgniteInternalFuture f = activeFuts0.get(i);

            if (f == null)
                doneCnt++;
            else if (f.isDone()) {
                f.get();

                doneCnt++;

                activeFuts0.set(i, null);
            }
            else
                break;
        }

        if (doneCnt == activeFuts0.size())
            return;
    }
}
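doFlush() shares a single timeout budget across all per-buffer flush futures: before each get() it recomputes the time remaining from the original start timestamp, so a slow early future shrinks the allowance for the later ones. A minimal sketch of that shared-deadline pattern, assuming ignite-core on the classpath; awaitAll and SharedDeadlineSketch are illustrative names.

import java.util.Arrays;
import java.util.List;

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteDataStreamerTimeoutException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

public class SharedDeadlineSketch {
    /** Waits for all futures under one overall budget, as doFlush() does. */
    static void awaitAll(List<IgniteInternalFuture<?>> futs, long timeout) throws IgniteCheckedException {
        long start = System.currentTimeMillis();

        for (IgniteInternalFuture<?> fut : futs) {
            // Each wait only gets what is left of the shared budget.
            long remain = timeout - (System.currentTimeMillis() - start);

            if (remain <= 0)
                throw new IgniteDataStreamerTimeoutException("Exceeded timeout on flush.");

            // Throws IgniteFutureTimeoutCheckedException if this slice runs out.
            fut.get(remain);
        }
    }

    public static void main(String[] args) throws IgniteCheckedException {
        GridFutureAdapter<Object> done = new GridFutureAdapter<>();

        done.onDone("ok");

        awaitAll(Arrays.asList(done, done), 1_000);

        System.out.println("All flushed.");
    }
}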
Use of org.apache.ignite.internal.IgniteFutureTimeoutCheckedException in project gridgain by gridgain.
The class CheckpointTimeoutLock, method checkpointReadLock.
/**
 * Gets the checkpoint read lock. While this lock is held, the checkpoint thread will not snapshot memory state.
 *
 * @throws IgniteException If failed.
 */
public void checkpointReadLock() {
    if (checkpointReadWriteLock.isWriteLockHeldByCurrentThread())
        return;

    long timeout = checkpointReadLockTimeout;

    long start = U.currentTimeMillis();

    boolean interrupted = false;

    try {
        for (; ; ) {
            try {
                if (timeout > 0 && (U.currentTimeMillis() - start) >= timeout)
                    failCheckpointReadLock();

                try {
                    if (timeout > 0) {
                        if (!checkpointReadWriteLock.tryReadLock(timeout - (U.currentTimeMillis() - start), TimeUnit.MILLISECONDS))
                            failCheckpointReadLock();
                    }
                    else
                        checkpointReadWriteLock.readLock();
                }
                catch (InterruptedException e) {
                    interrupted = true;

                    continue;
                }

                if (stop) {
                    checkpointReadWriteLock.readUnlock();

                    throw new IgniteException(new NodeStoppingException("Failed to perform cache update: node is stopping."));
                }

                if (checkpointReadWriteLock.getReadHoldCount() > 1 || safeToUpdatePageMemories() || checkpointer.runner() == null)
                    break;
                else {
                    // If the checkpoint is triggered outside of the lock,
                    // it could cause the checkpoint to fire again for the same reason
                    // (due to a data race between collecting dirty pages and triggering the checkpoint).
                    CheckpointProgress checkpoint = checkpointer.scheduleCheckpoint(0, "too many dirty pages");

                    checkpointReadWriteLock.readUnlock();

                    if (timeout > 0 && U.currentTimeMillis() - start >= timeout)
                        failCheckpointReadLock();

                    try {
                        checkpoint.futureFor(LOCK_RELEASED).getUninterruptibly();
                    }
                    catch (IgniteFutureTimeoutCheckedException e) {
                        failCheckpointReadLock();
                    }
                    catch (IgniteCheckedException e) {
                        throw new IgniteException("Failed to wait for checkpoint begin.", e);
                    }
                }
            }
            catch (CheckpointReadLockTimeoutException e) {
                log.error(e.getMessage(), e);

                timeout = 0;
            }
        }
    }
    finally {
        if (interrupted)
            Thread.currentThread().interrupt();
    }
}
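Stripped of the checkpoint machinery, checkpointReadLock() is a deadline-bounded, interrupt-tolerant lock acquisition: each tryReadLock attempt gets only the time remaining, interrupts are remembered rather than propagated, and the interrupt flag is restored on exit. The same skeleton over the plain JDK read-write lock, with illustrative class and method names:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class DeadlineReadLockSketch {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    /** Acquires the read lock within {@code timeout} ms, tolerating interrupts. */
    boolean readLockWithDeadline(long timeout) {
        long start = System.currentTimeMillis();

        boolean interrupted = false;

        try {
            while (true) {
                long remain = timeout - (System.currentTimeMillis() - start);

                if (remain <= 0)
                    return false; // Deadline exceeded, as failCheckpointReadLock() signals above.

                try {
                    // Each attempt only gets the time remaining, not the full timeout.
                    if (lock.readLock().tryLock(remain, TimeUnit.MILLISECONDS))
                        return true;
                }
                catch (InterruptedException e) {
                    // Remember the interrupt but keep waiting; restore it on exit.
                    interrupted = true;
                }
            }
        }
        finally {
            if (interrupted)
                Thread.currentThread().interrupt();
        }
    }
}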
Use of org.apache.ignite.internal.IgniteFutureTimeoutCheckedException in project gridgain by gridgain.
The class CacheObjectBinaryProcessorImpl, method metadata.
/**
* {@inheritDoc}
*/
@Nullable
@Override
public BinaryType metadata(final int typeId, final int schemaId) {
    BinaryMetadataHolder holder = metadataLocCache.get(typeId);

    if (ctx.clientNode()) {
        if (holder == null || !holder.metadata().hasSchema(schemaId)) {
            if (log.isDebugEnabled())
                log.debug("Waiting for client metadata update [typeId=" + typeId +
                    ", schemaId=" + schemaId +
                    ", pendingVer=" + (holder == null ? "NA" : holder.pendingVersion()) +
                    ", acceptedVer=" + (holder == null ? "NA" : holder.acceptedVersion()) + ']');

            try {
                transport.requestUpToDateMetadata(typeId).get();
            }
            catch (IgniteCheckedException ignored) {
                // No-op.
            }

            holder = metadataLocCache.get(typeId);

            IgniteFuture<?> reconnectFut0 = reconnectFut;

            if (holder == null && reconnectFut0 != null)
                throw new IgniteClientDisconnectedException(reconnectFut0, "Client node disconnected.");

            if (log.isDebugEnabled())
                log.debug("Finished waiting for client metadata update [typeId=" + typeId +
                    ", schemaId=" + schemaId +
                    ", pendingVer=" + (holder == null ? "NA" : holder.pendingVersion()) +
                    ", acceptedVer=" + (holder == null ? "NA" : holder.acceptedVersion()) + ']');
        }
    }
    else {
        if (holder != null && IgniteThread.current() instanceof IgniteDiscoveryThread)
            return holder.metadata().wrap(binaryCtx);
        else if (holder != null && (holder.pendingVersion() - holder.acceptedVersion() > 0)) {
            if (log.isDebugEnabled())
                log.debug("Waiting for metadata update [typeId=" + typeId +
                    ", schemaId=" + schemaId +
                    ", pendingVer=" + holder.pendingVersion() +
                    ", acceptedVer=" + holder.acceptedVersion() + ']');

            long t0 = System.nanoTime();

            GridFutureAdapter<MetadataUpdateResult> fut = transport.awaitMetadataUpdate(typeId, holder.pendingVersion());

            try {
                fut.get();
            }
            catch (IgniteCheckedException e) {
                log.error("Failed to wait for metadata update [typeId=" + typeId + ", schemaId=" + schemaId + ']', e);
            }

            if (log.isDebugEnabled())
                log.debug("Finished waiting for metadata update [typeId=" + typeId +
                    ", waitTime=" + MILLISECONDS.convert(System.nanoTime() - t0, NANOSECONDS) + "ms" +
                    ", schemaId=" + schemaId +
                    ", pendingVer=" + holder.pendingVersion() +
                    ", acceptedVer=" + holder.acceptedVersion() + ']');

            holder = metadataLocCache.get(typeId);
        }
        else if (holder == null || !holder.metadata().hasSchema(schemaId)) {
            // Last resort waiting.
            U.warn(log, "Schema is missing while no metadata updates are in progress " +
                "(will wait for schema update within timeout defined by " + IGNITE_WAIT_SCHEMA_UPDATE + " system property)" +
                " [typeId=" + typeId +
                ", missingSchemaId=" + schemaId +
                ", pendingVer=" + (holder == null ? "NA" : holder.pendingVersion()) +
                ", acceptedVer=" + (holder == null ? "NA" : holder.acceptedVersion()) +
                ", binMetaUpdateTimeout=" + waitSchemaTimeout + ']');

            long t0 = System.nanoTime();

            GridFutureAdapter<?> fut = transport.awaitSchemaUpdate(typeId, schemaId);

            try {
                fut.get(waitSchemaTimeout);
            }
            catch (IgniteFutureTimeoutCheckedException e) {
                log.error("Timed out while waiting for schema update [typeId=" + typeId + ", schemaId=" + schemaId + ']');
            }
            catch (IgniteCheckedException ignored) {
                // No-op.
            }

            holder = metadataLocCache.get(typeId);

            if (log.isDebugEnabled() && holder != null && holder.metadata().hasSchema(schemaId))
                log.debug("Found the schema after wait [typeId=" + typeId +
                    ", waitTime=" + MILLISECONDS.convert(System.nanoTime() - t0, NANOSECONDS) + "ms" +
                    ", schemaId=" + schemaId +
                    ", pendingVer=" + holder.pendingVersion() +
                    ", acceptedVer=" + holder.acceptedVersion() + ']');
        }
    }

    if (holder != null && metadataFileStore != null) {
        try {
            metadataFileStore.waitForWriteCompletion(typeId, holder.pendingVersion());
        }
        catch (IgniteCheckedException e) {
            log.warning("Failed to wait for metadata write operation for [typeId=" + typeId +
                ", typeVer=" + holder.acceptedVersion() + ']', e);

            return null;
        }
    }

    return holder != null ? holder.metadata().wrap(binaryCtx) : null;
}