use of org.apache.ignite.internal.IgniteInterruptedCheckedException in project ignite by apache.
the class GridServiceProcessor method redeploy.
/**
 * Redeploys local services based on assignments.
 *
 * @param assigns Assignments.
 */
private void redeploy(GridServiceAssignments assigns) {
    String svcName = assigns.name();
    Integer assignCnt = assigns.assigns().get(ctx.localNodeId());
    if (assignCnt == null)
        assignCnt = 0;
    Collection<ServiceContextImpl> ctxs;
    synchronized (locSvcs) {
        ctxs = locSvcs.get(svcName);
        if (ctxs == null)
            locSvcs.put(svcName, ctxs = new ArrayList<>());
    }
    Collection<ServiceContextImpl> toInit = new ArrayList<>();
    synchronized (ctxs) {
        if (ctxs.size() > assignCnt) {
            int cancelCnt = ctxs.size() - assignCnt;
            cancel(ctxs, cancelCnt);
        } else if (ctxs.size() < assignCnt) {
            int createCnt = assignCnt - ctxs.size();
            for (int i = 0; i < createCnt; i++) {
                ServiceContextImpl svcCtx = new ServiceContextImpl(assigns.name(), UUID.randomUUID(), assigns.cacheName(), assigns.affinityKey(), Executors.newSingleThreadExecutor(threadFactory));
                ctxs.add(svcCtx);
                toInit.add(svcCtx);
            }
        }
    }
    for (final ServiceContextImpl svcCtx : toInit) {
        final Service svc;
        try {
            svc = copyAndInject(assigns.configuration());
            // Initialize service.
            svc.init(svcCtx);
            svcCtx.service(svc);
        } catch (Throwable e) {
            U.error(log, "Failed to initialize service (service will not be deployed): " + assigns.name(), e);
            synchronized (ctxs) {
                ctxs.removeAll(toInit);
            }
            if (e instanceof Error)
                throw (Error) e;
            if (e instanceof RuntimeException)
                throw (RuntimeException) e;
            return;
        }
        if (log.isInfoEnabled())
            log.info("Starting service instance [name=" + svcCtx.name() + ", execId=" + svcCtx.executionId() + ']');
        // Start service in its own thread.
        final ExecutorService exe = svcCtx.executor();
        exe.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    svc.execute(svcCtx);
                } catch (InterruptedException | IgniteInterruptedCheckedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Service thread was interrupted [name=" + svcCtx.name() + ", execId=" + svcCtx.executionId() + ']');
                } catch (IgniteException e) {
                    if (e.hasCause(InterruptedException.class) || e.hasCause(IgniteInterruptedCheckedException.class)) {
                        if (log.isDebugEnabled())
                            log.debug("Service thread was interrupted [name=" + svcCtx.name() + ", execId=" + svcCtx.executionId() + ']');
                    } else {
                        U.error(log, "Service execution stopped with error [name=" + svcCtx.name() + ", execId=" + svcCtx.executionId() + ']', e);
                    }
                } catch (Throwable e) {
                    log.error("Service execution stopped with error [name=" + svcCtx.name() + ", execId=" + svcCtx.executionId() + ']', e);
                    if (e instanceof Error)
                        throw (Error) e;
                } finally {
                    // Suicide.
                    exe.shutdownNow();
                }
            }
        });
    }
}
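For context, the InterruptedException / IgniteInterruptedCheckedException branch above treats interruption of the service thread (typically during undeployment) as a normal stop rather than an error. Below is a minimal sketch (a hypothetical EchoService, not part of GridServiceProcessor) of a Service implementation whose execute() loop ends cleanly when that interrupt arrives.

import org.apache.ignite.services.Service;
import org.apache.ignite.services.ServiceContext;

public class EchoService implements Service {
    @Override public void init(ServiceContext ctx) {
        // Allocate resources before execute() is called.
    }

    @Override public void execute(ServiceContext ctx) throws InterruptedException {
        // Loop until the service is cancelled or its thread is interrupted.
        while (!ctx.isCancelled()) {
            // Do a unit of work, then park briefly; Thread.sleep() throws
            // InterruptedException when the service thread is interrupted,
            // which the runnable above logs at DEBUG level and treats as a normal stop.
            Thread.sleep(1000);
        }
    }

    @Override public void cancel(ServiceContext ctx) {
        // Release resources held by the service.
    }
}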
use of org.apache.ignite.internal.IgniteInterruptedCheckedException in project ignite by apache.
the class IpcSharedMemoryServerEndpoint method accept.
/** {@inheritDoc} */
@SuppressWarnings("ErrorNotRethrown")
@Override
public IpcEndpoint accept() throws IgniteCheckedException {
    while (!Thread.currentThread().isInterrupted()) {
        Socket sock = null;
        boolean accepted = false;
        try {
            sock = srvSock.accept();
            accepted = true;
            InputStream inputStream = sock.getInputStream();
            ObjectInputStream in = new ObjectInputStream(inputStream);
            ObjectOutputStream out = new ObjectOutputStream(sock.getOutputStream());
            IpcSharedMemorySpace inSpace = null;
            IpcSharedMemorySpace outSpace = null;
            boolean err = true;
            try {
                IpcSharedMemoryInitRequest req = (IpcSharedMemoryInitRequest) in.readObject();
                if (log.isDebugEnabled())
                    log.debug("Processing request: " + req);
                IgnitePair<String> p = inOutToken(req.pid(), size);
                String file1 = p.get1();
                String file2 = p.get2();
                assert file1 != null;
                assert file2 != null;
                // Create tokens.
                new File(file1).createNewFile();
                new File(file2).createNewFile();
                if (log.isDebugEnabled())
                    log.debug("Created token files: " + p);
                inSpace = new IpcSharedMemorySpace(file1, req.pid(), pid, size, true, log);
                outSpace = new IpcSharedMemorySpace(file2, pid, req.pid(), size, false, log);
                IpcSharedMemoryClientEndpoint ret = new IpcSharedMemoryClientEndpoint(inSpace, outSpace, log);
                out.writeObject(new IpcSharedMemoryInitResponse(file2, outSpace.sharedMemoryId(), file1, inSpace.sharedMemoryId(), pid, size));
                err = !in.readBoolean();
                endpoints.add(ret);
                return ret;
            } catch (UnsatisfiedLinkError e) {
                throw IpcSharedMemoryUtils.linkError(e);
            } catch (IOException e) {
                if (log.isDebugEnabled())
                    log.debug("Failed to process incoming connection " + "(was connection closed by another party):" + e.getMessage());
            } catch (ClassNotFoundException e) {
                U.error(log, "Failed to process incoming connection.", e);
            } catch (ClassCastException e) {
                String msg = "Failed to process incoming connection (most probably, shared memory " + "rest endpoint has been configured by mistake).";
                LT.warn(log, msg);
                sendErrorResponse(out, e);
            } catch (IpcOutOfSystemResourcesException e) {
                if (!omitOutOfResourcesWarn)
                    LT.warn(log, OUT_OF_RESOURCES_MSG);
                sendErrorResponse(out, e);
            } catch (IgniteCheckedException e) {
                LT.error(log, e, "Failed to process incoming shared memory connection.");
                sendErrorResponse(out, e);
            } finally {
                // Exception has been thrown, need to free system resources.
                if (err) {
                    if (inSpace != null)
                        inSpace.forceClose();
                    // Safety.
                    if (outSpace != null)
                        outSpace.forceClose();
                }
            }
        } catch (IOException e) {
            if (!Thread.currentThread().isInterrupted() && !accepted)
                throw new IgniteCheckedException("Failed to accept incoming connection.", e);
            if (!closed)
                LT.error(log, null, "Failed to process incoming shared memory connection: " + e.getMessage());
        } finally {
            U.closeQuiet(sock);
        }
    }
    throw new IgniteInterruptedCheckedException("Socket accept was interrupted.");
}
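accept() reports interruption as IgniteInterruptedCheckedException rather than returning null, so a caller can tell a deliberate shutdown from a real failure. A minimal sketch of such an accept loop follows (the AcceptLoop class and handle() method are hypothetical, and the import paths for the IPC classes are assumed); because IgniteInterruptedCheckedException extends IgniteCheckedException, it has to be caught first.

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteInterruptedCheckedException;
import org.apache.ignite.internal.util.ipc.IpcEndpoint;
import org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint;

public class AcceptLoop implements Runnable {
    /** Server endpoint that has already been started. */
    private final IpcSharedMemoryServerEndpoint srv;

    public AcceptLoop(IpcSharedMemoryServerEndpoint srv) {
        this.srv = srv;
    }

    @Override public void run() {
        try {
            while (true) {
                // Blocks until a client connects or this thread is interrupted.
                IpcEndpoint client = srv.accept();
                handle(client);
            }
        } catch (IgniteInterruptedCheckedException ignored) {
            // Normal shutdown path: accept() converted the interrupt into this exception.
        } catch (IgniteCheckedException e) {
            // Genuine accept failure.
            e.printStackTrace();
        }
    }

    /** Hands the accepted endpoint off to a worker (omitted in this sketch). */
    private void handle(IpcEndpoint endpoint) {
        // No-op in this sketch.
    }
}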
use of org.apache.ignite.internal.IgniteInterruptedCheckedException in project ignite by apache.
the class TcpDiscoveryJdbcIpFinder method checkSchema.
/**
 * Checks correctness of existing DB schema.
 *
 * @throws org.apache.ignite.spi.IgniteSpiException If schema wasn't properly initialized.
 */
private void checkSchema() throws IgniteSpiException {
    try {
        U.await(initLatch);
    } catch (IgniteInterruptedCheckedException e) {
        throw new IgniteSpiException("Thread has been interrupted.", e);
    }
    Connection conn = null;
    Statement stmt = null;
    try {
        conn = dataSrc.getConnection();
        conn.setTransactionIsolation(TRANSACTION_READ_COMMITTED);
        // Check if tbl_addrs exists and database initialized properly.
        stmt = conn.createStatement();
        stmt.execute(chkQry);
    } catch (SQLException e) {
        throw new IgniteSpiException("IP finder has not been properly initialized.", e);
    } finally {
        U.closeQuiet(stmt);
        U.closeQuiet(conn);
    }
}
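The first try/catch above is a common Ignite SPI idiom: U.await() surfaces a thread interrupt as IgniteInterruptedCheckedException, which the SPI rewraps into the IgniteSpiException its callers expect. A minimal sketch of that idiom in isolation (the InitAwait class and its method names are hypothetical):

import java.util.concurrent.CountDownLatch;

import org.apache.ignite.internal.IgniteInterruptedCheckedException;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.spi.IgniteSpiException;

public class InitAwait {
    /** Latch released once initialization completes. */
    private final CountDownLatch initLatch = new CountDownLatch(1);

    /** Marks initialization as finished. */
    public void onInitialized() {
        initLatch.countDown();
    }

    /** Blocks until initialization completes, translating interrupts for SPI callers. */
    public void awaitInitialization() throws IgniteSpiException {
        try {
            U.await(initLatch);
        } catch (IgniteInterruptedCheckedException e) {
            throw new IgniteSpiException("Thread has been interrupted.", e);
        }
    }
}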
use of org.apache.ignite.internal.IgniteInterruptedCheckedException in project ignite by apache.
the class ServerImpl method pingNode.
/**
 * Pings the node by its address to see if it's alive.
 *
 * @param addr Address of the node.
 * @param nodeId Node ID to ping. When the client node ID is not null, this is the ID of the router node.
 * @param clientNodeId Client node ID.
 * @return ID of the remote node and "client exists" flag if the node is alive, or {@code null} if the remote node
 *      has left the topology during the ping process.
 * @throws IgniteCheckedException If an error occurs.
 */
@Nullable
private IgniteBiTuple<UUID, Boolean> pingNode(InetSocketAddress addr, @Nullable UUID nodeId, @Nullable UUID clientNodeId) throws IgniteCheckedException {
    assert addr != null;
    UUID locNodeId = getLocalNodeId();
    IgniteSpiOperationTimeoutHelper timeoutHelper = new IgniteSpiOperationTimeoutHelper(spi, clientNodeId == null);
    if (F.contains(spi.locNodeAddrs, addr)) {
        if (clientNodeId == null)
            return F.t(getLocalNodeId(), false);
        ClientMessageWorker clientWorker = clientMsgWorkers.get(clientNodeId);
        if (clientWorker == null)
            return F.t(getLocalNodeId(), false);
        boolean clientPingRes;
        try {
            clientPingRes = clientWorker.ping(timeoutHelper);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IgniteInterruptedCheckedException(e);
        }
        return F.t(getLocalNodeId(), clientPingRes);
    }
    GridPingFutureAdapter<IgniteBiTuple<UUID, Boolean>> fut = new GridPingFutureAdapter<>();
    GridPingFutureAdapter<IgniteBiTuple<UUID, Boolean>> oldFut = pingMap.putIfAbsent(addr, fut);
    if (oldFut != null)
        return oldFut.get();
    else {
        Collection<Throwable> errs = null;
        try {
            Socket sock = null;
            int reconCnt = 0;
            boolean openedSock = false;
            while (true) {
                try {
                    if (addr.isUnresolved())
                        addr = new InetSocketAddress(InetAddress.getByName(addr.getHostName()), addr.getPort());
                    long tstamp = U.currentTimeMillis();
                    sock = spi.createSocket();
                    fut.sock = sock;
                    sock = spi.openSocket(sock, addr, timeoutHelper);
                    openedSock = true;
                    spi.writeToSocket(sock, new TcpDiscoveryPingRequest(locNodeId, clientNodeId), timeoutHelper.nextTimeoutChunk(spi.getSocketTimeout()));
                    TcpDiscoveryPingResponse res = spi.readMessage(sock, null, timeoutHelper.nextTimeoutChunk(spi.getAckTimeout()));
                    if (locNodeId.equals(res.creatorNodeId())) {
                        if (log.isDebugEnabled())
                            log.debug("Ping response from local node: " + res);
                        break;
                    }
                    spi.stats.onClientSocketInitialized(U.currentTimeMillis() - tstamp);
                    IgniteBiTuple<UUID, Boolean> t = F.t(res.creatorNodeId(), res.clientExists());
                    fut.onDone(t);
                    return t;
                } catch (IOException | IgniteCheckedException e) {
                    if (nodeId != null && !nodeAlive(nodeId)) {
                        if (log.isDebugEnabled())
                            log.debug("Failed to ping the node (has left or leaving topology): [nodeId=" + nodeId + ']');
                        fut.onDone((IgniteBiTuple<UUID, Boolean>) null);
                        return null;
                    }
                    if (errs == null)
                        errs = new ArrayList<>();
                    errs.add(e);
                    reconCnt++;
                    if (!openedSock && reconCnt == 2)
                        break;
                    if (timeoutHelper.checkFailureTimeoutReached(e))
                        break;
                    else if (!spi.failureDetectionTimeoutEnabled() && reconCnt == spi.getReconnectCount())
                        break;
                } finally {
                    U.closeQuiet(sock);
                }
            }
        } catch (Throwable t) {
            fut.onDone(t);
            if (t instanceof Error)
                throw t;
            throw U.cast(t);
        } finally {
            if (!fut.isDone())
                fut.onDone(U.exceptionWithSuppressed("Failed to ping node by address: " + addr, errs));
            boolean b = pingMap.remove(addr, fut);
            assert b;
        }
        return fut.get();
    }
}
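The InterruptedException handler above shows the other common idiom: restore the thread's interrupt flag, then wrap the exception into IgniteInterruptedCheckedException so it still propagates through methods declared to throw IgniteCheckedException. A minimal sketch of the same pattern outside ServerImpl (the class and method names are hypothetical):

import java.util.concurrent.BlockingQueue;

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteInterruptedCheckedException;

public class InterruptWrapExample {
    /** Takes the next element, converting an interrupt into the checked Ignite exception. */
    public static <T> T takeInterruptibly(BlockingQueue<T> queue) throws IgniteCheckedException {
        try {
            return queue.take();
        } catch (InterruptedException e) {
            // Preserve the interrupt status for code further up the stack...
            Thread.currentThread().interrupt();

            // ...and surface it as a checked Ignite exception, as pingNode() does above.
            throw new IgniteInterruptedCheckedException(e);
        }
    }
}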
use of org.apache.ignite.internal.IgniteInterruptedCheckedException in project ignite by apache.
the class HadoopShuffleJob method flush.
/**
 * @return Future.
 */
@SuppressWarnings("unchecked")
public IgniteInternalFuture<?> flush() throws IgniteCheckedException {
    if (log.isDebugEnabled())
        log.debug("Flushing job " + job.id() + " on address " + locReduceAddr);
    flushed = true;
    if (totalReducerCnt == 0)
        return new GridFinishedFuture<>();
    if (!stripeMappers) {
        U.await(ioInitLatch);
        GridWorker snd0 = snd;
        if (snd0 != null) {
            if (log.isDebugEnabled())
                log.debug("Cancelling sender thread.");
            snd0.cancel();
            try {
                snd0.join();
                if (log.isDebugEnabled())
                    log.debug("Finished waiting for sending thread to complete on shuffle job flush: " + job.id());
            } catch (InterruptedException e) {
                throw new IgniteInterruptedCheckedException(e);
            }
        }
        // With flush.
        collectUpdatesAndSend(true);
        if (log.isDebugEnabled())
            log.debug("Finished sending collected updates to remote reducers: " + job.id());
    }
    GridCompoundFuture fut = new GridCompoundFuture<>();
    if (embedded) {
        boolean sent = false;
        for (Map.Entry<T, HadoopShuffleRemoteState> rmtStateEntry : remoteShuffleStates().entrySet()) {
            T dest = rmtStateEntry.getKey();
            HadoopShuffleRemoteState rmtState = rmtStateEntry.getValue();
            HadoopShuffleFinishRequest req = new HadoopShuffleFinishRequest(job.id(), rmtState.messageCount());
            io.apply(dest, req);
            if (log.isDebugEnabled())
                log.debug("Sent shuffle finish request [jobId=" + job.id() + ", dest=" + dest + ", req=" + req + ']');
            fut.add(rmtState.future());
            sent = true;
        }
        if (sent)
            fut.markInitialized();
        else
            return new GridFinishedFuture<>();
    } else {
        for (IgniteBiTuple<HadoopShuffleMessage, GridFutureAdapter<?>> tup : sentMsgs.values())
            fut.add(tup.get2());
        fut.markInitialized();
        if (log.isDebugEnabled())
            log.debug("Collected futures to compound futures for flush: " + sentMsgs.size());
    }
    return fut;
}
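Callers of flush() typically wait on the returned future, and the wait itself can also be interrupted. A minimal sketch of such a caller (the FlushAwait class is hypothetical; it assumes, consistent with the snippets above, that an interrupted IgniteInternalFuture.get() surfaces as IgniteInterruptedCheckedException):

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.IgniteInterruptedCheckedException;

public class FlushAwait {
    /** Waits for a flush future, distinguishing an interrupted wait from a shuffle failure. */
    public static void awaitFlush(IgniteInternalFuture<?> flushFut) throws IgniteCheckedException {
        try {
            // Blocks until all remote reducers acknowledge or a send fails.
            flushFut.get();
        } catch (IgniteInterruptedCheckedException e) {
            // The waiting thread was interrupted; restore the flag and rethrow.
            Thread.currentThread().interrupt();
            throw e;
        }
    }
}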