Use of org.apache.ignite.internal.IgniteInterruptedCheckedException in project ignite by apache.
In the class TcpDiscoveryGoogleStorageIpFinder, method init():
/**
 * Google Cloud Storage initialization.
 *
 * @throws IgniteSpiException In case of error.
 */
private void init() throws IgniteSpiException {
    if (initGuard.compareAndSet(false, true)) {
        if (srvcAccountId == null || srvcAccountP12FilePath == null || projectName == null || bucketName == null) {
            throw new IgniteSpiException("One or more of the required parameters is not set [serviceAccountId=" +
                srvcAccountId + ", serviceAccountP12FilePath=" + srvcAccountP12FilePath + ", projectName=" +
                projectName + ", bucketName=" + bucketName + "]");
        }

        try {
            NetHttpTransport httpTransport;

            try {
                httpTransport = GoogleNetHttpTransport.newTrustedTransport();
            }
            catch (GeneralSecurityException | IOException e) {
                throw new IgniteSpiException(e);
            }

            GoogleCredential cred;

            try {
                cred = new GoogleCredential.Builder()
                    .setTransport(httpTransport)
                    .setJsonFactory(JacksonFactory.getDefaultInstance())
                    .setServiceAccountId(srvcAccountId)
                    .setServiceAccountPrivateKeyFromP12File(new File(srvcAccountP12FilePath))
                    .setServiceAccountScopes(Collections.singleton(StorageScopes.DEVSTORAGE_FULL_CONTROL))
                    .build();
            }
            catch (Exception e) {
                throw new IgniteSpiException("Failed to authenticate on Google Cloud Platform", e);
            }

            try {
                storage = new Storage.Builder(httpTransport, JacksonFactory.getDefaultInstance(), cred)
                    .setApplicationName(projectName)
                    .build();
            }
            catch (Exception e) {
                throw new IgniteSpiException("Failed to open a storage for given project name: " + projectName, e);
            }

            boolean createBucket = false;

            try {
                Storage.Buckets.Get getBucket = storage.buckets().get(bucketName);

                getBucket.setProjection("full");

                getBucket.execute();
            }
            catch (GoogleJsonResponseException e) {
                if (e.getStatusCode() == 404) {
                    U.warn(log, "Bucket doesn't exist, will create it [bucketName=" + bucketName + "]");

                    createBucket = true;
                }
                else
                    throw new IgniteSpiException("Failed to open the bucket: " + bucketName, e);
            }
            catch (Exception e) {
                throw new IgniteSpiException("Failed to open the bucket: " + bucketName, e);
            }

            if (createBucket) {
                Bucket newBucket = new Bucket();

                newBucket.setName(bucketName);

                try {
                    Storage.Buckets.Insert insertBucket = storage.buckets().insert(projectName, newBucket);

                    insertBucket.setProjection("full");
                    insertBucket.setPredefinedDefaultObjectAcl("projectPrivate");

                    insertBucket.execute();
                }
                catch (Exception e) {
                    throw new IgniteSpiException("Failed to create the bucket: " + bucketName, e);
                }
            }
        }
        finally {
            initLatch.countDown();
        }
    }
    else {
        try {
            U.await(initLatch);
        }
        catch (IgniteInterruptedCheckedException e) {
            throw new IgniteSpiException("Thread has been interrupted.", e);
        }

        if (storage == null)
            throw new IgniteSpiException("IpFinder has not been initialized properly");
    }
}
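The IgniteInterruptedCheckedException usage is in the else branch: a thread that loses the initGuard race blocks on U.await(initLatch), and an interrupt surfaces as the checked IgniteInterruptedCheckedException, which init() rewraps into the SPI-level IgniteSpiException. For context, below is a minimal, hypothetical configuration sketch that would exercise this IP finder; the setter names follow the ignite-gce module, and all concrete values are placeholders.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
import org.apache.ignite.spi.discovery.tcp.ipfinder.gce.TcpDiscoveryGoogleStorageIpFinder;

public class GceDiscoveryExample {
    public static void main(String[] args) {
        // All four parameters validated by init() must be set, otherwise
        // the IP finder throws IgniteSpiException on first use.
        TcpDiscoveryGoogleStorageIpFinder ipFinder = new TcpDiscoveryGoogleStorageIpFinder();

        ipFinder.setServiceAccountId("my-account@my-project.iam.gserviceaccount.com"); // Placeholder.
        ipFinder.setServiceAccountP12FilePath("/path/to/key.p12");                     // Placeholder.
        ipFinder.setProjectName("my-project");                                         // Placeholder.
        ipFinder.setBucketName("my-discovery-bucket");                                 // Placeholder.

        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
        discoSpi.setIpFinder(ipFinder);

        IgniteConfiguration cfg = new IgniteConfiguration();
        cfg.setDiscoverySpi(discoSpi);

        try (Ignite ignite = Ignition.start(cfg)) {
            // Node joined the cluster; discovery addresses are kept in the GCS bucket.
        }
    }
}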
Use of org.apache.ignite.internal.IgniteInterruptedCheckedException in project ignite by apache.
In the class GridReduceQueryExecutor, method query():
/**
 * @param schemaName Schema name.
 * @param qry Query.
 * @param keepBinary Keep binary.
 * @param enforceJoinOrder Enforce join order of tables.
 * @param timeoutMillis Timeout in milliseconds.
 * @param cancel Query cancel.
 * @param params Query parameters.
 * @param parts Partitions.
 * @param lazy Lazy execution flag.
 * @return Rows iterator.
 */
public Iterator<List<?>> query(String schemaName, final GridCacheTwoStepQuery qry, boolean keepBinary,
    boolean enforceJoinOrder, int timeoutMillis, GridQueryCancel cancel, Object[] params, final int[] parts,
    boolean lazy) {
    if (F.isEmpty(params))
        params = EMPTY_PARAMS;

    final boolean isReplicatedOnly = qry.isReplicatedOnly();

    for (int attempt = 0; ; attempt++) {
        if (attempt != 0) {
            try {
                // Wait for exchange.
                Thread.sleep(attempt * 10);
            }
            catch (InterruptedException e) {
                Thread.currentThread().interrupt();

                throw new CacheException("Query was interrupted.", e);
            }
        }

        final long qryReqId = qryIdGen.incrementAndGet();

        final ReduceQueryRun r = new ReduceQueryRun(qryReqId, qry.originalSql(), schemaName,
            h2.connectionForSchema(schemaName), qry.mapQueries().size(), qry.pageSize(),
            U.currentTimeMillis(), cancel);

        AffinityTopologyVersion topVer = h2.readyTopologyVersion();

        // Check if topology has changed while retrying on locked topology.
        if (h2.serverTopologyChanged(topVer) && ctx.cache().context().lockedTopologyVersion(null) != null) {
            throw new CacheException(new TransactionException("Server topology is changed during query " +
                "execution inside a transaction. It's recommended to rollback and retry transaction."));
        }

        List<Integer> cacheIds = qry.cacheIds();

        Collection<ClusterNode> nodes;

        // Explicit partition mapping for unstable topology.
        Map<ClusterNode, IntArray> partsMap = null;

        // Explicit partitions mapping for query.
        Map<ClusterNode, IntArray> qryMap = null;

        // Partitions are not supported for queries over all replicated caches.
        if (parts != null) {
            boolean replicatedOnly = true;

            for (Integer cacheId : cacheIds) {
                if (!cacheContext(cacheId).isReplicated()) {
                    replicatedOnly = false;

                    break;
                }
            }

            // Fail if all caches are replicated and explicit partitions are set.
            if (replicatedOnly)
                throw new CacheException("Partitions are not supported for replicated caches");
        }

        if (qry.isLocal())
            nodes = singletonList(ctx.discovery().localNode());
        else {
            NodesForPartitionsResult nodesParts = nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);

            nodes = nodesParts.nodes();
            partsMap = nodesParts.partitionsMap();
            qryMap = nodesParts.queryPartitionsMap();

            if (nodes == null)
                continue; // Retry.

            assert !nodes.isEmpty();

            if (isReplicatedOnly || qry.explain()) {
                ClusterNode locNode = ctx.discovery().localNode();

                // Always prefer local node if possible.
                if (nodes.contains(locNode))
                    nodes = singletonList(locNode);
                else {
                    // Select random data node to run query on replicated data or
                    // to get EXPLAIN PLAN from a single node.
                    nodes = singletonList(F.rand(nodes));
                }
            }
        }

        int tblIdx = 0;

        final boolean skipMergeTbl = !qry.explain() && qry.skipMergeTable();

        final int segmentsPerIndex = qry.explain() || isReplicatedOnly ? 1 :
            findFirstPartitioned(cacheIds).config().getQueryParallelism();

        int replicatedQrysCnt = 0;

        final Collection<ClusterNode> finalNodes = nodes;

        for (GridCacheSqlQuery mapQry : qry.mapQueries()) {
            GridMergeIndex idx;

            if (!skipMergeTbl) {
                GridMergeTable tbl;

                try {
                    tbl = createMergeTable(r.connection(), mapQry, qry.explain());
                }
                catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }

                idx = tbl.getMergeIndex();

                fakeTable(r.connection(), tblIdx++).innerTable(tbl);
            }
            else
                idx = GridMergeIndexUnsorted.createDummy(ctx);

            // If the query has only replicated tables, we have to run it on a single node only.
            if (!mapQry.isPartitioned()) {
                ClusterNode node = F.rand(nodes);

                mapQry.node(node.id());

                replicatedQrysCnt++;

                // Replicated tables can have only 1 segment.
                idx.setSources(singletonList(node), 1);
            }
            else
                idx.setSources(nodes, segmentsPerIndex);

            idx.setPageSize(r.pageSize());

            r.indexes().add(idx);
        }

        r.latch(new CountDownLatch(isReplicatedOnly ? 1 :
            (r.indexes().size() - replicatedQrysCnt) * nodes.size() * segmentsPerIndex + replicatedQrysCnt));

        runs.put(qryReqId, r);

        boolean release = true;

        try {
            cancel.checkCancelled();

            if (ctx.clientDisconnected()) {
                throw new CacheException("Query was cancelled, client node disconnected.",
                    new IgniteClientDisconnectedException(ctx.cluster().clientReconnectFuture(),
                        "Client node disconnected."));
            }

            List<GridCacheSqlQuery> mapQrys = qry.mapQueries();

            if (qry.explain()) {
                mapQrys = new ArrayList<>(qry.mapQueries().size());

                for (GridCacheSqlQuery mapQry : qry.mapQueries())
                    mapQrys.add(new GridCacheSqlQuery("EXPLAIN " + mapQry.query())
                        .parameterIndexes(mapQry.parameterIndexes()));
            }

            final boolean distributedJoins = qry.distributedJoins();

            cancel.set(new Runnable() {
                @Override public void run() {
                    send(finalNodes, new GridQueryCancelRequest(qryReqId), null, false);
                }
            });

            boolean retry = false;

            // Always enforce join order on map side to have consistent behavior.
            int flags = GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER;

            if (distributedJoins)
                flags |= GridH2QueryRequest.FLAG_DISTRIBUTED_JOINS;

            if (qry.isLocal())
                flags |= GridH2QueryRequest.FLAG_IS_LOCAL;

            if (qry.explain())
                flags |= GridH2QueryRequest.FLAG_EXPLAIN;

            if (isReplicatedOnly)
                flags |= GridH2QueryRequest.FLAG_REPLICATED;

            if (lazy && mapQrys.size() == 1)
                flags |= GridH2QueryRequest.FLAG_LAZY;

            GridH2QueryRequest req = new GridH2QueryRequest()
                .requestId(qryReqId)
                .topologyVersion(topVer)
                .pageSize(r.pageSize())
                .caches(qry.cacheIds())
                .tables(distributedJoins ? qry.tables() : null)
                .partitions(convert(partsMap))
                .queries(mapQrys)
                .parameters(params)
                .flags(flags)
                .timeout(timeoutMillis)
                .schemaName(schemaName);

            if (send(nodes, req, parts == null ? null : new ExplicitPartitionsSpecializer(qryMap), false)) {
                awaitAllReplies(r, nodes, cancel);

                Object state = r.state();

                if (state != null) {
                    if (state instanceof CacheException) {
                        CacheException err = (CacheException)state;

                        if (err.getCause() instanceof IgniteClientDisconnectedException)
                            throw err;

                        if (wasCancelled(err))
                            throw new QueryCancelledException(); // Throw correct exception.

                        throw new CacheException("Failed to run map query remotely: " + err.getMessage(), err);
                    }

                    if (state instanceof AffinityTopologyVersion) {
                        retry = true;

                        // If remote node asks us to retry then we have outdated full partition map.
                        h2.awaitForReadyTopologyVersion((AffinityTopologyVersion)state);
                    }
                }
            }
            else
                retry = true; // Send failed.

            Iterator<List<?>> resIter = null;

            if (!retry) {
                if (skipMergeTbl) {
                    resIter = new GridMergeIndexIterator(this, finalNodes, r, qryReqId, qry.distributedJoins());

                    release = false;
                }
                else {
                    cancel.checkCancelled();

                    UUID locNodeId = ctx.localNodeId();

                    H2Utils.setupConnection(r.connection(), false, enforceJoinOrder);

                    GridH2QueryContext.set(new GridH2QueryContext(locNodeId, locNodeId, qryReqId, REDUCE)
                        .pageSize(r.pageSize()).distributedJoinMode(OFF));

                    try {
                        if (qry.explain())
                            return explainPlan(r.connection(), qry, params);

                        GridCacheSqlQuery rdc = qry.reduceQuery();

                        ResultSet res = h2.executeSqlQueryWithTimer(r.connection(),
                            rdc.query(),
                            F.asList(rdc.parameters(params)),
                            false, // The statement will cache some extra thread local objects.
                            timeoutMillis,
                            cancel);

                        resIter = new H2FieldsIterator(res);
                    }
                    finally {
                        GridH2QueryContext.clearThreadLocal();
                    }
                }
            }

            if (retry) {
                if (Thread.currentThread().isInterrupted())
                    throw new IgniteInterruptedCheckedException("Query was interrupted.");

                continue;
            }

            return new GridQueryCacheObjectsIterator(resIter, h2.objectContext(), keepBinary);
        }
        catch (IgniteCheckedException | RuntimeException e) {
            release = true;

            U.closeQuiet(r.connection());

            if (e instanceof CacheException) {
                if (wasCancelled((CacheException)e))
                    throw new CacheException("Failed to run reduce query locally.", new QueryCancelledException());

                throw (CacheException)e;
            }

            Throwable cause = e;

            if (e instanceof IgniteCheckedException) {
                Throwable disconnectedErr =
                    ((IgniteCheckedException)e).getCause(IgniteClientDisconnectedException.class);

                if (disconnectedErr != null)
                    cause = disconnectedErr;
            }

            throw new CacheException("Failed to run reduce query locally.", cause);
        }
        finally {
            if (release) {
                releaseRemoteResources(finalNodes, r, qryReqId, qry.distributedJoins());

                if (!skipMergeTbl) {
                    for (int i = 0, mapQrys = qry.mapQueries().size(); i < mapQrys; i++)
                        fakeTable(null, i).innerTable(null); // Drop all merge tables.
                }
            }
        }
    }
}
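For orientation, this reduce stage is the server-side half of an ordinary two-step SQL query issued through the public cache API. A minimal, illustrative sketch follows; it assumes an SQL-enabled cache, and the "Person" cache and table names are made up. It shows where the timeoutMillis and cancellation arguments ultimately originate.

import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class ReduceQueryExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // "Person" is a placeholder; any cache with SQL entities works.
            IgniteCache<Long, Object> cache = ignite.cache("Person");

            SqlFieldsQuery qry = new SqlFieldsQuery("SELECT name, age FROM Person WHERE age > ?")
                .setArgs(30)
                .setTimeout(10_000, TimeUnit.MILLISECONDS); // Becomes timeoutMillis on the reduce side.

            // On a distributed (two-step) plan, map queries fan out to data nodes and
            // GridReduceQueryExecutor.query(...) merges their results on this node.
            for (List<?> row : cache.query(qry).getAll())
                System.out.println(row);
        }
    }
}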
Use of org.apache.ignite.internal.IgniteInterruptedCheckedException in project ignite by apache.
In the class IgniteProcessProxy, method close():
/** {@inheritDoc} */
@Override public void close() throws IgniteException {
    if (locJvmGrid != null) {
        final CountDownLatch rmtNodeStoppedLatch = new CountDownLatch(1);

        locJvmGrid.events().localListen(new IgnitePredicateX<Event>() {
            @Override public boolean applyx(Event e) {
                if (((DiscoveryEvent)e).eventNode().id().equals(id)) {
                    rmtNodeStoppedLatch.countDown();

                    return false;
                }

                return true;
            }
        }, EventType.EVT_NODE_LEFT, EventType.EVT_NODE_FAILED);

        compute().run(new StopGridTask(localJvmGrid().name(), true));

        try {
            assert U.await(rmtNodeStoppedLatch, 15, TimeUnit.SECONDS) : "NodeId=" + id;
        }
        catch (IgniteInterruptedCheckedException e) {
            throw new IgniteException(e);
        }
    }

    try {
        getProcess().kill();
    }
    catch (Exception e) {
        X.printerr("Could not kill process after close.", e);
    }
}
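All three examples follow the same convention: blocking helpers such as U.await(...) convert InterruptedException into the checked IgniteInterruptedCheckedException, and each caller rewraps it into the unchecked exception type of its own layer (IgniteSpiException in the discovery SPI, CacheException in the query engine, IgniteException in the test proxy). A condensed sketch of the pattern, using a hypothetical latch and wrapper method:

import java.util.concurrent.CountDownLatch;
import org.apache.ignite.IgniteException;
import org.apache.ignite.internal.IgniteInterruptedCheckedException;
import org.apache.ignite.internal.util.typedef.internal.U;

public class AwaitPattern {
    // Hypothetical latch guarding some one-time initialization.
    private final CountDownLatch initLatch = new CountDownLatch(1);

    public void waitForInit() {
        try {
            // U.await() converts InterruptedException into the checked
            // IgniteInterruptedCheckedException...
            U.await(initLatch);
        }
        catch (IgniteInterruptedCheckedException e) {
            // ...and the caller rewraps it into the unchecked exception
            // of its own layer, preserving the original cause.
            throw new IgniteException("Thread has been interrupted.", e);
        }
    }
}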