Use of org.apache.ignite.spi.IgniteSpiException in project ignite by apache.
The class TcpDiscoveryKubernetesIpFinder, method getRegisteredAddresses().
/** {@inheritDoc} */
@Override public Collection<InetSocketAddress> getRegisteredAddresses() throws IgniteSpiException {
    init();

    Collection<InetSocketAddress> addrs = new ArrayList<>();

    try {
        if (log.isDebugEnabled())
            log.debug("Getting Apache Ignite endpoints from: " + url);

        HttpsURLConnection conn = (HttpsURLConnection)url.openConnection();

        conn.setHostnameVerifier(trustAllHosts);
        conn.setSSLSocketFactory(ctx.getSocketFactory());
        conn.addRequestProperty("Authorization", "Bearer " + serviceAccountToken(accountToken));

        // Sending the request and processing the response.
        ObjectMapper mapper = new ObjectMapper();

        Endpoints endpoints = mapper.readValue(conn.getInputStream(), Endpoints.class);

        if (endpoints != null) {
            if (endpoints.subsets != null && !endpoints.subsets.isEmpty()) {
                for (Subset subset : endpoints.subsets) {
                    if (subset.addresses != null && !subset.addresses.isEmpty()) {
                        for (Address address : subset.addresses) {
                            // Port 0: the discovery SPI fills in the actual port later.
                            addrs.add(new InetSocketAddress(address.ip, 0));

                            if (log.isDebugEnabled())
                                log.debug("Added an address to the list: " + address.ip);
                        }
                    }
                }
            }
        }
    }
    catch (Exception e) {
        throw new IgniteSpiException("Failed to retrieve Ignite pods IP addresses.", e);
    }

    return addrs;
}
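For context, this is how the IP finder is typically plugged into a node configuration. A minimal sketch, assuming the standard ignite-kubernetes module layout; the service name and namespace values are placeholders (the setter names match the ones echoed by the init() validation message further down):

import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
import org.apache.ignite.spi.discovery.tcp.ipfinder.kubernetes.TcpDiscoveryKubernetesIpFinder;

public class KubernetesDiscoveryExample {
    public static void main(String[] args) {
        TcpDiscoveryKubernetesIpFinder ipFinder = new TcpDiscoveryKubernetesIpFinder();

        // Placeholder values: the Kubernetes service fronting the Ignite pods.
        ipFinder.setServiceName("ignite");
        ipFinder.setNamespace("default");

        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
        discoSpi.setIpFinder(ipFinder);

        IgniteConfiguration cfg = new IgniteConfiguration();
        cfg.setDiscoverySpi(discoSpi);

        // The SPI invokes getRegisteredAddresses() above while the node joins.
        Ignition.start(cfg);
    }
}

During start-up, getRegisteredAddresses() asks the Kubernetes API for the service's endpoints and returns one InetSocketAddress per pod IP, leaving the discovery port to be filled in by the SPI.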
Use of org.apache.ignite.spi.IgniteSpiException in project ignite by apache.
The class TcpDiscoveryKubernetesIpFinder, method init().
/**
 * Kubernetes IP finder initialization.
 *
 * @throws IgniteSpiException In case of error.
 */
private void init() throws IgniteSpiException {
    if (initGuard.compareAndSet(false, true)) {
        if (serviceName == null || serviceName.isEmpty() ||
            namespace == null || namespace.isEmpty() ||
            master == null || master.isEmpty() ||
            accountToken == null || accountToken.isEmpty()) {
            throw new IgniteSpiException("One or more configuration parameters are invalid [setServiceName=" +
                serviceName + ", setNamespace=" + namespace + ", setMasterUrl=" + master +
                ", setAccountToken=" + accountToken + "]");
        }

        try {
            // Preparing the URL and SSL context to be used for connection purposes.
            String path = String.format("/api/v1/namespaces/%s/endpoints/%s", namespace, serviceName);

            url = new URL(master + path);

            ctx = SSLContext.getInstance("SSL");

            ctx.init(null, trustAll, new SecureRandom());
        }
        catch (Exception e) {
            throw new IgniteSpiException("Failed to connect to Ignite's Kubernetes Service.", e);
        }
        finally {
            initLatch.countDown();
        }
    }
    else {
        try {
            U.await(initLatch);
        }
        catch (IgniteInterruptedCheckedException e) {
            throw new IgniteSpiException("Thread has been interrupted.", e);
        }

        if (url == null || ctx == null)
            throw new IgniteSpiException("IP finder has not been initialized properly.");
    }
}
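For instance, with the placeholder values namespace = default and serviceName = ignite, and master pointing at a typical in-cluster API server address such as https://kubernetes.default.svc.cluster.local:443, init() would compose https://kubernetes.default.svc.cluster.local:443/api/v1/namespaces/default/endpoints/ignite, i.e. the standard Kubernetes Endpoints API path that getRegisteredAddresses() then queries.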
Use of org.apache.ignite.spi.IgniteSpiException in project ignite by apache.
The class TcpDiscoveryCloudIpFinder, method initComputeService().
/**
 * Initializes Apache jclouds compute service.
 */
private void initComputeService() {
    if (initGuard.compareAndSet(false, true))
        try {
            if (provider == null)
                throw new IgniteSpiException("Cloud provider is not set.");

            if (identity == null)
                throw new IgniteSpiException("Cloud identity is not set.");

            if (credential != null && credentialPath != null)
                throw new IgniteSpiException("Both credential and credentialPath are set. Use only one method.");

            if (credentialPath != null)
                credential = getCredentialFromFile();

            try {
                ContextBuilder ctxBuilder = ContextBuilder.newBuilder(provider);

                ctxBuilder.credentials(identity, credential);

                Properties properties = new Properties();

                properties.setProperty(Constants.PROPERTY_SO_TIMEOUT, JCLOUD_CONNECTION_TIMEOUT);
                properties.setProperty(Constants.PROPERTY_CONNECTION_TIMEOUT, JCLOUD_CONNECTION_TIMEOUT);

                if (!F.isEmpty(regions))
                    properties.setProperty(LocationConstants.PROPERTY_REGIONS, keysSetToStr(regions));

                if (!F.isEmpty(zones))
                    properties.setProperty(LocationConstants.PROPERTY_ZONES, keysSetToStr(zones));

                ctxBuilder.overrides(properties);

                computeService = ctxBuilder.buildView(ComputeServiceContext.class).getComputeService();

                if (!F.isEmpty(zones) || !F.isEmpty(regions)) {
                    nodesFilter = new Predicate<ComputeMetadata>() {
                        @Override public boolean apply(ComputeMetadata computeMetadata) {
                            String region = null;
                            String zone = null;

                            Location location = computeMetadata.getLocation();

                            while (location != null) {
                                switch (location.getScope()) {
                                    case ZONE:
                                        zone = location.getId();
                                        break;

                                    case REGION:
                                        region = location.getId();
                                        break;
                                }

                                location = location.getParent();
                            }

                            if (regions != null && region != null && !regions.contains(region))
                                return false;

                            if (zones != null && zone != null && !zones.contains(zone))
                                return false;

                            return true;
                        }
                    };
                }
            }
            catch (Exception e) {
                throw new IgniteSpiException("Failed to connect to the provider: " + provider, e);
            }
        }
        finally {
            initLatch.countDown();
        }
    else {
        try {
            U.await(initLatch);
        }
        catch (IgniteInterruptedCheckedException e) {
            throw new IgniteSpiException("Thread has been interrupted.", e);
        }

        if (computeService == null)
            throw new IgniteSpiException("Ip finder has not been initialized properly.");
    }
}
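As with the Kubernetes finder, initComputeService() runs lazily on first use and the configuration comes from plain setters. A minimal sketch, assuming the ignite-cloud module's setter names (they mirror the provider/identity/credential/regions fields validated above; verify against your Ignite version), with placeholder provider id and credentials:

import java.util.Arrays;

import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
import org.apache.ignite.spi.discovery.tcp.ipfinder.cloud.TcpDiscoveryCloudIpFinder;

public class CloudDiscoveryExample {
    public static void main(String[] args) {
        TcpDiscoveryCloudIpFinder ipFinder = new TcpDiscoveryCloudIpFinder();

        // Placeholder jclouds provider id and credentials. initComputeService()
        // rejects a missing provider/identity and forbids setting both
        // credential and credentialPath at once.
        ipFinder.setProvider("aws-ec2");
        ipFinder.setIdentity("ACCESS_KEY_ID");
        ipFinder.setCredential("SECRET_ACCESS_KEY");
        ipFinder.setRegions(Arrays.asList("us-east-1"));

        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
        discoSpi.setIpFinder(ipFinder);

        IgniteConfiguration cfg = new IgniteConfiguration();
        cfg.setDiscoverySpi(discoSpi);

        Ignition.start(cfg);
    }
}

Restricting regions or zones also installs the nodesFilter predicate above, so only compute nodes whose jclouds Location chain matches the configured regions and zones are considered.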
Use of org.apache.ignite.spi.IgniteSpiException in project ignite by apache.
The class SecurityUtils, method withSecurityContext().
/**
 * Marshals specified security context and adds it to the node attributes.
 *
 * @param secCtx Security context to be added.
 * @param nodeAttrs Cluster node attributes to which security context attribute is to be added.
 * @param marsh Marshaller.
 * @return New copy of node attributes with the security context attribute added.
 * @throws IgniteCheckedException If security context serialization fails.
 */
public static Map<String, Object> withSecurityContext(SecurityContext secCtx, Map<String, Object> nodeAttrs,
    Marshaller marsh) throws IgniteCheckedException {
    if (!(secCtx instanceof Serializable))
        throw new IgniteSpiException("Authentication subject is not serializable.");

    Map<String, Object> res = new HashMap<>(nodeAttrs);

    res.put(IgniteNodeAttributes.ATTR_SECURITY_SUBJECT_V2, U.marshal(marsh, secCtx));

    return res;
}
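For orientation, a rough sketch of the call pattern. This helper is internal, and secCtx and marsh below stand in for objects that Ignite's security machinery supplies, so this is illustrative rather than compilable as-is:

// Sketch only: secCtx and marsh are provided by Ignite internals.
Map<String, Object> attrs = new HashMap<>(); // local node attributes being assembled

Map<String, Object> enriched = SecurityUtils.withSecurityContext(secCtx, attrs, marsh);

// 'attrs' is left untouched; 'enriched' is a copy that additionally maps
// ATTR_SECURITY_SUBJECT_V2 to the marshalled security context.

Two design points worth noting: the Serializable check fails fast before any marshalling happens, and the defensive HashMap copy means the caller's attribute map is never mutated.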
Use of org.apache.ignite.spi.IgniteSpiException in project ignite by apache.
The class GridDhtPartitionDemander, method handleSupplyMessage().
/**
 * Handles supply message from {@code nodeId} with specified {@code topicId}.
 *
 * Supply message contains entries to populate rebalancing partitions.
 *
 * There is a cyclic process:
 * populate rebalancing partitions with entries from the Supply message;
 * if not all partitions specified in {@link #rebalanceFut} were rebalanced or marked as missed,
 * send a new Demand message to request the next batch of entries.
 *
 * @param nodeId Node id.
 * @param supplyMsg Supply message.
 */
public void handleSupplyMessage(final UUID nodeId, final GridDhtPartitionSupplyMessage supplyMsg) {
    AffinityTopologyVersion topVer = supplyMsg.topologyVersion();

    RebalanceFuture fut = rebalanceFut;

    ClusterNode node = ctx.node(nodeId);

    fut.cancelLock.readLock().lock();

    try {
        String errMsg = null;

        if (fut.isDone())
            errMsg = "rebalance completed";
        else if (node == null)
            errMsg = "supplier has left cluster";
        else if (!rebalanceFut.isActual(supplyMsg.rebalanceId()))
            errMsg = "topology changed";

        if (errMsg != null) {
            if (log.isDebugEnabled()) {
                log.debug("Supply message has been ignored (" + errMsg + ") [" +
                    demandRoutineInfo(nodeId, supplyMsg) + ']');
            }

            return;
        }

        if (log.isDebugEnabled())
            log.debug("Received supply message [" + demandRoutineInfo(nodeId, supplyMsg) + ']');

        // Check whether there was an error during the supply process.
        Throwable msgExc = null;

        final GridDhtPartitionTopology top = grp.topology();

        if (supplyMsg.classError() != null)
            msgExc = supplyMsg.classError();
        else if (supplyMsg.error() != null)
            msgExc = supplyMsg.error();

        if (msgExc != null) {
            GridDhtPartitionMap partMap = top.localPartitionMap();

            Set<Integer> unstableParts = supplyMsg.infos().keySet().stream()
                .filter(p -> partMap.get(p) == MOVING).collect(Collectors.toSet());

            U.error(log, "Rebalancing routine has failed, some partitions could be unavailable for reading" +
                " [" + demandRoutineInfo(nodeId, supplyMsg) +
                ", unavailablePartitions=" + S.compact(unstableParts) + ']', msgExc);

            fut.error(nodeId);

            return;
        }

        fut.receivedBytes.addAndGet(supplyMsg.messageSize());

        if (grp.sharedGroup()) {
            for (GridCacheContext cctx : grp.caches()) {
                if (cctx.statisticsEnabled()) {
                    long keysCnt = supplyMsg.keysForCache(cctx.cacheId());

                    if (keysCnt != -1)
                        cctx.cache().metrics0().onRebalancingKeysCountEstimateReceived(keysCnt);

                    // Cannot be calculated per cache.
                    cctx.cache().metrics0().onRebalanceBatchReceived(supplyMsg.messageSize());
                }
            }
        }
        else {
            GridCacheContext cctx = grp.singleCacheContext();

            if (cctx.statisticsEnabled()) {
                if (supplyMsg.estimatedKeysCount() != -1)
                    cctx.cache().metrics0().onRebalancingKeysCountEstimateReceived(supplyMsg.estimatedKeysCount());

                cctx.cache().metrics0().onRebalanceBatchReceived(supplyMsg.messageSize());
            }
        }

        try {
            AffinityAssignment aff = grp.affinity().cachedAffinity(topVer);

            // Preload.
            for (Map.Entry<Integer, CacheEntryInfoCollection> e : supplyMsg.infos().entrySet()) {
                int p = e.getKey();

                if (aff.get(p).contains(ctx.localNode())) {
                    GridDhtLocalPartition part;

                    try {
                        part = top.localPartition(p, topVer, true);
                    }
                    catch (GridDhtInvalidPartitionException err) {
                        assert !topVer.equals(top.lastTopologyChangeVersion());

                        if (log.isDebugEnabled()) {
                            log.debug("Failed to get partition for rebalancing [" +
                                "grp=" + grp.cacheOrGroupName() +
                                ", err=" + err +
                                ", p=" + p +
                                ", topVer=" + topVer +
                                ", lastTopVer=" + top.lastTopologyChangeVersion() + ']');
                        }

                        continue;
                    }

                    assert part != null;

                    boolean last = supplyMsg.last().containsKey(p);

                    if (part.state() == MOVING) {
                        boolean reserved = part.reserve();

                        assert reserved : "Failed to reserve partition [igniteInstanceName=" +
                            ctx.igniteInstanceName() + ", grp=" + grp.cacheOrGroupName() + ", part=" + part + ']';

                        part.beforeApplyBatch(last);

                        try {
                            long[] byteRcv = { 0 };

                            GridIterableAdapter<GridCacheEntryInfo> infosWrap = new GridIterableAdapter<>(
                                new IteratorWrapper<GridCacheEntryInfo>(e.getValue().infos().iterator()) {
                                    /** {@inheritDoc} */
                                    @Override public GridCacheEntryInfo nextX() throws IgniteCheckedException {
                                        GridCacheEntryInfo i = super.nextX();

                                        byteRcv[0] += i.marshalledSize(ctx.cacheObjectContext(i.cacheId()));

                                        return i;
                                    }
                                });

                            try {
                                if (grp.mvccEnabled())
                                    mvccPreloadEntries(topVer, node, p, infosWrap);
                                else {
                                    preloadEntries(topVer, part, infosWrap);

                                    rebalanceFut.onReceivedKeys(p, e.getValue().infos().size(), node);
                                }
                            }
                            catch (GridDhtInvalidPartitionException ignored) {
                                if (log.isDebugEnabled())
                                    log.debug("Partition became invalid during rebalancing (will ignore): " + p);
                            }

                            fut.processed.get(p).increment();

                            fut.onReceivedBytes(p, byteRcv[0], node);

                            // If message was last for this partition, then we take ownership.
                            if (last)
                                ownPartition(fut, p, nodeId, supplyMsg);
                        }
                        finally {
                            part.release();
                        }
                    }
                    else {
                        if (last)
                            fut.partitionDone(nodeId, p, false);

                        if (log.isDebugEnabled())
                            log.debug("Skipping rebalancing partition (state is not MOVING): " +
                                '[' + demandRoutineInfo(nodeId, supplyMsg) + ", p=" + p + ']');
                    }
                }
                else {
                    fut.partitionDone(nodeId, p, false);

                    if (log.isDebugEnabled())
                        log.debug("Skipping rebalancing partition (affinity changed): " +
                            '[' + demandRoutineInfo(nodeId, supplyMsg) + ", p=" + p + ']');
                }
            }

            // Only request partitions based on latest topology version.
            for (Integer miss : supplyMsg.missed()) {
                if (aff.get(miss).contains(ctx.localNode()))
                    fut.partitionMissed(nodeId, miss);
            }

            for (Integer miss : supplyMsg.missed())
                fut.partitionDone(nodeId, miss, false);

            GridDhtPartitionDemandMessage d = new GridDhtPartitionDemandMessage(
                supplyMsg.rebalanceId(), supplyMsg.topologyVersion(), grp.groupId());

            d.timeout(grp.preloader().timeout());

            if (!fut.isDone()) {
                // Send demand message.
                try {
                    ctx.io().sendOrderedMessage(node, d.topic(),
                        d.convertIfNeeded(node.version()), grp.ioPolicy(), grp.preloader().timeout());

                    if (log.isDebugEnabled())
                        log.debug("Send next demand message [" + demandRoutineInfo(nodeId, supplyMsg) + "]");
                }
                catch (ClusterTopologyCheckedException e) {
                    if (log.isDebugEnabled())
                        log.debug("Supplier has left [" + demandRoutineInfo(nodeId, supplyMsg) +
                            ", errMsg=" + e.getMessage() + ']');
                }
            }
            else {
                if (log.isDebugEnabled())
                    log.debug("Will not request next demand message [" + demandRoutineInfo(nodeId, supplyMsg) +
                        ", rebalanceFuture=" + fut + ']');
            }
        }
        catch (IgniteSpiException | IgniteCheckedException e) {
            fut.error(nodeId);

            LT.error(log, e, "Error during rebalancing [" + demandRoutineInfo(nodeId, supplyMsg) +
                ", err=" + e + ']');
        }
    }
    finally {
        fut.cancelLock.readLock().unlock();
    }
}
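The Javadoc's cyclic process can be compressed into a toy model. The sketch below is purely illustrative (plain Java collections, none of Ignite's types): one side drains batches the way the demander consumes supply messages, re-issuing a demand after each batch until nothing is left:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class RebalanceCycleSketch {
    public static void main(String[] args) {
        // "Supplier" holding two batches of entries still to be rebalanced.
        Deque<List<String>> supplierBatches = new ArrayDeque<>(List.of(
            List.of("k1", "k2"), List.of("k3")));

        Set<String> rebalanced = new HashSet<>();

        // Demand -> Supply -> apply -> Demand again, until all batches are drained.
        while (!supplierBatches.isEmpty()) {
            List<String> supplyMsg = supplierBatches.poll(); // receive a supply message

            rebalanced.addAll(supplyMsg); // populate rebalancing partitions

            // handleSupplyMessage() above sends the next demand message at this
            // point, as long as the rebalance future is not yet done.
        }

        System.out.println("Rebalanced entries: " + rebalanced);
    }
}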