Use of org.apache.ignite.spi.IgniteSpiException in project ignite by apache.
The class S3CheckpointSpi, method spiStart.
/**
 * {@inheritDoc}
 */
@SuppressWarnings({"BusyWait"})
@Override
public void spiStart(String igniteInstanceName) throws IgniteSpiException {
    // Start SPI start stopwatch.
    startStopwatch();

    assertParameter(cred != null, "awsCredentials != null");

    if (log.isDebugEnabled()) {
        log.debug(configInfo("awsCredentials", cred));
        log.debug(configInfo("clientConfiguration", cfg));
        log.debug(configInfo("bucketNameSuffix", bucketNameSuffix));
        log.debug(configInfo("bucketEndpoint", bucketEndpoint));
        log.debug(configInfo("SSEAlgorithm", sseAlg));
    }

    if (cfg == null)
        U.warn(log, "Amazon client configuration is not set (will use default).");

    if (F.isEmpty(bucketNameSuffix)) {
        U.warn(log, "Bucket name suffix is null or empty (will use default bucket name).");

        bucketName = BUCKET_NAME_PREFIX + DFLT_BUCKET_NAME_SUFFIX;
    }
    else
        bucketName = BUCKET_NAME_PREFIX + bucketNameSuffix;

    s3 = cfg != null ? new AmazonS3Client(cred, cfg) : new AmazonS3Client(cred);

    if (!F.isEmpty(bucketEndpoint))
        s3.setEndpoint(bucketEndpoint);

    if (!s3.doesBucketExist(bucketName)) {
        try {
            s3.createBucket(bucketName);

            if (log.isDebugEnabled())
                log.debug("Created S3 bucket: " + bucketName);

            // Wait until the newly created bucket becomes visible.
            while (!s3.doesBucketExist(bucketName)) {
                try {
                    U.sleep(200);
                }
                catch (IgniteInterruptedCheckedException e) {
                    throw new IgniteSpiException("Thread has been interrupted.", e);
                }
            }
        }
        catch (AmazonClientException e) {
            // Bucket creation failed; check whether another node created it concurrently.
            try {
                if (!s3.doesBucketExist(bucketName))
                    throw new IgniteSpiException("Failed to create bucket: " + bucketName, e);
            }
            catch (AmazonClientException ignored) {
                throw new IgniteSpiException("Failed to create bucket: " + bucketName, e);
            }
        }
    }

    Collection<S3TimeData> s3TimeDataLst = new LinkedList<>();

    try {
        ObjectListing list = s3.listObjects(bucketName);

        while (true) {
            for (S3ObjectSummary sum : list.getObjectSummaries()) {
                S3CheckpointData data = read(sum.getKey());

                if (data != null) {
                    s3TimeDataLst.add(new S3TimeData(data.getExpireTime(), data.getKey()));

                    if (log.isDebugEnabled())
                        log.debug("Registered existing checkpoint from key: " + data.getKey());
                }
            }

            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list);
            else
                break;
        }
    }
    catch (AmazonClientException e) {
        throw new IgniteSpiException("Failed to read checkpoint bucket: " + bucketName, e);
    }
    catch (IgniteCheckedException e) {
        throw new IgniteSpiException("Failed to marshal/unmarshal objects in bucket: " + bucketName, e);
    }

    // Track expiration only for data that was created by this node.
    timeoutWrk = new S3TimeoutWorker();

    timeoutWrk.add(s3TimeDataLst);

    timeoutWrk.start();

    registerMBean(igniteInstanceName, new S3CheckpointSpiMBeanImpl(this), S3CheckpointSpiMBean.class);

    // Ack ok start.
    if (log.isDebugEnabled())
        log.debug(startInfo());
}
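For context, this SPI runs only after it is handed to an IgniteConfiguration before node start. Below is a minimal wiring sketch, not taken from the Ignite sources: it assumes the standard setters on S3CheckpointSpi (setAwsCredentials, setBucketNameSuffix) and uses placeholder credential values.

// Hypothetical wiring example; access/secret key values are placeholders.
IgniteConfiguration igniteCfg = new IgniteConfiguration();

S3CheckpointSpi checkpointSpi = new S3CheckpointSpi();

// BasicAWSCredentials comes from the AWS SDK for Java v1 used by this module.
checkpointSpi.setAwsCredentials(new BasicAWSCredentials("ACCESS_KEY", "SECRET_KEY"));

// Optional: appended to BUCKET_NAME_PREFIX to form the bucket name checked in spiStart().
checkpointSpi.setBucketNameSuffix("my-cluster-checkpoints");

igniteCfg.setCheckpointSpi(checkpointSpi);

Ignition.start(igniteCfg);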
Use of org.apache.ignite.spi.IgniteSpiException in project ignite by apache.
The class TcpDiscoveryS3IpFinder, method getRegisteredAddresses.
/**
 * {@inheritDoc}
 */
@Override
public Collection<InetSocketAddress> getRegisteredAddresses() throws IgniteSpiException {
    initClient();

    Collection<InetSocketAddress> addrs = new LinkedList<>();

    try {
        ObjectListing list = s3.listObjects(bucketName);

        while (true) {
            for (S3ObjectSummary sum : list.getObjectSummaries()) {
                String key = sum.getKey();

                StringTokenizer st = new StringTokenizer(key, DELIM);

                if (st.countTokens() != 2)
                    U.error(log, "Failed to parse S3 entry due to invalid format: " + key);
                else {
                    String addrStr = st.nextToken();
                    String portStr = st.nextToken();

                    int port = -1;

                    try {
                        port = Integer.parseInt(portStr);
                    }
                    catch (NumberFormatException e) {
                        U.error(log, "Failed to parse port for S3 entry: " + key, e);
                    }

                    if (port != -1)
                        try {
                            addrs.add(new InetSocketAddress(addrStr, port));
                        }
                        catch (IllegalArgumentException e) {
                            // InetSocketAddress rejects ports outside the 0-65535 range.
                            U.error(log, "Failed to parse port for S3 entry: " + key, e);
                        }
                }
            }

            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list);
            else
                break;
        }
    }
    catch (AmazonClientException e) {
        throw new IgniteSpiException("Failed to list objects in the bucket: " + bucketName, e);
    }

    return addrs;
}
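As a usage note, this IP finder is typically plugged into TcpDiscoverySpi, which then calls getRegisteredAddresses() to find other nodes' address:port entries in the bucket. A minimal sketch, assuming the usual setters (setAwsCredentials, setBucketName) and placeholder values:

// Hypothetical wiring example; bucket name and credentials are placeholders.
TcpDiscoveryS3IpFinder ipFinder = new TcpDiscoveryS3IpFinder();
ipFinder.setAwsCredentials(new BasicAWSCredentials("ACCESS_KEY", "SECRET_KEY"));
ipFinder.setBucketName("my-discovery-bucket");

TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
discoSpi.setIpFinder(ipFinder);

IgniteConfiguration igniteCfg = new IgniteConfiguration();
igniteCfg.setDiscoverySpi(discoSpi);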
Use of org.apache.ignite.spi.IgniteSpiException in project ignite by apache.
The class S3CheckpointSpiSelfTest, method afterSpiStopped.
/**
 * @throws Exception If error.
 */
@Override
protected void afterSpiStopped() throws Exception {
    AWSCredentials cred = new BasicAWSCredentials(IgniteS3TestSuite.getAccessKey(),
        IgniteS3TestSuite.getSecretKey());

    AmazonS3 s3 = new AmazonS3Client(cred);

    String bucketName = S3CheckpointSpi.BUCKET_NAME_PREFIX + "unit-test-bucket";

    try {
        ObjectListing list = s3.listObjects(bucketName);

        while (true) {
            for (S3ObjectSummary sum : list.getObjectSummaries())
                s3.deleteObject(bucketName, sum.getKey());

            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list);
            else
                break;
        }
    }
    catch (AmazonClientException e) {
        throw new IgniteSpiException("Failed to read checkpoint bucket: " + bucketName, e);
    }
}
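If the test bucket itself should not outlive the suite, the cleanup above can be followed by deleteBucket, which the AWS SDK only permits on an empty bucket. This extra step is a sketch, not part of the original test:

// Assumption: runs after the object-deletion loop above, so the bucket is empty.
try {
    s3.deleteBucket(bucketName);
}
catch (AmazonClientException e) {
    throw new IgniteSpiException("Failed to delete checkpoint bucket: " + bucketName, e);
}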
Use of org.apache.ignite.spi.IgniteSpiException in project ignite by apache.
The class GridDhtPartitionDemander, method handleSupplyMessage.
/**
 * Handles supply message from {@code nodeId} with specified {@code topicId}.
 *
 * Supply message contains entries to populate rebalancing partitions.
 *
 * There is a cyclic process:
 * Populate rebalancing partitions with entries from the Supply message.
 * If not all partitions specified in {@link #rebalanceFut} were rebalanced or marked as missed,
 * send a new Demand message to request the next batch of entries.
 *
 * @param topicId Topic id.
 * @param nodeId Node id.
 * @param supply Supply message.
 */
public void handleSupplyMessage(int topicId, final UUID nodeId, final GridDhtPartitionSupplyMessage supply) {
    AffinityTopologyVersion topVer = supply.topologyVersion();

    final RebalanceFuture fut = rebalanceFut;

    ClusterNode node = ctx.node(nodeId);

    if (node == null)
        return;

    // Topology already changed (for the future that the supply message is based on).
    if (topologyChanged(fut))
        return;

    if (!fut.isActual(supply.rebalanceId())) {
        // Supply message based on another future.
        return;
    }

    if (log.isDebugEnabled())
        log.debug("Received supply message [grp=" + grp.cacheOrGroupName() + ", msg=" + supply + ']');

    // Check whether there were class loading errors on unmarshal.
    if (supply.classError() != null) {
        U.warn(log, "Rebalancing from node cancelled [grp=" + grp.cacheOrGroupName() + ", node=" + nodeId +
            "]. Class got undeployed during preloading: " + supply.classError());

        fut.cancel(nodeId);

        return;
    }

    final GridDhtPartitionTopology top = grp.topology();

    if (grp.sharedGroup()) {
        for (GridCacheContext cctx : grp.caches()) {
            if (cctx.statisticsEnabled()) {
                long keysCnt = supply.keysForCache(cctx.cacheId());

                if (keysCnt != -1)
                    cctx.cache().metrics0().onRebalancingKeysCountEstimateReceived(keysCnt);

                // Can not be calculated per cache.
                cctx.cache().metrics0().onRebalanceBatchReceived(supply.messageSize());
            }
        }
    }
    else {
        GridCacheContext cctx = grp.singleCacheContext();

        if (cctx.statisticsEnabled()) {
            if (supply.estimatedKeysCount() != -1)
                cctx.cache().metrics0().onRebalancingKeysCountEstimateReceived(supply.estimatedKeysCount());

            cctx.cache().metrics0().onRebalanceBatchReceived(supply.messageSize());
        }
    }

    try {
        AffinityAssignment aff = grp.affinity().cachedAffinity(topVer);

        GridCacheContext cctx = grp.sharedGroup() ? null : grp.singleCacheContext();

        // Preload.
        for (Map.Entry<Integer, CacheEntryInfoCollection> e : supply.infos().entrySet()) {
            int p = e.getKey();

            if (aff.get(p).contains(ctx.localNode())) {
                GridDhtLocalPartition part = top.localPartition(p, topVer, true);

                assert part != null;

                boolean last = supply.last().containsKey(p);

                if (part.state() == MOVING) {
                    boolean reserved = part.reserve();

                    assert reserved : "Failed to reserve partition [igniteInstanceName=" +
                        ctx.igniteInstanceName() + ", grp=" + grp.cacheOrGroupName() + ", part=" + part + ']';

                    part.lock();

                    try {
                        // Loop through all received entries and try to preload them.
                        for (GridCacheEntryInfo entry : e.getValue().infos()) {
                            if (!preloadEntry(node, p, entry, topVer)) {
                                if (log.isDebugEnabled())
                                    log.debug("Got entries for invalid partition during " +
                                        "preloading (will skip) [p=" + p + ", entry=" + entry + ']');

                                break;
                            }

                            if (grp.sharedGroup() && (cctx == null || cctx.cacheId() != entry.cacheId()))
                                cctx = ctx.cacheContext(entry.cacheId());

                            if (cctx != null && cctx.statisticsEnabled())
                                cctx.cache().metrics0().onRebalanceKeyReceived();
                        }

                        // If the message was the last one for this partition, then we take ownership.
                        if (last) {
                            top.own(part);

                            fut.partitionDone(nodeId, p);

                            if (log.isDebugEnabled())
                                log.debug("Finished rebalancing partition: " + part);
                        }
                    }
                    finally {
                        part.unlock();
                        part.release();
                    }
                }
                else {
                    if (last)
                        fut.partitionDone(nodeId, p);

                    if (log.isDebugEnabled())
                        log.debug("Skipping rebalancing partition (state is not MOVING): " + part);
                }
            }
            else {
                fut.partitionDone(nodeId, p);

                if (log.isDebugEnabled())
                    log.debug("Skipping rebalancing partition (it does not belong on current node): " + p);
            }
        }

        // Only request partitions based on latest topology version.
        for (Integer miss : supply.missed()) {
            if (aff.get(miss).contains(ctx.localNode()))
                fut.partitionMissed(nodeId, miss);
        }

        for (Integer miss : supply.missed())
            fut.partitionDone(nodeId, miss);

        GridDhtPartitionDemandMessage d = new GridDhtPartitionDemandMessage(supply.rebalanceId(),
            supply.topologyVersion(), grp.groupId());

        d.timeout(grp.config().getRebalanceTimeout());

        d.topic(rebalanceTopics.get(topicId));

        if (!topologyChanged(fut) && !fut.isDone()) {
            // Send demand message.
            try {
                ctx.io().sendOrderedMessage(node, rebalanceTopics.get(topicId),
                    d.convertIfNeeded(node.version()), grp.ioPolicy(), grp.config().getRebalanceTimeout());
            }
            catch (ClusterTopologyCheckedException e) {
                if (log.isDebugEnabled()) {
                    log.debug("Node left during rebalancing [grp=" + grp.cacheOrGroupName() +
                        ", node=" + node.id() + ", msg=" + e.getMessage() + ']');
                }
            }
        }
    }
    catch (IgniteSpiException | IgniteCheckedException e) {
        LT.error(log, e, "Error during rebalancing [grp=" + grp.cacheOrGroupName() +
            ", srcNode=" + node.id() + ", err=" + e + ']');
    }
}
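To make the javadoc's "cyclic process" concrete, here is a heavily simplified sketch. RebalanceState, SupplyBatch, awaitNextSupplyBatch, applyBatch and sendDemand are illustrative names invented for this example, not Ignite APIs:

// Illustrative pseudocode of the demand/supply cycle; none of these types exist in Ignite.
void rebalanceLoop(RebalanceState state) {
    while (!state.allPartitionsDoneOrMissed()) {
        SupplyBatch batch = state.awaitNextSupplyBatch(); // entries for MOVING partitions
        applyBatch(batch);                                // preload entries; own finished partitions
        sendDemand(state.remainingPartitions());          // request the next batch of entries
    }
}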
Use of org.apache.ignite.spi.IgniteSpiException in project ignite by apache.
The class TcpDiscoveryZookeeperIpFinder, method registerAddresses.
/**
 * {@inheritDoc}
 */
@Override
public void registerAddresses(Collection<InetSocketAddress> addrs) throws IgniteSpiException {
    init();

    if (log.isInfoEnabled())
        log.info("Registering addresses with ZooKeeper IP Finder: " + addrs);

    Set<InetSocketAddress> registrationsToIgnore = Sets.newHashSet();

    if (!allowDuplicateRegistrations) {
        try {
            for (ServiceInstance<IgniteInstanceDetails> sd : discovery.queryForInstances(serviceName))
                registrationsToIgnore.add(new InetSocketAddress(sd.getAddress(), sd.getPort()));
        }
        catch (Exception e) {
            log.warning("Error while finding currently registered services to avoid duplicate registrations", e);

            throw new IgniteSpiException(e);
        }
    }

    for (InetSocketAddress addr : addrs) {
        if (registrationsToIgnore.contains(addr))
            continue;

        try {
            ServiceInstance<IgniteInstanceDetails> si = ServiceInstance.<IgniteInstanceDetails>builder()
                .name(serviceName)
                .uriSpec(URI_SPEC)
                .address(addr.getAddress().getHostAddress())
                .port(addr.getPort())
                .build();

            ourInstances.put(addr, si);

            discovery.registerService(si);
        }
        catch (Exception e) {
            log.warning(String.format("Error while registering an address from ZooKeeper IP Finder " +
                "[message=%s,addresses=%s]", e.getMessage(), addr), e);
        }
    }
}
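For completeness, a minimal wiring sketch for this IP finder, assuming the setters exposed by the ignite-zookeeper module (setZkConnectionString, setAllowDuplicateRegistrations); the connection string is a placeholder:

// Hypothetical wiring example; the ZooKeeper connection string is a placeholder.
TcpDiscoveryZookeeperIpFinder zkIpFinder = new TcpDiscoveryZookeeperIpFinder();
zkIpFinder.setZkConnectionString("zk-host1:2181,zk-host2:2181");
zkIpFinder.setAllowDuplicateRegistrations(false); // skip addresses already registered, as above

TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
discoSpi.setIpFinder(zkIpFinder);

IgniteConfiguration igniteCfg = new IgniteConfiguration();
igniteCfg.setDiscoverySpi(discoSpi);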