Use of org.apache.ignite.IgniteException in project ignite by apache.
The class CacheJdbcBlobStore, method init().

/**
 * Initializes store.
 *
 * @throws IgniteException If failed to initialize.
 */
private void init() {
    if (initLatch.getCount() > 0) {
        if (initGuard.compareAndSet(false, true)) {
            if (log.isDebugEnabled())
                log.debug("Initializing cache store.");

            if (F.isEmpty(connUrl))
                throw new IgniteException("Failed to initialize cache store (connection URL is not provided).");

            if (!initSchema) {
                initLatch.countDown();

                return;
            }

            if (F.isEmpty(createTblQry))
                throw new IgniteException("Failed to initialize cache store (create table query is not provided).");

            Connection conn = null;
            Statement stmt = null;

            try {
                conn = openConnection(false);

                stmt = conn.createStatement();

                stmt.execute(createTblQry);

                conn.commit();

                initOk = true;
            }
            catch (SQLException e) {
                throw new IgniteException("Failed to create database table.", e);
            }
            finally {
                U.closeQuiet(stmt);

                closeConnection(conn);

                initLatch.countDown();
            }
        }
        else {
            try {
                U.await(initLatch);
            }
            catch (IgniteInterruptedCheckedException e) {
                throw new IgniteException(e);
            }
        }
    }

    if (!initOk)
        throw new IgniteException("Cache store was not properly initialized.");
}
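
The method above is a double-checked, latch-guarded lazy initializer: the first caller to win the compareAndSet performs the setup, every other caller blocks on the latch, and all callers re-check initOk afterwards. A minimal, self-contained sketch of the same idiom follows; the class and field names are illustrative, not part of the Ignite API.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public class OneTimeInit {
    /** Ensures only one thread runs the setup. */
    private final AtomicBoolean initGuard = new AtomicBoolean();

    /** Released once setup has finished, successfully or not. */
    private final CountDownLatch initLatch = new CountDownLatch(1);

    /** Visible to all threads once the latch is released. */
    private volatile boolean initOk;

    public void init() throws InterruptedException {
        if (initLatch.getCount() > 0) {
            if (initGuard.compareAndSet(false, true)) {
                try {
                    // One-time resource setup would go here.
                    initOk = true;
                }
                finally {
                    // Release waiters whether or not setup succeeded.
                    initLatch.countDown();
                }
            }
            else
                initLatch.await(); // Another thread is initializing; wait for it.
        }

        if (!initOk)
            throw new IllegalStateException("Initialization failed.");
    }
}

Releasing the latch in the finally block is the key design choice: a failed initialization still unblocks the waiters, who then fail fast on the initOk check instead of hanging.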
Use of org.apache.ignite.IgniteException in project ignite by apache.
The class ComputeTaskInternalFuture, method finishedFuture().

/**
 * @param ctx Context.
 * @param taskCls Task class.
 * @param e Error.
 * @return Finished task future.
 */
public static <R> ComputeTaskInternalFuture<R> finishedFuture(final GridKernalContext ctx, final Class<?> taskCls, IgniteCheckedException e) {
    assert ctx != null;
    assert taskCls != null;
    assert e != null;

    final long time = U.currentTimeMillis();
    final IgniteUuid id = IgniteUuid.fromUuid(ctx.localNodeId());

    ComputeTaskSession ses = new ComputeTaskSession() {
        @Override
        public String getTaskName() {
            return taskCls.getName();
        }

        @Override
        public UUID getTaskNodeId() {
            return ctx.localNodeId();
        }

        @Override
        public long getStartTime() {
            return time;
        }

        @Override
        public long getEndTime() {
            return time;
        }

        @Override
        public IgniteUuid getId() {
            return id;
        }

        @Override
        public ClassLoader getClassLoader() {
            return null;
        }

        @Override
        public Collection<ComputeJobSibling> getJobSiblings() throws IgniteException {
            return Collections.emptyList();
        }

        @Override
        public Collection<ComputeJobSibling> refreshJobSiblings() throws IgniteException {
            return Collections.emptyList();
        }

        @Nullable
        @Override
        public ComputeJobSibling getJobSibling(IgniteUuid jobId) throws IgniteException {
            return null;
        }

        @Override
        public void setAttribute(Object key, @Nullable Object val) throws IgniteException {
        }

        @Nullable
        @Override
        public <K, V> V getAttribute(K key) {
            return null;
        }

        @Override
        public void setAttributes(Map<?, ?> attrs) throws IgniteException {
            // No-op.
        }

        @Override
        public Map<?, ?> getAttributes() {
            return Collections.emptyMap();
        }

        @Override
        public void addAttributeListener(ComputeTaskSessionAttributeListener lsnr, boolean rewind) {
            // No-op.
        }

        @Override
        public boolean removeAttributeListener(ComputeTaskSessionAttributeListener lsnr) {
            return false;
        }

        @Override
        public <K, V> V waitForAttribute(K key, long timeout) throws InterruptedException {
            throw new InterruptedException("Session was closed.");
        }

        @Override
        public <K, V> boolean waitForAttribute(K key, @Nullable V val, long timeout) throws InterruptedException {
            throw new InterruptedException("Session was closed.");
        }

        @Override
        public Map<?, ?> waitForAttributes(Collection<?> keys, long timeout) throws InterruptedException {
            throw new InterruptedException("Session was closed.");
        }

        @Override
        public boolean waitForAttributes(Map<?, ?> attrs, long timeout) throws InterruptedException {
            throw new InterruptedException("Session was closed.");
        }

        @Override
        public void saveCheckpoint(String key, Object state) {
            throw new IgniteException("Session was closed.");
        }

        @Override
        public void saveCheckpoint(String key, Object state, ComputeTaskSessionScope scope, long timeout) {
            throw new IgniteException("Session was closed.");
        }

        @Override
        public void saveCheckpoint(String key, Object state, ComputeTaskSessionScope scope, long timeout, boolean overwrite) {
            throw new IgniteException("Session was closed.");
        }

        @Nullable
        @Override
        public <T> T loadCheckpoint(String key) throws IgniteException {
            throw new IgniteException("Session was closed.");
        }

        @Override
        public boolean removeCheckpoint(String key) throws IgniteException {
            throw new IgniteException("Session was closed.");
        }

        @Override
        public Collection<UUID> getTopology() {
            return Collections.emptyList();
        }

        @Override
        public IgniteFuture<?> mapFuture() {
            return new IgniteFinishedFutureImpl<Object>();
        }
    };

    ComputeTaskInternalFuture<R> fut = new ComputeTaskInternalFuture<>(ses, ctx);

    fut.onDone(e);

    return fut;
}
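
finishedFuture() builds a pre-completed future around a stub session whose blocking methods throw "Session was closed." On the public API side, a caller observes the wrapped IgniteCheckedException as an unchecked IgniteException when it calls get(). A hedged consumer sketch; the FutureResults helper below is hypothetical, not part of Ignite.

import org.apache.ignite.IgniteException;
import org.apache.ignite.lang.IgniteFuture;

final class FutureResults {
    private FutureResults() {
        // Utility class.
    }

    /** Returns the future's result, or {@code fallback} if the future failed. */
    static <R> R getOrDefault(IgniteFuture<R> fut, R fallback) {
        try {
            // For a future created already-failed, get() throws immediately.
            return fut.get();
        }
        catch (IgniteException e) {
            return fallback;
        }
    }
}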
Use of org.apache.ignite.IgniteException in project ignite by apache.
The class IgniteKernal, method createHadoopComponent().

/**
 * Create Hadoop component.
 *
 * @return Non-null Hadoop component: workable or no-op.
 * @throws IgniteCheckedException If the component is mandatory and cannot be initialized.
 */
private HadoopProcessorAdapter createHadoopComponent() throws IgniteCheckedException {
    boolean mandatory = cfg.getHadoopConfiguration() != null;

    if (mandatory) {
        if (cfg.isPeerClassLoadingEnabled())
            throw new IgniteCheckedException("Hadoop module cannot be used with peer class loading enabled " +
                "(set IgniteConfiguration.peerClassLoadingEnabled to \"false\").");

        HadoopProcessorAdapter res = IgniteComponentType.HADOOP.createIfInClassPath(ctx, true);

        res.validateEnvironment();

        return res;
    }
    else {
        HadoopProcessorAdapter cmp = null;

        if (!ctx.hadoopHelper().isNoOp() && cfg.isPeerClassLoadingEnabled()) {
            U.warn(log, "Hadoop module is found in classpath, but will not be started because peer class " +
                "loading is enabled (set IgniteConfiguration.peerClassLoadingEnabled to \"false\" if you want " +
                "to use Hadoop module).");
        }
        else {
            cmp = IgniteComponentType.HADOOP.createIfInClassPath(ctx, false);

            try {
                cmp.validateEnvironment();
            }
            catch (IgniteException | IgniteCheckedException e) {
                U.quietAndWarn(log, "Hadoop module will not start due to exception: " + e.getMessage());

                cmp = null;
            }
        }

        if (cmp == null)
            cmp = IgniteComponentType.HADOOP.create(ctx, true);

        return cmp;
    }
}
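
The branching above implements a mandatory-versus-optional component policy: a mandatory component must pass environment validation or node startup fails, while an optional one silently degrades to a no-op stand-in. A generic sketch of that policy; Component, ComponentFactory, and Components are hypothetical names, not Ignite types.

final class Components {
    interface Component {
        void validateEnvironment() throws Exception;
    }

    interface ComponentFactory {
        /** Returns the real component, or null if it is not on the classpath. */
        Component createIfAvailable() throws Exception;

        /** Returns a no-op stand-in. */
        Component createNoOp();
    }

    static Component create(boolean mandatory, ComponentFactory factory) throws Exception {
        if (mandatory) {
            Component c = factory.createIfAvailable();

            if (c == null)
                throw new Exception("Mandatory component is not on the classpath.");

            c.validateEnvironment(); // Propagate validation failures: startup must fail.

            return c;
        }

        Component c = factory.createIfAvailable();

        if (c != null) {
            try {
                c.validateEnvironment();
            }
            catch (Exception e) {
                c = null; // Optional component: degrade instead of failing startup.
            }
        }

        return c != null ? c : factory.createNoOp();
    }
}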
Use of org.apache.ignite.IgniteException in project ignite by apache.
The class IgfsTask, method map().

/** {@inheritDoc} */
@Nullable
@Override
public final Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, @Nullable IgfsTaskArgs<T> args) {
    assert ignite != null;
    assert args != null;

    IgniteFileSystem fs = ignite.fileSystem(args.igfsName());
    IgfsProcessorAdapter igfsProc = ((IgniteKernal) ignite).context().igfs();

    Map<ComputeJob, ClusterNode> splitMap = new HashMap<>();
    Map<UUID, ClusterNode> nodes = mapSubgrid(subgrid);

    for (IgfsPath path : args.paths()) {
        IgfsFile file = fs.info(path);

        if (file == null) {
            if (args.skipNonExistentFiles())
                continue;
            else
                throw new IgniteException("Failed to process IGFS file because it doesn't exist: " + path);
        }

        Collection<IgfsBlockLocation> aff = fs.affinity(path, 0, file.length(), args.maxRangeLength());

        long totalLen = 0;

        for (IgfsBlockLocation loc : aff) {
            ClusterNode node = null;

            for (UUID nodeId : loc.nodeIds()) {
                node = nodes.get(nodeId);

                if (node != null)
                    break;
            }

            if (node == null)
                throw new IgniteException("Failed to find any of block affinity nodes in subgrid [loc=" + loc +
                    ", subgrid=" + subgrid + ']');

            IgfsJob job = createJob(path, new IgfsFileRange(file.path(), loc.start(), loc.length()), args);

            if (job != null) {
                ComputeJob jobImpl = igfsProc.createJob(job, fs.name(), file.path(), loc.start(), loc.length(), args.recordResolver());

                splitMap.put(jobImpl, node);
            }

            totalLen += loc.length();
        }

        assert totalLen == file.length();
    }

    return splitMap;
}
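
The node-selection step in the inner loop is the crux: each IGFS block is replicated on several nodes, and the task pins the job to the first replica that is actually part of the subgrid. Extracted as a hedged helper; AffinityNodes and firstAffinityNode are hypothetical names, not Ignite API.

import java.util.Collection;
import java.util.Map;
import java.util.UUID;
import org.apache.ignite.IgniteException;
import org.apache.ignite.cluster.ClusterNode;

final class AffinityNodes {
    private AffinityNodes() {
        // Utility class.
    }

    /** Picks the first node hosting the block that is also present in the subgrid. */
    static ClusterNode firstAffinityNode(Collection<UUID> blockNodeIds, Map<UUID, ClusterNode> subgridById) {
        for (UUID nodeId : blockNodeIds) {
            ClusterNode node = subgridById.get(nodeId);

            if (node != null)
                return node;
        }

        // Mirrors the original behavior: no suitable replica is a hard failure.
        throw new IgniteException("None of the block's affinity nodes is part of the subgrid: " + blockNodeIds);
    }
}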
Use of org.apache.ignite.IgniteException in project ignite by apache.
The class GridifyDefaultRangeTask, method map().

/** {@inheritDoc} */
@Override
public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, GridifyRangeArgument arg) {
    assert !subgrid.isEmpty() : "Subgrid should not be empty: " + subgrid;
    assert ignite != null : "Grid instance could not be injected";

    if (splitSize < threshold && splitSize != 0 && threshold != 0) {
        throw new IgniteException("Incorrect Gridify annotation parameters. Value for parameter " +
            "'splitSize' should not be less than parameter 'threshold' [splitSize=" + splitSize +
            ", threshold=" + threshold + ']');
    }

    Collection<ClusterNode> exclNodes = new LinkedList<>();

    // Filter nodes.
    if (nodeFilter != null) {
        for (ClusterNode node : subgrid) {
            if (!nodeFilter.apply(node, ses))
                exclNodes.add(node);
        }

        if (exclNodes.size() == subgrid.size())
            throw new IgniteException("Failed to execute on grid where all nodes excluded.");
    }

    int inputPerNode = splitSize;

    // Calculate input elements size per node for default annotation splitSize parameter.
    if (splitSize <= 0) {
        // For iterable input splitSize will be assigned with threshold value.
        if (threshold > 0 && arg.getInputSize() == UNKNOWN_SIZE)
            inputPerNode = threshold;
        // Otherwise, splitSize equals (inputSize / nodesCount).
        else {
            assert arg.getInputSize() != UNKNOWN_SIZE;

            int gridSize = subgrid.size() - exclNodes.size();

            gridSize = (gridSize <= 0 ? subgrid.size() : gridSize);

            inputPerNode = calculateInputSizePerNode(gridSize, arg.getInputSize(), threshold, limitedSplit);

            if (log.isDebugEnabled()) {
                log.debug("Calculated input elements size per node [inputSize=" + arg.getInputSize() +
                    ", gridSize=" + gridSize + ", threshold=" + threshold + ", limitedSplit=" + limitedSplit +
                    ", inputPerNode=" + inputPerNode + ']');
            }
        }
    }

    GridifyArgumentBuilder argBuilder = new GridifyArgumentBuilder();

    Iterator<?> inputIter = arg.getInputIterator();

    while (inputIter.hasNext()) {
        Collection<Object> nodeInput = new LinkedList<>();

        for (int i = 0; i < inputPerNode && inputIter.hasNext(); i++)
            nodeInput.add(inputIter.next());

        // Create job argument.
        GridifyArgument jobArg = argBuilder.createJobArgument(arg, nodeInput);

        ComputeJob job = new GridifyJobAdapter(jobArg);

        mapper.send(job, balancer.getBalancedNode(job, exclNodes));
    }

    // The map method may return null because the jobs were already sent via the continuous mapper.
    return null;
}
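
The while loop drains the input iterator into chunks of inputPerNode elements and sends one job per chunk through the continuous mapper, which is why the method itself returns null. The chunking step in isolation, as a minimal sketch; the Batching class and batches helper are illustrative, not Ignite API.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;

final class Batching {
    private Batching() {
        // Utility class.
    }

    /** Drains the iterator into consecutive chunks of at most batchSize elements. */
    static <T> List<List<T>> batches(Iterator<T> it, int batchSize) {
        List<List<T>> out = new ArrayList<>();

        while (it.hasNext()) {
            List<T> batch = new LinkedList<>();

            for (int i = 0; i < batchSize && it.hasNext(); i++)
                batch.add(it.next());

            out.add(batch);
        }

        return out;
    }
}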