Use of org.elasticsearch.ElasticsearchException in project fess by codelibs.
The class FessEsClient, method delete:
public boolean delete(final String index, final String type, final String id, final long version) {
    try {
        final DeleteRequestBuilder builder = client.prepareDelete(index, type, id).setRefreshPolicy(RefreshPolicy.IMMEDIATE);
        if (version > 0) {
            builder.setVersion(version);
        }
        final DeleteResponse response = builder.execute().actionGet(ComponentUtil.getFessConfig().getIndexDeleteTimeout());
        return response.getResult() == Result.DELETED;
    } catch (final ElasticsearchException e) {
        throw new FessEsClientException("Failed to delete: " + index + "/" + type + "/" + id + "/" + version, e);
    }
}
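A minimal usage sketch follows, assuming a wired fessEsClient instance and a logger; the index, type, and id values are illustrative and not taken from the Fess source. Passing 0 as the version skips the version check, per the guard above.

// Hypothetical caller; fessEsClient, logger, and the document coordinates are assumptions.
try {
    final boolean deleted = fessEsClient.delete("sample_index", "doc", "doc-1", 0);
    if (!deleted) {
        logger.warn("Document doc-1 was not deleted (it may not exist).");
    }
} catch (final FessEsClientException e) {
    // The original ElasticsearchException is preserved as the cause.
    logger.error("Failed to delete doc-1.", e);
}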
Use of org.elasticsearch.ElasticsearchException in project spoon by INRIA.
The class HunspellService, method loadDictionary:
/**
* Loads the hunspell dictionary for the given locale.
*
* @param locale The locale of the hunspell dictionary to be loaded.
* @param nodeSettings The node level settings
* @param env The node environment (from which the conf path will be resolved)
* @return The loaded Hunspell dictionary
* @throws Exception when loading fails (due to IO errors or malformed dictionary files)
*/
private Dictionary loadDictionary(String locale, Settings nodeSettings, Environment env) throws Exception {
    if (logger.isDebugEnabled()) {
        logger.debug("Loading hunspell dictionary [{}]...", locale);
    }
    Path dicDir = hunspellDir.resolve(locale);
    if (FileSystemUtils.isAccessibleDirectory(dicDir, logger) == false) {
        throw new ElasticsearchException(String.format(Locale.ROOT, "Could not find hunspell dictionary [%s]", locale));
    }
    // merging node settings with hunspell dictionary specific settings
    Settings dictSettings = HUNSPELL_DICTIONARY_OPTIONS.get(nodeSettings);
    nodeSettings = loadDictionarySettings(dicDir, dictSettings.getByPrefix(locale + "."));
    boolean ignoreCase = nodeSettings.getAsBoolean("ignore_case", defaultIgnoreCase);
    Path[] affixFiles = FileSystemUtils.files(dicDir, "*.aff");
    if (affixFiles.length == 0) {
        throw new ElasticsearchException(String.format(Locale.ROOT, "Missing affix file for hunspell dictionary [%s]", locale));
    }
    if (affixFiles.length != 1) {
        throw new ElasticsearchException(String.format(Locale.ROOT, "Too many affix files exist for hunspell dictionary [%s]", locale));
    }
    InputStream affixStream = null;
    Path[] dicFiles = FileSystemUtils.files(dicDir, "*.dic");
    List<InputStream> dicStreams = new ArrayList<>(dicFiles.length);
    try {
        for (int i = 0; i < dicFiles.length; i++) {
            dicStreams.add(Files.newInputStream(dicFiles[i]));
        }
        affixStream = Files.newInputStream(affixFiles[0]);
        try (Directory tmp = new SimpleFSDirectory(env.tmpFile())) {
            return new Dictionary(tmp, "hunspell", affixStream, dicStreams, ignoreCase);
        }
    } catch (Exception e) {
        logger.error("Could not load hunspell dictionary [{}]", e, locale);
        throw e;
    } finally {
        IOUtils.close(affixStream);
        IOUtils.close(dicStreams);
    }
}
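The two affix-file checks above amount to an "exactly one *.aff file" rule. Below is a minimal standalone sketch of that guard using plain java.nio (DirectoryStream, Files, Path); the resolveAffixFile helper name is an assumption for illustration, not part of the original class.

// Illustrative helper: require exactly one *.aff file in the dictionary directory.
private static Path resolveAffixFile(final Path dicDir, final String locale) throws IOException {
    final List<Path> affixFiles = new ArrayList<>();
    try (DirectoryStream<Path> stream = Files.newDirectoryStream(dicDir, "*.aff")) {
        for (final Path affix : stream) {
            affixFiles.add(affix);
        }
    }
    if (affixFiles.isEmpty()) {
        throw new ElasticsearchException(String.format(Locale.ROOT, "Missing affix file for hunspell dictionary [%s]", locale));
    }
    if (affixFiles.size() != 1) {
        throw new ElasticsearchException(String.format(Locale.ROOT, "Too many affix files exist for hunspell dictionary [%s]", locale));
    }
    return affixFiles.get(0);
}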
Use of org.elasticsearch.ElasticsearchException in project elasticsearch-indexing-proxy by codelibs.
The class ProxyActionFilter, method getExecutor:
@SuppressWarnings("unchecked")
private <Request extends ActionRequest, Response extends ActionResponse> Supplier<Response> getExecutor(final Task task, final String action, final Request request) {
    if (BulkAction.NAME.equals(action)) {
        final long startTime = System.nanoTime();
        int count = 0;
        final BulkRequest req = (BulkRequest) request;
        for (final DocWriteRequest<?> subReq : req.requests()) {
            if (indexingProxyService.isTargetIndex(subReq.index())) {
                count++;
            }
        }
        if (count == 0) {
            return null;
        } else if (count != req.requests().size()) {
            throw new ElasticsearchException("Mixed target requests. ({} != {})", count, req.requests().size());
        }
        return () -> {
            final List<BulkItemResponse> responseList = new ArrayList<>(req.requests().size());
            for (int i = 0; i < req.requests().size(); i++) {
                final DocWriteRequest<?> dwr = req.requests().get(i);
                if (dwr instanceof IndexRequest) {
                    final IndexRequest r = (IndexRequest) dwr;
                    final String id = r.id() == null ? INDEX_UUID : r.id();
                    final IndexResponse response = new IndexResponse(new ShardId(new Index(r.index(), INDEX_UUID), 0), r.type(), id, r.version(), true);
                    responseList.add(new BulkItemResponse(i, r.opType(), response));
                } else if (dwr instanceof UpdateRequest) {
                    final UpdateRequest r = (UpdateRequest) dwr;
                    final String id = r.id() == null ? INDEX_UUID : r.id();
                    final UpdateResponse response = new UpdateResponse(new ShardId(new Index(r.index(), INDEX_UUID), 0), r.type(), id, r.version(), Result.CREATED);
                    responseList.add(new BulkItemResponse(i, r.opType(), response));
                } else if (dwr instanceof DeleteRequest) {
                    final DeleteRequest r = (DeleteRequest) dwr;
                    final String id = r.id() == null ? INDEX_UUID : r.id();
                    final DeleteResponse response = new DeleteResponse(new ShardId(new Index(r.index(), INDEX_UUID), 0), r.type(), id, r.version(), true);
                    response.setShardInfo(new ReplicationResponse.ShardInfo(1, 1, ReplicationResponse.EMPTY));
                    responseList.add(new BulkItemResponse(i, r.opType(), response));
                } else {
                    responseList.add(new BulkItemResponse(i, dwr.opType(), new BulkItemResponse.Failure(dwr.index(), dwr.type(), dwr.id(), new ElasticsearchException("Unknown request: " + dwr))));
                }
            }
            return (Response) new BulkResponse(responseList.toArray(new BulkItemResponse[responseList.size()]), (System.nanoTime() - startTime) / 1000000);
        };
    } else if (DeleteAction.NAME.equals(action)) {
        final DeleteRequest req = (DeleteRequest) request;
        if (!indexingProxyService.isTargetIndex(req.index())) {
            return null;
        }
        return () -> {
            final String id = req.id() == null ? INDEX_UUID : req.id();
            final DeleteResponse res = new DeleteResponse(new ShardId(new Index(req.index(), INDEX_UUID), 0), req.type(), id, req.version(), true);
            res.setShardInfo(new ReplicationResponse.ShardInfo(1, 1, ReplicationResponse.EMPTY));
            return (Response) res;
        };
    } else if (DeleteByQueryAction.NAME.equals(action)) {
        final long startTime = System.nanoTime();
        int count = 0;
        final DeleteByQueryRequest req = (DeleteByQueryRequest) request;
        for (final String index : req.indices()) {
            if (indexingProxyService.isTargetIndex(index)) {
                count++;
            }
        }
        if (count == 0) {
            return null;
        } else if (count != req.indices().length) {
            throw new ElasticsearchException("Mixed target requests. ({} != {})", count, req.indices().length);
        }
        return () -> {
            return (Response) new BulkByScrollResponse(TimeValue.timeValueNanos(System.nanoTime() - startTime), new BulkByScrollTask.Status(null, 0, 0, 0, 0, 0, 0, 0, 0, 0, TimeValue.ZERO, 0, null, TimeValue.ZERO), Collections.emptyList(), Collections.emptyList(), false);
        };
    } else if (IndexAction.NAME.equals(action)) {
        final IndexRequest req = (IndexRequest) request;
        if (!indexingProxyService.isTargetIndex(req.index())) {
            return null;
        }
        return () -> {
            final String id = req.id() == null ? INDEX_UUID : req.id();
            return (Response) new IndexResponse(new ShardId(new Index(req.index(), INDEX_UUID), 0), req.type(), id, req.version(), true);
        };
    } else if (UpdateAction.NAME.equals(action)) {
        final UpdateRequest req = (UpdateRequest) request;
        if (!indexingProxyService.isTargetIndex(req.index())) {
            return null;
        }
        return () -> {
            final String id = req.id() == null ? INDEX_UUID : req.id();
            return (Response) new UpdateResponse(new ShardId(new Index(req.index(), INDEX_UUID), 0), req.type(), id, req.version(), Result.CREATED);
        };
    } else if (UpdateByQueryAction.NAME.equals(action)) {
        final long startTime = System.nanoTime();
        int count = 0;
        final UpdateByQueryRequest req = (UpdateByQueryRequest) request;
        for (final String index : req.indices()) {
            if (indexingProxyService.isTargetIndex(index)) {
                count++;
            }
        }
        if (count == 0) {
            return null;
        } else if (count != req.indices().length) {
            throw new ElasticsearchException("Mixed target requests. ({} != {})", count, req.indices().length);
        }
        return () -> {
            return (Response) new BulkByScrollResponse(TimeValue.timeValueNanos(System.nanoTime() - startTime), new BulkByScrollTask.Status(null, 0, 0, 0, 0, 0, 0, 0, 0, 0, TimeValue.ZERO, 0, null, TimeValue.ZERO), Collections.emptyList(), Collections.emptyList(), false);
        };
    }
    return null;
}
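A hedged sketch of how the returned Supplier could be consumed in the filter's apply method is shown below; this is an assumption about the surrounding ProxyActionFilter wiring (which also persists the request for forwarding), not a copy of it. When getExecutor returns null the request falls through to the normal chain; otherwise the synthesized response is handed straight to the listener.

// Illustrative only; the real filter also records the request for the proxy before responding.
@Override
public <Request extends ActionRequest, Response extends ActionResponse> void apply(final Task task, final String action,
        final Request request, final ActionListener<Response> listener, final ActionFilterChain<Request, Response> chain) {
    final Supplier<Response> executor = getExecutor(task, action, request);
    if (executor == null) {
        chain.proceed(task, action, request, listener);
        return;
    }
    try {
        listener.onResponse(executor.get());
    } catch (final Exception e) {
        listener.onFailure(e);
    }
}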
Use of org.elasticsearch.ElasticsearchException in project elasticsearch-indexing-proxy by codelibs.
The class RestIndexingProxyProcessAction, method sendResponse:
protected void sendResponse(final RestChannel channel, final Map<String, Object> params, final boolean pretty) {
    try {
        final XContentBuilder builder = JsonXContent.contentBuilder();
        if (pretty) {
            builder.prettyPrint();
        }
        builder.startObject();
        builder.field("acknowledged", true);
        if (params != null) {
            for (final Map.Entry<String, Object> entry : params.entrySet()) {
                builder.field(entry.getKey(), entry.getValue());
            }
        }
        builder.endObject();
        channel.sendResponse(new BytesRestResponse(OK, builder));
    } catch (final IOException e) {
        throw new ElasticsearchException("Failed to create a response.", e);
    }
}
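For reference, a hypothetical call and the pretty-printed JSON it would produce; the file_position key is made up for demonstration.

// Illustrative caller for the method above.
final Map<String, Object> params = new HashMap<>();
params.put("file_position", 42);
sendResponse(channel, params, true);
// Responds 200 OK with a body along the lines of:
// {
//   "acknowledged" : true,
//   "file_position" : 42
// }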
Use of org.elasticsearch.ElasticsearchException in project elasticsearch-indexing-proxy by codelibs.
The class RequestSender, method processRequests:
private void processRequests(final StreamInput streamInput) {
    heartbeat = System.currentTimeMillis();
    if (terminated) {
        IOUtils.closeQuietly(streamInput);
        logger.warn("[Sender][" + index + "] Terminate DocIndexer.");
        return;
    }
    requestPosition++;
    try {
        if (logger.isDebugEnabled()) {
            logger.debug("RequestSender(" + index + ") is processing requests.");
        }
        if (streamInput.available() > 0) {
            final short classType = streamInput.readShort();
            switch (classType) {
            case RequestUtils.TYPE_DELETE:
                processDeleteRequest(streamInput);
                break;
            case RequestUtils.TYPE_DELETE_BY_QUERY:
                processDeleteByQueryRequest(streamInput);
                break;
            case RequestUtils.TYPE_INDEX:
                processIndexRequest(streamInput);
                break;
            case RequestUtils.TYPE_UPDATE:
                processUpdateRequest(streamInput);
                break;
            case RequestUtils.TYPE_UPDATE_BY_QUERY:
                processUpdateByQueryRequest(streamInput);
                break;
            case RequestUtils.TYPE_BULK:
                processBulkRequest(streamInput);
                break;
            default:
                throw new ElasticsearchException("Unknown request type: " + classType);
            }
        } else {
            IOUtils.closeQuietly(streamInput);
            long fileSize = 0;
            if (FileAccessUtils.existsFile(path)) {
                fileSize = AccessController.doPrivileged((PrivilegedAction<Long>) () -> {
                    try {
                        return Files.size(path);
                    } catch (final IOException e) {
                        throw new ElasticsearchException("Failed to read " + path.toAbsolutePath(), e);
                    }
                });
            }
            logger.info("[Sender][{}] Indexed: {} {} {}", index, path.toAbsolutePath(), requestPosition - 1, fileSize);
            processNext(getNextValue(filePosition));
        }
    } catch (final Exception e) {
        IOUtils.closeQuietly(streamInput);
        retryWithError("Failed to access streamInput.", e);
        // retry
    }
}
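The switch above implies a simple framing for the proxy file: a short type code followed by the serialized request body. A hedged writer-side sketch, assuming the RequestUtils.TYPE_* constants shown above; the writeRequest helper and the use of Writeable are illustrative, not the project's actual file-format code.

// Illustrative framing sketch: each record is [short typeCode][request payload].
private static void writeRequest(final StreamOutput out, final short classType, final Writeable request) throws IOException {
    out.writeShort(classType); // e.g. RequestUtils.TYPE_INDEX
    request.writeTo(out); // the reader side (processRequests) dispatches on this type code
}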