Use of org.elasticsearch.action.ActionListener in project crate by crate.
From class BulkRetryCoordinatorTest, method testNoPendingOperationsOnFailedExecution.
@Test
public void testNoPendingOperationsOnFailedExecution() throws Exception {
    ThreadPool threadPool = mock(ThreadPool.class);
    BulkRetryCoordinator coordinator = new BulkRetryCoordinator(threadPool);
    BulkRequestExecutor<ShardUpsertRequest> executor = (request, listener) -> {
        listener.onFailure(new InterruptedException("Dummy execution failed"));
    };
    final SettableFuture<ShardResponse> future = SettableFuture.create();
    coordinator.retry(shardRequest(), executor, new ActionListener<ShardResponse>() {

        @Override
        public void onResponse(ShardResponse shardResponse) {
        }

        @Override
        public void onFailure(Throwable e) {
            future.set(null);
        }
    });
    ShardResponse response = future.get();
    assertNull(response);
    assertEquals(0, coordinator.numPendingOperations());
}
Use of org.elasticsearch.action.ActionListener in project crate by crate.
From class BulkRetryCoordinatorTest, method testParallelSuccessfulExecution.
@Test
public void testParallelSuccessfulExecution() throws Exception {
    ThreadPool threadPool = mock(ThreadPool.class);
    final BulkRetryCoordinator coordinator = new BulkRetryCoordinator(threadPool);
    final BulkRequestExecutor<ShardUpsertRequest> executor = (request, listener) -> {
        listener.onResponse(new ShardResponse());
    };
    final CountDownLatch latch = new CountDownLatch(1000);
    ExecutorService executorService = Executors.newFixedThreadPool(10, daemonThreadFactory("DummyThreadPool"));
    for (int i = 0; i < 1000; i++) {
        executorService.submit(new Runnable() {

            @Override
            public void run() {
                coordinator.retry(shardRequest(), executor, new ActionListener<ShardResponse>() {

                    @Override
                    public void onResponse(ShardResponse shardResponse) {
                        latch.countDown();
                    }

                    @Override
                    public void onFailure(Throwable e) {
                    }
                });
            }
        });
    }
    latch.await();
    assertEquals(0, coordinator.numPendingOperations());
    // shutdown() must precede awaitTermination(), otherwise the await always times out
    executorService.shutdown();
    executorService.awaitTermination(5, TimeUnit.SECONDS);
}
Use of org.elasticsearch.action.ActionListener in project pancm_project by xuwujing.
From class EsHighLevelRestTest1, method delete.
/**
 * Delete example.
 *
 * @throws IOException
 */
private static void delete() throws IOException {
    String type = "_doc";
    String index = "test1";
    // unique id of the document
    String id = "1";
    DeleteRequest deleteRequest = new DeleteRequest();
    deleteRequest.id(id);
    deleteRequest.index(index);
    deleteRequest.type(type);
    // set the timeout
    deleteRequest.timeout(TimeValue.timeValueMinutes(2));
    // Set the refresh policy to "wait_for": keep the request open until a refresh makes its
    // changes visible to search. This policy is compatible with high indexing and search
    // throughput, but it makes the request wait for a refresh before responding.
    deleteRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL);
    // synchronous delete
    DeleteResponse deleteResponse = client.delete(deleteRequest, RequestOptions.DEFAULT);
    /*
     * Asynchronous delete
     */
    // listener invoked when the async call completes
    ActionListener<DeleteResponse> listener = new ActionListener<DeleteResponse>() {

        @Override
        public void onResponse(DeleteResponse deleteResponse) {
            System.out.println("Response: " + deleteResponse);
        }

        @Override
        public void onFailure(Exception e) {
            System.out.println("Async delete failed: " + e.getMessage());
        }
    };
    // asynchronous delete
    // client.deleteAsync(deleteRequest, RequestOptions.DEFAULT, listener);
    ReplicationResponse.ShardInfo shardInfo = deleteResponse.getShardInfo();
    // if fewer shards succeeded than the total, some shards are still processing or failed
    if (shardInfo.getTotal() != shardInfo.getSuccessful()) {
        System.out.println("Total shards: " + shardInfo.getTotal());
        System.out.println("Successful shards: " + shardInfo.getSuccessful());
    }
    if (shardInfo.getFailed() > 0) {
        for (ReplicationResponse.ShardInfo.Failure failure : shardInfo.getFailures()) {
            // per-shard failure reason, e.g. for logging
            String reason = failure.reason();
        }
    }
    System.out.println("Delete finished!");
}
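
The asynchronous call above is commented out because the code that follows consumes the synchronous deleteResponse. As a minimal sketch (assuming the same static RestHighLevelClient client used throughout EsHighLevelRestTest1), the listener can also be built from two lambdas with ActionListener.wrap instead of an anonymous class:

    // Sketch only: async delete with ActionListener.wrap(onResponse, onFailure),
    // assuming the same static `client` and `deleteRequest` as above.
    client.deleteAsync(deleteRequest, RequestOptions.DEFAULT, ActionListener.wrap(
        response -> System.out.println("Response: " + response),
        e -> System.out.println("Async delete failed: " + e.getMessage())));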
Use of org.elasticsearch.action.ActionListener in project pancm_project by xuwujing.
From class EsHighLevelRestTest1, method bulk.
/**
 * Bulk operation example.
 *
 * @throws IOException
 * @throws InterruptedException
 */
private static void bulk() throws IOException, InterruptedException {
    String type = "_doc";
    String index = "student";
    BulkRequest request = new BulkRequest();
    // sample document body (built here but not used by the requests below)
    Map<String, Object> map = new HashMap<>();
    map.put("uid", 123);
    map.put("age", 11);
    map.put("name", "虚无境");
    map.put("class", 9);
    map.put("grade", 400);
    map.put("createtm", "2019-11-04");
    map.put("updatetm", "2019-11-05 21:04:55.268");
    // bulk index; existing documents with the same id are overwritten
    request.add(new IndexRequest(index, type, "1").source(XContentType.JSON, "field", "foo"));
    request.add(new IndexRequest(index, type, "2").source(XContentType.JSON, "field", "bar"));
    request.add(new IndexRequest(index, type, "3").source(XContentType.JSON, "field", "baz"));
    // update, delete, and index operations can be mixed in one bulk request.
    // docAsUpsert(true) updates the document if it exists and inserts it otherwise;
    // with false, a missing document is simply not updated
    request.add(new UpdateRequest(index, type, "2").doc(XContentType.JSON, "field", "test").docAsUpsert(true));
    request.add(new DeleteRequest(index, type, "3"));
    request.add(new IndexRequest(index, type, "4").source(XContentType.JSON, "field", "baz"));
    BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT);
    ActionListener<BulkResponse> listener3 = new ActionListener<BulkResponse>() {

        @Override
        public void onResponse(BulkResponse response) {
            System.out.println("====" + response.buildFailureMessage());
        }

        @Override
        public void onFailure(Exception e) {
            System.out.println("====---" + e.getMessage());
        }
    };
    client.bulkAsync(request, RequestOptions.DEFAULT, listener3);
    // quick check whether operations failed; true means at least one failure
    if (bulkResponse.hasFailures()) {
        System.out.println("At least one operation failed!");
    }
    // iterate over the results and handle each item according to its operation type
    for (BulkItemResponse bulkItemResponse : bulkResponse) {
        DocWriteResponse itemResponse = bulkItemResponse.getResponse();
        // failed items carry no response, so handle them first and skip the casts below
        if (bulkItemResponse.isFailed()) {
            BulkItemResponse.Failure failure = bulkItemResponse.getFailure();
            System.out.println("Operation failed: " + failure.getMessage());
            continue;
        }
        if (bulkItemResponse.getOpType() == DocWriteRequest.OpType.INDEX || bulkItemResponse.getOpType() == DocWriteRequest.OpType.CREATE) {
            IndexResponse indexResponse = (IndexResponse) itemResponse;
            System.out.println("Index result: " + indexResponse.toString());
        } else if (bulkItemResponse.getOpType() == DocWriteRequest.OpType.UPDATE) {
            UpdateResponse updateResponse = (UpdateResponse) itemResponse;
            System.out.println("Update result: " + updateResponse.toString());
        } else if (bulkItemResponse.getOpType() == DocWriteRequest.OpType.DELETE) {
            DeleteResponse deleteResponse = (DeleteResponse) itemResponse;
            System.out.println("Delete result: " + deleteResponse.toString());
        }
    }
    System.out.println("Bulk execution finished!");
    /*
     * BulkProcessor example
     */
    // listener for the bulk processor
    BulkProcessor.Listener listener = new BulkProcessor.Listener() {

        // called before each execution of a BulkRequest; gives access to the number
        // of operations the request is about to execute
        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
            int numberOfActions = request.numberOfActions();
            logger.debug("Executing bulk [{}] with {} requests", executionId, numberOfActions);
        }

        // called after each execution of a BulkRequest; allows checking whether the
        // BulkResponse contains failures
        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
            if (response.hasFailures()) {
                logger.warn("Bulk [{}] executed with failures", executionId);
            } else {
                logger.debug("Bulk [{}] completed in {} milliseconds", executionId, response.getTook().getMillis());
            }
        }

        // called when a BulkRequest fails; gives access to the failure
        @Override
        public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
            logger.error("Failed to execute bulk", failure);
        }
    };
    // the consumer must send the request it is handed (request2), not the outer `request`
    BiConsumer<BulkRequest, ActionListener<BulkResponse>> bulkConsumer = (request2, bulkListener) -> client.bulkAsync(request2, RequestOptions.DEFAULT, bulkListener);
    BulkProcessor.Builder builder = BulkProcessor.builder(bulkConsumer, listener);
    // flush a new bulk request once this many actions have been added (defaults to 1000, use -1 to disable it)
    builder.setBulkActions(500);
    // flush a new bulk request once the added actions reach this size (defaults to 5MB, use -1 to disable it)
    builder.setBulkSize(new ByteSizeValue(1L, ByteSizeUnit.MB));
    // number of concurrent requests allowed to execute (defaults to 1, use 0 to allow only a single request)
    builder.setConcurrentRequests(0);
    // flush any pending BulkRequest once this interval passes (defaults to not set)
    builder.setFlushInterval(TimeValue.timeValueSeconds(10L));
    // constant backoff policy that initially waits for 1 second and retries up to 3 times
    builder.setBackoffPolicy(BackoffPolicy.constantBackoff(TimeValue.timeValueSeconds(1L), 3));
    // build the processor from the configured builder
    BulkProcessor bulkProcessor = builder.build();
    IndexRequest one = new IndexRequest(index, type, "1").source(XContentType.JSON, "title", "In which order are my Elasticsearch queries executed?");
    IndexRequest two = new IndexRequest(index, type, "2").source(XContentType.JSON, "title", "Current status and upcoming changes in Elasticsearch");
    IndexRequest three = new IndexRequest(index, type, "3").source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch");
    bulkProcessor.add(one);
    bulkProcessor.add(two);
    bulkProcessor.add(three);
    // returns true if all bulk requests completed, false if the waiting time
    // elapsed before they all finished
    boolean terminated = bulkProcessor.awaitClose(30L, TimeUnit.SECONDS);
    System.out.println("All bulk requests completed: " + terminated);
}
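
bulkAsync returns immediately, so listener3 above may fire after bulk() has already moved on. A minimal sketch of blocking until it completes, using org.elasticsearch.action.LatchedActionListener (the latch wiring is an assumption for illustration, not part of the original example):

    // Sketch only: decorate listener3 with a LatchedActionListener so the caller
    // can wait for the asynchronous bulk call to finish before proceeding.
    CountDownLatch latch = new CountDownLatch(1);
    client.bulkAsync(request, RequestOptions.DEFAULT, new LatchedActionListener<>(listener3, latch));
    latch.await();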
Use of org.elasticsearch.action.ActionListener in project elasticsearch-indexing-proxy by codelibs.
From class IndexingProxyService, method launchRequestSender.
private void launchRequestSender(final String index, final long filePosition, final long version, final ActionListener<Map<String, Object>> listener) {
    if (logger.isDebugEnabled()) {
        logger.debug("Launching RequestSender(" + index + ")");
    }
    // record this node and the file position for the index in the .idxproxy index
    final Map<String, Object> source = new HashMap<>();
    source.put(IndexingProxyPlugin.NODE_NAME, nodeName());
    source.put(IndexingProxyPlugin.FILE_POSITION, filePosition);
    source.put(IndexingProxyPlugin.TIMESTAMP, new Date());
    source.put(DOC_TYPE, "index");
    final IndexRequestBuilder builder = client.prepareIndex(IndexingProxyPlugin.INDEX_NAME, IndexingProxyPlugin.TYPE_NAME, index)
            .setSource(source)
            .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL);
    if (version > 0) {
        builder.setVersion(version);
    } else {
        builder.setCreate(true);
    }
    builder.execute(wrap(res -> {
        if (res.getResult() == Result.CREATED || res.getResult() == Result.UPDATED) {
            // replace any existing RequestSender for this index and schedule the new one
            final RequestSender sender = new RequestSender(settings, client, threadPool, namedWriteableRegistry, nodeName(), dataPath, index, dataFileFormat, docSenderMap, logger);
            final RequestSender oldSender = docSenderMap.put(index, sender);
            if (oldSender != null) {
                oldSender.terminate();
            }
            threadPool.schedule(TimeValue.ZERO, Names.GENERIC, sender);
            listener.onResponse(source);
        } else {
            listener.onFailure(new ElasticsearchException("Failed to update .idxproxy index: " + res));
        }
    }, listener::onFailure));
}
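
The wrap(...) above is most likely the statically imported org.elasticsearch.action.ActionListener.wrap(onResponse, onFailure) helper, which builds a listener from two lambdas. A hypothetical caller sketch (the index name, positions, and log messages are made up for illustration, not taken from the plugin source):

    // Hypothetical usage sketch only.
    launchRequestSender("sample_index", 0L, 0L, ActionListener.wrap(
        source -> logger.info("RequestSender launched with {}", source),
        e -> logger.error("Failed to launch RequestSender", e)));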