Use of org.elasticsearch.action.update.UpdateRequest in project pancm_project by xuwujing:
class EsHighLevelRestTest1, method update().
/**
 * 更新操作 (update example).
 *
 * Updates document id "1" in index "test1" (type "_doc") using a Map as the
 * partial document. With docAsUpsert(true) the document is inserted when it
 * does not exist yet.
 *
 * @throws IOException if the request to Elasticsearch fails
 */
private static void update() throws IOException {
    String type = "_doc";
    String index = "test1";
    // 唯一编号 (unique document id)
    String id = "1";
    // Fixed typo in local name: "upateRequest" -> "updateRequest".
    UpdateRequest updateRequest = new UpdateRequest();
    updateRequest.id(id);
    updateRequest.index(index);
    updateRequest.type(type);
    // 依旧可以使用Map这种集合作为更新条件 (a Map can be used as the partial document)
    Map<String, Object> jsonMap = new HashMap<>();
    jsonMap.put("uid", 12345);
    jsonMap.put("phone", 123456789019L);
    jsonMap.put("msgcode", 2);
    jsonMap.put("sendtime", "2019-03-14 01:57:04");
    jsonMap.put("message", "xuwujing study Elasticsearch");
    updateRequest.doc(jsonMap);
    // upsert 方法表示如果数据不存在,那么就新增一条 (insert when the document is absent)
    updateRequest.docAsUpsert(true);
    // Response intentionally ignored in this example.
    client.update(updateRequest, RequestOptions.DEFAULT);
    System.out.println("更新成功!");
}
Use of org.elasticsearch.action.update.UpdateRequest in project pancm_project by xuwujing:
class EsHighLevelRestTest1, method bulk().
/**
 * 批量操作示例 (bulk operations example).
 *
 * Demonstrates three things:
 * <ol>
 *   <li>a plain {@code BulkRequest} mixing index/update/delete actions,
 *       executed both synchronously and asynchronously;</li>
 *   <li>iterating the per-item responses and reporting failures;</li>
 *   <li>a {@code BulkProcessor} configured with flush/size/back-off settings.</li>
 * </ol>
 *
 * @throws IOException          if a synchronous request fails
 * @throws InterruptedException if waiting for the BulkProcessor to close is interrupted
 */
private static void bulk() throws IOException, InterruptedException {
    // 类型 (document type)
    String type = "_doc";
    String index = "student";
    BulkRequest request = new BulkRequest();
    // NOTE: the original built an unused student Map here; removed as dead code.
    // 批量新增,存在会直接覆盖 (bulk index; existing documents are overwritten)
    request.add(new IndexRequest(index, type, "1").source(XContentType.JSON, "field", "foo"));
    request.add(new IndexRequest(index, type, "2").source(XContentType.JSON, "field", "bar"));
    request.add(new IndexRequest(index, type, "3").source(XContentType.JSON, "field", "baz"));
    // 可以进行修改/删除/新增 操作
    // docAsUpsert 为true表示存在更新,不存在插入,为false表示不存在就是不做更新
    request.add(new UpdateRequest(index, type, "2").doc(XContentType.JSON, "field", "test").docAsUpsert(true));
    request.add(new DeleteRequest(index, type, "3"));
    request.add(new IndexRequest(index, type, "4").source(XContentType.JSON, "field", "baz"));
    BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT);
    ActionListener<BulkResponse> listener3 = new ActionListener<BulkResponse>() {

        @Override
        public void onResponse(BulkResponse response) {
            System.out.println("====" + response.buildFailureMessage());
        }

        @Override
        public void onFailure(Exception e) {
            System.out.println("====---" + e.getMessage());
        }
    };
    // 异步执行同一个请求 (async variant of the same request, for demonstration)
    client.bulkAsync(request, RequestOptions.DEFAULT, listener3);
    // 可以快速检查一个或多个操作是否失败 true是有至少一个失败!
    if (bulkResponse.hasFailures()) {
        System.out.println("有一个操作失败!");
    }
    // 对处理结果进行遍历操作并根据不同的操作进行处理
    for (BulkItemResponse bulkItemResponse : bulkResponse) {
        // BUG FIX: the original printed "新增失败!/更新失败!/删除失败!" for EVERY
        // item, successes included. Only failed items are reported now.
        if (bulkItemResponse.isFailed()) {
            BulkItemResponse.Failure failure = bulkItemResponse.getFailure();
            if (bulkItemResponse.getOpType() == DocWriteRequest.OpType.INDEX || bulkItemResponse.getOpType() == DocWriteRequest.OpType.CREATE) {
                System.out.println("新增失败!" + failure.getMessage());
            } else if (bulkItemResponse.getOpType() == DocWriteRequest.OpType.UPDATE) {
                System.out.println("更新失败!" + failure.getMessage());
            } else if (bulkItemResponse.getOpType() == DocWriteRequest.OpType.DELETE) {
                System.out.println("删除失败!" + failure.getMessage());
            }
        }
    }
    System.out.println("批量执行成功!");
    /*
     * 批量执行处理器相关示例代码 (BulkProcessor example)
     */
    // 批量处理器的监听器设置 (listener invoked around each flushed bulk request)
    BulkProcessor.Listener listener = new BulkProcessor.Listener() {

        // 在执行BulkRequest的每次执行之前调用,这个方法允许知道将要在BulkRequest中执行的操作的数量
        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
            int numberOfActions = request.numberOfActions();
            logger.debug("Executing bulk [{}] with {} requests", executionId, numberOfActions);
        }

        // 在每次执行BulkRequest之后调用,这个方法允许知道BulkResponse是否包含错误
        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
            if (response.hasFailures()) {
                logger.warn("Bulk [{}] executed with failures", executionId);
            } else {
                logger.debug("Bulk [{}] completed in {} milliseconds", executionId, response.getTook().getMillis());
            }
        }

        // 如果BulkRequest失败,则调用该方法,该方法允许知道失败
        @Override
        public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
            logger.error("Failed to execute bulk", failure);
        }
    };
    // BUG FIX: the consumer must forward its OWN parameters. The original
    // captured the outer `request` and discarded `request2`, so every
    // BulkProcessor flush would have re-sent the same outer request.
    BiConsumer<BulkRequest, ActionListener<BulkResponse>> bulkConsumer = (request2, bulkListener) -> client.bulkAsync(request2, RequestOptions.DEFAULT, bulkListener);
    BulkProcessor.Builder builder = BulkProcessor.builder(bulkConsumer, listener);
    // 根据当前添加的操作数量设置刷新新批量请求的时间(默认为1000,使用-1禁用它)
    builder.setBulkActions(500);
    // 根据当前添加的操作大小设置刷新新批量请求的时间(默认为5Mb,使用-1禁用)
    builder.setBulkSize(new ByteSizeValue(1L, ByteSizeUnit.MB));
    // 设置允许执行的并发请求数量(默认为1,使用0只允许执行单个请求)
    builder.setConcurrentRequests(0);
    // 设置刷新间隔如果间隔通过,则刷新任何挂起的BulkRequest(默认为未设置)
    builder.setFlushInterval(TimeValue.timeValueSeconds(10L));
    // 设置一个常量后退策略,该策略最初等待1秒并重试最多3次。
    builder.setBackoffPolicy(BackoffPolicy.constantBackoff(TimeValue.timeValueSeconds(1L), 3));
    // BUG FIX: build the processor FROM the configured builder. The original
    // built a second, unconfigured processor first and never used the settings.
    BulkProcessor bulkProcessor = builder.build();
    IndexRequest one = new IndexRequest(index, type, "1").source(XContentType.JSON, "title", "In which order are my Elasticsearch queries executed?");
    IndexRequest two = new IndexRequest(index, type, "2").source(XContentType.JSON, "title", "Current status and upcoming changes in Elasticsearch");
    IndexRequest three = new IndexRequest(index, type, "3").source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch");
    bulkProcessor.add(one);
    bulkProcessor.add(two);
    bulkProcessor.add(three);
    // 如果所有大容量请求都已完成,则该方法返回true;如果在所有大容量请求完成之前的等待时间已经过去,则返回false
    boolean terminated = bulkProcessor.awaitClose(30L, TimeUnit.SECONDS);
    System.out.println("请求的响应结果:" + terminated);
}
Use of org.elasticsearch.action.update.UpdateRequest in project pancm_project by xuwujing:
class EsAggregationSearchTest, method bulk().
/**
 * 批量操作示例 (bulk upsert example).
 *
 * Builds 10 student documents and upserts them (docAsUpsert) in a single
 * bulk request. Document 5 deliberately receives the invalid date
 * "2019-11-31" as test data.
 *
 * @throws IOException if the bulk request fails
 */
private static void bulk() throws IOException {
    // 类型 (document type)
    String type = "_doc";
    String index = "student";
    BulkRequest request = new BulkRequest();
    int k = 10;
    LocalDateTime ldt = LocalDateTime.now();
    // DateTimeFormatter is immutable and thread-safe; create each pattern
    // once instead of on every loop iteration as the original did.
    DateTimeFormatter dayFormat = DateTimeFormatter.ofPattern("yyyy-MM-dd");
    DateTimeFormatter timeFormat = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS");
    // Build each document and add it to the bulk request directly (the
    // original buffered everything in an intermediate list first).
    for (int i = 1; i <= k; i++) {
        Map<String, Object> map = new HashMap<>();
        map.put("uid", i);
        map.put("age", i);
        map.put("name", "虚无境" + (i % 3));
        map.put("class", i % 10);
        map.put("grade", 400 + i);
        map.put("createtm", ldt.plusDays(i).format(dayFormat));
        map.put("updatetm", ldt.plusDays(i).format(timeFormat));
        if (i == 5) {
            // 故意写入非法日期,用于测试异常数据 (deliberately invalid date for testing)
            map.put("updatetm", "2019-11-31 21:04:55.268");
        }
        String id = map.get("uid").toString();
        // 可以进行修改/删除/新增 操作
        // docAsUpsert 为true表示存在更新,不存在插入,为false表示不存在就是不做更新
        request.add(new UpdateRequest(index, type, id).doc(map, XContentType.JSON).docAsUpsert(true).retryOnConflict(5));
    }
    client.bulk(request, RequestOptions.DEFAULT);
    System.out.println("批量执行成功!");
}
Use of org.elasticsearch.action.update.UpdateRequest in project elasticsearch-indexing-proxy by codelibs:
class IndexingProxyService, method dumpRequests().
/**
 * Reads the data file for the given position and renders every serialized
 * request it contains as one line of text, delivering the full dump through
 * {@code listener.onResponse}. Unknown request types, missing files, and
 * I/O errors are reported through {@code listener.onFailure}.
 *
 * @param filePosition index used to derive the data file name
 * @param listener     receives either the rendered dump or a failure;
 *                     invoked exactly once
 */
public void dumpRequests(final int filePosition, ActionListener<String> listener) {
    final Path path = dataPath.resolve(String.format(dataFileFormat, filePosition) + IndexingProxyPlugin.DATA_EXTENTION);
    if (FileAccessUtils.existsFile(path)) {
        // Opening the stream may require elevated privileges under the ES security manager.
        try (IndexingProxyStreamInput streamInput = AccessController.doPrivileged((PrivilegedAction<IndexingProxyStreamInput>) () -> {
            try {
                return new IndexingProxyStreamInput(Files.newInputStream(path), namedWriteableRegistry);
            } catch (final IOException e) {
                throw new ElasticsearchException("Failed to read " + path.toAbsolutePath(), e);
            }
        })) {
            final StringBuilder buf = new StringBuilder(10000);
            // Each record is a short type tag followed by the serialized request.
            while (streamInput.available() > 0) {
                final short classType = streamInput.readShort();
                switch(classType) {
                    case RequestUtils.TYPE_DELETE:
                        DeleteRequest deleteRequest = RequestUtils.createDeleteRequest(client, streamInput, null).request();
                        buf.append(deleteRequest.toString());
                        break;
                    case RequestUtils.TYPE_DELETE_BY_QUERY:
                        DeleteByQueryRequest deleteByQueryRequest = RequestUtils.createDeleteByQueryRequest(client, streamInput, null).request();
                        buf.append(deleteByQueryRequest.toString());
                        buf.append(' ');
                        buf.append(deleteByQueryRequest.getSearchRequest().toString().replace("\n", ""));
                        break;
                    case RequestUtils.TYPE_INDEX:
                        IndexRequest indexRequest = RequestUtils.createIndexRequest(client, streamInput, null).request();
                        buf.append(indexRequest.toString());
                        break;
                    case RequestUtils.TYPE_UPDATE:
                        UpdateRequest updateRequest = RequestUtils.createUpdateRequest(client, streamInput, null).request();
                        buf.append("update {[").append(updateRequest.index()).append("][").append(updateRequest.type()).append("][").append(updateRequest.id()).append("] source[").append(updateRequest.toXContent(JsonXContent.contentBuilder(), ToXContent.EMPTY_PARAMS).string()).append("]}");
                        break;
                    case RequestUtils.TYPE_UPDATE_BY_QUERY:
                        UpdateByQueryRequest updateByQueryRequest = RequestUtils.createUpdateByQueryRequest(client, streamInput, null).request();
                        buf.append(updateByQueryRequest.toString());
                        buf.append(' ');
                        buf.append(updateByQueryRequest.getSearchRequest().toString().replace("\n", ""));
                        break;
                    case RequestUtils.TYPE_BULK:
                        BulkRequest bulkRequest = RequestUtils.createBulkRequest(client, streamInput, null).request();
                        buf.append("bulk [");
                        buf.append(bulkRequest.requests().stream().map(req -> {
                            if (req instanceof UpdateRequest) {
                                UpdateRequest upreq = (UpdateRequest) req;
                                try {
                                    // UpdateRequest has no useful toString(); render it via XContent.
                                    return "update {[" + upreq.index() + "][" + upreq.type() + "][" + upreq.id() + "] source[" + upreq.toXContent(JsonXContent.contentBuilder(), ToXContent.EMPTY_PARAMS).string() + "]}";
                                } catch (IOException e) {
                                    return e.getMessage();
                                }
                            } else {
                                return req.toString();
                            }
                        }).collect(Collectors.joining(",")));
                        buf.append("]");
                        break;
                    default:
                        // BUG FIX: the original fell through here and kept looping,
                        // eventually calling onResponse as well — invoking the
                        // listener twice. Report the failure and stop; the
                        // try-with-resources still closes the stream.
                        listener.onFailure(new ElasticsearchException("Unknown request type: " + classType));
                        return;
                }
                buf.append('\n');
            }
            listener.onResponse(buf.toString());
        } catch (IOException e) {
            listener.onFailure(e);
        }
    } else {
        // BUG FIX: report the missing FILE, not the data directory.
        listener.onFailure(new ElasticsearchException("The data file does not exist: " + path.toAbsolutePath()));
    }
}
Use of org.elasticsearch.action.update.UpdateRequest in project elasticsearch-indexing-proxy by codelibs:
class RequestUtils, method createUpdateRequest().
/**
 * Deserializes an {@code UpdateRequest} from the given stream into a fresh
 * builder obtained from the client.
 *
 * @param client      client used to create the update request builder
 * @param streamInput stream positioned at a serialized update request
 * @param index       when non-null, overrides the index read from the stream
 * @return the builder wrapping the deserialized request
 * @throws IOException if reading from the stream fails
 */
public static UpdateRequestBuilder createUpdateRequest(final Client client, final StreamInput streamInput, final String index) throws IOException {
    final UpdateRequestBuilder updateBuilder = client.prepareUpdate();
    updateBuilder.request().readFrom(streamInput);
    if (index != null) {
        // Caller-supplied index takes precedence over the serialized one.
        updateBuilder.request().index(index);
    }
    return updateBuilder;
}
Aggregations