use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.index.IndexResponse in project storm-elastic-search by hmsonline.
the class ElasticSearchBolt method execute.
@Override
public void execute(Tuple tuple) {
String id = null;
String indexName = null;
String type = null;
String document = null;
try {
id = this.tupleMapper.mapToId(tuple);
indexName = this.tupleMapper.mapToIndex(tuple);
type = this.tupleMapper.mapToType(tuple);
document = this.tupleMapper.mapToDocument(tuple);
byte[] byteBuffer = document.getBytes();
IndexResponse response = this.client.prepareIndex(indexName, type, id).setSource(byteBuffer).execute().actionGet();
LOG.debug("Indexed Document[ " + id + "], Type[" + type + "], Index[" + indexName + "], Version [" + response.getVersion() + "]");
collector.ack(tuple);
} catch (Exception e) {
LOG.error("Unable to index Document[ " + id + "], Type[" + type + "], Index[" + indexName + "]", e);
// The tuple is acked even on failure so it is not replayed; the error is only logged.
collector.ack(tuple);
}
}
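The bolt delegates all field extraction to the injected tupleMapper. A minimal sketch of what a mapper implementation might look like, assuming a TupleMapper interface with the four methods called above (the class name, the tuple field names, and the Tuple import are illustrative only; older Storm releases use backtype.storm.tuple.Tuple):

import org.apache.storm.tuple.Tuple; // backtype.storm.tuple.Tuple on older Storm versions

// Hypothetical mapper that pulls each value from a named tuple field.
public class FieldBasedTupleMapper implements TupleMapper {
    @Override
    public String mapToId(Tuple tuple) {
        return tuple.getStringByField("id");
    }

    @Override
    public String mapToIndex(Tuple tuple) {
        return tuple.getStringByField("index");
    }

    @Override
    public String mapToType(Tuple tuple) {
        return tuple.getStringByField("type");
    }

    @Override
    public String mapToDocument(Tuple tuple) {
        return tuple.getStringByField("document");
    }
}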
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.index.IndexResponse in project pancm_project by xuwujing.
the class EsHighLevelRestTest1 method bulk.
/**
* Bulk operation example.
*
* @throws IOException          if a request to Elasticsearch fails
* @throws InterruptedException if waiting for the bulk processor to close is interrupted
*/
private static void bulk() throws IOException, InterruptedException {
// Document type
String type = "_doc";
String index = "student";
BulkRequest request = new BulkRequest();
// Example document fields (built here for illustration; not referenced by the bulk requests below)
Map<String, Object> map = new HashMap<>();
map.put("uid", 123);
map.put("age", 11);
map.put("name", "虚无境");
map.put("class", 9);
map.put("grade", 400);
map.put("createtm", "2019-11-04");
map.put("updatetm", "2019-11-05 21:04:55.268");
// Bulk index operations; existing documents with the same id are overwritten
request.add(new IndexRequest(index, type, "1").source(XContentType.JSON, "field", "foo"));
request.add(new IndexRequest(index, type, "2").source(XContentType.JSON, "field", "bar"));
request.add(new IndexRequest(index, type, "3").source(XContentType.JSON, "field", "baz"));
// A bulk request can mix index, update and delete operations
// docAsUpsert(true): update the document if it exists, insert it if it does not; false: do nothing if it does not exist
request.add(new UpdateRequest(index, type, "2").doc(XContentType.JSON, "field", "test").docAsUpsert(true));
request.add(new DeleteRequest(index, type, "3"));
request.add(new IndexRequest(index, type, "4").source(XContentType.JSON, "field", "baz"));
BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT);
// Listener for the asynchronous variant of the bulk call
ActionListener<BulkResponse> listener3 = new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse response) {
System.out.println("====" + response.buildFailureMessage());
}
@Override
public void onFailure(Exception e) {
System.out.println("====---" + e.getMessage());
}
};
// The same request is also submitted asynchronously for demonstration purposes
client.bulkAsync(request, RequestOptions.DEFAULT, listener3);
// Quick check whether one or more operations failed; true means at least one failure
if (bulkResponse.hasFailures()) {
System.out.println("At least one operation failed!");
}
// Iterate over the individual results and handle each one according to its operation type
for (BulkItemResponse bulkItemResponse : bulkResponse) {
DocWriteResponse itemResponse = bulkItemResponse.getResponse();
// Handle failed items; the Failure object carries the error details for this item
if (bulkItemResponse.isFailed()) {
BulkItemResponse.Failure failure = bulkItemResponse.getFailure();
}
if (bulkItemResponse.getOpType() == DocWriteRequest.OpType.INDEX || bulkItemResponse.getOpType() == DocWriteRequest.OpType.CREATE) {
IndexResponse indexResponse = (IndexResponse) itemResponse;
System.out.println("新增失败!" + indexResponse.toString());
} else if (bulkItemResponse.getOpType() == DocWriteRequest.OpType.UPDATE) {
UpdateResponse updateResponse = (UpdateResponse) itemResponse;
System.out.println("更新失败!" + updateResponse.toString());
} else if (bulkItemResponse.getOpType() == DocWriteRequest.OpType.DELETE) {
DeleteResponse deleteResponse = (DeleteResponse) itemResponse;
System.out.println("删除失败!" + deleteResponse.toString());
}
}
System.out.println("批量执行成功!");
/*
* Example code for the BulkProcessor
*/
// Listener used by the bulk processor
BulkProcessor.Listener listener = new BulkProcessor.Listener() {
// Called before each BulkRequest is executed; gives access to the number of operations about to be executed
@Override
public void beforeBulk(long executionId, BulkRequest request) {
int numberOfActions = request.numberOfActions();
logger.debug("Executing bulk [{}] with {} requests", executionId, numberOfActions);
}
// Called after each BulkRequest has executed; allows checking whether the BulkResponse contains failures
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
if (response.hasFailures()) {
logger.warn("Bulk [{}] executed with failures", executionId);
} else {
logger.debug("Bulk [{}] completed in {} milliseconds", executionId, response.getTook().getMillis());
}
}
// Called when a BulkRequest fails as a whole; gives access to the failure
@Override
public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
logger.error("Failed to execute bulk", failure);
}
};
BiConsumer<BulkRequest, ActionListener<BulkResponse>> bulkConsumer = (request2, bulkListener) -> client.bulkAsync(request2, RequestOptions.DEFAULT, bulkListener);
// Create a builder for the bulk processor and configure it before building
BulkProcessor.Builder builder = BulkProcessor.builder(bulkConsumer, listener);
// Flush a new bulk request once this many operations have been added (default 1000, -1 to disable)
builder.setBulkActions(500);
// Flush a new bulk request once the added operations reach this size (default 5MB, -1 to disable)
builder.setBulkSize(new ByteSizeValue(1L, ByteSizeUnit.MB));
// Number of concurrent requests allowed (default 1; 0 means only a single request may execute at a time)
builder.setConcurrentRequests(0);
// Flush interval: any pending BulkRequest is flushed when the interval elapses (not set by default)
builder.setFlushInterval(TimeValue.timeValueSeconds(10L));
// Set a constant back-off policy that initially waits 1 second and retries up to 3 times
builder.setBackoffPolicy(BackoffPolicy.constantBackoff(TimeValue.timeValueSeconds(1L), 3));
// Build the processor with the configured options
BulkProcessor bulkProcessor = builder.build();
IndexRequest one = new IndexRequest(index, type, "1").source(XContentType.JSON, "title", "In which order are my Elasticsearch queries executed?");
IndexRequest two = new IndexRequest(index, type, "2").source(XContentType.JSON, "title", "Current status and upcoming changes in Elasticsearch");
IndexRequest three = new IndexRequest(index, type, "3").source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch");
bulkProcessor.add(one);
bulkProcessor.add(two);
bulkProcessor.add(three);
// Returns true if all bulk requests finished; returns false if the wait time elapsed before they all completed
boolean terminated = bulkProcessor.awaitClose(30L, TimeUnit.SECONDS);
System.out.println("请求的响应结果:" + terminated);
}
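The snippet above assumes a static RestHighLevelClient field named client. A minimal sketch of how that client might be created, with host and port as illustrative assumptions (imports shown for the stock Elasticsearch 7 client; under the shaded Graylog packages the prefix would be org.graylog.shaded.elasticsearch7):

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

// Assumed initialization of the static client used by bulk(); adjust host/port to your cluster.
private static RestHighLevelClient client;

private static void initClient() {
    client = new RestHighLevelClient(
            RestClient.builder(new HttpHost("127.0.0.1", 9200, "http")));
}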
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.index.IndexResponse in project elasticsearch-river-rabbitmq by elastic.
the class RabbitMQIntegrationTest method launchTest.
private void launchTest(XContentBuilder river, final int numMessages, final int numDocsPerMessage, InjectorHook injectorHook, boolean delete, boolean update) throws Exception {
final String dbName = getDbName();
logger.info(" --> create index [{}]", dbName);
try {
client().admin().indices().prepareDelete(dbName).get();
} catch (IndexMissingException e) {
// No worries.
}
try {
createIndex(dbName);
} catch (IndexMissingException e) {
// No worries.
}
ensureGreen(dbName);
logger.info(" -> Checking rabbitmq running");
// We try to connect to RabbitMQ.
// If it's not launched, we don't fail the test but only log it
Channel channel = null;
Connection connection = null;
try {
logger.info(" --> connecting to rabbitmq");
ConnectionFactory factory = new ConnectionFactory();
factory.setHost("localhost");
factory.setPort(AMQP.PROTOCOL.PORT);
connection = factory.newConnection();
} catch (ConnectException ce) {
throw new Exception("RabbitMQ service is not launched on localhost:" + AMQP.PROTOCOL.PORT + ". Can not start Integration test. " + "Launch `rabbitmq-server`.", ce);
}
try {
logger.info(" -> Creating [{}] channel", dbName);
channel = connection.createChannel();
logger.info(" -> Creating queue [{}]", dbName);
channel.queueDeclare(getDbName(), true, false, false, null);
// We purge the queue in case anything is left over from a previous run
logger.info(" -> Purging [{}] channel", dbName);
channel.queuePurge(getDbName());
logger.info(" -> Put [{}] messages with [{}] documents each = [{}] docs", numMessages, numDocsPerMessage, numMessages * numDocsPerMessage);
final Set<String> removed = new HashSet<String>();
int nbUpdated = 0;
for (int i = 0; i < numMessages; i++) {
StringBuffer message = new StringBuffer();
for (int j = 0; j < numDocsPerMessage; j++) {
if (logger.isTraceEnabled()) {
logger.trace(" -> Indexing document [{}] - [{}][{}]", i + "_" + j, i, j);
}
message.append("{ \"index\" : { \"_index\" : \"" + dbName + "\", \"_type\" : \"typex\", \"_id\" : \"" + i + "_" + j + "\" } }\n");
message.append("{ \"field\" : \"" + i + "_" + j + "\",\"numeric\" : " + i * j + " }\n");
// Sometimes we update a document
if (update && rarely()) {
String id = between(0, i) + "_" + between(0, j);
// We can only update if it has not been removed :)
if (!removed.contains(id)) {
logger.debug(" -> Updating document [{}] - [{}][{}]", id, i, j);
message.append("{ \"update\" : { \"_index\" : \"" + dbName + "\", \"_type\" : \"typex\", \"_id\" : \"" + id + "\" } }\n");
message.append("{ \"doc\": { \"foo\" : \"bar\", \"field2\" : \"" + i + "_" + j + "\" }}\n");
nbUpdated++;
}
}
// Sometimes we delete a document
if (delete && rarely()) {
String id = between(0, i) + "_" + between(0, j);
if (!removed.contains(id)) {
logger.debug(" -> Removing document [{}] - [{}][{}]", id, i, j);
message.append("{ \"delete\" : { \"_index\" : \"" + dbName + "\", \"_type\" : \"typex\", \"_id\" : \"" + id + "\" } }\n");
removed.add(id);
}
}
}
channel.basicPublish("", dbName, null, message.toString().getBytes(StandardCharsets.UTF_8));
}
logger.info(" -> We removed [{}] docs and updated [{}] docs", removed.size(), nbUpdated);
if (injectorHook != null) {
logger.info(" -> Injecting extra data");
injectorHook.inject();
}
logger.info(" --> create river");
IndexResponse indexResponse = index("_river", dbName, "_meta", river);
assertTrue(indexResponse.isCreated());
logger.info("--> checking that river [{}] was created", dbName);
assertThat(awaitBusy(new Predicate<Object>() {
public boolean apply(Object obj) {
GetResponse response = client().prepareGet(RiverIndexName.Conf.DEFAULT_INDEX_NAME, dbName, "_status").get();
return response.isExists();
}
}, 5, TimeUnit.SECONDS), equalTo(true));
// Check that the docs are actually processed by the river
logger.info(" --> waiting for expected number of docs: [{}]", numDocsPerMessage * numMessages - removed.size());
assertThat(awaitBusy(new Predicate<Object>() {
public boolean apply(Object obj) {
try {
refresh();
int expected = numDocsPerMessage * numMessages - removed.size();
CountResponse response = client().prepareCount(dbName).get();
logger.debug(" -> got {} docs, expected {}", response.getCount(), expected);
return response.getCount() == expected;
} catch (IndexMissingException e) {
return false;
}
}
}, 20, TimeUnit.SECONDS), equalTo(true));
} finally {
if (channel != null && channel.isOpen()) {
channel.close();
}
if (connection != null && connection.isOpen()) {
connection.close();
}
// Deletes the river
GetResponse response = client().prepareGet(RiverIndexName.Conf.DEFAULT_INDEX_NAME, dbName, "_status").get();
if (response.isExists()) {
client().prepareDelete(RiverIndexName.Conf.DEFAULT_INDEX_NAME, dbName, "_meta").get();
client().prepareDelete(RiverIndexName.Conf.DEFAULT_INDEX_NAME, dbName, "_status").get();
}
assertThat(awaitBusy(new Predicate<Object>() {
public boolean apply(Object obj) {
GetResponse response = client().prepareGet(RiverIndexName.Conf.DEFAULT_INDEX_NAME, dbName, "_status").get();
return response.isExists();
}
}, 5, TimeUnit.SECONDS), equalTo(false));
}
}
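For reference, each message body published by the loop above is plain Elasticsearch bulk (NDJSON) syntax, which the river forwards as a bulk request. A minimal sketch of one such message, using a placeholder index name and ids (the test derives them from dbName, i and j):

// Illustrative only: a two-action message body in bulk/NDJSON format.
String body =
        "{ \"index\" : { \"_index\" : \"mydb\", \"_type\" : \"typex\", \"_id\" : \"0_0\" } }\n" +
        "{ \"field\" : \"0_0\", \"numeric\" : 0 }\n" +
        "{ \"delete\" : { \"_index\" : \"mydb\", \"_type\" : \"typex\", \"_id\" : \"0_1\" } }\n";
channel.basicPublish("", "mydb", null, body.getBytes(StandardCharsets.UTF_8));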
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.index.IndexResponse in project baseio by generallycloud.
the class TestPut method test.
@SuppressWarnings("resource")
public static void test() throws Exception {
Settings esSettings = Settings.builder().build();
/*
This connection method assumes the x-pack plugin is not installed; if x-pack is installed, see {@link ElasticsearchXPackClient}.
1. The Java (transport) client communicates over TCP on port 9300.
2. The HTTP client communicates over HTTP on port 9200.
*/
TransportClient client = new PreBuiltTransportClient(esSettings).addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9300));
System.out.println("ElasticsearchClient connected successfully");
String index = "twitter";
IndexResponse putResponse = client.prepareIndex(index, "tweet", "1").setSource(jsonBuilder().startObject().field("user", "kimchy").field("postDate", new Date()).field("message", "trying out Elasticsearch").endObject()).get();
// Index name
String _index = putResponse.getIndex();
// Type name
String _type = putResponse.getType();
// Document ID (generated or not)
String _id = putResponse.getId();
// Version (if it's the first time you index this document, you will get: 1)
long _version = putResponse.getVersion();
// status holds the REST status of the index operation.
RestStatus status = putResponse.status();
System.out.println(_index);
System.out.println(_type);
System.out.println(_id);
System.out.println(_version);
System.out.println(status);
GetRequest getRequest = new GetRequest("twitter", _type, _id);
GetResponse getResponse = client.get(getRequest).get();
System.out.println(getResponse.getSource());
client.close();
}
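For comparison with the port-9200 HTTP option mentioned in the comment above, here is a rough sketch of the same index call using the high-level REST client. This is not part of the baseio project; the class names and the RequestOptions parameter follow the Elasticsearch 7 REST client API, and the corresponding imports are assumed:

// HTTP client on port 9200 (the transport client above talks TCP on 9300).
RestHighLevelClient restClient = new RestHighLevelClient(
        RestClient.builder(new HttpHost("localhost", 9200, "http")));
IndexRequest indexRequest = new IndexRequest("twitter", "tweet", "1")
        .source(jsonBuilder().startObject()
                .field("user", "kimchy")
                .field("postDate", new Date())
                .field("message", "trying out Elasticsearch")
                .endObject());
IndexResponse restResponse = restClient.index(indexRequest, RequestOptions.DEFAULT);
System.out.println(restResponse.status());
restClient.close();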
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.index.IndexResponse in project bw-calendar-engine by Bedework.
the class BwIndexEsImpl method indexEvent.
private IndexResponse indexEvent(final EventInfo ei) throws CalFacadeException {
try {
/* If it's not recurring or a stand-alone instance index it */
final BwEvent ev = ei.getEvent();
if (!ev.testRecurring() && (ev.getRecurrenceId() == null)) {
return indexEvent(ei, ItemKind.master, ev.getDtstart(), ev.getDtend(), // ev.getRecurrenceId(),
null, null);
}
if (ev.getRecurrenceId() != null) {
error("Not implemented - index of single override");
return null;
}
/* Delete all instances of this event: we'll do a delete by query
* We need to find all with the same path and uid.
*/
/* TODO - do a query for all recurrence ids and delete the ones
we don't want.
*/
deleteEvent(ei);
/* Create a list of all instance date/times before overrides. */
final int maxYears;
final int maxInstances;
final DateLimits dl = new DateLimits();
if (ev.getPublick()) {
maxYears = unauthpars.getMaxYears();
maxInstances = unauthpars.getMaxInstances();
} else {
maxYears = authpars.getMaxYears();
maxInstances = authpars.getMaxInstances();
}
final RecurPeriods rp = RecurUtil.getPeriods(ev, maxYears, maxInstances);
if (rp.instances.isEmpty()) {
// No instances for an alleged recurring event.
return null;
// throw new CalFacadeException(CalFacadeException.noRecurrenceInstances);
}
final String stzid = ev.getDtstart().getTzid();
int instanceCt = maxInstances;
final boolean dateOnly = ev.getDtstart().getDateType();
/* First build a table of overrides so we can skip these later
*/
final Map<String, String> overrides = new HashMap<>();
/*
if (!Util.isEmpty(ei.getOverrideProxies())) {
for (BwEvent ov: ei.getOverrideProxies()) {
overrides.put(ov.getRecurrenceId(), ov.getRecurrenceId());
}
}
*/
final IndexResponse iresp;
if (!Util.isEmpty(ei.getOverrides())) {
for (final EventInfo oei : ei.getOverrides()) {
final BwEvent ov = oei.getEvent();
overrides.put(ov.getRecurrenceId(), ov.getRecurrenceId());
final String start;
if (ov.getDtstart().getDateType()) {
start = ov.getRecurrenceId().substring(0, 8);
} else {
start = ov.getRecurrenceId();
}
final BwDateTime rstart = BwDateTime.makeBwDateTime(ov.getDtstart().getDateType(), start, stzid);
final BwDateTime rend = rstart.addDuration(BwDuration.makeDuration(ov.getDuration()));
/*iresp = */
indexEvent(oei, ItemKind.override, rstart, rend, ov.getRecurrenceId(), dl);
instanceCt--;
}
}
for (final Period p : rp.instances) {
String dtval = p.getStart().toString();
if (dateOnly) {
dtval = dtval.substring(0, 8);
}
final BwDateTime rstart = BwDateTime.makeBwDateTime(dateOnly, dtval, stzid);
if (overrides.get(rstart.getDate()) != null) {
// Overrides indexed separately - skip this instance.
continue;
}
final String recurrenceId = rstart.getDate();
dtval = p.getEnd().toString();
if (dateOnly) {
dtval = dtval.substring(0, 8);
}
final BwDateTime rend = BwDateTime.makeBwDateTime(dateOnly, dtval, stzid);
/*iresp = */
indexEvent(ei, ItemKind.entity, rstart, rend, recurrenceId, dl);
instanceCt--;
if (instanceCt == 0) {
// That's all you're getting from me
break;
}
}
// Emit the master event with a date range covering the entire period.
final BwDateTime start = BwDateTime.makeBwDateTime(dateOnly, dl.minStart, stzid);
final BwDateTime end = BwDateTime.makeBwDateTime(dateOnly, dl.maxEnd, stzid);
iresp = indexEvent(ei, ItemKind.master, start, end, null, null);
return iresp;
} catch (final CalFacadeException cfe) {
throw cfe;
} catch (final Throwable t) {
throw new CalFacadeException(t);
}
}
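DateLimits is a Bedework-internal helper; from its use here (dl.minStart and dl.maxEnd feed the master entry's date range) it appears to accumulate the earliest start and latest end across all indexed instances. A rough, hypothetical sketch of that idea, assuming the date values compare lexicographically (method names and class are illustrative, not the project's actual code):

// Hypothetical illustration of a DateLimits-style accumulator.
class DateRangeLimits {
    String minStart;
    String maxEnd;

    void checkMin(String start) {
        if (minStart == null || start.compareTo(minStart) < 0) {
            minStart = start;
        }
    }

    void checkMax(String end) {
        if (maxEnd == null || end.compareTo(maxEnd) > 0) {
            maxEnd = end;
        }
    }
}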