Use of org.elasticsearch.common.base.Predicate in the project rssriver by dadoonet:
class RssRiverAllParametersTest, method existSomeDocs.
/**
 * Waits until at least one document exists in the given index, failing the assertion otherwise.
 *
 * @param index  the index whose documents are counted
 * @param source optional feed name to filter on; when {@code null}, all documents are counted
 * @throws InterruptedException if interrupted while polling
 */
private void existSomeDocs(final String index, final String source) throws InterruptedException {
    // Poll for up to 10 seconds before considering this a failing test
    assertThat("Some documents should exist...", awaitBusy(new Predicate<Object>() {
        @Override
        public boolean apply(Object o) {
            // Filter on the feed name when a source is provided, otherwise match everything
            QueryBuilder query = (source == null)
                    ? QueryBuilders.matchAllQuery()
                    : QueryBuilders.queryString(source).defaultField("feedname");
            long hits = client().prepareCount(index).setQuery(query).execute().actionGet().getCount();
            return hits > 0;
        }
    }, 10, TimeUnit.SECONDS), equalTo(true));
}
Use of org.elasticsearch.common.base.Predicate in the project rssriver by dadoonet:
class RssRiverAllParametersTest, method waitForChange.
/**
 * Waits until the river's stored last-update date changes from its current value.
 * Reads the current {@code rss.<lastupdate_id>} field from the river document, then polls
 * (up to 10 seconds) until a subsequent read returns a different value.
 *
 * @param riverName     name of the river document in the {@code _river} index
 * @param lastupdate_id id of the last-update document / suffix of the {@code rss.*} field
 * @throws InterruptedException if interrupted while polling
 */
private void waitForChange(final String riverName, final String lastupdate_id) throws InterruptedException {
    String date = "";
    GetResponse getResponse = client().prepareGet("_river", riverName, lastupdate_id).setFields("rss." + lastupdate_id).execute().actionGet();
    // Guard against a missing field as well as a missing document — without the field
    // null-check, getField(...).getValue() would throw a NullPointerException when the
    // document exists but the field does not (the polling predicate below already checks this).
    if (getResponse.isExists() && getResponse.getField("rss." + lastupdate_id) != null) {
        date = getResponse.getField("rss." + lastupdate_id).getValue().toString();
    }
    final String finalDate = date;
    assertThat("Date should have changed " + date, awaitBusy(new Predicate<Object>() {
        @Override
        public boolean apply(Object o) {
            GetResponse getResponse = client().prepareGet("_river", riverName, lastupdate_id).setFields("rss." + lastupdate_id).execute().actionGet();
            String new_date = "";
            if (getResponse.isExists() && getResponse.getField("rss." + lastupdate_id) != null) {
                new_date = getResponse.getField("rss." + lastupdate_id).getValue().toString();
            }
            // Changed (or newly appeared/disappeared) value means the river updated
            return !finalDate.equals(new_date);
        }
    }, 10, TimeUnit.SECONDS), equalTo(true));
}
Use of org.elasticsearch.common.base.Predicate in the project elasticsearch-river-rabbitmq by elastic:
class RabbitMQIntegrationTest, method launchTest.
/**
 * End-to-end RabbitMQ river test: (re)creates the target index, publishes bulk-formatted
 * messages (index plus optional update/delete actions) to a RabbitMQ queue, starts the river,
 * then waits for the expected document count. Cleans up the channel, the connection and the
 * river in a finally block.
 *
 * @param river             river definition to store as {@code _river/<db>/_meta}
 * @param numMessages       number of RabbitMQ messages to publish
 * @param numDocsPerMessage number of index actions per message
 * @param injectorHook      optional hook to inject extra data before the river starts; may be null
 * @param delete            when true, occasionally append delete actions
 * @param update            when true, occasionally append update actions
 * @throws Exception if RabbitMQ is not reachable on localhost or any step fails
 */
private void launchTest(XContentBuilder river, final int numMessages, final int numDocsPerMessage, InjectorHook injectorHook, boolean delete, boolean update) throws Exception {
    final String dbName = getDbName();
    logger.info(" --> create index [{}]", dbName);
    try {
        client().admin().indices().prepareDelete(dbName).get();
    } catch (IndexMissingException e) {
        // No worries: nothing to delete on the first run.
    }
    try {
        createIndex(dbName);
    } catch (IndexMissingException e) {
        // No worries.
    }
    ensureGreen(dbName);
    logger.info(" -> Checking rabbitmq running");
    // We try to connect to RabbitMQ.
    // If it's not launched, we fail fast with an actionable message.
    Channel channel = null;
    Connection connection = null;
    try {
        logger.info(" --> connecting to rabbitmq");
        ConnectionFactory factory = new ConnectionFactory();
        factory.setHost("localhost");
        factory.setPort(AMQP.PROTOCOL.PORT);
        connection = factory.newConnection();
    } catch (ConnectException ce) {
        throw new Exception("RabbitMQ service is not launched on localhost:" + AMQP.PROTOCOL.PORT + ". Can not start Integration test. " + "Launch `rabbitmq-server`.", ce);
    }
    try {
        logger.info(" -> Creating [{}] channel", dbName);
        channel = connection.createChannel();
        logger.info(" -> Creating queue [{}]", dbName);
        channel.queueDeclare(getDbName(), true, false, false, null);
        // We purge the queue in case of something is remaining there
        logger.info(" -> Purging [{}] channel", dbName);
        channel.queuePurge(getDbName());
        logger.info(" -> Put [{}] messages with [{}] documents each = [{}] docs", numMessages, numDocsPerMessage, numMessages * numDocsPerMessage);
        final Set<String> removed = new HashSet<String>();
        int nbUpdated = 0;
        for (int i = 0; i < numMessages; i++) {
            // StringBuilder (not StringBuffer): single-threaded building, no sync needed.
            StringBuilder message = new StringBuilder();
            for (int j = 0; j < numDocsPerMessage; j++) {
                if (logger.isTraceEnabled()) {
                    logger.trace(" -> Indexing document [{}] - [{}][{}]", i + "_" + j, i, j);
                }
                message.append("{ \"index\" : { \"_index\" : \"" + dbName + "\", \"_type\" : \"typex\", \"_id\" : \"" + i + "_" + j + "\" } }\n");
                message.append("{ \"field\" : \"" + i + "_" + j + "\",\"numeric\" : " + i * j + " }\n");
                // Sometime we update a document
                if (update && rarely()) {
                    String id = between(0, i) + "_" + between(0, j);
                    // We can only update if it has not been removed :)
                    if (!removed.contains(id)) {
                        logger.debug(" -> Updating document [{}] - [{}][{}]", id, i, j);
                        message.append("{ \"update\" : { \"_index\" : \"" + dbName + "\", \"_type\" : \"typex\", \"_id\" : \"" + id + "\" } }\n");
                        message.append("{ \"doc\": { \"foo\" : \"bar\", \"field2\" : \"" + i + "_" + j + "\" }}\n");
                        nbUpdated++;
                    }
                }
                // Sometime we delete a document
                if (delete && rarely()) {
                    String id = between(0, i) + "_" + between(0, j);
                    if (!removed.contains(id)) {
                        logger.debug(" -> Removing document [{}] - [{}][{}]", id, i, j);
                        message.append("{ \"delete\" : { \"_index\" : \"" + dbName + "\", \"_type\" : \"typex\", \"_id\" : \"" + id + "\" } }\n");
                        // Track removals so later updates skip deleted ids and the final count is right.
                        removed.add(id);
                    }
                }
            }
            channel.basicPublish("", dbName, null, message.toString().getBytes(StandardCharsets.UTF_8));
        }
        logger.info(" -> We removed [{}] docs and updated [{}] docs", removed.size(), nbUpdated);
        if (injectorHook != null) {
            logger.info(" -> Injecting extra data");
            injectorHook.inject();
        }
        logger.info(" --> create river");
        IndexResponse indexResponse = index("_river", dbName, "_meta", river);
        assertTrue(indexResponse.isCreated());
        logger.info("--> checking that river [{}] was created", dbName);
        assertThat(awaitBusy(new Predicate<Object>() {
            public boolean apply(Object obj) {
                GetResponse response = client().prepareGet(RiverIndexName.Conf.DEFAULT_INDEX_NAME, dbName, "_status").get();
                return response.isExists();
            }
        }, 5, TimeUnit.SECONDS), equalTo(true));
        // Check that docs are still processed by the river
        logger.info(" --> waiting for expected number of docs: [{}]", numDocsPerMessage * numMessages - removed.size());
        assertThat(awaitBusy(new Predicate<Object>() {
            public boolean apply(Object obj) {
                try {
                    refresh();
                    int expected = numDocsPerMessage * numMessages - removed.size();
                    CountResponse response = client().prepareCount(dbName).get();
                    logger.debug(" -> got {} docs, expected {}", response.getCount(), expected);
                    return response.getCount() == expected;
                } catch (IndexMissingException e) {
                    // Index may not exist yet while the river warms up; keep polling.
                    return false;
                }
            }
        }, 20, TimeUnit.SECONDS), equalTo(true));
    } finally {
        if (channel != null && channel.isOpen()) {
            channel.close();
        }
        if (connection != null && connection.isOpen()) {
            connection.close();
        }
        // Deletes the river
        GetResponse response = client().prepareGet(RiverIndexName.Conf.DEFAULT_INDEX_NAME, dbName, "_status").get();
        if (response.isExists()) {
            client().prepareDelete(RiverIndexName.Conf.DEFAULT_INDEX_NAME, dbName, "_meta").get();
            client().prepareDelete(RiverIndexName.Conf.DEFAULT_INDEX_NAME, dbName, "_status").get();
        }
        // Wait until the river's status document is really gone
        assertThat(awaitBusy(new Predicate<Object>() {
            public boolean apply(Object obj) {
                GetResponse response = client().prepareGet(RiverIndexName.Conf.DEFAULT_INDEX_NAME, dbName, "_status").get();
                return response.isExists();
            }
        }, 5, TimeUnit.SECONDS), equalTo(false));
    }
}
Aggregations