use of org.elasticsearch.common.collect.Tuple in project elasticsearch by elastic.
the class InternalAwsS3Service method client.
@Override
public synchronized AmazonS3 client(Settings repositorySettings, Integer maxRetries, boolean useThrottleRetries, Boolean pathStyleAccess) {
    String clientName = CLIENT_NAME.get(repositorySettings);
    String foundEndpoint = findEndpoint(logger, repositorySettings, settings, clientName);
    AWSCredentialsProvider credentials = buildCredentials(logger, deprecationLogger, settings, repositorySettings, clientName);
    Tuple<String, String> clientDescriptor = new Tuple<>(foundEndpoint, credentials.getCredentials().getAWSAccessKeyId());
    AmazonS3Client client = clients.get(clientDescriptor);
    if (client != null) {
        return client;
    }
    client = new AmazonS3Client(credentials, buildConfiguration(logger, repositorySettings, settings, clientName, maxRetries, foundEndpoint, useThrottleRetries));
    if (pathStyleAccess != null) {
        client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(pathStyleAccess));
    }
    if (!foundEndpoint.isEmpty()) {
        client.setEndpoint(foundEndpoint);
    }
    clients.put(clientDescriptor, client);
    return client;
}
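The Tuple here acts as a composite cache key: one client is cached per (endpoint, AWS access key) pair, and the map lookup works because Tuple derives equals() and hashCode() from both values. A minimal sketch of that pattern, assuming only Tuple's value-based equality; the ClientCacheSketch class and the Object stand-in for AmazonS3Client are illustrative, not part of the repository plugin:

import java.util.HashMap;
import java.util.Map;
import org.elasticsearch.common.collect.Tuple;

class ClientCacheSketch {
    private final Map<Tuple<String, String>, Object> clients = new HashMap<>();

    // Returns the cached client for this (endpoint, accessKey) pair, creating it on first use.
    Object getOrCreate(String endpoint, String accessKey) {
        Tuple<String, String> key = new Tuple<>(endpoint, accessKey);
        return clients.computeIfAbsent(key, k -> new Object() /* stand-in for AmazonS3Client */);
    }
}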
use of org.elasticsearch.common.collect.Tuple in project elasticsearch by elastic.
the class ESIntegTestCase method indexRandom.
/**
 * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either
 * indexes them in a blocking or async fashion. This is very useful to catch problems that relate to internal document
 * ids or index segment creation. Some features might have bugs when a given document is the first or the last in a
 * segment, or when a segment contains only one document, etc. This method prevents issues like this by randomizing the
 * index layout.
 *
 * @param forceRefresh   if <tt>true</tt> all involved indices are refreshed once the documents are indexed.
 * @param dummyDocuments if <tt>true</tt> some empty dummy documents may be randomly inserted into the document list and deleted once
 *                       all documents are indexed. This is useful to produce deleted documents on the server side.
 * @param maybeFlush     if <tt>true</tt> this method may randomly execute full flushes after index operations.
 * @param builders       the documents to index.
 */
public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean maybeFlush, List<IndexRequestBuilder> builders) throws InterruptedException, ExecutionException {
    Random random = random();
    Set<String> indicesSet = new HashSet<>();
    for (IndexRequestBuilder builder : builders) {
        indicesSet.add(builder.request().index());
    }
    Set<Tuple<String, String>> bogusIds = new HashSet<>();
    if (random.nextBoolean() && !builders.isEmpty() && dummyDocuments) {
        builders = new ArrayList<>(builders);
        final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
        // inject some bogus docs
        final int numBogusDocs = scaledRandomIntBetween(1, builders.size() * 2);
        final int unicodeLen = between(1, 10);
        for (int i = 0; i < numBogusDocs; i++) {
            String id = randomRealisticUnicodeOfLength(unicodeLen) + Integer.toString(dummmyDocIdGenerator.incrementAndGet());
            String index = RandomPicks.randomFrom(random, indices);
            bogusIds.add(new Tuple<>(index, id));
            builders.add(client().prepareIndex(index, RANDOM_BOGUS_TYPE, id).setSource("{}", XContentType.JSON));
        }
    }
    final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
    Collections.shuffle(builders, random());
    final CopyOnWriteArrayList<Tuple<IndexRequestBuilder, Exception>> errors = new CopyOnWriteArrayList<>();
    List<CountDownLatch> inFlightAsyncOperations = new ArrayList<>();
    // If you are indexing just a few documents then frequently do it one at a time. If many then frequently in bulk.
    if (builders.size() < FREQUENT_BULK_THRESHOLD ? frequently() : builders.size() < ALWAYS_BULK_THRESHOLD ? rarely() : false) {
        if (frequently()) {
            logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), true, false);
            for (IndexRequestBuilder indexRequestBuilder : builders) {
                indexRequestBuilder.execute(new PayloadLatchedActionListener<IndexResponse, IndexRequestBuilder>(indexRequestBuilder, newLatch(inFlightAsyncOperations), errors));
                postIndexAsyncActions(indices, inFlightAsyncOperations, maybeFlush);
            }
        } else {
            logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), false, false);
            for (IndexRequestBuilder indexRequestBuilder : builders) {
                indexRequestBuilder.execute().actionGet();
                postIndexAsyncActions(indices, inFlightAsyncOperations, maybeFlush);
            }
        }
    } else {
        List<List<IndexRequestBuilder>> partition = eagerPartition(builders, Math.min(MAX_BULK_INDEX_REQUEST_SIZE, Math.max(1, (int) (builders.size() * randomDouble()))));
        logger.info("Index [{}] docs async: [{}] bulk: [{}] partitions [{}]", builders.size(), false, true, partition.size());
        for (List<IndexRequestBuilder> segmented : partition) {
            BulkRequestBuilder bulkBuilder = client().prepareBulk();
            for (IndexRequestBuilder indexRequestBuilder : segmented) {
                bulkBuilder.add(indexRequestBuilder);
            }
            BulkResponse actionGet = bulkBuilder.execute().actionGet();
            assertThat(actionGet.hasFailures() ? actionGet.buildFailureMessage() : "", actionGet.hasFailures(), equalTo(false));
        }
    }
    for (CountDownLatch operation : inFlightAsyncOperations) {
        operation.await();
    }
    final List<Exception> actualErrors = new ArrayList<>();
    for (Tuple<IndexRequestBuilder, Exception> tuple : errors) {
        if (ExceptionsHelper.unwrapCause(tuple.v2()) instanceof EsRejectedExecutionException) {
            // re-index if rejected
            tuple.v1().execute().actionGet();
        } else {
            actualErrors.add(tuple.v2());
        }
    }
    assertThat(actualErrors, emptyIterable());
    if (!bogusIds.isEmpty()) {
        // delete the bogus types again - it might trigger merges or at least holes in the segments and enforces deleted docs!
        for (Tuple<String, String> doc : bogusIds) {
            assertEquals("failed to delete a dummy doc [" + doc.v1() + "][" + doc.v2() + "]", DocWriteResponse.Result.DELETED, client().prepareDelete(doc.v1(), RANDOM_BOGUS_TYPE, doc.v2()).get().getResult());
        }
    }
    if (forceRefresh) {
        assertNoFailures(client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get());
    }
}
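A hedged usage sketch: a test extending ESIntegTestCase might drive this method as below. The index name, ids, and JSON payloads are made up for illustration and mirror the setSource(String, XContentType) form used in the snippet above.

List<IndexRequestBuilder> builders = new ArrayList<>();
for (int i = 0; i < 50; i++) {
    // illustrative index/type/id and payload
    builders.add(client().prepareIndex("test", "doc", Integer.toString(i))
        .setSource("{\"field\":" + i + "}", XContentType.JSON));
}
// refresh afterwards, allow bogus dummy docs, allow random flushes
indexRandom(true, true, true, builders);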
use of org.elasticsearch.common.collect.Tuple in project elasticsearch by elastic.
the class StreamTests method testSpecificVLongSerialization.
public void testSpecificVLongSerialization() throws IOException {
    List<Tuple<Long, byte[]>> values = Arrays.asList(
        new Tuple<>(0L, new byte[] { 0 }),
        new Tuple<>(-1L, new byte[] { 1 }),
        new Tuple<>(1L, new byte[] { 2 }),
        new Tuple<>(-2L, new byte[] { 3 }),
        new Tuple<>(2L, new byte[] { 4 }),
        new Tuple<>(Long.MIN_VALUE, new byte[] { -1, -1, -1, -1, -1, -1, -1, -1, -1, 1 }),
        new Tuple<>(Long.MAX_VALUE, new byte[] { -2, -1, -1, -1, -1, -1, -1, -1, -1, 1 }));
    for (Tuple<Long, byte[]> value : values) {
        BytesStreamOutput out = new BytesStreamOutput();
        out.writeZLong(value.v1());
        assertArrayEquals(Long.toString(value.v1()), value.v2(), BytesReference.toBytes(out.bytes()));
        BytesReference bytes = new BytesArray(value.v2());
        assertEquals(Arrays.toString(value.v2()), (long) value.v1(), bytes.streamInput().readZLong());
    }
}
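The single-byte expectations (0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4) are consistent with zig-zag encoding, which maps longs of small magnitude to small unsigned values before the variable-length byte encoding visible in the Long.MIN_VALUE and Long.MAX_VALUE cases. A minimal sketch of that mapping, shown as the standard transform rather than Elasticsearch's actual StreamOutput code:

// Standard zig-zag transform; small magnitudes stay small regardless of sign.
static long zigZagEncode(long v) {
    return (v >> 63) ^ (v << 1);   // arithmetic shift replicates the sign bit
}

static long zigZagDecode(long v) {
    return (v >>> 1) ^ -(v & 1);
}
// zigZagEncode(0) == 0, zigZagEncode(-1) == 1, zigZagEncode(1) == 2,
// zigZagEncode(-2) == 3, zigZagEncode(2) == 4 -- matching the single-byte expectations above.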
use of org.elasticsearch.common.collect.Tuple in project elasticsearch by elastic.
the class TimeZoneRoundingTests method testIntervalRoundingMonotonic_CET.
/**
 * Test that rounded values are always greater than or equal to the last rounded value if the date is increasing.
 * The example covers an interval around 2011-10-30T02:10:00+01:00, time zone CET, interval: 2700000ms (45 minutes).
 */
public void testIntervalRoundingMonotonic_CET() {
    long interval = TimeUnit.MINUTES.toMillis(45);
    DateTimeZone tz = DateTimeZone.forID("CET");
    Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz);
    List<Tuple<String, String>> expectedDates = new ArrayList<>();
    // first date is the date to be rounded, second the expected result
    expectedDates.add(new Tuple<>("2011-10-30T01:40:00.000+02:00", "2011-10-30T01:30:00.000+02:00"));
    expectedDates.add(new Tuple<>("2011-10-30T02:02:30.000+02:00", "2011-10-30T01:30:00.000+02:00"));
    expectedDates.add(new Tuple<>("2011-10-30T02:25:00.000+02:00", "2011-10-30T02:15:00.000+02:00"));
    expectedDates.add(new Tuple<>("2011-10-30T02:47:30.000+02:00", "2011-10-30T02:15:00.000+02:00"));
    expectedDates.add(new Tuple<>("2011-10-30T02:10:00.000+01:00", "2011-10-30T02:15:00.000+02:00"));
    expectedDates.add(new Tuple<>("2011-10-30T02:32:30.000+01:00", "2011-10-30T02:15:00.000+01:00"));
    expectedDates.add(new Tuple<>("2011-10-30T02:55:00.000+01:00", "2011-10-30T02:15:00.000+01:00"));
    expectedDates.add(new Tuple<>("2011-10-30T03:17:30.000+01:00", "2011-10-30T03:00:00.000+01:00"));
    long previousDate = Long.MIN_VALUE;
    for (Tuple<String, String> dates : expectedDates) {
        final long roundedDate = rounding.round(time(dates.v1()));
        assertThat(roundedDate, isDate(time(dates.v2()), tz));
        assertThat(roundedDate, greaterThanOrEqualTo(previousDate));
        previousDate = roundedDate;
    }
    // here's what this means for interval widths
    assertEquals(TimeUnit.MINUTES.toMillis(45), time("2011-10-30T02:15:00.000+02:00") - time("2011-10-30T01:30:00.000+02:00"));
    assertEquals(TimeUnit.MINUTES.toMillis(60), time("2011-10-30T02:15:00.000+01:00") - time("2011-10-30T02:15:00.000+02:00"));
    assertEquals(TimeUnit.MINUTES.toMillis(45), time("2011-10-30T03:00:00.000+01:00") - time("2011-10-30T02:15:00.000+01:00"));
}
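The offset flip in the expected dates comes from the CET clock change on 2011-10-30, when 03:00+02:00 and 02:00+01:00 denote the same instant; that is why a fixed 45-minute interval shows one 60-minute wall-clock gap across the transition. A small Joda-Time sketch of locating that transition; this is illustrative only and not part of the test:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

DateTimeZone cet = DateTimeZone.forID("CET");
// first transition after 2011-10-01: the autumn clock change
long transition = cet.nextTransition(new DateTime("2011-10-01T00:00:00Z").getMillis());
// the same instant can be written as 2011-10-30T03:00+02:00 or 2011-10-30T02:00+01:00
System.out.println(new DateTime(transition, cet)); // expected: 2011-10-30T02:00:00.000+01:00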
use of org.elasticsearch.common.collect.Tuple in project elasticsearch by elastic.
the class AsyncIOProcessorTests method testRandomFail.
public void testRandomFail() throws InterruptedException {
    AtomicInteger received = new AtomicInteger(0);
    AtomicInteger failed = new AtomicInteger(0);
    AtomicInteger actualFailed = new AtomicInteger(0);
    AsyncIOProcessor<Object> processor = new AsyncIOProcessor<Object>(logger, scaledRandomIntBetween(1, 2024)) {
        @Override
        protected void write(List<Tuple<Object, Consumer<Exception>>> candidates) throws IOException {
            received.addAndGet(candidates.size());
            if (randomBoolean()) {
                failed.addAndGet(candidates.size());
                if (randomBoolean()) {
                    throw new IOException();
                } else {
                    throw new RuntimeException();
                }
            }
        }
    };
    Semaphore semaphore = new Semaphore(Integer.MAX_VALUE);
    final int count = randomIntBetween(1000, 20000);
    Thread[] thread = new Thread[randomIntBetween(3, 10)];
    CountDownLatch latch = new CountDownLatch(thread.length);
    for (int i = 0; i < thread.length; i++) {
        thread[i] = new Thread() {
            @Override
            public void run() {
                try {
                    latch.countDown();
                    latch.await();
                    for (int i = 0; i < count; i++) {
                        semaphore.acquire();
                        processor.put(new Object(), (ex) -> {
                            if (ex != null) {
                                actualFailed.incrementAndGet();
                            }
                            semaphore.release();
                        });
                    }
                } catch (Exception ex) {
                    throw new RuntimeException(ex);
                }
            }
        };
        thread[i].start();
    }
    for (int i = 0; i < thread.length; i++) {
        thread[i].join();
    }
    assertTrue(semaphore.tryAcquire(Integer.MAX_VALUE, 10, TimeUnit.SECONDS));
    assertEquals(count * thread.length, received.get());
    assertEquals(actualFailed.get(), failed.get());
}
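For context, a hedged sketch of the contract the test exercises: write() receives (item, completion-callback) pairs, and each callback is invoked with the failure, or with null on success, once the batch containing that item has been processed. The element type, queue size, and log message below are illustrative only.

AsyncIOProcessor<String> lines = new AsyncIOProcessor<String>(logger, 1024) {
    @Override
    protected void write(List<Tuple<String, Consumer<Exception>>> candidates) throws IOException {
        for (Tuple<String, Consumer<Exception>> candidate : candidates) {
            // perform the actual I/O for candidate.v1() here
        }
    }
};
// the callback runs after the batch containing "hello" has been written (or has failed)
lines.put("hello", ex -> {
    if (ex != null) {
        logger.warn("write failed", ex);
    }
});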