Example 6 with Tuple

Use of org.elasticsearch.common.collect.Tuple in the elasticsearch project by elastic.

From the class InternalAwsS3Service, method client:

@Override
public synchronized AmazonS3 client(Settings repositorySettings, Integer maxRetries, boolean useThrottleRetries, Boolean pathStyleAccess) {
    String clientName = CLIENT_NAME.get(repositorySettings);
    String foundEndpoint = findEndpoint(logger, repositorySettings, settings, clientName);
    AWSCredentialsProvider credentials = buildCredentials(logger, deprecationLogger, settings, repositorySettings, clientName);
    Tuple<String, String> clientDescriptor = new Tuple<>(foundEndpoint, credentials.getCredentials().getAWSAccessKeyId());
    AmazonS3Client client = clients.get(clientDescriptor);
    if (client != null) {
        return client;
    }
    client = new AmazonS3Client(credentials, buildConfiguration(logger, repositorySettings, settings, clientName, maxRetries, foundEndpoint, useThrottleRetries));
    if (pathStyleAccess != null) {
        client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(pathStyleAccess));
    }
    if (!foundEndpoint.isEmpty()) {
        client.setEndpoint(foundEndpoint);
    }
    clients.put(clientDescriptor, client);
    return client;
}
Also used: AmazonS3Client (com.amazonaws.services.s3.AmazonS3Client), S3ClientOptions (com.amazonaws.services.s3.S3ClientOptions), SecureString (org.elasticsearch.common.settings.SecureString), AWSCredentialsProvider (com.amazonaws.auth.AWSCredentialsProvider), Tuple (org.elasticsearch.common.collect.Tuple)
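
The lookup above relies on Tuple defining equals and hashCode over both values, which is what lets the (endpoint, access key id) pair serve as a cache key. Below is a minimal, self-contained sketch of that caching pattern; ClientCacheSketch, getOrCreate, and the Object client type are illustrative stand-ins, not elasticsearch code.

import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.common.collect.Tuple;

public class ClientCacheSketch {

    // hypothetical cache keyed by (endpoint, AWS access key id); Object stands in for AmazonS3Client
    private final Map<Tuple<String, String>, Object> clients = new HashMap<>();

    public Object getOrCreate(String endpoint, String accessKeyId) {
        Tuple<String, String> descriptor = new Tuple<>(endpoint, accessKeyId);
        // Tuple compares both v1 and v2 in equals/hashCode, so equal descriptors map to the same cached client
        return clients.computeIfAbsent(descriptor, key -> new Object() /* build the real client here */);
    }
}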

Example 7 with Tuple

Use of org.elasticsearch.common.collect.Tuple in the elasticsearch project by elastic.

From the class ESIntegTestCase, method indexRandom:

/**
 * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either
 * indexes them in a blocking or async fashion. This is very useful to catch problems that relate to internal document
 * ids or index segment creation. Some features might have bugs when a given document is the first or the last in a
 * segment, or when only one document is in a segment, etc. This method prevents issues like that by randomizing the
 * index layout.
 *
 * @param forceRefresh   if <tt>true</tt> all involved indices are refreshed once the documents are indexed.
 * @param dummyDocuments if <tt>true</tt> some empty dummy documents may be randomly inserted into the document list and deleted once
 *                       all documents are indexed. This is useful to produce deleted documents on the server side.
 * @param maybeFlush     if <tt>true</tt> this method may randomly execute full flushes after index operations.
 * @param builders       the documents to index.
 */
public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean maybeFlush, List<IndexRequestBuilder> builders) throws InterruptedException, ExecutionException {
    Random random = random();
    Set<String> indicesSet = new HashSet<>();
    for (IndexRequestBuilder builder : builders) {
        indicesSet.add(builder.request().index());
    }
    Set<Tuple<String, String>> bogusIds = new HashSet<>();
    if (random.nextBoolean() && !builders.isEmpty() && dummyDocuments) {
        builders = new ArrayList<>(builders);
        final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
        // inject some bogus docs
        final int numBogusDocs = scaledRandomIntBetween(1, builders.size() * 2);
        final int unicodeLen = between(1, 10);
        for (int i = 0; i < numBogusDocs; i++) {
            String id = randomRealisticUnicodeOfLength(unicodeLen) + Integer.toString(dummmyDocIdGenerator.incrementAndGet());
            String index = RandomPicks.randomFrom(random, indices);
            bogusIds.add(new Tuple<>(index, id));
            builders.add(client().prepareIndex(index, RANDOM_BOGUS_TYPE, id).setSource("{}", XContentType.JSON));
        }
    }
    final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
    Collections.shuffle(builders, random());
    final CopyOnWriteArrayList<Tuple<IndexRequestBuilder, Exception>> errors = new CopyOnWriteArrayList<>();
    List<CountDownLatch> inFlightAsyncOperations = new ArrayList<>();
    // If you are indexing just a few documents then frequently do it one at a time.  If many then frequently in bulk.
    if (builders.size() < FREQUENT_BULK_THRESHOLD ? frequently() : builders.size() < ALWAYS_BULK_THRESHOLD ? rarely() : false) {
        if (frequently()) {
            logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), true, false);
            for (IndexRequestBuilder indexRequestBuilder : builders) {
                indexRequestBuilder.execute(new PayloadLatchedActionListener<IndexResponse, IndexRequestBuilder>(indexRequestBuilder, newLatch(inFlightAsyncOperations), errors));
                postIndexAsyncActions(indices, inFlightAsyncOperations, maybeFlush);
            }
        } else {
            logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), false, false);
            for (IndexRequestBuilder indexRequestBuilder : builders) {
                indexRequestBuilder.execute().actionGet();
                postIndexAsyncActions(indices, inFlightAsyncOperations, maybeFlush);
            }
        }
    } else {
        List<List<IndexRequestBuilder>> partition = eagerPartition(builders, Math.min(MAX_BULK_INDEX_REQUEST_SIZE, Math.max(1, (int) (builders.size() * randomDouble()))));
        logger.info("Index [{}] docs async: [{}] bulk: [{}] partitions [{}]", builders.size(), false, true, partition.size());
        for (List<IndexRequestBuilder> segmented : partition) {
            BulkRequestBuilder bulkBuilder = client().prepareBulk();
            for (IndexRequestBuilder indexRequestBuilder : segmented) {
                bulkBuilder.add(indexRequestBuilder);
            }
            BulkResponse actionGet = bulkBuilder.execute().actionGet();
            assertThat(actionGet.hasFailures() ? actionGet.buildFailureMessage() : "", actionGet.hasFailures(), equalTo(false));
        }
    }
    for (CountDownLatch operation : inFlightAsyncOperations) {
        operation.await();
    }
    final List<Exception> actualErrors = new ArrayList<>();
    for (Tuple<IndexRequestBuilder, Exception> tuple : errors) {
        if (ExceptionsHelper.unwrapCause(tuple.v2()) instanceof EsRejectedExecutionException) {
            // re-index if rejected
            tuple.v1().execute().actionGet();
        } else {
            actualErrors.add(tuple.v2());
        }
    }
    assertThat(actualErrors, emptyIterable());
    if (!bogusIds.isEmpty()) {
        // delete the bogus types again - it might trigger merges or at least holes in the segments and enforces deleted docs!
        for (Tuple<String, String> doc : bogusIds) {
            assertEquals("failed to delete a dummy doc [" + doc.v1() + "][" + doc.v2() + "]", DocWriteResponse.Result.DELETED, client().prepareDelete(doc.v1(), RANDOM_BOGUS_TYPE, doc.v2()).get().getResult());
        }
    }
    if (forceRefresh) {
        assertNoFailures(client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get());
    }
}
Also used: CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList), ArrayList (java.util.ArrayList), Random (java.util.Random), List (java.util.List), HashSet (java.util.HashSet), EsRejectedExecutionException (org.elasticsearch.common.util.concurrent.EsRejectedExecutionException), BulkResponse (org.elasticsearch.action.bulk.BulkResponse), CountDownLatch (java.util.concurrent.CountDownLatch), IOException (java.io.IOException), ExecutionException (java.util.concurrent.ExecutionException), ElasticsearchException (org.elasticsearch.ElasticsearchException), ShardOperationFailedException (org.elasticsearch.action.ShardOperationFailedException), CreateIndexRequestBuilder (org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder), IndexRequestBuilder (org.elasticsearch.action.index.IndexRequestBuilder), GetIndexResponse (org.elasticsearch.action.admin.indices.get.GetIndexResponse), IndexResponse (org.elasticsearch.action.index.IndexResponse), BulkRequestBuilder (org.elasticsearch.action.bulk.BulkRequestBuilder), Tuple (org.elasticsearch.common.collect.Tuple)
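
The bogus-document bookkeeping above amounts to collecting (index, id) pairs in a Set<Tuple<String, String>> and deleting them once indexing is done. A stripped-down sketch of just that pattern, outside the test framework; BogusIdTrackingSketch and deleteDoc are hypothetical placeholders for the real prepareDelete call.

import java.util.HashSet;
import java.util.Set;

import org.elasticsearch.common.collect.Tuple;

public class BogusIdTrackingSketch {

    private final Set<Tuple<String, String>> bogusIds = new HashSet<>();

    public void remember(String index, String id) {
        // set semantics rely on Tuple's equals/hashCode, so duplicate (index, id) pairs collapse into one entry
        bogusIds.add(new Tuple<>(index, id));
    }

    public void cleanup() {
        for (Tuple<String, String> doc : bogusIds) {
            deleteDoc(doc.v1(), doc.v2()); // v1() is the index name, v2() the document id
        }
        bogusIds.clear();
    }

    private void deleteDoc(String index, String id) {
        // hypothetical placeholder for client().prepareDelete(index, type, id).get()
        System.out.println("delete [" + index + "][" + id + "]");
    }
}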

Example 8 with Tuple

Use of org.elasticsearch.common.collect.Tuple in the elasticsearch project by elastic.

From the class StreamTests, method testSpecificVLongSerialization:

public void testSpecificVLongSerialization() throws IOException {
    List<Tuple<Long, byte[]>> values = Arrays.asList(
        new Tuple<>(0L, new byte[] { 0 }),
        new Tuple<>(-1L, new byte[] { 1 }),
        new Tuple<>(1L, new byte[] { 2 }),
        new Tuple<>(-2L, new byte[] { 3 }),
        new Tuple<>(2L, new byte[] { 4 }),
        new Tuple<>(Long.MIN_VALUE, new byte[] { -1, -1, -1, -1, -1, -1, -1, -1, -1, 1 }),
        new Tuple<>(Long.MAX_VALUE, new byte[] { -2, -1, -1, -1, -1, -1, -1, -1, -1, 1 }));
    for (Tuple<Long, byte[]> value : values) {
        BytesStreamOutput out = new BytesStreamOutput();
        out.writeZLong(value.v1());
        assertArrayEquals(Long.toString(value.v1()), value.v2(), BytesReference.toBytes(out.bytes()));
        BytesReference bytes = new BytesArray(value.v2());
        assertEquals(Arrays.toString(value.v2()), (long) value.v1(), bytes.streamInput().readZLong());
    }
}
Also used: BytesReference (org.elasticsearch.common.bytes.BytesReference), BytesArray (org.elasticsearch.common.bytes.BytesArray), Tuple (org.elasticsearch.common.collect.Tuple)
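
The expected byte arrays are consistent with zig-zag encoding: the sign is folded into the low bit before the value is written as a variable-length integer, so 0, -1, 1, -2, 2 become 0, 1, 2, 3, 4. Below is a short sketch of the standard zig-zag transform that reproduces the single-byte cases above; it is an illustration, not the BytesStreamOutput implementation itself.

public class ZigZagSketch {

    // standard zig-zag transform: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...
    static long zigZag(long value) {
        return (value << 1) ^ (value >> 63);
    }

    public static void main(String[] args) {
        long[] inputs = { 0L, -1L, 1L, -2L, 2L };
        for (long v : inputs) {
            // for these small values the zig-zag result fits in one byte, matching the expected arrays in the test
            System.out.println(v + " -> " + zigZag(v));
        }
    }
}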

Example 9 with Tuple

Use of org.elasticsearch.common.collect.Tuple in the elasticsearch project by elastic.

From the class TimeZoneRoundingTests, method testIntervalRoundingMonotonic_CET:

/**
 * Test that rounded values are always greater than or equal to the last rounded value if the date is increasing.
 * The example covers an interval around 2011-10-30T02:10:00+01:00, time zone CET, interval: 2700000ms.
 */
public void testIntervalRoundingMonotonic_CET() {
    long interval = TimeUnit.MINUTES.toMillis(45);
    DateTimeZone tz = DateTimeZone.forID("CET");
    Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz);
    List<Tuple<String, String>> expectedDates = new ArrayList<>();
    // first date is the date to be rounded, second the expected result
    expectedDates.add(new Tuple<>("2011-10-30T01:40:00.000+02:00", "2011-10-30T01:30:00.000+02:00"));
    expectedDates.add(new Tuple<>("2011-10-30T02:02:30.000+02:00", "2011-10-30T01:30:00.000+02:00"));
    expectedDates.add(new Tuple<>("2011-10-30T02:25:00.000+02:00", "2011-10-30T02:15:00.000+02:00"));
    expectedDates.add(new Tuple<>("2011-10-30T02:47:30.000+02:00", "2011-10-30T02:15:00.000+02:00"));
    expectedDates.add(new Tuple<>("2011-10-30T02:10:00.000+01:00", "2011-10-30T02:15:00.000+02:00"));
    expectedDates.add(new Tuple<>("2011-10-30T02:32:30.000+01:00", "2011-10-30T02:15:00.000+01:00"));
    expectedDates.add(new Tuple<>("2011-10-30T02:55:00.000+01:00", "2011-10-30T02:15:00.000+01:00"));
    expectedDates.add(new Tuple<>("2011-10-30T03:17:30.000+01:00", "2011-10-30T03:00:00.000+01:00"));
    long previousDate = Long.MIN_VALUE;
    for (Tuple<String, String> dates : expectedDates) {
        final long roundedDate = rounding.round(time(dates.v1()));
        assertThat(roundedDate, isDate(time(dates.v2()), tz));
        assertThat(roundedDate, greaterThanOrEqualTo(previousDate));
        previousDate = roundedDate;
    }
    // here's what this means for interval widths
    assertEquals(TimeUnit.MINUTES.toMillis(45), time("2011-10-30T02:15:00.000+02:00") - time("2011-10-30T01:30:00.000+02:00"));
    assertEquals(TimeUnit.MINUTES.toMillis(60), time("2011-10-30T02:15:00.000+01:00") - time("2011-10-30T02:15:00.000+02:00"));
    assertEquals(TimeUnit.MINUTES.toMillis(45), time("2011-10-30T03:00:00.000+01:00") - time("2011-10-30T02:15:00.000+01:00"));
}
Also used: TimeIntervalRounding (org.elasticsearch.common.rounding.Rounding.TimeIntervalRounding), ArrayList (java.util.ArrayList), TimeUnitRounding (org.elasticsearch.common.rounding.Rounding.TimeUnitRounding), DateTimeZone (org.joda.time.DateTimeZone), Tuple (org.elasticsearch.common.collect.Tuple)
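
Here the Tuple is simply an (input, expected) pair driving a data-table style loop. A generic sketch of that pattern, detached from the rounding logic; InputExpectedSketch, checkAll, and the toUpperCase stand-in are hypothetical, not elasticsearch code.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

import org.elasticsearch.common.collect.Tuple;

public class InputExpectedSketch {

    // runs a function over each v1() and compares the result against the expected v2()
    static <I, O> void checkAll(List<Tuple<I, O>> cases, Function<I, O> fn) {
        for (Tuple<I, O> c : cases) {
            O actual = fn.apply(c.v1());
            if (!actual.equals(c.v2())) {
                throw new AssertionError("for " + c.v1() + " expected " + c.v2() + " but got " + actual);
            }
        }
    }

    public static void main(String[] args) {
        List<Tuple<String, String>> cases = new ArrayList<>();
        cases.add(new Tuple<>("cet", "CET")); // stand-in data, not the rounding test values
        checkAll(cases, String::toUpperCase);
    }
}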

Example 10 with Tuple

Use of org.elasticsearch.common.collect.Tuple in the elasticsearch project by elastic.

From the class AsyncIOProcessorTests, method testRandomFail:

public void testRandomFail() throws InterruptedException {
    AtomicInteger received = new AtomicInteger(0);
    AtomicInteger failed = new AtomicInteger(0);
    AtomicInteger actualFailed = new AtomicInteger(0);
    AsyncIOProcessor<Object> processor = new AsyncIOProcessor<Object>(logger, scaledRandomIntBetween(1, 2024)) {

        @Override
        protected void write(List<Tuple<Object, Consumer<Exception>>> candidates) throws IOException {
            received.addAndGet(candidates.size());
            if (randomBoolean()) {
                failed.addAndGet(candidates.size());
                if (randomBoolean()) {
                    throw new IOException();
                } else {
                    throw new RuntimeException();
                }
            }
        }
    };
    Semaphore semaphore = new Semaphore(Integer.MAX_VALUE);
    final int count = randomIntBetween(1000, 20000);
    Thread[] thread = new Thread[randomIntBetween(3, 10)];
    CountDownLatch latch = new CountDownLatch(thread.length);
    for (int i = 0; i < thread.length; i++) {
        thread[i] = new Thread() {

            @Override
            public void run() {
                try {
                    latch.countDown();
                    latch.await();
                    for (int i = 0; i < count; i++) {
                        semaphore.acquire();
                        processor.put(new Object(), (ex) -> {
                            if (ex != null) {
                                actualFailed.incrementAndGet();
                            }
                            semaphore.release();
                        });
                    }
                } catch (Exception ex) {
                    throw new RuntimeException(ex);
                }
            }
        };
        thread[i].start();
    }
    for (int i = 0; i < thread.length; i++) {
        thread[i].join();
    }
    assertTrue(semaphore.tryAcquire(Integer.MAX_VALUE, 10, TimeUnit.SECONDS));
    assertEquals(count * thread.length, received.get());
    assertEquals(actualFailed.get(), failed.get());
}
Also used: TimeUnit (java.util.concurrent.TimeUnit), Consumer (java.util.function.Consumer), CountDownLatch (java.util.concurrent.CountDownLatch), List (java.util.List), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Semaphore (java.util.concurrent.Semaphore), IOException (java.io.IOException), ESTestCase (org.elasticsearch.test.ESTestCase), Tuple (org.elasticsearch.common.collect.Tuple)
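
In this test each Tuple pairs a queued item with its completion callback, and the processor invokes the Consumer with the write failure, or null on success. A minimal sketch of that pairing without the batching and threading of AsyncIOProcessor; ItemCallbackSketch and the simulated failure are hypothetical.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

import org.elasticsearch.common.collect.Tuple;

public class ItemCallbackSketch {

    public static void main(String[] args) {
        Consumer<Exception> onDoc1 = ex -> System.out.println("doc-1 -> " + (ex == null ? "ok" : "failed"));
        Consumer<Exception> onDoc2 = ex -> System.out.println("doc-2 -> " + (ex == null ? "ok" : "failed"));

        // each entry pairs an item with the callback that should see its outcome
        List<Tuple<String, Consumer<Exception>>> queue = new ArrayList<>();
        queue.add(new Tuple<>("doc-1", onDoc1));
        queue.add(new Tuple<>("doc-2", onDoc2));

        for (Tuple<String, Consumer<Exception>> item : queue) {
            Exception failure = null;
            try {
                // simulated write that may fail, standing in for the real write(candidates) call
                if (item.v1().endsWith("2")) {
                    throw new RuntimeException("simulated failure");
                }
            } catch (Exception ex) {
                failure = ex;
            }
            item.v2().accept(failure); // callback receives null on success, the exception on failure
        }
    }
}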

Aggregations

Tuple (org.elasticsearch.common.collect.Tuple): 50
IOException (java.io.IOException): 18
ArrayList (java.util.ArrayList): 17
HashMap (java.util.HashMap): 12
List (java.util.List): 12
Map (java.util.Map): 12
Settings (org.elasticsearch.common.settings.Settings): 8
CountDownLatch (java.util.concurrent.CountDownLatch): 7
HashSet (java.util.HashSet): 6
ParameterizedMessage (org.apache.logging.log4j.message.ParameterizedMessage): 6
Matchers.containsString (org.hamcrest.Matchers.containsString): 6
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList): 5
Supplier (org.apache.logging.log4j.util.Supplier): 5
TaskInfo (org.elasticsearch.tasks.TaskInfo): 5
Set (java.util.Set): 4
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 4
Consumer (java.util.function.Consumer): 4
ElasticsearchException (org.elasticsearch.ElasticsearchException): 4
Version (org.elasticsearch.Version): 4
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode): 4