Search in sources :

Example 1 with ListenableActionFuture

Use of org.elasticsearch.action.ListenableActionFuture in the project elasticsearch by elastic.

The class SharedClusterSnapshotRestoreIT, method testGetSnapshotsRequest.

/**
 * Exercises the get-snapshots API against a repository in several states:
 * empty, with a single in-progress snapshot, and with a mix of completed and
 * in-progress snapshots. Verifies lookups by concrete name, wildcard patterns,
 * {@code _all} and {@code _current}, the {@code ignore_unavailable} flag, and
 * that overlapping patterns do not produce duplicate entries in the response.
 */
public void testGetSnapshotsRequest() throws Exception {
    final String repositoryName = "test-repo";
    final String indexName = "test-idx";
    final Client client = client();
    final Path repo = randomRepoPath();
    logger.info("-->  creating repository at {}", repo.toAbsolutePath());
    assertAcked(client.admin().cluster().preparePutRepository(repositoryName).setType("mock").setSettings(Settings.builder().put("location", repo).put("compress", false).put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES).put("wait_after_unblock", 200)));
    logger.info("--> get snapshots on an empty repository");
    // asking for a missing snapshot by name must throw ...
    expectThrows(SnapshotMissingException.class, () -> client.admin().cluster().prepareGetSnapshots(repositoryName).addSnapshots("non-existent-snapshot").get());
    // ... but with ignore unavailable set to true, should not throw an exception
    GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(repositoryName).setIgnoreUnavailable(true).addSnapshots("non-existent-snapshot").get();
    assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(0));
    logger.info("--> creating an index and indexing documents");
    // Create index on 2 nodes and make sure each node has a primary by setting no replicas
    assertAcked(prepareCreate(indexName, 1, Settings.builder().put("number_of_replicas", 0)));
    ensureGreen();
    for (int i = 0; i < 10; i++) {
        index(indexName, "doc", Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    // make sure we return only the in-progress snapshot when taking the first snapshot on a clean repository
    // take initial snapshot with a block, making sure we only get 1 in-progress snapshot returned
    // block a node so the create snapshot operation can remain in progress
    final String initialBlockedNode = blockNodeWithIndex(repositoryName, indexName);
    ListenableActionFuture<CreateSnapshotResponse> responseListener = client.admin().cluster().prepareCreateSnapshot(repositoryName, "snap-on-empty-repo").setWaitForCompletion(false).setIndices(indexName).execute();
    // wait for block to kick in
    waitForBlock(initialBlockedNode, repositoryName, TimeValue.timeValueSeconds(60));
    // every one of these name patterns must resolve to exactly the single in-progress snapshot
    getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(repositoryName).setSnapshots(randomFrom("_all", "_current", "snap-on-*", "*-on-empty-repo", "snap-on-empty-repo")).get();
    assertEquals(1, getSnapshotsResponse.getSnapshots().size());
    assertEquals("snap-on-empty-repo", getSnapshotsResponse.getSnapshots().get(0).snapshotId().getName());
    // unblock node
    unblockNode(repositoryName, initialBlockedNode);
    // timeout after 10 seconds
    responseListener.actionGet(TimeValue.timeValueMillis(10000L));
    client.admin().cluster().prepareDeleteSnapshot(repositoryName, "snap-on-empty-repo").get();
    // numSnapshots includes one extra slot for the in-progress snapshot created below
    final int numSnapshots = randomIntBetween(1, 3) + 1;
    logger.info("--> take {} snapshot(s)", numSnapshots - 1);
    final String[] snapshotNames = new String[numSnapshots];
    for (int i = 0; i < numSnapshots - 1; i++) {
        final String snapshotName = randomAsciiOfLength(8).toLowerCase(Locale.ROOT);
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot(repositoryName, snapshotName).setWaitForCompletion(true).setIndices(indexName).get();
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
        snapshotNames[i] = snapshotName;
    }
    logger.info("--> take another snapshot to be in-progress");
    // add documents so there are data files to block on
    for (int i = 10; i < 20; i++) {
        index(indexName, "doc", Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    final String inProgressSnapshot = randomAsciiOfLength(8).toLowerCase(Locale.ROOT);
    snapshotNames[numSnapshots - 1] = inProgressSnapshot;
    // block a node so the create snapshot operation can remain in progress
    final String blockedNode = blockNodeWithIndex(repositoryName, indexName);
    client.admin().cluster().prepareCreateSnapshot(repositoryName, inProgressSnapshot).setWaitForCompletion(false).setIndices(indexName).get();
    // wait for block to kick in
    waitForBlock(blockedNode, repositoryName, TimeValue.timeValueSeconds(60));
    logger.info("--> get all snapshots with a current in-progress");
    // with ignore unavailable set to true, should not throw an exception
    final List<String> snapshotsToGet = new ArrayList<>();
    if (randomBoolean()) {
        // use _current plus the individual names of the finished snapshots
        snapshotsToGet.add("_current");
        for (int i = 0; i < numSnapshots - 1; i++) {
            snapshotsToGet.add(snapshotNames[i]);
        }
    } else {
        snapshotsToGet.add("_all");
    }
    getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(repositoryName).setSnapshots(snapshotsToGet.toArray(Strings.EMPTY_ARRAY)).get();
    // Arrays.asList returns a view backed by snapshotNames, so this sorts the array too;
    // that is fine because later lookups by name do not depend on order.
    List<String> sortedNames = Arrays.asList(snapshotNames);
    Collections.sort(sortedNames);
    assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots));
    assertThat(getSnapshotsResponse.getSnapshots().stream().map(s -> s.snapshotId().getName()).sorted().collect(Collectors.toList()), equalTo(sortedNames));
    // asking for every snapshot by its concrete name must yield the same sorted set
    getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(repositoryName).addSnapshots(snapshotNames).get();
    assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots));
    assertThat(getSnapshotsResponse.getSnapshots().stream().map(s -> s.snapshotId().getName()).sorted().collect(Collectors.toList()), equalTo(sortedNames));
    logger.info("--> make sure duplicates are not returned in the response");
    // build two wildcard patterns that both match one of the snapshots already listed by name
    String regexName = snapshotNames[randomIntBetween(0, numSnapshots - 1)];
    final int splitPos = regexName.length() / 2;
    final String firstRegex = regexName.substring(0, splitPos) + "*";
    final String secondRegex = "*" + regexName.substring(splitPos);
    getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(repositoryName).addSnapshots(snapshotNames).addSnapshots(firstRegex, secondRegex).get();
    assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots));
    assertThat(getSnapshotsResponse.getSnapshots().stream().map(s -> s.snapshotId().getName()).sorted().collect(Collectors.toList()), equalTo(sortedNames));
    // unblock node
    unblockNode(repositoryName, blockedNode);
    waitForCompletion(repositoryName, inProgressSnapshot, TimeValue.timeValueSeconds(60));
}
Also used : Path(java.nio.file.Path) ShardId(org.elasticsearch.index.shard.ShardId) ByteSizeUnit(org.elasticsearch.common.unit.ByteSizeUnit) Arrays(java.util.Arrays) RestoreSnapshotResponse(org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse) ClusterBlocks(org.elasticsearch.cluster.block.ClusterBlocks) ClusterState(org.elasticsearch.cluster.ClusterState) ClusterStateUpdateTask(org.elasticsearch.cluster.ClusterStateUpdateTask) Matchers.nullValue(org.hamcrest.Matchers.nullValue) Path(java.nio.file.Path) Priority(org.elasticsearch.common.Priority) GetSettingsResponse(org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse) Matchers.notNullValue(org.hamcrest.Matchers.notNullValue) TestLogging(org.elasticsearch.test.junit.annotations.TestLogging) Matchers.allOf(org.hamcrest.Matchers.allOf) DeletePipelineRequest(org.elasticsearch.action.ingest.DeletePipelineRequest) ElasticsearchAssertions.assertAliasesMissing(org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAliasesMissing) Matchers.startsWith(org.hamcrest.Matchers.startsWith) ElasticsearchAssertions.assertBlocked(org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked) CountDownLatch(java.util.concurrent.CountDownLatch) Stream(java.util.stream.Stream) QueryBuilders.matchQuery(org.elasticsearch.index.query.QueryBuilders.matchQuery) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) SnapshotIndexStatus(org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotIndexStatus) InvalidIndexNameException(org.elasticsearch.indices.InvalidIndexNameException) Matchers.greaterThan(org.hamcrest.Matchers.greaterThan) Matchers.is(org.hamcrest.Matchers.is) Matchers.containsString(org.hamcrest.Matchers.containsString) ShardSnapshotStatus(org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus) XContentFactory(org.elasticsearch.common.xcontent.XContentFactory) ImmutableOpenMap(org.elasticsearch.common.collect.ImmutableOpenMap) 
GetPipelineResponse(org.elasticsearch.action.ingest.GetPipelineResponse) ClusterService(org.elasticsearch.cluster.service.ClusterService) ArrayList(java.util.ArrayList) BytesArray(org.elasticsearch.common.bytes.BytesArray) SnapshotStatus(org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus) ElasticsearchAssertions.assertIndexTemplateMissing(org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertIndexTemplateMissing) Matchers.lessThan(org.hamcrest.Matchers.lessThan) IndicesService(org.elasticsearch.indices.IndicesService) FlushResponse(org.elasticsearch.action.admin.indices.flush.FlushResponse) ElasticsearchAssertions.assertThrows(org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows) Files(java.nio.file.Files) SETTING_NUMBER_OF_SHARDS(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS) Client(org.elasticsearch.client.Client) IndexService(org.elasticsearch.index.IndexService) IOUtils(org.apache.lucene.util.IOUtils) RepositoriesService(org.elasticsearch.repositories.RepositoriesService) ExecutionException(java.util.concurrent.ExecutionException) SnapshotIndexShardStage(org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotIndexShardStage) SnapshotsStatusResponse(org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse) ElasticsearchAssertions.assertAcked(org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked) MappingMetaData(org.elasticsearch.cluster.metadata.MappingMetaData) PutRepositoryResponse(org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse) IngestTestPlugin(org.elasticsearch.ingest.IngestTestPlugin) Settings(org.elasticsearch.common.settings.Settings) Locale(java.util.Locale) SearchResponse(org.elasticsearch.action.search.SearchResponse) XContentFactory.jsonBuilder(org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder) RepositoryException(org.elasticsearch.repositories.RepositoryException) 
ElasticsearchAssertions.assertIndexTemplateExists(org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertIndexTemplateExists) Collection(java.util.Collection) StandardOpenOption(java.nio.file.StandardOpenOption) State(org.elasticsearch.cluster.SnapshotsInProgress.State) ElasticsearchAssertions.assertHitCount(org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount) GetStoredScriptResponse(org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse) BytesReference(org.elasticsearch.common.bytes.BytesReference) Collectors(java.util.stream.Collectors) ActiveShardCount(org.elasticsearch.action.support.ActiveShardCount) SeekableByteChannel(java.nio.channels.SeekableByteChannel) List(java.util.List) Version(org.elasticsearch.Version) IndexRequestBuilder(org.elasticsearch.action.index.IndexRequestBuilder) IndexRoutingTable(org.elasticsearch.cluster.routing.IndexRoutingTable) MockScriptEngine(org.elasticsearch.script.MockScriptEngine) Matchers.equalTo(org.hamcrest.Matchers.equalTo) ElasticsearchAssertions.assertAliasesExist(org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAliasesExist) CreateSnapshotResponse(org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse) RepositoryData(org.elasticsearch.repositories.RepositoryData) XContentType(org.elasticsearch.common.xcontent.XContentType) ListenableActionFuture(org.elasticsearch.action.ListenableActionFuture) IndexId(org.elasticsearch.repositories.IndexId) Entry(org.elasticsearch.cluster.SnapshotsInProgress.Entry) Strings(org.elasticsearch.common.Strings) SETTING_NUMBER_OF_REPLICAS(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS) SnapshotIndexShardStatus(org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotIndexShardStatus) TimeValue(org.elasticsearch.common.unit.TimeValue) GetSnapshotsResponse(org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse) Plugin(org.elasticsearch.plugins.Plugin) 
MockRepository(org.elasticsearch.snapshots.mockstore.MockRepository) INDEX_REFRESH_INTERVAL_SETTING(org.elasticsearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING) ClusterStateResponse(org.elasticsearch.action.admin.cluster.state.ClusterStateResponse) GetIndexTemplatesResponse(org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse) TimeUnit(java.util.concurrent.TimeUnit) ExceptionsHelper(org.elasticsearch.ExceptionsHelper) SnapshotsInProgress(org.elasticsearch.cluster.SnapshotsInProgress) ElasticsearchAssertions.assertAllSuccessful(org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful) StoredScriptsIT(org.elasticsearch.script.StoredScriptsIT) Collections(java.util.Collections) MetaDataIndexStateService(org.elasticsearch.cluster.metadata.MetaDataIndexStateService) ElasticsearchAssertions.assertNoFailures(org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures) GetSnapshotsResponse(org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse) CreateSnapshotResponse(org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse) ArrayList(java.util.ArrayList) Matchers.containsString(org.hamcrest.Matchers.containsString) Client(org.elasticsearch.client.Client)

Example 2 with ListenableActionFuture

Use of org.elasticsearch.action.ListenableActionFuture in the project elasticsearch by elastic.

The class RethrottleTests, method testCase.

/**
 * Shared test body for the rethrottle API. Starts a bulk-by-scroll request
 * throttled so slowly it will effectively never finish, rethrottles it to a
 * fast or unlimited rate, then verifies the resulting per-slice and parent
 * requests-per-second values before waiting for the request to complete.
 *
 * @param request    builder for the bulk-by-scroll request under test
 * @param actionName action name used to locate the running parent task
 */
private void testCase(AbstractBulkByScrollRequestBuilder<?, ?> request, String actionName) throws Exception {
    logger.info("Starting test for [{}] with [{}] slices", actionName, request.request().getSlices());
    /* Add ten documents per slice so most slices will have many documents to process, having to go to multiple batches.
         * We can't rely on all of them doing so, but enough should that throttling between batches is observable.
         */
    List<IndexRequestBuilder> docs = new ArrayList<>();
    for (int i = 0; i < request.request().getSlices() * 10; i++) {
        docs.add(client().prepareIndex("test", "test", Integer.toString(i)).setSource("foo", "bar"));
    }
    indexRandom(true, docs);
    // Start a request that will never finish unless we rethrottle it
    // Throttle "forever"
    request.setRequestsPerSecond(.000001f);
    // Make sure we use multiple batches
    request.source().setSize(1);
    ListenableActionFuture<? extends BulkByScrollResponse> responseListener = request.execute();
    TaskGroup taskGroupToRethrottle = findTaskToRethrottle(actionName, request.request().getSlices());
    TaskId taskToRethrottle = taskGroupToRethrottle.getTaskInfo().getTaskId();
    if (request.request().getSlices() == 1) {
        // unsliced requests run as a single task with no children
        assertThat(taskGroupToRethrottle.getChildTasks(), empty());
    } else {
        // There should be a sane number of child tasks running
        assertThat(taskGroupToRethrottle.getChildTasks(), hasSize(allOf(greaterThanOrEqualTo(1), lessThanOrEqualTo(request.request().getSlices()))));
        // Wait for all of the sub tasks to start (or finish, some might finish early, all that matters is that not all do)
        assertBusy(() -> {
            BulkByScrollTask.Status parent = (BulkByScrollTask.Status) client().admin().cluster().prepareGetTask(taskToRethrottle).get().getTask().getTask().getStatus();
            // a non-null slice status on the parent means that slice already reported completion
            long finishedSubTasks = parent.getSliceStatuses().stream().filter(s -> s != null).count();
            ListTasksResponse list = client().admin().cluster().prepareListTasks().setParentTaskId(taskToRethrottle).get();
            list.rethrowFailures("subtasks");
            // finished + still-running slices must account for every slice, and at least one must still be running
            assertThat(finishedSubTasks + list.getTasks().size(), greaterThanOrEqualTo((long) request.request().getSlices()));
            assertThat(list.getTasks().size(), greaterThan(0));
        });
    }
    // Now rethrottle it so it'll finish
    // No throttle or "very fast"
    float newRequestsPerSecond = randomBoolean() ? Float.POSITIVE_INFINITY : between(1, 1000) * 100000;
    ListTasksResponse rethrottleResponse = rethrottle().setTaskId(taskToRethrottle).setRequestsPerSecond(newRequestsPerSecond).get();
    rethrottleResponse.rethrowFailures("Rethrottle");
    assertThat(rethrottleResponse.getTasks(), hasSize(1));
    BulkByScrollTask.Status status = (BulkByScrollTask.Status) rethrottleResponse.getTasks().get(0).getStatus();
    // Now check the resulting requests per second.
    if (request.request().getSlices() == 1) {
        // If there is a single slice it should match perfectly
        assertEquals(newRequestsPerSecond, status.getRequestsPerSecond(), Float.MIN_NORMAL);
    } else {
        /* Check that at least one slice was rethrottled. We won't always rethrottle all of them because they might have completed.
             * With multiple slices these numbers might not add up perfectly, thus the 1.01F. */
        long unfinished = status.getSliceStatuses().stream().filter(slice -> slice != null).filter(slice -> slice.getStatus().getTotal() > slice.getStatus().getSuccessfullyProcessed()).count();
        // the new rate is split over unfinished slices only, so each slice gets between
        // newRequestsPerSecond/slices (all still running) and newRequestsPerSecond/unfinished
        float maxExpectedSliceRequestsPerSecond = newRequestsPerSecond == Float.POSITIVE_INFINITY ? Float.POSITIVE_INFINITY : (newRequestsPerSecond / unfinished) * 1.01F;
        float minExpectedSliceRequestsPerSecond = newRequestsPerSecond == Float.POSITIVE_INFINITY ? Float.POSITIVE_INFINITY : (newRequestsPerSecond / request.request().getSlices()) * 0.99F;
        boolean oneSliceRethrottled = false;
        float totalRequestsPerSecond = 0;
        for (BulkByScrollTask.StatusOrException statusOrException : status.getSliceStatuses()) {
            if (statusOrException == null) {
                /* The slice can be null here because it was completed but hadn't reported its success back to the task when the
                     * rethrottle request came through. */
                continue;
            }
            assertNull(statusOrException.getException());
            BulkByScrollTask.Status slice = statusOrException.getStatus();
            if (slice.getTotal() > slice.getSuccessfullyProcessed()) {
                // This slice reports as not having completed so it should have been processed.
                assertThat(slice.getRequestsPerSecond(), both(greaterThanOrEqualTo(minExpectedSliceRequestsPerSecond)).and(lessThanOrEqualTo(maxExpectedSliceRequestsPerSecond)));
            }
            if (minExpectedSliceRequestsPerSecond <= slice.getRequestsPerSecond() && slice.getRequestsPerSecond() <= maxExpectedSliceRequestsPerSecond) {
                oneSliceRethrottled = true;
            }
            totalRequestsPerSecond += slice.getRequestsPerSecond();
        }
        assertTrue("At least one slice must be rethrottled", oneSliceRethrottled);
        /* Now assert that the parent request has the total requests per second. This is a much weaker assertion than that the parent
             * actually has the newRequestsPerSecond. For the most part it will. Sometimes it'll be greater because only unfinished requests
             * are rethrottled, the finished ones just keep whatever requests per second they had while they were running. But it might
             * also be less than newRequestsPerSecond because the newRequestsPerSecond is divided among running sub-requests and then the
             * requests are rethrottled. If one request finishes in between the division and the application of the new throttle then it
             * won't be rethrottled, thus only contributing its lower total. */
        assertEquals(totalRequestsPerSecond, status.getRequestsPerSecond(), totalRequestsPerSecond * 0.0001f);
    }
    // Now the response should come back quickly because we've rethrottled the request
    BulkByScrollResponse response = responseListener.get();
    assertThat("Entire request completed in a single batch. This may invalidate the test as throttling is done between batches.", response.getBatches(), greaterThanOrEqualTo(request.request().getSlices()));
}
Also used : BulkByScrollResponse(org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse) Matchers.empty(org.hamcrest.Matchers.empty) Matchers.greaterThanOrEqualTo(org.hamcrest.Matchers.greaterThanOrEqualTo) Matchers.allOf(org.hamcrest.Matchers.allOf) Matchers.lessThanOrEqualTo(org.hamcrest.Matchers.lessThanOrEqualTo) AbstractBulkByScrollRequestBuilder(org.elasticsearch.action.bulk.byscroll.AbstractBulkByScrollRequestBuilder) ListenableActionFuture(org.elasticsearch.action.ListenableActionFuture) TaskId(org.elasticsearch.tasks.TaskId) ArrayList(java.util.ArrayList) Matchers.both(org.hamcrest.Matchers.both) ListTasksResponse(org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) IndexRequestBuilder(org.elasticsearch.action.index.IndexRequestBuilder) Matchers.lessThan(org.hamcrest.Matchers.lessThan) Matchers.hasSize(org.hamcrest.Matchers.hasSize) Matchers.greaterThan(org.hamcrest.Matchers.greaterThan) TaskGroup(org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup) BulkByScrollTask(org.elasticsearch.action.bulk.byscroll.BulkByScrollTask) TaskId(org.elasticsearch.tasks.TaskId) ArrayList(java.util.ArrayList) ListTasksResponse(org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse) BulkByScrollResponse(org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse) IndexRequestBuilder(org.elasticsearch.action.index.IndexRequestBuilder) BulkByScrollTask(org.elasticsearch.action.bulk.byscroll.BulkByScrollTask) TaskGroup(org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup)

Aggregations

ArrayList (java.util.ArrayList)2 List (java.util.List)2 TimeUnit (java.util.concurrent.TimeUnit)2 ListenableActionFuture (org.elasticsearch.action.ListenableActionFuture)2 IndexRequestBuilder (org.elasticsearch.action.index.IndexRequestBuilder)2 SeekableByteChannel (java.nio.channels.SeekableByteChannel)1 Files (java.nio.file.Files)1 Path (java.nio.file.Path)1 StandardOpenOption (java.nio.file.StandardOpenOption)1 Arrays (java.util.Arrays)1 Collection (java.util.Collection)1 Collections (java.util.Collections)1 Locale (java.util.Locale)1 CountDownLatch (java.util.concurrent.CountDownLatch)1 ExecutionException (java.util.concurrent.ExecutionException)1 Collectors (java.util.stream.Collectors)1 Stream (java.util.stream.Stream)1 IOUtils (org.apache.lucene.util.IOUtils)1 ExceptionsHelper (org.elasticsearch.ExceptionsHelper)1 Version (org.elasticsearch.Version)1