Example 26 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

From the class GoogleCloudStorageBlobStoreRepositoryTests, method testChunkSize:

public void testChunkSize() {
    // default chunk size
    RepositoryMetaData repositoryMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, Settings.EMPTY);
    ByteSizeValue chunkSize = GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repositoryMetaData);
    assertEquals(GoogleCloudStorageRepository.MAX_CHUNK_SIZE, chunkSize);
    // chunk size in settings
    int size = randomIntBetween(1, 100);
    repositoryMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, Settings.builder().put("chunk_size", size + "mb").build());
    chunkSize = GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repositoryMetaData);
    assertEquals(new ByteSizeValue(size, ByteSizeUnit.MB), chunkSize);
    // zero bytes is not allowed
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
        RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, Settings.builder().put("chunk_size", "0").build());
        GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData);
    });
    assertEquals("Failed to parse value [0] for setting [chunk_size] must be >= 1b", e.getMessage());
    // negative bytes not allowed
    e = expectThrows(IllegalArgumentException.class, () -> {
        RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, Settings.builder().put("chunk_size", "-1").build());
        GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData);
    });
    assertEquals("Failed to parse value [-1] for setting [chunk_size] must be >= 1b", e.getMessage());
    // greater than max chunk size not allowed
    e = expectThrows(IllegalArgumentException.class, () -> {
        RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, Settings.builder().put("chunk_size", "101mb").build());
        GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData);
    });
    assertEquals("Failed to parse value [101mb] for setting [chunk_size] must be <= 100mb", e.getMessage());
}
Also used: RepositoryMetaData(org.elasticsearch.cluster.metadata.RepositoryMetaData) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue)
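
A note on what getSetting is validating here: bounded byte-size settings such as chunk_size are declared via Setting.byteSizeSetting, whose min/max bounds produce exactly the "must be >= 1b" / "must be <= 100mb" messages asserted above. A minimal sketch of such a declaration follows; this is not the actual GoogleCloudStorageRepository source, and the key, default, and bounds are inferred from the test's assertions:

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

public class ChunkSizeSettingSketch {
    // byteSizeSetting(key, default, min, max, ...) validates the parsed value against
    // the given bounds and throws IllegalArgumentException for out-of-range values,
    // which is what testChunkSize exercises above.
    static final Setting<ByteSizeValue> CHUNK_SIZE = Setting.byteSizeSetting(
            "chunk_size",
            new ByteSizeValue(100, ByteSizeUnit.MB),   // default
            new ByteSizeValue(1, ByteSizeUnit.BYTES),  // minimum (1b)
            new ByteSizeValue(100, ByteSizeUnit.MB),   // maximum (100mb)
            Setting.Property.NodeScope);
}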

Example 27 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

From the class ZenFaultDetectionTests, method setUp:

@Override
@Before
public void setUp() throws Exception {
    super.setUp();
    Settings settings = Settings.builder().put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), new ByteSizeValue(0)).build();
    ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    threadPool = new TestThreadPool(getClass().getName());
    circuitBreakerService = new HierarchyCircuitBreakerService(settings, clusterSettings);
    settingsA = Settings.builder().put("node.name", "TS_A").put(settings).build();
    serviceA = build(settingsA, version0);
    nodeA = serviceA.getLocalDiscoNode();
    settingsB = Settings.builder().put("node.name", "TS_B").put(settings).build();
    serviceB = build(settingsB, version1);
    nodeB = serviceB.getLocalDiscoNode();
    clusterServiceA = createClusterService(settingsA, threadPool, nodeA);
    clusterServiceB = createClusterService(settingsB, threadPool, nodeB);
    // wait until all nodes are properly connected and the event has been sent, so that
    // tests in this class will not get this callback invoked for the connections made during setup
    final CountDownLatch latch = new CountDownLatch(2);
    TransportConnectionListener waitForConnection = new TransportConnectionListener() {

        @Override
        public void onNodeConnected(DiscoveryNode node) {
            latch.countDown();
        }

        @Override
        public void onNodeDisconnected(DiscoveryNode node) {
            fail("disconnect should not be called " + node);
        }
    };
    serviceA.addConnectionListener(waitForConnection);
    serviceB.addConnectionListener(waitForConnection);
    serviceA.connectToNode(nodeB);
    serviceA.connectToNode(nodeA);
    serviceB.connectToNode(nodeA);
    serviceB.connectToNode(nodeB);
    assertThat("failed to wait for all nodes to connect", latch.await(5, TimeUnit.SECONDS), equalTo(true));
    serviceA.removeConnectionListener(waitForConnection);
    serviceB.removeConnectionListener(waitForConnection);
}
Also used: DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) ClusterSettings(org.elasticsearch.common.settings.ClusterSettings) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) HierarchyCircuitBreakerService(org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService) TestThreadPool(org.elasticsearch.threadpool.TestThreadPool) CountDownLatch(java.util.concurrent.CountDownLatch) Settings(org.elasticsearch.common.settings.Settings) TransportConnectionListener(org.elasticsearch.transport.TransportConnectionListener) Before(org.junit.Before)
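
The single-argument constructor new ByteSizeValue(0) denotes zero bytes; the test uses it to set the in-flight-requests breaker limit to zero so that breaker accounting never interferes with the transport traffic under test. A minimal sketch of the same pattern in isolation, with illustrative class and method names:

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;

class DisabledInFlightBreakerSketch {
    static Settings zeroInFlightBreakerSettings() {
        // new ByteSizeValue(0) is zero bytes (its toString() renders as "0b");
        // a zero limit keeps this breaker from tripping during the test.
        return Settings.builder()
                .put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(),
                        new ByteSizeValue(0))
                .build();
    }
}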

Example 28 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

From the class IndexWithShadowReplicasIT, method testIndexWithFewDocuments:

@TestLogging("org.elasticsearch.gateway:TRACE")
public void testIndexWithFewDocuments() throws Exception {
    final Path dataPath = createTempDir();
    Settings nodeSettings = nodeSettings(dataPath);
    internalCluster().startNodes(3, nodeSettings);
    final String IDX = "test";
    Settings idxSettings = Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
            .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
            .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
            .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
            .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
            .build();
    prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get();
    ensureGreen(IDX);
    // The primary should fail and the replica will need to replay the translog;
    // that is what this test exercises.
    client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get();
    client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
    IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(IDX).clear().setTranslog(true).get();
    assertEquals(2, indicesStatsResponse.getIndex(IDX).getPrimaries().getTranslog().estimatedNumberOfOperations());
    assertEquals(2, indicesStatsResponse.getIndex(IDX).getTotal().getTranslog().estimatedNumberOfOperations());
    Index index = resolveIndex(IDX);
    for (IndicesService service : internalCluster().getInstances(IndicesService.class)) {
        IndexService indexService = service.indexService(index);
        if (indexService != null) {
            IndexShard shard = indexService.getShard(0);
            TranslogStats translogStats = shard.translogStats();
            assertTrue(translogStats != null || shard instanceof ShadowIndexShard);
            if (translogStats != null) {
                assertEquals(2, translogStats.estimatedNumberOfOperations());
            }
        }
    }
    // Check that we can get doc 1 and 2, because we are doing realtime
    // gets and getting from the primary
    GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get();
    GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get();
    assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
    assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
    flushAndRefresh(IDX);
    client().prepareIndex(IDX, "doc", "3").setSource("foo", "bar").get();
    client().prepareIndex(IDX, "doc", "4").setSource("foo", "bar").get();
    refresh();
    // Check that we can get doc 1 and 2 without realtime
    gResp1 = client().prepareGet(IDX, "doc", "1").setRealtime(false).get();
    gResp2 = client().prepareGet(IDX, "doc", "2").setRealtime(false).get();
    assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
    assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
    logger.info("--> restarting all nodes");
    if (randomBoolean()) {
        logger.info("--> rolling restart");
        internalCluster().rollingRestart();
    } else {
        logger.info("--> full restart");
        internalCluster().fullRestart();
    }
    client().admin().cluster().prepareHealth().setWaitForNodes("3").get();
    ensureGreen(IDX);
    flushAndRefresh(IDX);
    logger.info("--> performing query");
    SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
    assertHitCount(resp, 4);
    logger.info("--> deleting index");
    assertAcked(client().admin().indices().prepareDelete(IDX));
}
Also used: Path(java.nio.file.Path) IndicesStatsResponse(org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse) ShadowIndexShard(org.elasticsearch.index.shard.ShadowIndexShard) IndexShard(org.elasticsearch.index.shard.IndexShard) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) TranslogStats(org.elasticsearch.index.translog.TranslogStats) IndicesService(org.elasticsearch.indices.IndicesService) GetResponse(org.elasticsearch.action.get.GetResponse) SearchResponse(org.elasticsearch.action.search.SearchResponse) Settings(org.elasticsearch.common.settings.Settings) TestLogging(org.elasticsearch.test.junit.annotations.TestLogging)
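
The 1 PB flush threshold above is a common test idiom: it is far larger than anything a test writes, so size-based translog flushes never trigger and the test controls flushing explicitly. A small sketch of the byte arithmetic behind it, assuming the ES 5.x ByteSizeValue API used throughout these examples (class name hypothetical; run with -ea for the asserts to fire):

import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

class FlushThresholdSketch {
    public static void main(String[] args) {
        ByteSizeValue threshold = new ByteSizeValue(1, ByteSizeUnit.PB);
        // ES byte-size units are binary: 1 PB = 2^50 bytes.
        assert threshold.getBytes() == 1125899906842624L;
        // The string form ("1pb") round-trips through the settings parser.
        assert ByteSizeValue.parseBytesSizeValue(threshold.toString(), "sketch").equals(threshold);
    }
}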

Example 29 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

From the class CorruptedFileIT, method testReplicaCorruption:

/**
 * This test verifies that if we corrupt a replica, we can still get to green, even though
 * listing its store fails. Note, we need to make sure that replicas are allocated on all data
 * nodes, so that a replica cannot sneak onto a node that does not hold a corrupted copy.
 */
public void testReplicaCorruption() throws Exception {
    int numDocs = scaledRandomIntBetween(100, 1000);
    internalCluster().ensureAtLeastNumDataNodes(2);
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, cluster().numDataNodes() - 1)
            .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
            // no checkindex - we corrupt shards on purpose
            .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
            // no translog based flush - it might change the .liv / segments.N files
            .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))));
    ensureGreen();
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
    }
    indexRandom(true, builders);
    ensureGreen();
    assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet());
    // we have to flush at least once here since we don't corrupt the translog
    SearchResponse countResponse = client().prepareSearch().setSize(0).get();
    assertHitCount(countResponse, numDocs);
    // disable allocations of replicas post restart (the restart will change replicas to primaries, so we have
    // to capture replicas post restart)
    assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "primaries")));
    internalCluster().fullRestart();
    ensureYellow();
    final Index index = resolveIndex("test");
    final IndicesShardStoresResponse stores = client().admin().indices().prepareShardStores(index.getName()).get();
    for (IntObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> shards : stores.getStoreStatuses().get(index.getName())) {
        for (IndicesShardStoresResponse.StoreStatus store : shards.value) {
            final ShardId shardId = new ShardId(index, shards.key);
            if (store.getAllocationStatus().equals(IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED)) {
                for (Path path : findFilesToCorruptOnNode(store.getNode().getName(), shardId)) {
                    try (OutputStream os = Files.newOutputStream(path)) {
                        os.write(0);
                    }
                    logger.info("corrupting file {} on node {}", path, store.getNode().getName());
                }
            }
        }
    }
    // enable allocation
    assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder().putNull(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey())));
    ensureGreen();
}
Also used: Path(java.nio.file.Path) IndicesShardStoresResponse(org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse) OutputStream(java.io.OutputStream) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) CheckIndex(org.apache.lucene.index.CheckIndex) Index(org.elasticsearch.index.Index) SearchResponse(org.elasticsearch.action.search.SearchResponse) IndexRequestBuilder(org.elasticsearch.action.index.IndexRequestBuilder) ShardId(org.elasticsearch.index.shard.ShardId) List(java.util.List) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) ArrayList(java.util.ArrayList) CollectionUtils.iterableAsArrayList(org.elasticsearch.common.util.CollectionUtils.iterableAsArrayList)
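
Example 29 reuses the 1 PB flush-threshold idiom from Example 28 to keep the .liv / segments.N files stable while shards are corrupted on purpose. When a test needs raw byte counts rather than a ByteSizeValue, ByteSizeUnit also exposes conversion helpers directly; a short illustrative sketch (class name hypothetical):

import org.elasticsearch.common.unit.ByteSizeUnit;

class UnitConversionSketch {
    public static void main(String[] args) {
        // Conversions are binary (factor 1024) and go through the byte count.
        assert ByteSizeUnit.PB.toBytes(1) == 1125899906842624L; // 2^50
        assert ByteSizeUnit.MB.toKB(1) == 1024L;
        assert ByteSizeUnit.GB.toMB(2) == 2048L;
    }
}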

Example 30 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

From the class CorruptedFileIT, method testCorruptFileAndRecover:

/**
 * Tests that we can actually recover from a corruption on the primary given that we have replica shards around.
 */
public void testCorruptFileAndRecover() throws ExecutionException, InterruptedException, IOException {
    int numDocs = scaledRandomIntBetween(100, 1000);
    // have enough space for 3 copies
    internalCluster().ensureAtLeastNumDataNodes(3);
    if (cluster().numDataNodes() == 3) {
        logger.info("--> cluster has [3] data nodes, corrupted primary will be overwritten");
    }
    assertThat(cluster().numDataNodes(), greaterThanOrEqualTo(3));
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
            .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
            // no checkindex - we corrupt shards on purpose
            .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
            // no translog based flush - it might change the .liv / segments.N files
            .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))));
    ensureGreen();
    disableAllocation("test");
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
    }
    indexRandom(true, builders);
    ensureGreen();
    assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet());
    // we have to flush at least once here since we don't corrupt the translog
    SearchResponse countResponse = client().prepareSearch().setSize(0).get();
    assertHitCount(countResponse, numDocs);
    final int numShards = numShards("test");
    ShardRouting corruptedShardRouting = corruptRandomPrimaryFile();
    logger.info("--> {} corrupted", corruptedShardRouting);
    enableAllocation("test");
    /*
     * we corrupted the primary shard - now let's make sure we never recover from it successfully
     */
    Settings build = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "2").build();
    client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
    ClusterHealthResponse health = client().admin().cluster().health(Requests.clusterHealthRequest("test")
            .waitForGreenStatus()
            // sometimes, due to cluster rebalancing and random settings, the default timeout is just not enough
            .timeout("5m")
            .waitForNoRelocatingShards(true)).actionGet();
    if (health.isTimedOut()) {
        logger.info("cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState(), client().admin().cluster().preparePendingClusterTasks().get());
        assertThat("timed out waiting for green state", health.isTimedOut(), equalTo(false));
    }
    assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN));
    final int numIterations = scaledRandomIntBetween(5, 20);
    for (int i = 0; i < numIterations; i++) {
        SearchResponse response = client().prepareSearch().setSize(numDocs).get();
        assertHitCount(response, numDocs);
    }
    /*
     * now hook into the IndicesService and register a close listener to
     * run the checkindex. if the corruption is still there we will catch it.
     */
    // primary + 2 replicas
    final CountDownLatch latch = new CountDownLatch(numShards * 3);
    final CopyOnWriteArrayList<Exception> exception = new CopyOnWriteArrayList<>();
    final IndexEventListener listener = new IndexEventListener() {

        @Override
        public void afterIndexShardClosed(ShardId sid, @Nullable IndexShard indexShard, Settings indexSettings) {
            if (indexShard != null) {
                Store store = indexShard.store();
                store.incRef();
                try {
                    if (!Lucene.indexExists(store.directory()) && indexShard.state() == IndexShardState.STARTED) {
                        return;
                    }
                    try (CheckIndex checkIndex = new CheckIndex(store.directory())) {
                        BytesStreamOutput os = new BytesStreamOutput();
                        PrintStream out = new PrintStream(os, false, StandardCharsets.UTF_8.name());
                        checkIndex.setInfoStream(out);
                        out.flush();
                        CheckIndex.Status status = checkIndex.checkIndex();
                        if (!status.clean) {
                            logger.warn("check index [failure]\n{}", os.bytes().utf8ToString());
                            throw new IOException("index check failure");
                        }
                    }
                } catch (Exception e) {
                    exception.add(e);
                } finally {
                    store.decRef();
                    latch.countDown();
                }
            }
        }
    };
    for (MockIndexEventListener.TestEventListener eventListener : internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) {
        eventListener.setNewDelegate(listener);
    }
    try {
        client().admin().indices().prepareDelete("test").get();
        latch.await();
        assertThat(exception, empty());
    } finally {
        for (MockIndexEventListener.TestEventListener eventListener : internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) {
            eventListener.setNewDelegate(null);
        }
    }
}
Also used: MockIndexEventListener(org.elasticsearch.test.MockIndexEventListener) PrintStream(java.io.PrintStream) ClusterHealthResponse(org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse) IndexShard(org.elasticsearch.index.shard.IndexShard) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) MockFSIndexStore(org.elasticsearch.test.store.MockFSIndexStore) IOException(java.io.IOException) CountDownLatch(java.util.concurrent.CountDownLatch) TransportException(org.elasticsearch.transport.TransportException) ExecutionException(java.util.concurrent.ExecutionException) BytesStreamOutput(org.elasticsearch.common.io.stream.BytesStreamOutput) SearchResponse(org.elasticsearch.action.search.SearchResponse) IndexRequestBuilder(org.elasticsearch.action.index.IndexRequestBuilder) ShardId(org.elasticsearch.index.shard.ShardId) IndexEventListener(org.elasticsearch.index.shard.IndexEventListener) ShardRouting(org.elasticsearch.cluster.routing.ShardRouting) Settings(org.elasticsearch.common.settings.Settings) IndexSettings(org.elasticsearch.index.IndexSettings) Nullable(org.elasticsearch.common.Nullable) CheckIndex(org.apache.lucene.index.CheckIndex) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList)
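
A detail these examples rely on implicitly: ByteSizeValue equality is normalized to the underlying byte count, not the unit a value was constructed with, which is why the assertEquals against a parsed value in Example 26 passes. A minimal sketch (class name hypothetical):

import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

class EqualitySketch {
    public static void main(String[] args) {
        ByteSizeValue mb = new ByteSizeValue(1, ByteSizeUnit.MB);
        ByteSizeValue kb = new ByteSizeValue(1024, ByteSizeUnit.KB);
        // Both normalize to 1048576 bytes, so they compare equal.
        assert mb.getBytes() == kb.getBytes();
        assert mb.equals(kb);
    }
}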

Aggregations

Usage counts for the types that co-occur with ByteSizeValue across these examples:

ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue): 146
Settings (org.elasticsearch.common.settings.Settings): 23
Test (org.junit.Test): 21
IOException (java.io.IOException): 16
CountDownLatch (java.util.concurrent.CountDownLatch): 13
ArrayList (java.util.ArrayList): 11
TimeValue (org.elasticsearch.common.unit.TimeValue): 11
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 9
Matchers.containsString (org.hamcrest.Matchers.containsString): 9
List (java.util.List): 8
AtomicReference (java.util.concurrent.atomic.AtomicReference): 8
Path (java.nio.file.Path): 7
Translog (org.elasticsearch.index.translog.Translog): 7
Arrays (java.util.Arrays): 6
Collections (java.util.Collections): 6
Collectors (java.util.stream.Collectors): 6
BulkProcessor (org.elasticsearch.action.bulk.BulkProcessor): 6
BulkRequest (org.elasticsearch.action.bulk.BulkRequest): 6
BytesArray (org.elasticsearch.common.bytes.BytesArray): 6
Matchers.equalTo (org.hamcrest.Matchers.equalTo): 6