
Example 1 with Nullable

Use of org.elasticsearch.common.Nullable in project elasticsearch by elastic.

From the class CorruptedFileIT, the method testCorruptFileAndRecover:

/**
 * Tests that we can actually recover from a corruption on the primary given that we have replica shards around.
 */
public void testCorruptFileAndRecover() throws ExecutionException, InterruptedException, IOException {
    int numDocs = scaledRandomIntBetween(100, 1000);
    // have enough space for 3 copies
    internalCluster().ensureAtLeastNumDataNodes(3);
    if (cluster().numDataNodes() == 3) {
        logger.info("--> cluster has [3] data nodes, corrupted primary will be overwritten");
    }
    assertThat(cluster().numDataNodes(), greaterThanOrEqualTo(3));
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
        .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
        // no checkindex - we corrupt shards on purpose
        .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
        // no translog based flush - it might change the .liv / segments.N files
        .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))));
    ensureGreen();
    disableAllocation("test");
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
    }
    indexRandom(true, builders);
    ensureGreen();
    // we have to flush at least once here since we don't corrupt the translog
    assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet());
    SearchResponse countResponse = client().prepareSearch().setSize(0).get();
    assertHitCount(countResponse, numDocs);
    final int numShards = numShards("test");
    ShardRouting corruptedShardRouting = corruptRandomPrimaryFile();
    logger.info("--> {} corrupted", corruptedShardRouting);
    enableAllocation("test");
    /*
     * we corrupted the primary shard - now let's make sure we never recover from it successfully
     */
    Settings build = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "2").build();
    client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
    ClusterHealthResponse health = client().admin().cluster().health(Requests.clusterHealthRequest("test")
        .waitForGreenStatus()
        // sometimes, due to cluster rebalancing and random settings, the default timeout is just not enough
        .timeout("5m")
        .waitForNoRelocatingShards(true)).actionGet();
    if (health.isTimedOut()) {
        logger.info("cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState(), client().admin().cluster().preparePendingClusterTasks().get());
        assertThat("timed out waiting for green state", health.isTimedOut(), equalTo(false));
    }
    assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN));
    final int numIterations = scaledRandomIntBetween(5, 20);
    for (int i = 0; i < numIterations; i++) {
        SearchResponse response = client().prepareSearch().setSize(numDocs).get();
        assertHitCount(response, numDocs);
    }
    /*
     * now hook into the IndicesService and register a close listener to
     * run CheckIndex. If the corruption is still there we will catch it.
     */
    // primary + 2 replicas
    final CountDownLatch latch = new CountDownLatch(numShards * 3);
    final CopyOnWriteArrayList<Exception> exception = new CopyOnWriteArrayList<>();
    final IndexEventListener listener = new IndexEventListener() {

        @Override
        public void afterIndexShardClosed(ShardId sid, @Nullable IndexShard indexShard, Settings indexSettings) {
            if (indexShard != null) {
                Store store = indexShard.store();
                store.incRef();
                try {
                    if (!Lucene.indexExists(store.directory()) && indexShard.state() == IndexShardState.STARTED) {
                        return;
                    }
                    try (CheckIndex checkIndex = new CheckIndex(store.directory())) {
                        BytesStreamOutput os = new BytesStreamOutput();
                        PrintStream out = new PrintStream(os, false, StandardCharsets.UTF_8.name());
                        checkIndex.setInfoStream(out);
                        out.flush();
                        CheckIndex.Status status = checkIndex.checkIndex();
                        if (!status.clean) {
                            logger.warn("check index [failure]\n{}", os.bytes().utf8ToString());
                            throw new IOException("index check failure");
                        }
                    }
                } catch (Exception e) {
                    exception.add(e);
                } finally {
                    store.decRef();
                    latch.countDown();
                }
            }
        }
    };
    for (MockIndexEventListener.TestEventListener eventListener : internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) {
        eventListener.setNewDelegate(listener);
    }
    try {
        client().admin().indices().prepareDelete("test").get();
        latch.await();
        assertThat(exception, empty());
    } finally {
        for (MockIndexEventListener.TestEventListener eventListener : internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) {
            eventListener.setNewDelegate(null);
        }
    }
}
Also used: MockIndexEventListener (org.elasticsearch.test.MockIndexEventListener), PrintStream (java.io.PrintStream), ClusterHealthResponse (org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse), IndexShard (org.elasticsearch.index.shard.IndexShard), ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue), MockFSIndexStore (org.elasticsearch.test.store.MockFSIndexStore), IOException (java.io.IOException), CountDownLatch (java.util.concurrent.CountDownLatch), TransportException (org.elasticsearch.transport.TransportException), ExecutionException (java.util.concurrent.ExecutionException), BytesStreamOutput (org.elasticsearch.common.io.stream.BytesStreamOutput), SearchResponse (org.elasticsearch.action.search.SearchResponse), IndexRequestBuilder (org.elasticsearch.action.index.IndexRequestBuilder), ShardId (org.elasticsearch.index.shard.ShardId), IndexEventListener (org.elasticsearch.index.shard.IndexEventListener), ShardRouting (org.elasticsearch.cluster.routing.ShardRouting), Settings (org.elasticsearch.common.settings.Settings), IndexSettings (org.elasticsearch.index.IndexSettings), Nullable (org.elasticsearch.common.Nullable), CheckIndex (org.apache.lucene.index.CheckIndex), CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList)
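
The null check at the top of afterIndexShardClosed in the listener above is exactly what the @Nullable parameter demands. A minimal sketch of the same pattern, assuming only the IndexEventListener interface and the afterIndexShardClosed signature shown in the example (the listener class name is hypothetical):

import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;

// Illustrative listener, not part of the test above: the shard argument is
// @Nullable, so it must be checked before any store access.
final class NullSafeCloseListener implements IndexEventListener {

    @Override
    public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) {
        if (indexShard == null) {
            // the shard was never created or has already been released; nothing to inspect
            return;
        }
        // safe to dereference indexShard from here on
    }
}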

Example 2 with Nullable

Use of org.elasticsearch.common.Nullable in project crate by crate.

From the class PluginLoader, the method findImplementations:

@Nullable
private Collection<Class<? extends Plugin>> findImplementations(Collection<URL> pluginUrls) {
    URL[] urls = pluginUrls.toArray(new URL[pluginUrls.size()]);
    ClassLoader loader = URLClassLoader.newInstance(urls, getClass().getClassLoader());
    ResourceFinder finder = new ResourceFinder(RESOURCE_PATH, loader, urls);
    try {
        return finder.findAllImplementations(Plugin.class);
    } catch (ClassCastException e) {
        logger.error("plugin does not implement io.crate.Plugin interface", e);
    } catch (ClassNotFoundException e) {
        logger.error("error while loading plugin, misconfigured plugin", e);
    } catch (Throwable t) {
        logger.error("error while loading plugins", t);
    }
    return null;
}
Also used: ResourceFinder (org.apache.xbean.finder.ResourceFinder), URLClassLoader (java.net.URLClassLoader), URL (java.net.URL), Nullable (org.elasticsearch.common.Nullable)
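
Because findImplementations is annotated @Nullable, every caller needs its own fallback for the null case. A hedged sketch of a hypothetical wrapper in the same class (not Crate's actual code; the Plugin type is the one used in the method above):

import java.net.URL;
import java.util.Collection;
import java.util.Collections;

// Hypothetical helper: normalises the @Nullable result to an empty collection
// so downstream code never has to null-check.
private Collection<Class<? extends Plugin>> findImplementationsOrEmpty(Collection<URL> pluginUrls) {
    Collection<Class<? extends Plugin>> found = findImplementations(pluginUrls);
    return found != null ? found : Collections.<Class<? extends Plugin>>emptyList();
}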

Example 3 with Nullable

Use of org.elasticsearch.common.Nullable in project crate by crate.

From the class PartitionName, the method encodeIdent:

@Nullable
public static String encodeIdent(Collection<? extends BytesRef> values) {
    if (values.size() == 0) {
        return null;
    }
    BytesStreamOutput streamOutput = new BytesStreamOutput(estimateSize(values));
    try {
        streamOutput.writeVInt(values.size());
        for (BytesRef value : values) {
            StringType.INSTANCE.streamer().writeValueTo(streamOutput, value);
        }
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
    String identBase32 = BASE32.encodeAsString(streamOutput.bytes().toBytes()).toLowerCase(Locale.ROOT);
    // decode doesn't need padding, remove it
    int idx = identBase32.indexOf('=');
    if (idx > -1) {
        return identBase32.substring(0, idx);
    }
    return identBase32;
}
Also used: IOException (java.io.IOException), BytesStreamOutput (org.elasticsearch.common.io.stream.BytesStreamOutput), BytesRef (org.apache.lucene.util.BytesRef), Nullable (org.elasticsearch.common.Nullable)
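
encodeIdent returns null rather than an empty string when the value collection is empty, so its @Nullable annotation tells callers to handle both outcomes. A hedged usage sketch (the helper name and fallback are hypothetical):

import java.util.Collection;
import org.apache.lucene.util.BytesRef;

// Hypothetical helper: maps the null returned for an empty value collection
// onto a caller-chosen default ident.
static String encodeIdentOrDefault(Collection<? extends BytesRef> values, String defaultIdent) {
    String ident = PartitionName.encodeIdent(values);
    // encodeIdent is @Nullable: an empty collection encodes to null, not ""
    return ident != null ? ident : defaultIdent;
}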

Example 4 with Nullable

Use of org.elasticsearch.common.Nullable in project crate by crate.

From the class NodeStatsCollectSource, the method nodeIds:

@Nullable
static Collection<DiscoveryNode> nodeIds(WhereClause whereClause, Collection<DiscoveryNode> nodes, Functions functions) {
    if (!whereClause.hasQuery()) {
        return nodes;
    }
    LocalSysColReferenceResolver localSysColReferenceResolver = new LocalSysColReferenceResolver(ImmutableList.of(SysNodesTableInfo.Columns.NAME, SysNodesTableInfo.Columns.ID));
    EvaluatingNormalizer normalizer = new EvaluatingNormalizer(functions, RowGranularity.DOC, ReplaceMode.COPY, localSysColReferenceResolver, null);
    List<DiscoveryNode> newNodes = new ArrayList<>();
    for (DiscoveryNode node : nodes) {
        String nodeId = node.getId();
        for (RowCollectExpression<NodeStatsContext, ?> expression : localSysColReferenceResolver.expressions()) {
            expression.setNextRow(new NodeStatsContext(nodeId, node.name()));
        }
        Symbol normalized = normalizer.normalize(whereClause.query(), null);
        if (normalized.equals(whereClause.query())) {
            // No locally available sys nodes columns in the where clause
            return nodes;
        }
        if (WhereClause.canMatch(normalized)) {
            newNodes.add(node);
        }
    }
    return newNodes;
}
Also used: DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode), EvaluatingNormalizer (io.crate.analyze.EvaluatingNormalizer), Symbol (io.crate.analyze.symbol.Symbol), ArrayList (java.util.ArrayList), NodeStatsContext (io.crate.operation.reference.sys.node.NodeStatsContext), Nullable (org.elasticsearch.common.Nullable)
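
The body shown always returns a collection, but the method is still declared @Nullable, so a defensive caller should guard against null anyway. A hedged sketch of such a call site (the helper is hypothetical and assumed to live alongside nodeIds, reusing Crate's WhereClause and Functions types from the method above):

import java.util.Collection;
import java.util.Collections;
import org.elasticsearch.cluster.node.DiscoveryNode;

// Hypothetical helper: treats a null result from the @Nullable method the same
// as "no matching nodes".
static Collection<DiscoveryNode> nodeIdsOrEmpty(WhereClause whereClause, Collection<DiscoveryNode> nodes, Functions functions) {
    Collection<DiscoveryNode> matching = nodeIds(whereClause, nodes, functions);
    return matching != null ? matching : Collections.<DiscoveryNode>emptyList();
}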

Example 5 with Nullable

Use of org.elasticsearch.common.Nullable in project elasticsearch-skywalker by jprante.

From the class Skywalker, the method loadIndex:

@Nullable
private static IndexMetaData loadIndex(List<File> files, String index, NodeEnvironment nodeEnv) {
    long highestVersion = -1;
    IndexMetaData indexMetaData = null;
    for (File indexLocation : nodeEnv.indexLocations(new Index(index))) {
        File stateDir = new File(indexLocation, "_state");
        if (!stateDir.exists() || !stateDir.isDirectory()) {
            continue;
        }
        // now, iterate over the current versions, and find latest one
        File[] stateFiles = stateDir.listFiles();
        if (stateFiles == null) {
            continue;
        }
        for (File stateFile : stateFiles) {
            if (!stateFile.getName().startsWith("state-")) {
                continue;
            }
            files.add(stateFile);
            try {
                long version = Long.parseLong(stateFile.getName().substring("state-".length()));
                if (version > highestVersion) {
                    byte[] data = Streams.copyToByteArray(new FileInputStream(stateFile));
                    if (data.length == 0) {
                        continue;
                    }
                    XContentParser parser = null;
                    try {
                        parser = XContentHelper.createParser(data, 0, data.length);
                        // move to START_OBJECT
                        parser.nextToken();
                        indexMetaData = IndexMetaData.Builder.fromXContent(parser);
                        highestVersion = version;
                    } finally {
                        if (parser != null) {
                            parser.close();
                        }
                    }
                }
            } catch (Exception e) {
                continue;
            }
        }
    }
    return indexMetaData;
}
Also used: Index (org.elasticsearch.index.Index), File (java.io.File), FileInputStream (java.io.FileInputStream), XContentParser (org.elasticsearch.common.xcontent.XContentParser), ElasticsearchException (org.elasticsearch.ElasticsearchException), IOException (java.io.IOException), IndexMetaData (org.elasticsearch.cluster.metadata.IndexMetaData), Nullable (org.elasticsearch.common.Nullable)
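
loadIndex returns null when no readable state file is found for the index, which is what the @Nullable annotation documents. A hedged caller sketch (the helper name and exception message are illustrative, not part of the plugin):

import java.io.File;
import java.util.List;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.env.NodeEnvironment;

// Hypothetical helper, assumed to sit next to loadIndex: fail fast when the
// metadata for the requested index cannot be loaded.
static IndexMetaData requireIndexMetaData(List<File> files, String index, NodeEnvironment nodeEnv) {
    IndexMetaData indexMetaData = loadIndex(files, index, nodeEnv);
    if (indexMetaData == null) {
        // loadIndex is @Nullable: no state-* file could be read for this index
        throw new IllegalStateException("no state found for index [" + index + "]");
    }
    return indexMetaData;
}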

Aggregations

Nullable (org.elasticsearch.common.Nullable): 13
IOException (java.io.IOException): 8
ElasticsearchException (org.elasticsearch.ElasticsearchException): 4
IndexSettings (org.elasticsearch.index.IndexSettings): 4
ArrayList (java.util.ArrayList): 3
IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig): 3
Directory (org.apache.lucene.store.Directory): 3
IndexMetaData (org.elasticsearch.cluster.metadata.IndexMetaData): 3
CodecService (org.elasticsearch.index.codec.CodecService): 3
TranslogConfig (org.elasticsearch.index.translog.TranslogConfig): 3
HashSet (java.util.HashSet): 2
CountDownLatch (java.util.concurrent.CountDownLatch): 2
LiveIndexWriterConfig (org.apache.lucene.index.LiveIndexWriterConfig): 2
AlreadyClosedException (org.apache.lucene.store.AlreadyClosedException): 2
BytesStreamOutput (org.elasticsearch.common.io.stream.BytesStreamOutput): 2
Engine (org.elasticsearch.index.engine.Engine): 2
DirectoryService (org.elasticsearch.index.store.DirectoryService): 2
Store (org.elasticsearch.index.store.Store): 2
DummyShardLock (org.elasticsearch.test.DummyShardLock): 2
TestThreadPool (org.elasticsearch.threadpool.TestThreadPool): 2
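
Across all of these examples the annotation is a marker: it documents a parameter or return value that may legitimately be null and pushes callers toward an explicit null check, without changing runtime behaviour by itself. A minimal illustrative sketch of the recurring pattern (the class and methods are hypothetical):

import org.elasticsearch.common.Nullable;

// Hypothetical example of the recurring pattern: annotate what may be null,
// and make callers branch on it explicitly.
final class Lookup {

    @Nullable
    String find(String key) {
        // return null when the key is unknown
        return "answer".equals(key) ? "42" : null;
    }

    String findOrDefault(String key, String fallback) {
        String value = find(key);
        return value != null ? value : fallback;
    }
}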