
Example 56 with BytesStreamOutput

Use of org.elasticsearch.common.io.stream.BytesStreamOutput in project elasticsearch by elastic.

The class CorruptedFileIT, method testCorruptFileAndRecover.

/**
     * Tests that we can actually recover from a corruption on the primary given that we have replica shards around.
     */
public void testCorruptFileAndRecover() throws ExecutionException, InterruptedException, IOException {
    int numDocs = scaledRandomIntBetween(100, 1000);
    // have enough space for 3 copies
    internalCluster().ensureAtLeastNumDataNodes(3);
    if (cluster().numDataNodes() == 3) {
        logger.info("--> cluster has [3] data nodes, corrupted primary will be overwritten");
    }
    assertThat(cluster().numDataNodes(), greaterThanOrEqualTo(3));
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
        .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
        // no checkindex - we corrupt shards on purpose
        .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
        // no translog based flush - it might change the .liv / segments.N files
        .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))));
    ensureGreen();
    disableAllocation("test");
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
    }
    indexRandom(true, builders);
    ensureGreen();
    // we have to flush at least once here since we don't corrupt the translog
    assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet());
    SearchResponse countResponse = client().prepareSearch().setSize(0).get();
    assertHitCount(countResponse, numDocs);
    final int numShards = numShards("test");
    ShardRouting corruptedShardRouting = corruptRandomPrimaryFile();
    logger.info("--> {} corrupted", corruptedShardRouting);
    enableAllocation("test");
    /*
     * we corrupted the primary shard - now let's make sure we never recover from it successfully
     */
    Settings build = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "2").build();
    client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
    // sometimes due to cluster rebalancing and random settings the default timeout is just not enough
    ClusterHealthResponse health = client().admin().cluster().health(Requests.clusterHealthRequest("test")
        .waitForGreenStatus()
        .timeout("5m")
        .waitForNoRelocatingShards(true)).actionGet();
    if (health.isTimedOut()) {
        logger.info("cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState(), client().admin().cluster().preparePendingClusterTasks().get());
        assertThat("timed out waiting for green state", health.isTimedOut(), equalTo(false));
    }
    assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN));
    final int numIterations = scaledRandomIntBetween(5, 20);
    for (int i = 0; i < numIterations; i++) {
        SearchResponse response = client().prepareSearch().setSize(numDocs).get();
        assertHitCount(response, numDocs);
    }
    /*
     * now hook into the IndicesService and register a close listener to
     * run the CheckIndex. If the corruption is still there we will catch it.
     */
    // primary + 2 replicas
    final CountDownLatch latch = new CountDownLatch(numShards * 3);
    final CopyOnWriteArrayList<Exception> exception = new CopyOnWriteArrayList<>();
    final IndexEventListener listener = new IndexEventListener() {

        @Override
        public void afterIndexShardClosed(ShardId sid, @Nullable IndexShard indexShard, Settings indexSettings) {
            if (indexShard != null) {
                Store store = indexShard.store();
                store.incRef();
                try {
                    if (!Lucene.indexExists(store.directory()) && indexShard.state() == IndexShardState.STARTED) {
                        return;
                    }
                    try (CheckIndex checkIndex = new CheckIndex(store.directory())) {
                        BytesStreamOutput os = new BytesStreamOutput();
                        PrintStream out = new PrintStream(os, false, StandardCharsets.UTF_8.name());
                        checkIndex.setInfoStream(out);
                        out.flush();
                        CheckIndex.Status status = checkIndex.checkIndex();
                        if (!status.clean) {
                            logger.warn("check index [failure]\n{}", os.bytes().utf8ToString());
                            throw new IOException("index check failure");
                        }
                    }
                } catch (Exception e) {
                    exception.add(e);
                } finally {
                    store.decRef();
                    latch.countDown();
                }
            }
        }
    };
    for (MockIndexEventListener.TestEventListener eventListener : internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) {
        eventListener.setNewDelegate(listener);
    }
    try {
        client().admin().indices().prepareDelete("test").get();
        latch.await();
        assertThat(exception, empty());
    } finally {
        for (MockIndexEventListener.TestEventListener eventListener : internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) {
            eventListener.setNewDelegate(null);
        }
    }
}
Also used: MockIndexEventListener (org.elasticsearch.test.MockIndexEventListener), PrintStream (java.io.PrintStream), ClusterHealthResponse (org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse), IndexShard (org.elasticsearch.index.shard.IndexShard), ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue), MockFSIndexStore (org.elasticsearch.test.store.MockFSIndexStore), IOException (java.io.IOException), CountDownLatch (java.util.concurrent.CountDownLatch), TransportException (org.elasticsearch.transport.TransportException), ExecutionException (java.util.concurrent.ExecutionException), BytesStreamOutput (org.elasticsearch.common.io.stream.BytesStreamOutput), SearchResponse (org.elasticsearch.action.search.SearchResponse), IndexRequestBuilder (org.elasticsearch.action.index.IndexRequestBuilder), ShardId (org.elasticsearch.index.shard.ShardId), IndexEventListener (org.elasticsearch.index.shard.IndexEventListener), ShardRouting (org.elasticsearch.cluster.routing.ShardRouting), Settings (org.elasticsearch.common.settings.Settings), IndexSettings (org.elasticsearch.index.IndexSettings), Nullable (org.elasticsearch.common.Nullable), CheckIndex (org.apache.lucene.index.CheckIndex), CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList)
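
The close listener above uses a BytesStreamOutput purely as the in-memory buffer behind CheckIndex's info stream, so the full check-index report can be logged when a shard is still corrupted. Below is a minimal, self-contained sketch of just that capture pattern; it is not taken from the Elasticsearch sources, and the directory path is a placeholder you would point at a real Lucene index.

import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.store.FSDirectory;
import org.elasticsearch.common.io.stream.BytesStreamOutput;

public class CheckIndexCaptureSketch {
    public static void main(String[] args) throws Exception {
        // placeholder path - point this at an actual Lucene index directory
        try (FSDirectory dir = FSDirectory.open(Paths.get("/path/to/shard/index"));
             CheckIndex checkIndex = new CheckIndex(dir)) {
            BytesStreamOutput os = new BytesStreamOutput();
            PrintStream out = new PrintStream(os, true, StandardCharsets.UTF_8.name());
            // CheckIndex writes its human-readable report into the PrintStream,
            // which in turn fills the BytesStreamOutput buffer
            checkIndex.setInfoStream(out);
            CheckIndex.Status status = checkIndex.checkIndex();
            out.flush();
            if (!status.clean) {
                System.err.println("check index [failure]\n" + os.bytes().utf8ToString());
            }
        }
    }
}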

Example 57 with BytesStreamOutput

Use of org.elasticsearch.common.io.stream.BytesStreamOutput in project elasticsearch by elastic.

The class IndexShardTests, method testShardStats.

public void testShardStats() throws IOException {
    IndexShard shard = newStartedShard();
    ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(new IndicesQueryCache(Settings.EMPTY), shard, new CommonStatsFlags()), shard.commitStats(), shard.seqNoStats());
    assertEquals(shard.shardPath().getRootDataPath().toString(), stats.getDataPath());
    assertEquals(shard.shardPath().getRootStatePath().toString(), stats.getStatePath());
    assertEquals(shard.shardPath().isCustomDataPath(), stats.isCustomDataPath());
    if (randomBoolean() || true) {
        // try to serialize it to ensure values survive the serialization
        BytesStreamOutput out = new BytesStreamOutput();
        stats.writeTo(out);
        StreamInput in = out.bytes().streamInput();
        stats = ShardStats.readShardStats(in);
    }
    XContentBuilder builder = jsonBuilder();
    builder.startObject();
    stats.toXContent(builder, EMPTY_PARAMS);
    builder.endObject();
    String xContent = builder.string();
    StringBuilder expectedSubSequence = new StringBuilder("\"shard_path\":{\"state_path\":\"");
    expectedSubSequence.append(shard.shardPath().getRootStatePath().toString());
    expectedSubSequence.append("\",\"data_path\":\"");
    expectedSubSequence.append(shard.shardPath().getRootDataPath().toString());
    expectedSubSequence.append("\",\"is_custom_data_path\":").append(shard.shardPath().isCustomDataPath()).append("}");
    if (Constants.WINDOWS) {
        // Some path weirdness on windows
    } else {
        assertTrue(xContent.contains(expectedSubSequence));
    }
    closeShards(shard);
}
Also used: ShardStats (org.elasticsearch.action.admin.indices.stats.ShardStats), IndicesQueryCache (org.elasticsearch.indices.IndicesQueryCache), CommonStats (org.elasticsearch.action.admin.indices.stats.CommonStats), CommonStatsFlags (org.elasticsearch.action.admin.indices.stats.CommonStatsFlags), StreamInput (org.elasticsearch.common.io.stream.StreamInput), Matchers.containsString (org.hamcrest.Matchers.containsString), BytesStreamOutput (org.elasticsearch.common.io.stream.BytesStreamOutput), XContentBuilder (org.elasticsearch.common.xcontent.XContentBuilder)
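
Examples 57 through 60 all follow the same round-trip recipe: serialize the object into a BytesStreamOutput, turn the buffer into a StreamInput via out.bytes().streamInput(), and deserialize a copy to compare against the original. The helper below is a hedged sketch of that recipe, not part of the Elasticsearch test framework; the name roundTrip is made up, and it assumes the Writeable and Writeable.Reader interfaces from the same org.elasticsearch.common.io.stream package.

import java.io.IOException;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Writeable;

final class SerializationRoundTrip {

    // Serializes the original object, then deserializes a copy from the resulting bytes.
    static <T extends Writeable> T roundTrip(T original, Writeable.Reader<T> reader) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            original.writeTo(out);
            try (StreamInput in = out.bytes().streamInput()) {
                return reader.read(in);
            }
        }
    }
}

For instance, Example 60 could be expressed as TermsLookup copy = SerializationRoundTrip.roundTrip(termsLookup, TermsLookup::new), since TermsLookup exposes a StreamInput constructor.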

Example 58 with BytesStreamOutput

Use of org.elasticsearch.common.io.stream.BytesStreamOutput in project elasticsearch by elastic.

The class TranslogTests, method testTranslogOpSerialization.

public void testTranslogOpSerialization() throws Exception {
    BytesReference B_1 = new BytesArray(new byte[] { 1 });
    SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID();
    assert Version.CURRENT.major <= 6 : "Using UNASSIGNED_SEQ_NO can be removed in 7.0, because 6.0+ nodes have actual sequence numbers";
    long randomSeqNum = randomBoolean() ? SequenceNumbersService.UNASSIGNED_SEQ_NO : randomNonNegativeLong();
    long randomPrimaryTerm = randomBoolean() ? 0 : randomNonNegativeLong();
    seqID.seqNo.setLongValue(randomSeqNum);
    seqID.seqNoDocValue.setLongValue(randomSeqNum);
    seqID.primaryTerm.setLongValue(randomPrimaryTerm);
    Field uidField = new Field("_uid", Uid.createUid("test", "1"), UidFieldMapper.Defaults.FIELD_TYPE);
    Field versionField = new NumericDocValuesField("_version", 1);
    Document document = new Document();
    document.add(new TextField("value", "test", Field.Store.YES));
    document.add(uidField);
    document.add(versionField);
    document.add(seqID.seqNo);
    document.add(seqID.seqNoDocValue);
    document.add(seqID.primaryTerm);
    ParsedDocument doc = new ParsedDocument(versionField, seqID, "1", "type", null, Arrays.asList(document), B_1, XContentType.JSON, null);
    Engine.Index eIndex = new Engine.Index(newUid(doc), doc, randomSeqNum, randomPrimaryTerm, 1, VersionType.INTERNAL, Origin.PRIMARY, 0, 0, false);
    Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomSeqNum, true);
    Translog.Index index = new Translog.Index(eIndex, eIndexResult);
    BytesStreamOutput out = new BytesStreamOutput();
    index.writeTo(out);
    StreamInput in = out.bytes().streamInput();
    Translog.Index serializedIndex = new Translog.Index(in);
    assertEquals(index, serializedIndex);
    Engine.Delete eDelete = new Engine.Delete(doc.type(), doc.id(), newUid(doc), randomSeqNum, randomPrimaryTerm, 2, VersionType.INTERNAL, Origin.PRIMARY, 0);
    Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomSeqNum, true);
    Translog.Delete delete = new Translog.Delete(eDelete, eDeleteResult);
    out = new BytesStreamOutput();
    delete.writeTo(out);
    in = out.bytes().streamInput();
    Translog.Delete serializedDelete = new Translog.Delete(in);
    assertEquals(delete, serializedDelete);
}
Also used: BytesReference (org.elasticsearch.common.bytes.BytesReference), SeqNoFieldMapper (org.elasticsearch.index.mapper.SeqNoFieldMapper), BytesArray (org.elasticsearch.common.bytes.BytesArray), Document (org.elasticsearch.index.mapper.ParseContext.Document), ParsedDocument (org.elasticsearch.index.mapper.ParsedDocument), BytesStreamOutput (org.elasticsearch.common.io.stream.BytesStreamOutput), NumericDocValuesField (org.apache.lucene.document.NumericDocValuesField), Field (org.apache.lucene.document.Field), TextField (org.apache.lucene.document.TextField), StreamInput (org.elasticsearch.common.io.stream.StreamInput), Engine (org.elasticsearch.index.engine.Engine)

Example 59 with BytesStreamOutput

Use of org.elasticsearch.common.io.stream.BytesStreamOutput in project elasticsearch by elastic.

The class TranslogTests, method stats.

protected TranslogStats stats() throws IOException {
    // force flushing and updating of stats
    translog.sync();
    TranslogStats stats = translog.stats();
    if (randomBoolean()) {
        BytesStreamOutput out = new BytesStreamOutput();
        stats.writeTo(out);
        StreamInput in = out.bytes().streamInput();
        stats = new TranslogStats();
        stats.readFrom(in);
    }
    return stats;
}
Also used: StreamInput (org.elasticsearch.common.io.stream.StreamInput), BytesStreamOutput (org.elasticsearch.common.io.stream.BytesStreamOutput)
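
Note that the read side in this example differs from the others: TranslogStats is rebuilt by allocating an empty instance and then calling readFrom(in), the older readFrom-based deserialization style, whereas ShardStats, Translog.Index/Delete, and TermsLookup are reconstructed directly from the StreamInput through a static factory or a constructor. The BytesStreamOutput write side is identical in both styles; only the deserialization entry point changes.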

Example 60 with BytesStreamOutput

Use of org.elasticsearch.common.io.stream.BytesStreamOutput in project elasticsearch by elastic.

The class TermsLookupTests, method testSerialization.

public void testSerialization() throws IOException {
    TermsLookup termsLookup = randomTermsLookup();
    try (BytesStreamOutput output = new BytesStreamOutput()) {
        termsLookup.writeTo(output);
        try (StreamInput in = output.bytes().streamInput()) {
            TermsLookup deserializedLookup = new TermsLookup(in);
            assertEquals(deserializedLookup, termsLookup);
            assertEquals(deserializedLookup.hashCode(), termsLookup.hashCode());
            assertNotSame(deserializedLookup, termsLookup);
        }
    }
}
Also used: StreamInput (org.elasticsearch.common.io.stream.StreamInput), TermsLookup (org.elasticsearch.indices.TermsLookup), BytesStreamOutput (org.elasticsearch.common.io.stream.BytesStreamOutput)
