
Example 16 with Snapshot

Use of org.elasticsearch.snapshots.Snapshot in project elasticsearch by elastic.

From the class IndexShardTests, the method testRestoreShard: it indexes a document into a source shard, reinitializes a target shard with a snapshot recovery source, restores the target from a fake repository that copies the source shard's Lucene files, and verifies the target ends up with the source's document.

public void testRestoreShard() throws IOException {
    final IndexShard source = newStartedShard(true);
    IndexShard target = newStartedShard(true);
    indexDoc(source, "test", "0");
    if (randomBoolean()) {
        source.refresh("test");
    }
    indexDoc(target, "test", "1");
    target.refresh("test");
    assertDocs(target, new Uid("test", "1"));
    // only flush source
    flushShard(source);
    final ShardRouting origRouting = target.routingEntry();
    ShardRouting routing = ShardRoutingHelper.reinitPrimary(origRouting);
    final Snapshot snapshot = new Snapshot("foo", new SnapshotId("bar", UUIDs.randomBase64UUID()));
    routing = ShardRoutingHelper.newWithRestoreSource(routing, new RecoverySource.SnapshotRecoverySource(snapshot, Version.CURRENT, "test"));
    target = reinitShard(target, routing);
    Store sourceStore = source.store();
    Store targetStore = target.store();
    DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
    target.markAsRecovering("store", new RecoveryState(routing, localNode, null));
    assertTrue(target.restoreFromRepository(new RestoreOnlyRepository("test") {

        @Override
        public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) {
            try {
                cleanLuceneIndex(targetStore.directory());
                for (String file : sourceStore.directory().listAll()) {
                    if (file.equals("write.lock") || file.startsWith("extra")) {
                        continue;
                    }
                    targetStore.directory().copyFrom(sourceStore.directory(), file, file, IOContext.DEFAULT);
                }
            } catch (Exception ex) {
                throw new RuntimeException(ex);
            }
        }
    }));
    target.updateRoutingEntry(routing.moveToStarted());
    assertDocs(target, new Uid("test", "0"));
    closeShards(source, target);
}
Also used:
IndexId (org.elasticsearch.repositories.IndexId)
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode)
Store (org.elasticsearch.index.store.Store)
Matchers.containsString (org.hamcrest.Matchers.containsString)
AlreadyClosedException (org.apache.lucene.store.AlreadyClosedException)
EngineException (org.elasticsearch.index.engine.EngineException)
IOException (java.io.IOException)
BrokenBarrierException (java.util.concurrent.BrokenBarrierException)
ExecutionException (java.util.concurrent.ExecutionException)
CorruptIndexException (org.apache.lucene.index.CorruptIndexException)
Uid (org.elasticsearch.index.mapper.Uid)
Snapshot (org.elasticsearch.snapshots.Snapshot)
SnapshotId (org.elasticsearch.snapshots.SnapshotId)
Version (org.elasticsearch.Version)
TestShardRouting (org.elasticsearch.cluster.routing.TestShardRouting)
ShardRouting (org.elasticsearch.cluster.routing.ShardRouting)
RecoveryState (org.elasticsearch.indices.recovery.RecoveryState)
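
The restore wiring in this test boils down to pairing a Snapshot (repository name plus SnapshotId) with a SnapshotRecoverySource on the reinitialized shard routing. A minimal sketch of just that step, using the same classes imported above; the class name RestoreSourceSketch is invented for illustration, while the repository, snapshot, and index names mirror the ones used in the test:

import org.elasticsearch.Version;
import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotId;

class RestoreSourceSketch {

    static SnapshotRecoverySource snapshotRestoreSource() {
        // A Snapshot is identified by its repository name plus a SnapshotId (name and UUID).
        Snapshot snapshot = new Snapshot("foo", new SnapshotId("bar", UUIDs.randomBase64UUID()));
        // The recovery source marks the shard routing as "restore from this snapshot
        // of the index named test".
        return new SnapshotRecoverySource(snapshot, Version.CURRENT, "test");
    }
}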

Example 17 with Snapshot

Use of org.elasticsearch.snapshots.Snapshot in project elasticsearch by elastic.

From the class NodeVersionAllocationDeciderTests, the method testRestoreDoesNotAllocateSnapshotOnOlderNodes: it restores an index from a snapshot into a cluster containing one current-version node and two older nodes, then asserts that every primary shard is allocated only to the new node.

public void testRestoreDoesNotAllocateSnapshotOnOlderNodes() {
    final DiscoveryNode newNode = new DiscoveryNode("newNode", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT);
    final DiscoveryNode oldNode1 = new DiscoveryNode("oldNode1", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, VersionUtils.getPreviousVersion());
    final DiscoveryNode oldNode2 = new DiscoveryNode("oldNode2", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, VersionUtils.getPreviousVersion());
    int numberOfShards = randomIntBetween(1, 3);
    final IndexMetaData.Builder indexMetaData = IndexMetaData.builder("test")
        .settings(settings(Version.CURRENT))
        .numberOfShards(numberOfShards)
        .numberOfReplicas(randomIntBetween(0, 3));
    for (int i = 0; i < numberOfShards; i++) {
        indexMetaData.putInSyncAllocationIds(i, Collections.singleton("_test_"));
    }
    MetaData metaData = MetaData.builder().put(indexMetaData).build();
    ClusterState state = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
        .metaData(metaData)
        .routingTable(RoutingTable.builder()
            .addAsRestore(metaData.index("test"),
                new SnapshotRecoverySource(
                    new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())),
                    Version.CURRENT, "test"))
            .build())
        .nodes(DiscoveryNodes.builder().add(newNode).add(oldNode1).add(oldNode2))
        .build();
    AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, Arrays.asList(
        new ReplicaAfterPrimaryActiveAllocationDecider(Settings.EMPTY),
        new NodeVersionAllocationDecider(Settings.EMPTY)));
    AllocationService strategy = new MockAllocationService(Settings.EMPTY, allocationDeciders,
        new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
    state = strategy.reroute(state, new AllocationCommands(), true, false).getClusterState();
    // Make sure that primary shards are only allocated on the new node
    for (int i = 0; i < numberOfShards; i++) {
        assertEquals("newNode", state.routingTable().index("test").getShards().get(i).primaryShard().currentNodeId());
    }
}
Also used:
TestGatewayAllocator (org.elasticsearch.test.gateway.TestGatewayAllocator)
ClusterState (org.elasticsearch.cluster.ClusterState)
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode)
BalancedShardsAllocator (org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator)
AllocationDeciders (org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders)
ReplicaAfterPrimaryActiveAllocationDecider (org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider)
IndexMetaData (org.elasticsearch.cluster.metadata.IndexMetaData)
AllocationCommands (org.elasticsearch.cluster.routing.allocation.command.AllocationCommands)
Snapshot (org.elasticsearch.snapshots.Snapshot)
SnapshotId (org.elasticsearch.snapshots.SnapshotId)
SnapshotRecoverySource (org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource)
MetaData (org.elasticsearch.cluster.metadata.MetaData)
NodeVersionAllocationDecider (org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider)

Example 18 with Snapshot

Use of org.elasticsearch.snapshots.Snapshot in project elasticsearch by elastic.

From the class ClusterSerializationTests, the method testSnapshotDeletionsInProgressSerialization: it checks that the SnapshotDeletionsInProgress custom survives a cluster-state diff round trip at the current wire version but is dropped when the diff is serialized for the minimum compatible (older) version.

public void testSnapshotDeletionsInProgressSerialization() throws Exception {
    boolean includeRestore = randomBoolean();
    ClusterState.Builder builder = ClusterState.builder(ClusterState.EMPTY_STATE)
        .putCustom(SnapshotDeletionsInProgress.TYPE,
            SnapshotDeletionsInProgress.newInstance(new SnapshotDeletionsInProgress.Entry(
                new Snapshot("repo1", new SnapshotId("snap1", UUIDs.randomBase64UUID())),
                randomNonNegativeLong(), randomNonNegativeLong())));
    if (includeRestore) {
        builder.putCustom(RestoreInProgress.TYPE, new RestoreInProgress(new RestoreInProgress.Entry(
            new Snapshot("repo2", new SnapshotId("snap2", UUIDs.randomBase64UUID())),
            RestoreInProgress.State.STARTED, Collections.singletonList("index_name"), ImmutableOpenMap.of())));
    }
    ClusterState clusterState = builder.incrementVersion().build();
    Diff<ClusterState> diffs = clusterState.diff(ClusterState.EMPTY_STATE);
    // serialize with current version
    BytesStreamOutput outStream = new BytesStreamOutput();
    diffs.writeTo(outStream);
    StreamInput inStream = outStream.bytes().streamInput();
    inStream = new NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()));
    Diff<ClusterState> serializedDiffs = ClusterState.readDiffFrom(inStream, clusterState.nodes().getLocalNode());
    ClusterState stateAfterDiffs = serializedDiffs.apply(ClusterState.EMPTY_STATE);
    assertThat(stateAfterDiffs.custom(RestoreInProgress.TYPE), includeRestore ? notNullValue() : nullValue());
    assertThat(stateAfterDiffs.custom(SnapshotDeletionsInProgress.TYPE), notNullValue());
    // serialize with old version
    outStream = new BytesStreamOutput();
    outStream.setVersion(Version.CURRENT.minimumCompatibilityVersion());
    diffs.writeTo(outStream);
    inStream = outStream.bytes().streamInput();
    inStream = new NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()));
    serializedDiffs = ClusterState.readDiffFrom(inStream, clusterState.nodes().getLocalNode());
    stateAfterDiffs = serializedDiffs.apply(ClusterState.EMPTY_STATE);
    assertThat(stateAfterDiffs.custom(RestoreInProgress.TYPE), includeRestore ? notNullValue() : nullValue());
    assertThat(stateAfterDiffs.custom(SnapshotDeletionsInProgress.TYPE), nullValue());
    // remove the custom and try serializing again with old version
    clusterState = ClusterState.builder(clusterState).removeCustom(SnapshotDeletionsInProgress.TYPE).incrementVersion().build();
    outStream = new BytesStreamOutput();
    diffs.writeTo(outStream);
    inStream = outStream.bytes().streamInput();
    inStream = new NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()));
    serializedDiffs = ClusterState.readDiffFrom(inStream, clusterState.nodes().getLocalNode());
    stateAfterDiffs = serializedDiffs.apply(stateAfterDiffs);
    assertThat(stateAfterDiffs.custom(RestoreInProgress.TYPE), includeRestore ? notNullValue() : nullValue());
    assertThat(stateAfterDiffs.custom(SnapshotDeletionsInProgress.TYPE), nullValue());
}
Also used:
NamedWriteableRegistry (org.elasticsearch.common.io.stream.NamedWriteableRegistry)
Snapshot (org.elasticsearch.snapshots.Snapshot)
SnapshotId (org.elasticsearch.snapshots.SnapshotId)
ClusterState (org.elasticsearch.cluster.ClusterState)
RestoreInProgress (org.elasticsearch.cluster.RestoreInProgress)
NamedWriteableAwareStreamInput (org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput)
StreamInput (org.elasticsearch.common.io.stream.StreamInput)
BytesStreamOutput (org.elasticsearch.common.io.stream.BytesStreamOutput)
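
The round trip itself follows a fixed pattern: write the Diff to a BytesStreamOutput (optionally pinned to an older wire version), wrap the resulting bytes in a NamedWriteableAwareStreamInput backed by the cluster module's named writeables, and read the diff back. A distilled sketch of that pattern as the test performs it; the class name DiffRoundTripSketch and the helper roundTripDiff are invented for illustration:

import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;

class DiffRoundTripSketch {

    // Serialize a cluster-state diff at the given wire version and deserialize it again.
    static Diff<ClusterState> roundTripDiff(Diff<ClusterState> diffs, Version wireVersion,
                                            DiscoveryNode localNode) throws IOException {
        BytesStreamOutput outStream = new BytesStreamOutput();
        outStream.setVersion(wireVersion);
        diffs.writeTo(outStream);
        StreamInput inStream = new NamedWriteableAwareStreamInput(
            outStream.bytes().streamInput(),
            new NamedWriteableRegistry(ClusterModule.getNamedWriteables()));
        return ClusterState.readDiffFrom(inStream, localNode);
    }
}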

Example 19 with Snapshot

Use of org.elasticsearch.snapshots.Snapshot in project elasticsearch by elastic.

From the class ExceptionSerializationTests, the method testSnapshotException: it serializes a SnapshotException and verifies that the repository name, snapshot name, message, and cause survive, including the case where the exception is built without a Snapshot.

public void testSnapshotException() throws IOException {
    final Snapshot snapshot = new Snapshot("repo", new SnapshotId("snap", UUIDs.randomBase64UUID()));
    SnapshotException ex = serialize(new SnapshotException(snapshot, "no such snapshot", new NullPointerException()));
    assertEquals(ex.getRepositoryName(), snapshot.getRepository());
    assertEquals(ex.getSnapshotName(), snapshot.getSnapshotId().getName());
    assertEquals(ex.getMessage(), "[" + snapshot + "] no such snapshot");
    assertTrue(ex.getCause() instanceof NullPointerException);
    ex = serialize(new SnapshotException(null, "no such snapshot", new NullPointerException()));
    assertNull(ex.getRepositoryName());
    assertNull(ex.getSnapshotName());
    assertEquals(ex.getMessage(), "[_na] no such snapshot");
    assertTrue(ex.getCause() instanceof NullPointerException);
}
Also used: Snapshot (org.elasticsearch.snapshots.Snapshot), SnapshotId (org.elasticsearch.snapshots.SnapshotId), SnapshotException (org.elasticsearch.snapshots.SnapshotException)
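
For callers, the useful part of the contract exercised here is that SnapshotException exposes the repository and snapshot names separately from the message. A hypothetical handling sketch built only on the accessors asserted above; the class and method names are invented:

import org.elasticsearch.snapshots.SnapshotException;

class SnapshotErrorMessages {

    // Build a log-friendly description; both name accessors can return null when the
    // exception was constructed without a Snapshot (the "_na" case asserted above).
    static String describe(SnapshotException ex) {
        String repository = ex.getRepositoryName() == null ? "_na" : ex.getRepositoryName();
        String snapshotName = ex.getSnapshotName() == null ? "_na" : ex.getSnapshotName();
        return "snapshot failure in [" + repository + ":" + snapshotName + "]: " + ex.getMessage();
    }
}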

Example 20 with Snapshot

Use of org.elasticsearch.snapshots.Snapshot in project elasticsearch by elastic.

From the class MetaDataDeleteIndexServiceTests, the method testDeleteSnapshotting: it verifies that deleting an index which is currently being snapshotted is rejected with an IllegalArgumentException.

public void testDeleteSnapshotting() {
    String index = randomAsciiOfLength(5);
    Snapshot snapshot = new Snapshot("doesn't matter", new SnapshotId("snapshot name", "snapshot uuid"));
    SnapshotsInProgress snaps = new SnapshotsInProgress(new SnapshotsInProgress.Entry(
        snapshot, true, false, SnapshotsInProgress.State.INIT,
        singletonList(new IndexId(index, "doesn't matter")),
        System.currentTimeMillis(), (long) randomIntBetween(0, 1000), ImmutableOpenMap.of()));
    ClusterState state = ClusterState.builder(clusterState(index)).putCustom(SnapshotsInProgress.TYPE, snaps).build();
    Exception e = expectThrows(IllegalArgumentException.class,
        () -> service.deleteIndices(state, singleton(state.metaData().getIndices().get(index).getIndex())));
    assertEquals("Cannot delete indices that are being snapshotted: [[" + index + "]]. "
        + "Try again after snapshot finishes or cancel the currently running snapshot.", e.getMessage());
}
Also used: Snapshot (org.elasticsearch.snapshots.Snapshot), SnapshotId (org.elasticsearch.snapshots.SnapshotId), IndexId (org.elasticsearch.repositories.IndexId), ClusterState (org.elasticsearch.cluster.ClusterState), SnapshotsInProgress (org.elasticsearch.cluster.SnapshotsInProgress), IndexNotFoundException (org.elasticsearch.index.IndexNotFoundException)
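
Taken together, the examples use Snapshot as a small identifier value: a repository name paired with a SnapshotId. A minimal sketch of constructing one and reading it back through the accessors exercised in Example 19; the class name and variable names are placeholders:

import org.elasticsearch.common.UUIDs;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotId;

class SnapshotIdentitySketch {

    static void demo() {
        SnapshotId snapshotId = new SnapshotId("snap", UUIDs.randomBase64UUID());
        Snapshot snapshot = new Snapshot("repo", snapshotId);
        // The accessors used by the assertions in Example 19.
        String repository = snapshot.getRepository();              // "repo"
        String snapshotName = snapshot.getSnapshotId().getName();  // "snap"
        System.out.println(repository + "/" + snapshotName);
    }
}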

Aggregations

Classes used together with Snapshot in the indexed examples, with usage counts:
Snapshot (org.elasticsearch.snapshots.Snapshot): 20
SnapshotId (org.elasticsearch.snapshots.SnapshotId): 13
ClusterState (org.elasticsearch.cluster.ClusterState): 9
IndexMetaData (org.elasticsearch.cluster.metadata.IndexMetaData): 7
MetaData (org.elasticsearch.cluster.metadata.MetaData): 7
SnapshotRecoverySource (org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource): 7
RoutingTable (org.elasticsearch.cluster.routing.RoutingTable): 4
ShardId (org.elasticsearch.index.shard.ShardId): 4
TableIdent (io.crate.metadata.TableIdent): 3
CrateUnitTest (io.crate.test.integration.CrateUnitTest): 3
IOException (java.io.IOException): 3
ArrayList (java.util.ArrayList): 3
List (java.util.List): 3
Map (java.util.Map): 3
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode): 3
SnapshotInfo (org.elasticsearch.snapshots.SnapshotInfo): 3
IntHashSet (com.carrotsearch.hppc.IntHashSet): 2
HashMap (java.util.HashMap): 2
SnapshotsInProgress (org.elasticsearch.cluster.SnapshotsInProgress): 2
ClusterBlockException (org.elasticsearch.cluster.block.ClusterBlockException): 2