Use of org.elasticsearch.index.engine.InternalEngineFactory in project crate by crate.
The class BlobStoreRepositoryRestoreTests, method testRestoreSnapshotWithExistingFiles:
/**
 * Restoring a snapshot that contains multiple files must succeed even when
 * some files already exist in the shard's store.
 */
public void testRestoreSnapshotWithExistingFiles() throws IOException {
    final IndexId indexId = new IndexId(randomAlphaOfLength(10), UUIDs.randomBase64UUID());
    final ShardId shardId = new ShardId(indexId.getName(), indexId.getId(), 0);
    IndexShard shard = newShard(shardId, true);
    try {
        // index documents in the shard
        final int numDocs = scaledRandomIntBetween(1, 500);
        recoverShardFromStore(shard);
        for (int i = 0; i < numDocs; i++) {
            indexDoc(shard, Integer.toString(i));
            if (rarely()) {
                flushShard(shard, false);
            }
        }
        assertDocCount(shard, numDocs);

        // snapshot the shard
        final Repository repository = createRepository();
        final Snapshot snapshot = new Snapshot(repository.getMetadata().name(),
                                               new SnapshotId(randomAlphaOfLength(10), "_uuid"));
        snapshotShard(shard, snapshot, repository);

        // capture current store files
        final Store.MetadataSnapshot storeFiles = shard.snapshotStoreMetadata();
        assertFalse(storeFiles.asMap().isEmpty());

        // close the shard
        closeShards(shard);

        // delete some random files in the store
        List<String> deletedFiles = randomSubsetOf(randomIntBetween(1, storeFiles.size() - 1),
                                                   storeFiles.asMap().keySet());
        for (String deletedFile : deletedFiles) {
            Files.delete(shard.shardPath().resolveIndex().resolve(deletedFile));
        }

        // build a new shard using the same store directory as the closed shard
        ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(shard.routingEntry(),
                                                                      RecoverySource.ExistingStoreRecoverySource.INSTANCE);
        shard = newShard(shardRouting,
                         shard.shardPath(),
                         shard.indexSettings().getIndexMetadata(),
                         null,
                         new InternalEngineFactory(),
                         () -> {},
                         RetentionLeaseSyncer.EMPTY,
                         EMPTY_EVENT_LISTENER);

        // restore the shard
        recoverShardFromSnapshot(shard, snapshot, repository);

        // check that the shard is not corrupted
        TestUtil.checkIndex(shard.store().directory());

        // check that all files have been restored
        final Directory directory = shard.store().directory();
        final List<String> directoryFiles = Arrays.asList(directory.listAll());
        for (StoreFileMetadata storeFile : storeFiles) {
            String fileName = storeFile.name();
            assertTrue("File [" + fileName + "] does not exist in store directory", directoryFiles.contains(fileName));
            assertEquals(storeFile.length(), shard.store().directory().fileLength(fileName));
        }
    } finally {
        if (shard != null && shard.state() != IndexShardState.CLOSED) {
            try {
                shard.close("test", false);
            } finally {
                IOUtils.close(shard.store());
            }
        }
    }
}
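The interesting step in this test is the deliberate damage: it deletes a random proper subset of the store files so the restore has to cope with a half-populated directory. Below is a minimal, self-contained sketch of that idea outside the CrateDB test harness; the class name, method name, and the plain java.nio.file types are illustrative stand-ins, not test-framework APIs.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

class PartialStoreDamage {

    /**
     * Deletes a random proper subset of the given files, always leaving at
     * least one file in place, so a subsequent restore must handle a
     * directory that already contains some of the expected files.
     */
    static List<Path> deleteRandomSubset(List<Path> files) throws IOException {
        if (files.size() < 2) {
            return Collections.emptyList(); // can't delete anything while keeping one file
        }
        List<Path> shuffled = new ArrayList<>(files);
        Collections.shuffle(shuffled, ThreadLocalRandom.current());
        // delete between 1 and size - 1 entries so at least one file survives
        int toDelete = 1 + ThreadLocalRandom.current().nextInt(shuffled.size() - 1);
        List<Path> deleted = shuffled.subList(0, toDelete);
        for (Path path : deleted) {
            Files.delete(path);
        }
        return deleted;
    }
}

The final loop of the test then closes the circle: every file recorded in the pre-snapshot metadata must exist again with its original length, regardless of whether it was deleted or left in place.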
Use of org.elasticsearch.index.engine.InternalEngineFactory in project crate by crate.
The class IndexShardRetentionLeaseTests, method runExpirationTest:
private void runExpirationTest(final boolean primary) throws IOException {
    final long retentionLeaseMillis = randomLongBetween(1, TimeValue.timeValueHours(12).millis());
    final Settings settings = Settings.builder()
        .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
        .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(),
             TimeValue.timeValueMillis(retentionLeaseMillis).getStringRep())
        .build();
    // current time is mocked through the thread pool
    final IndexShard indexShard = newStartedShard(primary, settings, new InternalEngineFactory());
    final long primaryTerm = indexShard.getOperationPrimaryTerm();
    try {
        final long[] retainingSequenceNumbers = new long[1];
        retainingSequenceNumbers[0] = randomLongBetween(0, Long.MAX_VALUE);
        final long initialVersion;
        if (primary) {
            initialVersion = 2;
            indexShard.addRetentionLease("0", retainingSequenceNumbers[0], "test-0", ActionListener.wrap(() -> {}));
        } else {
            initialVersion = 3;
            final RetentionLeases retentionLeases = new RetentionLeases(
                primaryTerm,
                initialVersion,
                Arrays.asList(
                    peerRecoveryRetentionLease(indexShard),
                    new RetentionLease("0", retainingSequenceNumbers[0], currentTimeMillis.get(), "test-0")));
            indexShard.updateRetentionLeasesOnReplica(retentionLeases);
        }
        {
            final RetentionLeases retentionLeases = indexShard.getEngine().config().retentionLeasesSupplier().get();
            assertThat(retentionLeases.version(), equalTo(initialVersion));
            assertThat(retentionLeases.leases(), hasSize(2));
            final RetentionLease retentionLease = retentionLeases.get("0");
            assertThat(retentionLease.timestamp(), equalTo(currentTimeMillis.get()));
            assertRetentionLeases(indexShard, 1, retainingSequenceNumbers, primaryTerm, initialVersion, primary, false);
        }

        // renew the lease
        currentTimeMillis.set(currentTimeMillis.get() + randomLongBetween(0, 1024));
        retainingSequenceNumbers[0] = randomLongBetween(retainingSequenceNumbers[0], Long.MAX_VALUE);
        if (primary) {
            indexShard.renewRetentionLease("0", retainingSequenceNumbers[0], "test-0");
        } else {
            final RetentionLeases retentionLeases = new RetentionLeases(
                primaryTerm,
                initialVersion + 1,
                Arrays.asList(
                    peerRecoveryRetentionLease(indexShard),
                    new RetentionLease("0", retainingSequenceNumbers[0], currentTimeMillis.get(), "test-0")));
            indexShard.updateRetentionLeasesOnReplica(retentionLeases);
        }
        {
            final RetentionLeases retentionLeases = indexShard.getEngine().config().retentionLeasesSupplier().get();
            assertThat(retentionLeases.version(), equalTo(initialVersion + 1));
            assertThat(retentionLeases.leases(), hasSize(2));
            final RetentionLease retentionLease = retentionLeases.get("0");
            assertThat(retentionLease.timestamp(), equalTo(currentTimeMillis.get()));
            assertRetentionLeases(indexShard, 1, retainingSequenceNumbers, primaryTerm, initialVersion + 1, primary, false);
        }

        // now force the lease to expire
        currentTimeMillis.set(currentTimeMillis.get() + randomLongBetween(retentionLeaseMillis, Long.MAX_VALUE - currentTimeMillis.get()));
        if (primary) {
            assertRetentionLeases(indexShard, 1, retainingSequenceNumbers, primaryTerm, initialVersion + 1, true, false);
            assertRetentionLeases(indexShard, 0, new long[0], primaryTerm, initialVersion + 2, true, true);
        } else {
            assertRetentionLeases(indexShard, 1, retainingSequenceNumbers, primaryTerm, initialVersion + 1, false, false);
        }
    } finally {
        closeShards(indexShard);
    }
}
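The test advances a mocked clock past the retention period and then checks that the primary prunes the expired lease (bumping the leases version) while a replica keeps it until the primary syncs new state. The expiry rule itself is simple; here is a hedged, self-contained sketch of it with a hypothetical Lease record standing in for the real RetentionLease class (Java 16+ for records).

import java.util.List;
import java.util.stream.Collectors;

class LeaseExpiry {

    // hypothetical, simplified stand-in for a retention lease
    record Lease(String id, long retainingSeqNo, long timestampMillis) {}

    /**
     * A lease is expired once the clock has advanced more than
     * retentionPeriodMillis past the lease's last renewal timestamp;
     * renewing a lease refreshes that timestamp and so resets the window.
     */
    static List<Lease> pruneExpired(List<Lease> leases, long nowMillis, long retentionPeriodMillis) {
        return leases.stream()
            .filter(lease -> nowMillis - lease.timestampMillis() <= retentionPeriodMillis)
            .collect(Collectors.toList());
    }
}

Only the primary applies this pruning; replicas treat the lease set as authoritative replicated state, which is why the replica branch of the test still sees the lease after the clock jump.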
Use of org.elasticsearch.index.engine.InternalEngineFactory in project crate by crate.
The class IndicesService, method getEngineFactory:
private EngineFactory getEngineFactory(final IndexSettings idxSettings) {
    final IndexMetadata indexMetadata = idxSettings.getIndexMetadata();
    if (indexMetadata != null && indexMetadata.getState() == IndexMetadata.State.CLOSE) {
        // NoOpEngine takes precedence as long as the index is closed
        return NoOpEngine::new;
    }
    final List<Optional<EngineFactory>> engineFactories = engineFactoryProviders.stream()
        .map(engineFactoryProvider -> engineFactoryProvider.apply(idxSettings))
        .filter(maybe -> Objects.requireNonNull(maybe).isPresent())
        .collect(Collectors.toList());
    if (engineFactories.isEmpty()) {
        return new InternalEngineFactory();
    } else if (engineFactories.size() == 1) {
        assert engineFactories.get(0).isPresent();
        return engineFactories.get(0).get();
    } else {
        final String message = String.format(
            Locale.ROOT,
            "multiple engine factories provided for %s: %s",
            idxSettings.getIndex(),
            engineFactories.stream().map(t -> {
                assert t.isPresent();
                return "[" + t.get().getClass().getName() + "]";
            }).collect(Collectors.joining(",")));
        throw new IllegalStateException(message);
    }
}
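This method embodies a small resolution pattern: ask every registered provider for an optional factory, fall back to InternalEngineFactory when nobody volunteers, and fail loudly when two or more do, since silently picking one would make engine selection depend on provider registration order. A minimal generic sketch of the same pattern, with hypothetical Factory/DefaultFactory types standing in for EngineFactory and InternalEngineFactory:

import java.util.List;
import java.util.Optional;
import java.util.function.Function;
import java.util.stream.Collectors;

class FactoryResolution {

    interface Factory {}                                // stand-in for EngineFactory
    static class DefaultFactory implements Factory {}   // stand-in for InternalEngineFactory

    /**
     * Resolves exactly one factory: the default when no provider opts in,
     * the single candidate when exactly one does, and an error otherwise.
     */
    static <S> Factory resolve(List<Function<S, Optional<Factory>>> providers, S settings) {
        List<Factory> candidates = providers.stream()
            .map(provider -> provider.apply(settings))
            .filter(Optional::isPresent)
            .map(Optional::get)
            .collect(Collectors.toList());
        if (candidates.isEmpty()) {
            return new DefaultFactory();
        }
        if (candidates.size() == 1) {
            return candidates.get(0);
        }
        throw new IllegalStateException("multiple factories provided: " + candidates);
    }
}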
Use of org.elasticsearch.index.engine.InternalEngineFactory in project crate by crate.
The class TransportReplicationAllPermitsAcquisitionTests, method setUp:
@Override
@Before
public void setUp() throws Exception {
    super.setUp();
    globalBlock = randomBoolean();
    RestStatus restStatus = randomFrom(RestStatus.values());
    block = new ClusterBlock(randomIntBetween(1, 10), randomAlphaOfLength(5), false, true, false, restStatus, ClusterBlockLevel.ALL);
    clusterService = createClusterService(threadPool);

    final ClusterState.Builder state = ClusterState.builder(clusterService.state());
    Set<DiscoveryNodeRole> roles = new HashSet<>(DiscoveryNodeRole.BUILT_IN_ROLES);
    DiscoveryNode node1 = new DiscoveryNode("_name1", "_node1", buildNewFakeTransportAddress(), emptyMap(), roles, Version.CURRENT);
    DiscoveryNode node2 = new DiscoveryNode("_name2", "_node2", buildNewFakeTransportAddress(), emptyMap(), roles, Version.CURRENT);
    state.nodes(DiscoveryNodes.builder()
        .add(node1)
        .add(node2)
        .localNodeId(node1.getId())
        .masterNodeId(node1.getId()));

    shardId = new ShardId("index", UUID.randomUUID().toString(), 0);
    ShardRouting shardRouting = newShardRouting(shardId, node1.getId(), true, ShardRoutingState.INITIALIZING,
                                                RecoverySource.EmptyStoreRecoverySource.INSTANCE);
    Settings indexSettings = Settings.builder()
        .put(SETTING_VERSION_CREATED, Version.CURRENT)
        .put(SETTING_INDEX_UUID, shardId.getIndex().getUUID())
        .put(SETTING_NUMBER_OF_SHARDS, 1)
        .put(SETTING_NUMBER_OF_REPLICAS, 1)
        .put(SETTING_CREATION_DATE, System.currentTimeMillis())
        .build();
    primary = newStartedShard(p -> newShard(shardRouting, indexSettings, new InternalEngineFactory()), true);
    for (int i = 0; i < 10; i++) {
        final String id = Integer.toString(i);
        indexDoc(primary, id, "{\"value\":" + id + "}");
    }
    IndexMetadata indexMetadata = IndexMetadata.builder(shardId.getIndexName())
        .settings(indexSettings)
        .primaryTerm(shardId.id(), primary.getOperationPrimaryTerm())
        .putMapping("default", "{ \"properties\": { \"value\": { \"type\": \"short\"}}}")
        .build();
    state.metadata(Metadata.builder().put(indexMetadata, false).generateClusterUuidIfNeeded());

    replica = newShard(primary.shardId(), false, node2.getId(), indexMetadata, null);
    recoverReplica(replica, primary, true);

    IndexRoutingTable.Builder routing = IndexRoutingTable.builder(indexMetadata.getIndex());
    routing.addIndexShard(new IndexShardRoutingTable.Builder(shardId).addShard(primary.routingEntry()).build());
    state.routingTable(RoutingTable.builder().add(routing.build()).build());
    setState(clusterService, state.build());

    final Settings transportSettings = Settings.builder().put("node.name", node1.getId()).build();
    MockTransport transport = new MockTransport() {
        @Override
        protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode node) {
            assertThat(action, allOf(startsWith("cluster:admin/test/"), endsWith("[r]")));
            assertThat(node, equalTo(node2));
            // node2 doesn't really exist, but we are performing some trickery in mockIndicesService() to pretend
            // that node1 holds both the primary and the replica, so redirect the request back to node1.
            transportService.sendRequest(transportService.getLocalNode(), action, request,
                new TransportResponseHandler<TransportReplicationAction.ReplicaResponse>() {

                    @Override
                    public ReplicaResponse read(StreamInput in) throws IOException {
                        return new ReplicaResponse(in);
                    }

                    @SuppressWarnings("unchecked")
                    private TransportResponseHandler<TransportReplicationAction.ReplicaResponse> getResponseHandler() {
                        return (TransportResponseHandler<TransportReplicationAction.ReplicaResponse>)
                            getResponseHandlers().onResponseReceived(requestId, TransportMessageListener.NOOP_LISTENER);
                    }

                    @Override
                    public void handleResponse(TransportReplicationAction.ReplicaResponse response) {
                        getResponseHandler().handleResponse(response);
                    }

                    @Override
                    public void handleException(TransportException exp) {
                        getResponseHandler().handleException(exp);
                    }

                    @Override
                    public String executor() {
                        return ThreadPool.Names.SAME;
                    }
                });
        }
    };
    transportService = transport.createTransportService(transportSettings, threadPool, bta -> node1, null);
    transportService.start();
    transportService.acceptIncomingRequests();
    shardStateAction = new ShardStateAction(clusterService, transportService, null, null);
}
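The key trick in this setup is the loopback: node2 is registered in the cluster state but never started, so the mock transport intercepts every replica-bound request and resends it to the local node, where the test actually hosts both shard copies. A stripped-down sketch of that redirection, using hypothetical Node/Request interfaces rather than the real transport classes:

import java.util.function.BiConsumer;

class LoopbackTransport {

    // hypothetical, minimal stand-ins for the transport abstractions used above
    interface Node {}
    interface Request {}

    private final Node localNode;
    private final BiConsumer<Node, Request> realSend; // delegate that actually dispatches

    LoopbackTransport(Node localNode, BiConsumer<Node, Request> realSend) {
        this.localNode = localNode;
        this.realSend = realSend;
    }

    /**
     * Intercepts a send targeting a fictional remote node and redirects it to
     * the local node, mirroring how the test pretends node1 holds both the
     * primary and the replica.
     */
    void send(Node target, Request request) {
        // ignore the requested target; everything loops back to the local node
        realSend.accept(localNode, request);
    }
}

In the real test the same idea needs the extra TransportResponseHandler wrapper shown above, because the response must be routed back to whichever handler was registered for the original request id.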
Use of org.elasticsearch.index.engine.InternalEngineFactory in project crate by crate.
The class IndexShardRetentionLeaseTests, method testPersistence:
public void testPersistence() throws IOException {
    final Settings settings = Settings.builder()
        .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
        .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), Long.MAX_VALUE, TimeUnit.NANOSECONDS)
        .build();
    final IndexShard indexShard = newStartedShard(true, settings, new InternalEngineFactory());
    try {
        final int length = randomIntBetween(0, 8);
        final long[] minimumRetainingSequenceNumbers = new long[length];
        for (int i = 0; i < length; i++) {
            minimumRetainingSequenceNumbers[i] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE);
            currentTimeMillis.set(TimeUnit.NANOSECONDS.toMillis(randomNonNegativeLong()));
            indexShard.addRetentionLease(Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i,
                                         ActionListener.wrap(() -> {}));
        }
        currentTimeMillis.set(TimeUnit.NANOSECONDS.toMillis(Long.MAX_VALUE));

        // force the retention leases to persist
        indexShard.persistRetentionLeases();

        // the written retention leases should equal our current retention leases
        final RetentionLeases retentionLeases = indexShard.getEngine().config().retentionLeasesSupplier().get();
        final RetentionLeases writtenRetentionLeases = indexShard.loadRetentionLeases();
        assertThat(writtenRetentionLeases.version(), equalTo(1L + length));
        assertThat(writtenRetentionLeases.leases(), contains(retentionLeases.leases().toArray(new RetentionLease[0])));

        // when we recover, we should recover the retention leases
        final IndexShard recoveredShard = reinitShard(
            indexShard,
            ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), RecoverySource.ExistingStoreRecoverySource.INSTANCE));
        try {
            recoverShardFromStore(recoveredShard);
            final RetentionLeases recoveredRetentionLeases = recoveredShard.getEngine().config().retentionLeasesSupplier().get();
            assertThat(recoveredRetentionLeases.version(), equalTo(1L + length));
            assertThat(recoveredRetentionLeases.leases(), contains(retentionLeases.leases().toArray(new RetentionLease[0])));
        } finally {
            closeShards(recoveredShard);
        }

        // we should not recover retention leases when force-allocating a stale primary
        final IndexShard forceRecoveredShard = reinitShard(
            indexShard,
            ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE));
        try {
            recoverShardFromStore(forceRecoveredShard);
            final RetentionLeases recoveredRetentionLeases = forceRecoveredShard.getEngine().config().retentionLeasesSupplier().get();
            assertThat(recoveredRetentionLeases.leases(), hasSize(1));
            assertThat(recoveredRetentionLeases.leases().iterator().next().id(),
                       equalTo(ReplicationTracker.getPeerRecoveryRetentionLeaseId(indexShard.routingEntry())));
            assertThat(recoveredRetentionLeases.version(), equalTo(1L));
        } finally {
            closeShards(forceRecoveredShard);
        }
    } finally {
        closeShards(indexShard);
    }
}
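The assertion structure here is a classic persistence round trip: persist the current lease set, load it back, and require the loaded state to equal the in-memory state exactly (same version, same leases, same order). A toy sketch of that round-trip idea follows; the line-based "id=seqNo" format and the class name are purely illustrative, the real shard writes a dedicated retention-lease state file, not this format.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Collectors;

class LeasePersistenceRoundTrip {

    /** Writes leases as "id=retainingSeqNo" lines; a stand-in for the real state file. */
    static void persist(Path file, Map<String, Long> leases) throws IOException {
        Files.write(file, leases.entrySet().stream()
            .map(e -> e.getKey() + "=" + e.getValue())
            .collect(Collectors.toList()));
    }

    /** Reads the leases back; the round trip must reproduce exactly what was written. */
    static Map<String, Long> load(Path file) throws IOException {
        Map<String, Long> leases = new LinkedHashMap<>();
        for (String line : Files.readAllLines(file)) {
            int sep = line.indexOf('=');
            leases.put(line.substring(0, sep), Long.parseLong(line.substring(sep + 1)));
        }
        return leases;
    }
}

The stale-primary branch of the test is the notable exception to the round trip: force-allocating a stale primary deliberately discards the persisted leases and starts over with only a fresh peer-recovery lease, since the old leases may reference history the stale copy never had.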