use of org.elasticsearch.threadpool.ThreadPool in project crate by crate.
The class BlobStoreRepository, method writeIndexGen.
/**
* Writing a new index generation is a three-step process.
* First, the {@link RepositoryMetadata} entry for this repository is set into a pending state by incrementing its
* pending generation {@code P} while its safe generation {@code N} remains unchanged.
* Second, the updated {@link RepositoryData} is written to generation {@code P + 1}.
* Lastly, the {@link RepositoryMetadata} entry for this repository is updated to the new generation {@code P + 1}, so that
* the pending and safe generations hold the same value, marking the end of the update of the repository data.
*
* @param repositoryData RepositoryData to write
* @param expectedGen expected repository generation at the start of the operation
* @param writeShardGens whether to write {@link ShardGenerations} to the new {@link RepositoryData} blob
* @param listener completion listener
*/
protected void writeIndexGen(RepositoryData repositoryData, long expectedGen, boolean writeShardGens, ActionListener<Void> listener) {
// cannot write to a read-only repository
assert isReadOnly() == false;
final long currentGen = repositoryData.getGenId();
if (currentGen != expectedGen) {
// the index file was updated by a concurrent operation, so we were operating on stale
// repository data
listener.onFailure(new RepositoryException(metadata.name(), "concurrent modification of the index-N file, expected current generation [" + expectedGen + "], actual current generation [" + currentGen + "]"));
return;
}
// Step 1: Set repository generation state to the next possible pending generation
final StepListener<Long> setPendingStep = new StepListener<>();
clusterService.submitStateUpdateTask("set pending repository generation [" + metadata.name() + "][" + expectedGen + "]", new ClusterStateUpdateTask() {
private long newGen;
@Override
public ClusterState execute(ClusterState currentState) {
final RepositoryMetadata meta = getRepoMetadata(currentState);
final String repoName = metadata.name();
final long genInState = meta.generation();
final boolean uninitializedMeta = meta.generation() == RepositoryData.UNKNOWN_REPO_GEN || bestEffortConsistency;
if (uninitializedMeta == false && meta.pendingGeneration() != genInState) {
LOGGER.info("Trying to write new repository data over unfinished write, repo [{}] is at " + "safe generation [{}] and pending generation [{}]", meta.name(), genInState, meta.pendingGeneration());
}
assert expectedGen == RepositoryData.EMPTY_REPO_GEN || uninitializedMeta || expectedGen == meta.generation() : "Expected non-empty generation [" + expectedGen + "] does not match generation tracked in [" + meta + "]";
// If we run into the empty repo generation for the expected gen, the repo is assumed to have been cleared of
// all contents by an external process, so we reset the safe generation to the empty generation.
final long safeGeneration = expectedGen == RepositoryData.EMPTY_REPO_GEN ? RepositoryData.EMPTY_REPO_GEN : (uninitializedMeta ? expectedGen : genInState);
// Regardless of whether the safe generation has been reset, the pending generation always increments so that
// even if a repository has been manually cleared of all contents we never reuse a repository generation.
// This is motivated by the S3-based blob repository implementation, which offers no consistency guarantees
// when overwriting the same blob name with different content.
final long nextPendingGen = metadata.pendingGeneration() + 1;
newGen = uninitializedMeta ? Math.max(expectedGen + 1, nextPendingGen) : nextPendingGen;
assert newGen > latestKnownRepoGen.get() : "Attempted new generation [" + newGen + "] must be larger than latest known generation [" + latestKnownRepoGen.get() + "]";
return ClusterState.builder(currentState).metadata(Metadata.builder(currentState.getMetadata()).putCustom(RepositoriesMetadata.TYPE, currentState.metadata().<RepositoriesMetadata>custom(RepositoriesMetadata.TYPE).withUpdatedGeneration(repoName, safeGeneration, newGen)).build()).build();
}
@Override
public void onFailure(String source, Exception e) {
listener.onFailure(new RepositoryException(metadata.name(), "Failed to execute cluster state update [" + source + "]", e));
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
setPendingStep.onResponse(newGen);
}
});
final StepListener<RepositoryData> filterRepositoryDataStep = new StepListener<>();
// Step 2: Write new index-N blob to repository and update index.latest
setPendingStep.whenComplete(newGen -> threadPool().executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(listener, l -> {
// BwC logic: Load snapshot version information if any snapshot is missing a version in RepositoryData so that the new
// RepositoryData contains a version for every snapshot
final List<SnapshotId> snapshotIdsWithoutVersion = repositoryData.getSnapshotIds().stream().filter(snapshotId -> repositoryData.getVersion(snapshotId) == null).collect(Collectors.toList());
if (snapshotIdsWithoutVersion.isEmpty() == false) {
final Map<SnapshotId, Version> updatedVersionMap = new ConcurrentHashMap<>();
final GroupedActionListener<Void> loadAllVersionsListener = new GroupedActionListener<>(ActionListener.runAfter(new ActionListener<Collection<Void>>() {
@Override
public void onResponse(Collection<Void> voids) {
LOGGER.info("Successfully loaded all snapshot's version information for {} from snapshot metadata", AllocationService.firstListElementsToCommaDelimitedString(snapshotIdsWithoutVersion, SnapshotId::toString, LOGGER.isDebugEnabled()));
}
@Override
public void onFailure(Exception e) {
LOGGER.warn("Failure when trying to load missing version information from snapshot metadata", e);
}
}, () -> filterRepositoryDataStep.onResponse(repositoryData.withVersions(updatedVersionMap))), snapshotIdsWithoutVersion.size());
for (SnapshotId snapshotId : snapshotIdsWithoutVersion) {
threadPool().executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.run(loadAllVersionsListener, () -> {
ActionListener<SnapshotInfo> snapshotInfoListener = ActionListener.delegateFailure(loadAllVersionsListener, (delegate, snapshotInfo) -> {
updatedVersionMap.put(snapshotId, snapshotInfo.version());
delegate.onResponse(null);
});
getSnapshotInfo(snapshotId, snapshotInfoListener);
}));
}
} else {
filterRepositoryDataStep.onResponse(repositoryData);
}
})), listener::onFailure);
filterRepositoryDataStep.whenComplete(filteredRepositoryData -> {
final long newGen = setPendingStep.result();
if (latestKnownRepoGen.get() >= newGen) {
throw new IllegalArgumentException("Tried writing generation [" + newGen + "] but repository is at least at generation [" + latestKnownRepoGen.get() + "] already");
}
// write the index file
final String indexBlob = INDEX_FILE_PREFIX + Long.toString(newGen);
LOGGER.debug("Repository [{}] writing new index generational blob [{}]", metadata.name(), indexBlob);
writeAtomic(indexBlob, BytesReference.bytes(filteredRepositoryData.snapshotsToXContent(XContentFactory.jsonBuilder(), writeShardGens)), true);
// write the current generation to the index-latest file
final BytesReference genBytes;
try (BytesStreamOutput bStream = new BytesStreamOutput()) {
bStream.writeLong(newGen);
genBytes = bStream.bytes();
}
LOGGER.debug("Repository [{}] updating index.latest with generation [{}]", metadata.name(), newGen);
writeAtomic(INDEX_LATEST_BLOB, genBytes, false);
// Step 3: Update CS to reflect new repository generation.
clusterService.submitStateUpdateTask("set safe repository generation [" + metadata.name() + "][" + newGen + "]", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
final RepositoryMetadata meta = getRepoMetadata(currentState);
if (meta.generation() != expectedGen) {
throw new IllegalStateException("Tried to update repo generation to [" + newGen + "] but saw unexpected generation in state [" + meta + "]");
}
if (meta.pendingGeneration() != newGen) {
throw new IllegalStateException("Tried to update from unexpected pending repo generation [" + meta.pendingGeneration() + "] after write to generation [" + newGen + "]");
}
return ClusterState.builder(currentState).metadata(Metadata.builder(currentState.getMetadata()).putCustom(RepositoriesMetadata.TYPE, currentState.metadata().<RepositoriesMetadata>custom(RepositoriesMetadata.TYPE).withUpdatedGeneration(metadata.name(), newGen, newGen)).build()).build();
}
@Override
public void onFailure(String source, Exception e) {
listener.onFailure(new RepositoryException(metadata.name(), "Failed to execute cluster state update [" + source + "]", e));
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.run(listener, () -> {
// Delete all now-outdated index files up to 1000 blobs back from the new generation.
// If there are more than 1000 dangling index-N blobs, the cleanup functionality that runs on repository delete will take care of them.
// Deleting one generation older than the current expectedGen is done for BwC reasons, as older versions used to keep
// two index-N blobs around.
final List<String> oldIndexN = LongStream.range(Math.max(Math.max(expectedGen - 1, 0), newGen - 1000), newGen).mapToObj(gen -> INDEX_FILE_PREFIX + gen).collect(Collectors.toList());
try {
blobContainer().deleteBlobsIgnoringIfNotExists(oldIndexN);
} catch (IOException e) {
LOGGER.warn("Failed to clean up old index blobs {}", oldIndexN);
}
}));
}
});
}, listener::onFailure);
}
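A minimal, self-contained sketch of the pending/safe generation handshake described in the javadoc above. The class and field names here are illustrative assumptions, not CrateDB or Elasticsearch API; in the real code this state lives in RepositoryMetadata inside the cluster state rather than in a local object.
// Hypothetical model of the three-step protocol; names are assumptions.
final class GenerationTracker {
    private long safeGen;     // N: last generation known to be fully written
    private long pendingGen;  // P: highest generation ever attempted

    // Step 1: reserve the next generation; pending moves ahead of safe.
    synchronized long beginWrite() {
        pendingGen += 1;      // monotonic, never reused even after a repository wipe
        return pendingGen;    // Step 2 then writes the index-(pendingGen) blob
    }

    // Step 3: once the index blob for newGen is durably written, publish it as safe.
    synchronized void commit(long newGen) {
        if (newGen != pendingGen) {
            throw new IllegalStateException("unexpected pending generation [" + pendingGen + "]");
        }
        safeGen = newGen;     // pending == safe again: the update is complete
    }
}
A crash between the steps leaves the pending generation ahead of the safe one, which is exactly the mismatch the LOGGER.info branch in execute() reports on the next write attempt.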
use of org.elasticsearch.threadpool.ThreadPool in project crate by crate.
The class TransportReplicationAllPermitsAcquisitionTests, method setUp.
@Override
@Before
public void setUp() throws Exception {
super.setUp();
globalBlock = randomBoolean();
RestStatus restStatus = randomFrom(RestStatus.values());
block = new ClusterBlock(randomIntBetween(1, 10), randomAlphaOfLength(5), false, true, false, restStatus, ClusterBlockLevel.ALL);
clusterService = createClusterService(threadPool);
final ClusterState.Builder state = ClusterState.builder(clusterService.state());
Set<DiscoveryNodeRole> roles = new HashSet<>(DiscoveryNodeRole.BUILT_IN_ROLES);
DiscoveryNode node1 = new DiscoveryNode("_name1", "_node1", buildNewFakeTransportAddress(), emptyMap(), roles, Version.CURRENT);
DiscoveryNode node2 = new DiscoveryNode("_name2", "_node2", buildNewFakeTransportAddress(), emptyMap(), roles, Version.CURRENT);
state.nodes(DiscoveryNodes.builder().add(node1).add(node2).localNodeId(node1.getId()).masterNodeId(node1.getId()));
shardId = new ShardId("index", UUID.randomUUID().toString(), 0);
ShardRouting shardRouting = newShardRouting(shardId, node1.getId(), true, ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE);
Settings indexSettings = Settings.builder().put(SETTING_VERSION_CREATED, Version.CURRENT).put(SETTING_INDEX_UUID, shardId.getIndex().getUUID()).put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 1).put(SETTING_CREATION_DATE, System.currentTimeMillis()).build();
primary = newStartedShard(p -> newShard(shardRouting, indexSettings, new InternalEngineFactory()), true);
for (int i = 0; i < 10; i++) {
final String id = Integer.toString(i);
indexDoc(primary, id, "{\"value\":" + id + "}");
}
IndexMetadata indexMetadata = IndexMetadata.builder(shardId.getIndexName()).settings(indexSettings).primaryTerm(shardId.id(), primary.getOperationPrimaryTerm()).putMapping("default", "{ \"properties\": { \"value\": { \"type\": \"short\"}}}").build();
state.metadata(Metadata.builder().put(indexMetadata, false).generateClusterUuidIfNeeded());
replica = newShard(primary.shardId(), false, node2.getId(), indexMetadata, null);
recoverReplica(replica, primary, true);
IndexRoutingTable.Builder routing = IndexRoutingTable.builder(indexMetadata.getIndex());
routing.addIndexShard(new IndexShardRoutingTable.Builder(shardId).addShard(primary.routingEntry()).build());
state.routingTable(RoutingTable.builder().add(routing.build()).build());
setState(clusterService, state.build());
final Settings transportSettings = Settings.builder().put("node.name", node1.getId()).build();
MockTransport transport = new MockTransport() {
@Override
protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode node) {
assertThat(action, allOf(startsWith("cluster:admin/test/"), endsWith("[r]")));
assertThat(node, equalTo(node2));
// node2 doesn't really exist, but we are performing some trickery in mockIndicesService() to pretend that node1 holds both
// the primary and the replica, so redirect the request back to node1.
transportService.sendRequest(transportService.getLocalNode(), action, request, new TransportResponseHandler<TransportReplicationAction.ReplicaResponse>() {
@Override
public ReplicaResponse read(StreamInput in) throws IOException {
return new ReplicaResponse(in);
}
@SuppressWarnings("unchecked")
private TransportResponseHandler<TransportReplicationAction.ReplicaResponse> getResponseHandler() {
return (TransportResponseHandler<TransportReplicationAction.ReplicaResponse>) getResponseHandlers().onResponseReceived(requestId, TransportMessageListener.NOOP_LISTENER);
}
@Override
public void handleResponse(TransportReplicationAction.ReplicaResponse response) {
getResponseHandler().handleResponse(response);
}
@Override
public void handleException(TransportException exp) {
getResponseHandler().handleException(exp);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
});
}
};
transportService = transport.createTransportService(transportSettings, threadPool, bta -> node1, null);
transportService.start();
transportService.acceptIncomingRequests();
shardStateAction = new ShardStateAction(clusterService, transportService, null, null);
}
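Not shown in the snippet: the fixture allocates shards, a cluster service, and a transport service that all need explicit cleanup. A plausible companion tearDown, sketched under the assumption that the test class extends IndexShardTestCase (which provides closeShards), would be:
@Override
@After
public void tearDown() throws Exception {
    try {
        transportService.stop();
        transportService.close();
        clusterService.close();
        closeShards(primary, replica);  // closeShards is assumed from IndexShardTestCase
    } finally {
        super.tearDown();
    }
}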
use of org.elasticsearch.threadpool.ThreadPool in project crate by crate.
The class NodeConnectionsServiceTests, method setUp.
@Override
@Before
public void setUp() throws Exception {
super.setUp();
ThreadPool threadPool = new TestThreadPool(getClass().getName());
this.threadPool = threadPool;
nodeConnectionBlocks = newConcurrentMap();
transportService = new TestTransportService(new MockTransport(threadPool), threadPool);
transportService.start();
transportService.acceptIncomingRequests();
}
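Also not shown: a TestThreadPool created in setUp must be terminated explicitly, or its workers outlive the test and trip thread-leak checks. A likely companion tearDown (a sketch, not taken from the source) is:
@Override
@After
public void tearDown() throws Exception {
    transportService.stop();
    transportService.close();
    // The transport service does not own the pool, so shut it down explicitly.
    ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
    super.tearDown();
}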
use of org.elasticsearch.threadpool.ThreadPool in project crate by crate.
The class DeterministicTaskQueueTests, method testThreadPoolSchedulesFutureTasks.
public void testThreadPoolSchedulesFutureTasks() {
final DeterministicTaskQueue taskQueue = newTaskQueue();
advanceToRandomTime(taskQueue);
final long startTime = taskQueue.getCurrentTimeMillis();
final List<String> strings = new ArrayList<>(5);
final ThreadPool threadPool = taskQueue.getThreadPool();
final long delayMillis = randomLongBetween(1, 100);
threadPool.schedule(() -> strings.add("deferred"), TimeValue.timeValueMillis(delayMillis), GENERIC);
assertFalse(taskQueue.hasRunnableTasks());
assertTrue(taskQueue.hasDeferredTasks());
threadPool.schedule(() -> strings.add("runnable"), TimeValue.ZERO, GENERIC);
assertTrue(taskQueue.hasRunnableTasks());
threadPool.schedule(() -> strings.add("also runnable"), TimeValue.MINUS_ONE, GENERIC);
taskQueue.runAllTasks();
assertThat(taskQueue.getCurrentTimeMillis(), is(startTime + delayMillis));
assertThat(strings, containsInAnyOrder("runnable", "also runnable", "deferred"));
final long delayMillis1 = randomLongBetween(2, 100);
final long delayMillis2 = randomLongBetween(1, delayMillis1 - 1);
threadPool.schedule(() -> strings.add("further deferred"), TimeValue.timeValueMillis(delayMillis1), GENERIC);
threadPool.schedule(() -> strings.add("not quite so deferred"), TimeValue.timeValueMillis(delayMillis2), GENERIC);
assertFalse(taskQueue.hasRunnableTasks());
assertTrue(taskQueue.hasDeferredTasks());
taskQueue.runAllTasks();
assertThat(taskQueue.getCurrentTimeMillis(), is(startTime + delayMillis + delayMillis1));
final TimeValue cancelledDelay = TimeValue.timeValueMillis(randomLongBetween(1, 100));
final Scheduler.Cancellable cancelledBeforeExecution = threadPool.schedule(() -> strings.add("cancelled before execution"), cancelledDelay, "");
cancelledBeforeExecution.cancel();
taskQueue.runAllTasks();
assertThat(strings, containsInAnyOrder("runnable", "also runnable", "deferred", "not quite so deferred", "further deferred"));
}
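The assertions on getCurrentTimeMillis() hold because the task queue runs on a virtual clock rather than wall-clock time. The following model is illustrative only; the names do not match the real DeterministicTaskQueue internals, and it assumes java.util.PriorityQueue and java.util.Comparator:
// Deferred tasks are ordered by execution time; "running" them simply
// fast-forwards a virtual clock, so the test never sleeps.
record Deferred(long executionTimeMillis, Runnable task) {}

final PriorityQueue<Deferred> deferredTasks =
    new PriorityQueue<>(Comparator.comparingLong(Deferred::executionTimeMillis));
long currentTimeMillis;

void schedule(long delayMillis, Runnable task) {
    // Zero and negative delays become immediately runnable tasks.
    deferredTasks.add(new Deferred(currentTimeMillis + Math.max(0, delayMillis), task));
}

void runAllTasks() {
    while (deferredTasks.isEmpty() == false) {
        Deferred next = deferredTasks.poll();
        currentTimeMillis = Math.max(currentTimeMillis, next.executionTimeMillis());
        next.task().run();
    }
}
This also explains the cancellation case at the end of the test: cancel() removes the entry before the clock reaches it, which is why "cancelled before execution" never shows up in the final assertion.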
use of org.elasticsearch.threadpool.ThreadPool in project crate by crate.
The class MasterServiceTests, method testAcking.
public void testAcking() throws InterruptedException {
final DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
final DiscoveryNode node3 = new DiscoveryNode("node3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
try (MasterService masterService = new MasterService(Settings.builder().put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()).put(Node.NODE_NAME_SETTING.getKey(), "test_node").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool)) {
final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName())).nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3).localNodeId(node1.getId()).masterNodeId(node1.getId())).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).build();
final AtomicReference<ClusterStatePublisher> publisherRef = new AtomicReference<>();
masterService.setClusterStatePublisher((e, pl, al) -> publisherRef.get().publish(e, pl, al));
masterService.setClusterStateSupplier(() -> initialClusterState);
masterService.start();
// check that we don't time out before even committing the cluster state
{
final CountDownLatch latch = new CountDownLatch(1);
publisherRef.set((clusterChangedEvent, publishListener, ackListener) -> publishListener.onFailure(new FailedToCommitClusterStateException("mock exception")));
masterService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask<Void>(null, null) {
@Override
public ClusterState execute(ClusterState currentState) {
return ClusterState.builder(currentState).build();
}
@Override
public TimeValue ackTimeout() {
return TimeValue.ZERO;
}
@Override
public TimeValue timeout() {
return null;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
fail();
}
@Override
protected Void newResponse(boolean acknowledged) {
fail();
return null;
}
@Override
public void onFailure(String source, Exception e) {
latch.countDown();
}
@Override
public void onAckTimeout() {
fail();
}
});
latch.await();
}
// check that we time out if the commit takes too long
{
final CountDownLatch latch = new CountDownLatch(2);
final TimeValue ackTimeout = TimeValue.timeValueMillis(randomInt(100));
publisherRef.set((clusterChangedEvent, publishListener, ackListener) -> {
publishListener.onResponse(null);
ackListener.onCommit(TimeValue.timeValueMillis(ackTimeout.millis() + randomInt(100)));
ackListener.onNodeAck(node1, null);
ackListener.onNodeAck(node2, null);
ackListener.onNodeAck(node3, null);
});
masterService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask<Void>(null, null) {
@Override
public ClusterState execute(ClusterState currentState) {
return ClusterState.builder(currentState).build();
}
@Override
public TimeValue ackTimeout() {
return ackTimeout;
}
@Override
public TimeValue timeout() {
return null;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
protected Void newResponse(boolean acknowledged) {
fail();
return null;
}
@Override
public void onFailure(String source, Exception e) {
fail();
}
@Override
public void onAckTimeout() {
latch.countDown();
}
});
latch.await();
}
}
}
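The second block passes because the ack bookkeeping charges the commit time against the ack timeout. A simplified sketch of that decision follows; the helper names are hypothetical, and the real logic lives in MasterService's internal ack listener:
// Sketch only: whatever is left of ackTimeout() after the commit becomes
// the budget for collecting node acks.
void onCommit(TimeValue commitTime) {
    long timeLeftMillis = ackTimeout().millis() - commitTime.millis();
    if (timeLeftMillis <= 0) {
        onAckTimeout();                      // budget already spent during commit
    } else {
        scheduleAckTimeout(timeLeftMillis);  // hypothetical scheduling helper
    }
}
In the test, onCommit is handed ackTimeout.millis() + randomInt(100), so the remaining budget is never positive and onAckTimeout() counts the latch down.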