Use of org.elasticsearch.threadpool.ThreadPool in project crate by crate.
In the class PeerRecoveryTargetService, the method doRecovery:
private void doRecovery(final long recoveryId) {
    final StartRecoveryRequest request;
    final RecoveryState.Timer timer;
    CancellableThreads cancellableThreads;
    try (RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) {
        if (recoveryRef == null) {
            LOGGER.trace("not running recovery with id [{}] - can not find it (probably finished)", recoveryId);
            return;
        }
        final RecoveryTarget recoveryTarget = recoveryRef.target();
        timer = recoveryTarget.state().getTimer();
        cancellableThreads = recoveryTarget.cancellableThreads();
        try {
            assert recoveryTarget.sourceNode() != null : "can not do a recovery without a source node";
            LOGGER.trace("{} preparing shard for peer recovery", recoveryTarget.shardId());
            recoveryTarget.indexShard().prepareForIndexRecovery();
            final long startingSeqNo = recoveryTarget.indexShard().recoverLocallyUpToGlobalCheckpoint();
            assert startingSeqNo == UNASSIGNED_SEQ_NO || recoveryTarget.state().getStage() == RecoveryState.Stage.TRANSLOG :
                "unexpected recovery stage [" + recoveryTarget.state().getStage() + "] starting seqno [ " + startingSeqNo + "]";
            request = getStartRecoveryRequest(LOGGER, clusterService.localNode(), recoveryTarget, startingSeqNo);
        } catch (final Exception e) {
            // this will be logged as warning later on...
            LOGGER.trace("unexpected error while preparing shard for peer recovery, failing recovery", e);
            onGoingRecoveries.failRecovery(
                recoveryId,
                new RecoveryFailedException(recoveryTarget.state(), "failed to prepare shard for recovery", e),
                true);
            return;
        }
    }
    Consumer<Exception> handleException = e -> {
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace(() -> new ParameterizedMessage(
                "[{}][{}] Got exception on recovery",
                request.shardId().getIndex().getName(),
                request.shardId().id()), e);
        }
        Throwable cause = SQLExceptions.unwrap(e);
        if (cause instanceof CancellableThreads.ExecutionCancelledException) {
            // this can also come from the source wrapped in a RemoteTransportException
            onGoingRecoveries.failRecovery(
                recoveryId,
                new RecoveryFailedException(request, "source has canceled the recovery", cause),
                false);
            return;
        }
        if (cause instanceof RecoveryEngineException) {
            // unwrap an exception that was thrown as part of the recovery
            cause = cause.getCause();
        }
        // do it twice, in case we have double transport exception
        cause = SQLExceptions.unwrap(cause);
        if (cause instanceof RecoveryEngineException) {
            // unwrap an exception that was thrown as part of the recovery
            cause = cause.getCause();
        }
        if (cause instanceof IllegalIndexShardStateException ||
            cause instanceof IndexNotFoundException ||
            cause instanceof ShardNotFoundException) {
            // if the target is not ready yet, retry
            retryRecovery(
                recoveryId,
                "remote shard not ready",
                recoverySettings.retryDelayStateSync(),
                recoverySettings.activityTimeout());
            return;
        }
        if (cause instanceof DelayRecoveryException) {
            retryRecovery(recoveryId, cause, recoverySettings.retryDelayStateSync(), recoverySettings.activityTimeout());
            return;
        }
        if (cause instanceof ConnectTransportException) {
            LOGGER.debug("delaying recovery of {} for [{}] due to networking error [{}]",
                         request.shardId(), recoverySettings.retryDelayNetwork(), cause.getMessage());
            retryRecovery(recoveryId, cause.getMessage(), recoverySettings.retryDelayNetwork(), recoverySettings.activityTimeout());
            return;
        }
        if (cause instanceof AlreadyClosedException) {
            onGoingRecoveries.failRecovery(
                recoveryId,
                new RecoveryFailedException(request, "source shard is closed", cause),
                false);
            return;
        }
        onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(request, e), true);
    };
    try {
        LOGGER.trace("{} starting recovery from {}", request.shardId(), request.sourceNode());
        cancellableThreads.executeIO(() -> transportService.sendRequest(
            request.sourceNode(),
            PeerRecoverySourceService.Actions.START_RECOVERY,
            request,
            new TransportResponseHandler<RecoveryResponse>() {

                @Override
                public void handleResponse(RecoveryResponse recoveryResponse) {
                    final TimeValue recoveryTime = new TimeValue(timer.time());
                    // do this through ongoing recoveries to remove it from the collection
                    onGoingRecoveries.markRecoveryAsDone(recoveryId);
                    if (LOGGER.isTraceEnabled()) {
                        StringBuilder sb = new StringBuilder();
                        sb.append('[').append(request.shardId().getIndex().getName()).append(']')
                          .append('[').append(request.shardId().id()).append("] ");
                        sb.append("recovery completed from ").append(request.sourceNode())
                          .append(", took[").append(recoveryTime).append("]\n");
                        sb.append(" phase1: recovered_files [").append(recoveryResponse.phase1FileNames.size()).append("]")
                          .append(" with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1TotalSize)).append("]")
                          .append(", took [").append(timeValueMillis(recoveryResponse.phase1Time)).append("], throttling_wait [")
                          .append(timeValueMillis(recoveryResponse.phase1ThrottlingWaitTime)).append(']').append("\n");
                        sb.append(" : reusing_files [").append(recoveryResponse.phase1ExistingFileNames.size())
                          .append("] with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1ExistingTotalSize)).append("]\n");
                        sb.append(" phase2: start took [").append(timeValueMillis(recoveryResponse.startTime)).append("]\n");
                        sb.append(" : recovered [").append(recoveryResponse.phase2Operations).append("]")
                          .append(" transaction log operations")
                          .append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]")
                          .append("\n");
                        LOGGER.trace("{}", sb);
                    } else {
                        LOGGER.debug("{} recovery done from [{}], took [{}]", request.shardId(), request.sourceNode(), recoveryTime);
                    }
                }

                @Override
                public void handleException(TransportException e) {
                    handleException.accept(e);
                }

                @Override
                public String executor() {
                    // we do some heavy work like refreshes in the response so fork off to the generic threadpool
                    return ThreadPool.Names.GENERIC;
                }

                @Override
                public RecoveryResponse read(StreamInput in) throws IOException {
                    return new RecoveryResponse(in);
                }
            }));
    } catch (CancellableThreads.ExecutionCancelledException e) {
        LOGGER.trace("recovery cancelled", e);
    } catch (Exception e) {
        handleException.accept(e);
    }
}
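The retry path used by handleException (retryRecovery) is not part of this snippet. A minimal sketch of how such a delayed retry is typically rescheduled through the ThreadPool, assuming the standard scheduleUnlessShuttingDown API; the class and method names here are illustrative, not the actual CrateDB implementation:

import org.elasticsearch.threadpool.ThreadPool;

// TimeValue lives in io.crate.common.unit in the crate fork; upstream it is
// org.elasticsearch.common.unit.TimeValue - adjust the import as needed.
import io.crate.common.unit.TimeValue;

final class RecoveryRetryScheduler {

    private final ThreadPool threadPool;

    RecoveryRetryScheduler(ThreadPool threadPool) {
        this.threadPool = threadPool;
    }

    // Re-run the recovery after a delay without blocking the calling thread.
    // scheduleUnlessShuttingDown drops the task if the node is already shutting down.
    void retryLater(Runnable doRecovery, TimeValue retryAfter) {
        threadPool.scheduleUnlessShuttingDown(retryAfter, ThreadPool.Names.GENERIC, doRecovery);
    }
}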
Use of org.elasticsearch.threadpool.ThreadPool in project crate by crate.
In the class SysSnapshotsTest, the method testQueryAllColumns:
@Test
public void testQueryAllColumns() throws Exception {
    execute("create table tbl (id int primary key) ");
    Object[][] bulkArgs = new Object[10][];
    for (int i = 0; i < 10; i++) {
        bulkArgs[i] = new Object[] { i };
    }
    execute("insert into tbl (id) values (?)", bulkArgs);
    execute("refresh table tbl");
    ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class);
    execute(
        "CREATE REPOSITORY r1 TYPE fs WITH (location = ?, compress = true)",
        new Object[] { TEMP_FOLDER.newFolder("backup_s1").getAbsolutePath() });
    long createdTime = threadPool.absoluteTimeInMillis();
    execute("CREATE SNAPSHOT r1.s1 TABLE tbl WITH (wait_for_completion = true)");
    long finishedTime = threadPool.absoluteTimeInMillis();
    execute("select * from sys.snapshots");
    assertThat(response.rowCount(), is(1L));
    assertThat(response.cols(), arrayContaining(
        "concrete_indices", "failures", "finished", "name", "repository",
        "started", "state", "table_partitions", "tables", "version"));
    ArrayType<String> stringArray = new ArrayType<>(DataTypes.STRING);
    assertThat(response.columnTypes(), arrayContaining(
        stringArray,
        stringArray,
        TimestampType.INSTANCE_WITH_TZ,
        StringType.INSTANCE,
        StringType.INSTANCE,
        TimestampType.INSTANCE_WITH_TZ,
        StringType.INSTANCE,
        new ArrayType<>(ObjectType.builder()
            .setInnerType("values", stringArray)
            .setInnerType("table_schema", StringType.INSTANCE)
            .setInnerType("table_name", StringType.INSTANCE)
            .build()),
        stringArray,
        StringType.INSTANCE));
    Object[] firstRow = response.rows()[0];
    assertThat((List<Object>) firstRow[0], Matchers.contains(getFqn("tbl")));
    assertThat((List<Object>) firstRow[1], Matchers.empty());
    assertThat((Long) firstRow[2], lessThanOrEqualTo(finishedTime));
    assertThat(firstRow[3], is("s1"));
    assertThat(firstRow[4], is("r1"));
    assertThat((Long) firstRow[5], greaterThanOrEqualTo(createdTime));
    assertThat(firstRow[6], is(SnapshotState.SUCCESS.name()));
    assertThat((List<Object>) firstRow[7], Matchers.empty());
    assertThat((List<Object>) firstRow[8], Matchers.contains(getFqn("tbl")));
    assertThat(firstRow[9], is(Version.CURRENT.toString()));
}
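The test brackets the snapshot with threadPool.absoluteTimeInMillis() rather than System.currentTimeMillis(), presumably because that is the same cached clock the server-side snapshot timestamps are taken from; the clock is only refreshed on a fixed interval, which is why the assertions use lessThanOrEqualTo/greaterThanOrEqualTo instead of strict comparisons. A minimal sketch of the bracketing pattern, assuming only the standard ThreadPool clock accessor:

import org.elasticsearch.threadpool.ThreadPool;

final class ClockBracket {

    // Run an action and return the [before, after] bounds taken from the
    // ThreadPool's cached absolute clock. Because the clock advances in coarse
    // steps, callers should compare with <= / >= rather than < / >.
    static long[] bracket(ThreadPool threadPool, Runnable action) {
        long before = threadPool.absoluteTimeInMillis();
        action.run();
        long after = threadPool.absoluteTimeInMillis();
        return new long[] { before, after };
    }
}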
Use of org.elasticsearch.threadpool.ThreadPool in project crate by crate.
In the class SnapshotRestoreIntegrationTest, the method getRepositoryData:
private RepositoryData getRepositoryData() throws Exception {
    RepositoriesService service = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName());
    Repository repository = service.repository(REPOSITORY_NAME);
    ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, internalCluster().getMasterName());
    final SetOnce<RepositoryData> repositoryData = new SetOnce<>();
    final CountDownLatch latch = new CountDownLatch(1);
    threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> {
        repositoryData.set(ESBlobStoreTestCase.getRepositoryData(repository));
        latch.countDown();
    });
    latch.await();
    return repositoryData.get();
}
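The helper runs the blob-store read on the SNAPSHOT executor and blocks the test thread on a latch until the result is available. The same "run on a named executor and wait" pattern, pulled out into a generic sketch; this is a hypothetical helper, not CrateDB code, and like the original it does not propagate failures from the executor thread:

import java.util.concurrent.CountDownLatch;
import java.util.function.Supplier;

import org.apache.lucene.util.SetOnce;
import org.elasticsearch.threadpool.ThreadPool;

final class ExecutorCalls {

    // Execute the supplier on the named ThreadPool executor and block the caller
    // until it has finished. If the supplier throws, the latch is never counted
    // down and the caller keeps waiting - the original helper has the same caveat.
    static <T> T callOn(ThreadPool threadPool, String executorName, Supplier<T> supplier) throws InterruptedException {
        SetOnce<T> result = new SetOnce<>();
        CountDownLatch latch = new CountDownLatch(1);
        threadPool.executor(executorName).execute(() -> {
            result.set(supplier.get());
            latch.countDown();
        });
        latch.await();
        return result.get();
    }
}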
Use of org.elasticsearch.threadpool.ThreadPool in project crate by crate.
In the class BlobHeadRequestHandlerTests, the method setUpResources:
@Before
public void setUpResources() throws Exception {
    threadPool = new TestThreadPool(getClass().getName());
    var transport = new CapturingTransport() {
        @Override
        protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode node) {
            super.onSendRequest(requestId, action, request, node);
            handleResponse(requestId, TransportResponse.Empty.INSTANCE);
        }
    };
    clusterService = ClusterServiceUtils.createClusterService(threadPool);
    transportService = transport.createTransportService(
        clusterService.getSettings(),
        threadPool,
        boundAddress -> clusterService.localNode(),
        null);
    transportService.start();
    transportService.acceptIncomingRequests();
}
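A TestThreadPool created in @Before has to be terminated after the test, otherwise its worker threads leak across test cases. The matching teardown is not part of this snippet; a typical counterpart, assuming the standard ThreadPool.terminate helper, might look like this:

import java.util.concurrent.TimeUnit;

import org.junit.After;

@After
public void tearDownResources() throws Exception {
    transportService.close();
    clusterService.close();
    // Blocks until the pool's executors have terminated or the timeout elapses.
    ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
}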
Use of org.elasticsearch.threadpool.ThreadPool in project crate by crate.
In the class IndexWriterProjectorTest, the method testIndexWriter:
@Test
public void testIndexWriter() throws Throwable {
    execute("create table bulk_import (id int primary key, name string) with (number_of_replicas=0)");
    ensureGreen();
    InputCollectExpression sourceInput = new InputCollectExpression(1);
    List<CollectExpression<Row, ?>> collectExpressions = Collections.<CollectExpression<Row, ?>>singletonList(sourceInput);
    RelationName bulkImportIdent = new RelationName(sqlExecutor.getCurrentSchema(), "bulk_import");
    ClusterState state = clusterService().state();
    Settings tableSettings = TableSettingsResolver.get(state.getMetadata(), bulkImportIdent, false);
    ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class);
    IndexWriterProjector writerProjector = new IndexWriterProjector(
        clusterService(),
        new NodeLimits(new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
        new NoopCircuitBreaker("dummy"),
        RamAccounting.NO_ACCOUNTING,
        threadPool.scheduler(),
        threadPool.executor(ThreadPool.Names.SEARCH),
        CoordinatorTxnCtx.systemTransactionContext(),
        new NodeContext(internalCluster().getInstance(Functions.class)),
        Settings.EMPTY,
        IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.get(tableSettings),
        NumberOfReplicas.fromSettings(tableSettings, state.getNodes().getSize()),
        internalCluster().getInstance(TransportCreatePartitionsAction.class),
        internalCluster().getInstance(TransportShardUpsertAction.class)::execute,
        IndexNameResolver.forTable(bulkImportIdent),
        new Reference(new ReferenceIdent(bulkImportIdent, DocSysColumns.RAW), RowGranularity.DOC, DataTypes.STRING, 0, null),
        Collections.singletonList(ID_IDENT),
        Collections.<Symbol>singletonList(new InputColumn(0)),
        null,
        null,
        sourceInput,
        collectExpressions,
        20,
        null,
        null,
        false,
        false,
        UUID.randomUUID(),
        UpsertResultContext.forRowCount(),
        false);
    BatchIterator rowsIterator = InMemoryBatchIterator.of(
        IntStream.range(0, 100)
            .mapToObj(i -> new RowN(new Object[] { i, "{\"id\": " + i + ", \"name\": \"Arthur\"}" }))
            .collect(Collectors.toList()),
        SENTINEL,
        true);
    TestingRowConsumer consumer = new TestingRowConsumer();
    consumer.accept(writerProjector.apply(rowsIterator), null);
    Bucket objects = consumer.getBucket();
    assertThat(objects, contains(isRow(100L)));
    execute("refresh table bulk_import");
    execute("select count(*) from bulk_import");
    assertThat(response.rowCount(), is(1L));
    assertThat(response.rows()[0][0], is(100L));
}
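The projector is handed two different things from the same ThreadPool: the shared scheduler, for delayed tasks, and the bounded SEARCH executor, for the actual indexing work. A minimal sketch of pulling those out, using only the standard ThreadPool accessors; the wrapper class is illustrative, not CrateDB code:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.ScheduledExecutorService;

import org.elasticsearch.threadpool.ThreadPool;

final class ProjectorThreading {

    // The scheduler is a single shared ScheduledExecutorService, so heavy work
    // should be handed to one of the named, bounded executors instead.
    static void wire(ThreadPool threadPool) {
        ScheduledExecutorService scheduler = threadPool.scheduler();
        ExecutorService searchExecutor = threadPool.executor(ThreadPool.Names.SEARCH);
        // ... pass both to the component under construction, as the test above does.
    }
}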