Use of org.elasticsearch.action.support.master.AcknowledgedResponse in project logging-log4j2 by apache.
The class LogstashIT, method createClient.
private static RestHighLevelClient createClient() throws IOException {
    // Instantiate the client.
    LOGGER.info("instantiating the ES client");
    final HttpHost httpHost = new HttpHost(HOST_NAME, MavenHardcodedConstants.ES_PORT);
    final RestClientBuilder clientBuilder = RestClient.builder(httpHost);
    final RestHighLevelClient client = new RestHighLevelClient(clientBuilder);
    // Verify the connection.
    LOGGER.info("verifying the ES connection");
    final ClusterHealthResponse healthResponse =
            client.cluster().health(new ClusterHealthRequest(), RequestOptions.DEFAULT);
    Assertions.assertThat(healthResponse.getStatus()).isNotEqualTo(ClusterHealthStatus.RED);
    // Delete the index.
    LOGGER.info("deleting the ES index");
    final DeleteIndexRequest deleteRequest = new DeleteIndexRequest(MavenHardcodedConstants.ES_INDEX_NAME);
    try {
        final AcknowledgedResponse deleteResponse =
                client.indices().delete(deleteRequest, RequestOptions.DEFAULT);
        Assertions.assertThat(deleteResponse.isAcknowledged()).isTrue();
    } catch (ElasticsearchStatusException error) {
        Assertions.assertThat(error)
                .satisfies(ignored -> Assertions.assertThat(error.status()).isEqualTo(RestStatus.NOT_FOUND));
    }
    return client;
}
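The delete-or-ignore-404 idiom above is easy to reuse. As a minimal sketch (the helper name deleteIndexIfExists is hypothetical and not part of LogstashIT), the same calls can be packaged so that a caller learns whether an index was actually removed:

// Hypothetical helper in the same test-class context, not part of the original code:
// deletes an index if present and reports whether the deletion was acknowledged.
private static boolean deleteIndexIfExists(final RestHighLevelClient client, final String indexName) throws IOException {
    final DeleteIndexRequest deleteRequest = new DeleteIndexRequest(indexName);
    try {
        final AcknowledgedResponse deleteResponse =
                client.indices().delete(deleteRequest, RequestOptions.DEFAULT);
        return deleteResponse.isAcknowledged();
    } catch (ElasticsearchStatusException error) {
        if (error.status() == RestStatus.NOT_FOUND) {
            // The index did not exist in the first place.
            return false;
        }
        throw error;
    }
}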
Use of org.elasticsearch.action.support.master.AcknowledgedResponse in project crate by crate.
The class InsertFromValues, method executeBulk.
@Override
public List<CompletableFuture<Long>> executeBulk(DependencyCarrier dependencies,
                                                 PlannerContext plannerContext,
                                                 List<Row> bulkParams,
                                                 SubQueryResults subQueryResults) {
    DocTableInfo tableInfo = dependencies.schemas().getTableInfo(writerProjection.tableIdent(), Operation.INSERT);
    String[] updateColumnNames;
    Assignments assignments;
    if (writerProjection.onDuplicateKeyAssignments() == null) {
        assignments = null;
        updateColumnNames = null;
    } else {
        assignments = Assignments.convert(writerProjection.onDuplicateKeyAssignments(), dependencies.nodeContext());
        updateColumnNames = assignments.targetNames();
    }
    InputFactory inputFactory = new InputFactory(dependencies.nodeContext());
    InputFactory.Context<CollectExpression<Row, ?>> context =
        inputFactory.ctxForInputColumns(plannerContext.transactionContext());
    var allColumnSymbols = InputColumns.create(
        writerProjection.allTargetColumns(),
        new InputColumns.SourceSymbols(writerProjection.allTargetColumns()));
    ArrayList<Input<?>> insertInputs = new ArrayList<>(allColumnSymbols.size());
    for (Symbol symbol : allColumnSymbols) {
        insertInputs.add(context.add(symbol));
    }
    ArrayList<Input<?>> partitionedByInputs = new ArrayList<>(writerProjection.partitionedBySymbols().size());
    for (Symbol partitionedBySymbol : writerProjection.partitionedBySymbols()) {
        partitionedByInputs.add(context.add(partitionedBySymbol));
    }
    ArrayList<Input<?>> primaryKeyInputs = new ArrayList<>(writerProjection.ids().size());
    for (Symbol symbol : writerProjection.ids()) {
        primaryKeyInputs.add(context.add(symbol));
    }
    Input<?> clusterByInput;
    if (writerProjection.clusteredBy() != null) {
        clusterByInput = context.add(writerProjection.clusteredBy());
    } else {
        clusterByInput = null;
    }
    var indexNameResolver = IndexNameResolver.create(
        writerProjection.tableIdent(),
        writerProjection.partitionIdent(),
        partitionedByInputs);
    ShardUpsertRequest.Builder builder = new ShardUpsertRequest.Builder(
        plannerContext.transactionContext().sessionSettings(),
        BULK_REQUEST_TIMEOUT_SETTING.get(dependencies.settings()),
        writerProjection.isIgnoreDuplicateKeys()
            ? ShardUpsertRequest.DuplicateKeyAction.IGNORE
            : ShardUpsertRequest.DuplicateKeyAction.UPDATE_OR_FAIL,
        // continueOnErrors
        true,
        updateColumnNames,
        writerProjection.allTargetColumns().toArray(new Reference[0]),
        null,
        plannerContext.jobId(),
        true);
    var shardedRequests = new ShardedRequests<>(builder::newRequest, RamAccounting.NO_ACCOUNTING);
    HashMap<String, InsertSourceFromCells> validatorsCache = new HashMap<>();
    IntArrayList bulkIndices = new IntArrayList();
    List<CompletableFuture<Long>> results = createUnsetFutures(bulkParams.size());
    for (int bulkIdx = 0; bulkIdx < bulkParams.size(); bulkIdx++) {
        Row param = bulkParams.get(bulkIdx);
        final Symbol[] assignmentSources;
        if (assignments != null) {
            assignmentSources = assignments.bindSources(tableInfo, param, subQueryResults);
        } else {
            assignmentSources = null;
        }
        GroupRowsByShard<ShardUpsertRequest, ShardUpsertRequest.Item> grouper = createRowsByShardGrouper(
            assignmentSources, insertInputs, indexNameResolver, context, plannerContext, dependencies.clusterService());
        try {
            Iterator<Row> rows = evaluateValueTableFunction(
                tableFunctionRelation.functionImplementation(),
                tableFunctionRelation.function().arguments(),
                writerProjection.allTargetColumns(),
                tableInfo,
                param,
                plannerContext,
                subQueryResults);
            while (rows.hasNext()) {
                Row row = rows.next();
                grouper.accept(shardedRequests, row);
                checkPrimaryKeyValuesNotNull(primaryKeyInputs);
                checkClusterByValueNotNull(clusterByInput);
                checkConstraintsOnGeneratedSource(row.materialize(), indexNameResolver.get(), tableInfo, plannerContext, validatorsCache);
                bulkIndices.add(bulkIdx);
            }
        } catch (Throwable t) {
            for (CompletableFuture<Long> result : results) {
                result.completeExceptionally(t);
            }
            return results;
        }
    }
    validatorsCache.clear();
    var actionProvider = dependencies.transportActionProvider();
    createIndices(
        actionProvider.transportBulkCreateIndicesAction(),
        shardedRequests.itemsByMissingIndex().keySet(),
        dependencies.clusterService(),
        plannerContext.jobId()
    ).thenCompose(acknowledgedResponse -> {
        var shardUpsertRequests = resolveAndGroupShardRequests(shardedRequests, dependencies.clusterService()).values();
        return execute(
            dependencies.nodeLimits(),
            dependencies.clusterService().state(),
            shardUpsertRequests,
            actionProvider.transportShardUpsertAction(),
            dependencies.scheduler());
    }).whenComplete((response, t) -> {
        if (t == null) {
            long[] resultRowCount = createBulkResponse(response, bulkParams.size(), bulkIndices);
            for (int i = 0; i < bulkParams.size(); i++) {
                results.get(i).complete(resultRowCount[i]);
            }
        } else {
            for (CompletableFuture<Long> result : results) {
                result.completeExceptionally(t);
            }
        }
    });
    return results;
}
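Note the shape of the final chain: the bulk futures are only completed after createIndices resolves with its acknowledged response and the grouped shard requests have executed. Below is a minimal, self-contained sketch of that completion pattern using plain CompletableFuture; createMissingIndices and executeShardRequests are hypothetical stand-ins for crate's createIndices and execute helpers, and one future is assumed per bulk parameter.

import java.util.List;
import java.util.concurrent.CompletableFuture;

final class BulkCompletionSketch {

    // Hypothetical stand-in for createIndices(...): resolves once index creation is acknowledged.
    static CompletableFuture<Boolean> createMissingIndices() {
        return CompletableFuture.completedFuture(true);
    }

    // Hypothetical stand-in for execute(...): resolves with per-bulk-parameter row counts.
    static CompletableFuture<long[]> executeShardRequests() {
        return CompletableFuture.completedFuture(new long[] { 1L, 1L });
    }

    // Assumes rowCounts.length == results.size(), i.e. one future per bulk parameter.
    static void run(List<CompletableFuture<Long>> results) {
        createMissingIndices()
            .thenCompose(acknowledged -> executeShardRequests())
            .whenComplete((rowCounts, throwable) -> {
                if (throwable == null) {
                    // Success: complete each bulk parameter's future with its row count.
                    for (int i = 0; i < results.size(); i++) {
                        results.get(i).complete(rowCounts[i]);
                    }
                } else {
                    // Failure: fail every outstanding future with the same cause.
                    for (CompletableFuture<Long> result : results) {
                        result.completeExceptionally(throwable);
                    }
                }
            });
    }
}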
Use of org.elasticsearch.action.support.master.AcknowledgedResponse in project crate by crate.
The class InsertFromValues, method execute.
@Override
public void execute(DependencyCarrier dependencies,
                    PlannerContext plannerContext,
                    RowConsumer consumer,
                    Row params,
                    SubQueryResults subQueryResults) {
    DocTableInfo tableInfo = dependencies.schemas().getTableInfo(writerProjection.tableIdent(), Operation.INSERT);
    // For instance, the target table of the insert from values
    // statement is the table with the following schema:
    //
    //   CREATE TABLE users (
    //       dep_id TEXT,
    //       name TEXT,
    //       id INT,
    //       country_id INT,
    //       PRIMARY KEY (dep_id, id, country_id))
    //   CLUSTERED BY (dep_id)
    //   PARTITIONED BY (country_id)
    //
    // The insert from values statement below would have the column
    // index writer projection of its plan that contains the column
    // idents and symbols required to create corresponding inputs.
    // The projection's column symbols used in the plan relate to the
    // columns of the statement
    //
    //   INSERT INTO users (dep_id, name, id, country_id) VALUES (?, ?, ?, ?)
    //
    // as follows:
    //
    //   cluster-by symbol:           dep_id
    //   PK symbols:                  dep_id, id, country_id
    //   partitioned-by symbols:      country_id
    //   all target column symbols:   dep_id, name, id, country_id
    InputFactory inputFactory = new InputFactory(dependencies.nodeContext());
    InputFactory.Context<CollectExpression<Row, ?>> context =
        inputFactory.ctxForInputColumns(plannerContext.transactionContext());
    var allColumnSymbols = InputColumns.create(
        writerProjection.allTargetColumns(),
        new InputColumns.SourceSymbols(writerProjection.allTargetColumns()));
    ArrayList<Input<?>> insertInputs = new ArrayList<>(allColumnSymbols.size());
    for (Symbol symbol : allColumnSymbols) {
        insertInputs.add(context.add(symbol));
    }
    ArrayList<Input<?>> partitionedByInputs = new ArrayList<>(writerProjection.partitionedBySymbols().size());
    for (Symbol partitionedBySymbol : writerProjection.partitionedBySymbols()) {
        partitionedByInputs.add(context.add(partitionedBySymbol));
    }
    ArrayList<Input<?>> primaryKeyInputs = new ArrayList<>(writerProjection.ids().size());
    for (Symbol symbol : writerProjection.ids()) {
        primaryKeyInputs.add(context.add(symbol));
    }
    Input<?> clusterByInput;
    if (writerProjection.clusteredBy() != null) {
        clusterByInput = context.add(writerProjection.clusteredBy());
    } else {
        clusterByInput = null;
    }
    String[] updateColumnNames;
    Symbol[] assignmentSources;
    if (writerProjection.onDuplicateKeyAssignments() == null) {
        updateColumnNames = null;
        assignmentSources = null;
    } else {
        Assignments assignments = Assignments.convert(writerProjection.onDuplicateKeyAssignments(), dependencies.nodeContext());
        assignmentSources = assignments.bindSources(tableInfo, params, subQueryResults);
        updateColumnNames = assignments.targetNames();
    }
    var indexNameResolver = IndexNameResolver.create(
        writerProjection.tableIdent(),
        writerProjection.partitionIdent(),
        partitionedByInputs);
    GroupRowsByShard<ShardUpsertRequest, ShardUpsertRequest.Item> grouper = createRowsByShardGrouper(
        assignmentSources, insertInputs, indexNameResolver, context, plannerContext, dependencies.clusterService());
    ArrayList<Row> rows = new ArrayList<>();
    evaluateValueTableFunction(
        tableFunctionRelation.functionImplementation(),
        tableFunctionRelation.function().arguments(),
        writerProjection.allTargetColumns(),
        tableInfo,
        params,
        plannerContext,
        subQueryResults
    ).forEachRemaining(rows::add);
    List<Symbol> returnValues = this.writerProjection.returnValues();
    ShardUpsertRequest.Builder builder = new ShardUpsertRequest.Builder(
        plannerContext.transactionContext().sessionSettings(),
        BULK_REQUEST_TIMEOUT_SETTING.get(dependencies.settings()),
        writerProjection.isIgnoreDuplicateKeys()
            ? ShardUpsertRequest.DuplicateKeyAction.IGNORE
            : ShardUpsertRequest.DuplicateKeyAction.UPDATE_OR_FAIL,
        // continueOnErrors
        rows.size() > 1,
        updateColumnNames,
        writerProjection.allTargetColumns().toArray(new Reference[0]),
        returnValues.isEmpty() ? null : returnValues.toArray(new Symbol[0]),
        plannerContext.jobId(),
        false);
    var shardedRequests = new ShardedRequests<>(builder::newRequest, RamAccounting.NO_ACCOUNTING);
    HashMap<String, InsertSourceFromCells> validatorsCache = new HashMap<>();
    for (Row row : rows) {
        grouper.accept(shardedRequests, row);
        try {
            checkPrimaryKeyValuesNotNull(primaryKeyInputs);
            checkClusterByValueNotNull(clusterByInput);
            checkConstraintsOnGeneratedSource(row.materialize(), indexNameResolver.get(), tableInfo, plannerContext, validatorsCache);
        } catch (Throwable t) {
            consumer.accept(null, t);
            return;
        }
    }
    validatorsCache.clear();
    var actionProvider = dependencies.transportActionProvider();
    createIndices(
        actionProvider.transportBulkCreateIndicesAction(),
        shardedRequests.itemsByMissingIndex().keySet(),
        dependencies.clusterService(),
        plannerContext.jobId()
    ).thenCompose(acknowledgedResponse -> {
        var shardUpsertRequests = resolveAndGroupShardRequests(shardedRequests, dependencies.clusterService()).values();
        return execute(
            dependencies.nodeLimits(),
            dependencies.clusterService().state(),
            shardUpsertRequests,
            actionProvider.transportShardUpsertAction(),
            dependencies.scheduler());
    }).whenComplete((response, t) -> {
        if (t == null) {
            if (returnValues.isEmpty()) {
                consumer.accept(InMemoryBatchIterator.of(new Row1((long) response.numSuccessfulWrites()), SENTINEL), null);
            } else {
                consumer.accept(InMemoryBatchIterator.of(new CollectionBucket(response.resultRows()), SENTINEL, false), null);
            }
        } else {
            consumer.accept(null, t);
        }
    });
}
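To make the symbol groups from the comment above concrete, here is a small stand-alone sketch showing which values of one hypothetical parameter row for the users table would feed the all-target-column, primary-key, cluster-by and partitioned-by inputs. Plain Java collections are used as stand-ins for crate's Input and Symbol types, and the row values are invented for illustration.

import java.util.List;
import java.util.Map;

final class SymbolGroupsSketch {

    public static void main(String[] args) {
        // One bound parameter row for:
        // INSERT INTO users (dep_id, name, id, country_id) VALUES (?, ?, ?, ?)
        Map<String, Object> row = Map.of(
            "dep_id", "eng",
            "name", "alice",
            "id", 1,
            "country_id", 42);

        // All target column symbols: every column in the INSERT column list.
        List<Object> allTargetColumns = List.of(row.get("dep_id"), row.get("name"), row.get("id"), row.get("country_id"));
        // PK symbols: PRIMARY KEY (dep_id, id, country_id); checked by checkPrimaryKeyValuesNotNull.
        List<Object> primaryKey = List.of(row.get("dep_id"), row.get("id"), row.get("country_id"));
        // Cluster-by symbol: CLUSTERED BY (dep_id); checked by checkClusterByValueNotNull.
        Object clusterBy = row.get("dep_id");
        // Partitioned-by symbols: PARTITIONED BY (country_id); these drive the IndexNameResolver.
        List<Object> partitionedBy = List.of(row.get("country_id"));

        System.out.println("all target columns: " + allTargetColumns);
        System.out.println("primary key:        " + primaryKey);
        System.out.println("cluster by:         " + clusterBy);
        System.out.println("partitioned by:     " + partitionedBy);
    }
}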
Use of org.elasticsearch.action.support.master.AcknowledgedResponse in project crate by crate.
The class TransportCloseTable, method closeRoutingTable.
/**
 * Step 3 - Move index states from OPEN to CLOSE in cluster state for indices that are ready for closing.
 *
 * @param target
 */
static ClusterState closeRoutingTable(ClusterState currentState,
                                      AlterTableTarget target,
                                      DDLClusterStateService ddlClusterStateService,
                                      Map<Index, ClusterBlock> blockedIndices,
                                      Map<Index, AcknowledgedResponse> results) {
    // Remove the index routing table of closed indices if the cluster is in a mixed version
    // that does not support the replication of closed indices
    final boolean removeRoutingTable = currentState.nodes().getMinNodeVersion().before(Version.V_4_3_0);
    IndexTemplateMetadata templateMetadata = target.templateMetadata();
    ClusterState updatedState;
    if (templateMetadata == null) {
        updatedState = currentState;
    } else {
        Metadata.Builder metadata = Metadata.builder(currentState.metadata());
        metadata.put(closePartitionTemplate(templateMetadata));
        updatedState = ClusterState.builder(currentState).metadata(metadata).build();
    }
    String partition = target.partition();
    if (partition != null) {
        PartitionName partitionName = PartitionName.fromIndexOrTemplate(partition);
        updatedState = ddlClusterStateService.onCloseTablePartition(updatedState, partitionName);
    } else {
        updatedState = ddlClusterStateService.onCloseTable(updatedState, target.table());
    }
    final Metadata.Builder metadata = Metadata.builder(updatedState.metadata());
    final ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(updatedState.blocks());
    final RoutingTable.Builder routingTable = RoutingTable.builder(updatedState.routingTable());
    final Set<String> closedIndices = new HashSet<>();
    for (Map.Entry<Index, AcknowledgedResponse> result : results.entrySet()) {
        final Index index = result.getKey();
        final boolean acknowledged = result.getValue().isAcknowledged();
        try {
            if (acknowledged == false) {
                LOGGER.debug("verification of shards before closing {} failed", index);
                continue;
            }
            final IndexMetadata indexMetadata = metadata.getSafe(index);
            if (indexMetadata.getState() == IndexMetadata.State.CLOSE) {
                LOGGER.debug("verification of shards before closing {} succeeded but index is already closed", index);
                assert currentState.blocks().hasIndexBlock(index.getName(), IndexMetadata.INDEX_CLOSED_BLOCK);
                continue;
            }
            final ClusterBlock closingBlock = blockedIndices.get(index);
            if (currentState.blocks().hasIndexBlock(index.getName(), closingBlock) == false) {
                LOGGER.debug("verification of shards before closing {} succeeded but block has been removed in the meantime", index);
                continue;
            }
            Set<Index> restoringIndices = RestoreService.restoringIndices(updatedState, Set.of(index));
            if (restoringIndices.isEmpty() == false) {
                result.setValue(new AcknowledgedResponse(false));
                LOGGER.debug("verification of shards before closing {} succeeded but index is being restored in the meantime", index);
                continue;
            }
            Set<Index> snapshottingIndices = SnapshotsService.snapshottingIndices(updatedState, Set.of(index));
            if (snapshottingIndices.isEmpty() == false) {
                result.setValue(new AcknowledgedResponse(false));
                LOGGER.debug("verification of shards before closing {} succeeded but index is being snapshot in the meantime", index);
                continue;
            }
            blocks.removeIndexBlockWithId(index.getName(), INDEX_CLOSED_BLOCK_ID);
            blocks.addIndexBlock(index.getName(), IndexMetadata.INDEX_CLOSED_BLOCK);
            final IndexMetadata.Builder updatedMetadata = IndexMetadata.builder(indexMetadata).state(IndexMetadata.State.CLOSE);
            if (removeRoutingTable) {
                metadata.put(updatedMetadata);
                routingTable.remove(index.getName());
            } else {
                metadata.put(updatedMetadata
                    .settingsVersion(indexMetadata.getSettingsVersion() + 1)
                    .settings(Settings.builder()
                        .put(indexMetadata.getSettings())
                        .put(IndexMetadata.VERIFIED_BEFORE_CLOSE_SETTING.getKey(), true)));
                routingTable.addAsFromOpenToClose(metadata.getSafe(index));
            }
            LOGGER.debug("closing index {} succeeded", index);
            closedIndices.add(index.getName());
        } catch (IndexNotFoundException e) {
            LOGGER.debug("index {} has been deleted since it was blocked before closing, ignoring", index);
        }
    }
    LOGGER.info("completed closing of indices {}", closedIndices);
    return ClusterState.builder(currentState)
        .blocks(blocks)
        .metadata(metadata)
        .routingTable(routingTable.build())
        .build();
}
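The per-index outcome of this step lives in the mutable results map: an entry stays acknowledged only if its index was actually moved to CLOSE, and a concurrent restore or snapshot flips it to AcknowledgedResponse(false). A minimal sketch of how a caller might summarize such a map afterwards (the summarize helper is hypothetical, not part of TransportCloseTable):

import java.util.Map;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.index.Index;

final class CloseResultsSketch {

    // Hypothetical summary helper: reports each index's outcome and counts the acknowledged closes.
    static long summarize(Map<Index, AcknowledgedResponse> results) {
        long closed = 0;
        for (Map.Entry<Index, AcknowledgedResponse> entry : results.entrySet()) {
            if (entry.getValue().isAcknowledged()) {
                System.out.println("closed: " + entry.getKey().getName());
                closed++;
            } else {
                System.out.println("not closed: " + entry.getKey().getName());
            }
        }
        return closed;
    }
}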
Use of org.elasticsearch.action.support.master.AcknowledgedResponse in project crate by crate.
The class TransportSchemaUpdateAction, method updateMapping.
private CompletableFuture<AcknowledgedResponse> updateMapping(Index index, TimeValue timeout, String mappingSource) {
    FutureActionListener<AcknowledgedResponse, AcknowledgedResponse> putMappingListener = FutureActionListener.newInstance();
    PutMappingRequest putMappingRequest = new PutMappingRequest()
        .indices(new String[0])
        .setConcreteIndex(index)
        .source(mappingSource, XContentType.JSON)
        .timeout(timeout)
        .masterNodeTimeout(timeout);
    nodeClient.execute(PutMappingAction.INSTANCE, putMappingRequest, putMappingListener);
    return putMappingListener;
}
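FutureActionListener here bridges the callback-style ActionListener that nodeClient.execute expects to a CompletableFuture that can be composed further, which is why updateMapping can simply return the listener. A minimal sketch of that adapter pattern, using only the standard ActionListener interface (the class name ListenableFutureSketch is an illustration, not crate's implementation):

import java.util.concurrent.CompletableFuture;
import org.elasticsearch.action.ActionListener;

// Illustrative adapter: completes a CompletableFuture from ActionListener callbacks,
// so a transport action result can be chained with thenCompose/whenComplete.
final class ListenableFutureSketch<T> extends CompletableFuture<T> implements ActionListener<T> {

    @Override
    public void onResponse(T response) {
        complete(response);
    }

    @Override
    public void onFailure(Exception e) {
        completeExceptionally(e);
    }
}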