Use of org.opensearch.index.mapper.MetadataFieldMapper in project OpenSearch by opensearch-project.
From the class MetadataCreateDataStreamService, method validateTimestampFieldMapping.
public static void validateTimestampFieldMapping(MapperService mapperService) throws IOException {
    MetadataFieldMapper fieldMapper = (MetadataFieldMapper) mapperService.documentMapper()
        .mappers()
        .getMapper("_data_stream_timestamp");
    assert fieldMapper != null : "[_data_stream_timestamp] meta field mapper must exist";
    Map<String, Object> parsedTemplateMapping = MapperService.parseMapping(
        NamedXContentRegistry.EMPTY,
        mapperService.documentMapper().mappingSource().string()
    );
    Boolean enabled = ObjectPath.eval("_doc._data_stream_timestamp.enabled", parsedTemplateMapping);
    // The [_data_stream_timestamp] meta field is expected to be enabled at this point; if it is not,
    // that would be a bug.
    if (enabled == null || enabled == false) {
        throw new IllegalStateException("[_data_stream_timestamp] meta field has been disabled");
    }
    // Sanity check (this validation logic should already have been executed when merging mappings):
    fieldMapper.validate(mapperService.documentMapper().mappers());
}
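For context, here is a minimal, self-contained sketch (hypothetical mapping content, plain java.util collections only, not OpenSearch code) of the nested map shape that the "_doc._data_stream_timestamp.enabled" path is evaluated against once MapperService.parseMapping has turned the mapping source into a Map; it walks the same path by hand instead of using ObjectPath.eval.

import java.util.Map;

public class TimestampMappingShapeSketch {
    public static void main(String[] args) {
        // Hypothetical parsed mapping: the _data_stream_timestamp meta field plus one regular field.
        Map<String, Object> parsedTemplateMapping = Map.of(
            "_doc", Map.of(
                "_data_stream_timestamp", Map.of("enabled", true),
                "properties", Map.of("@timestamp", Map.of("type", "date"))
            )
        );
        // Manually resolve "_doc._data_stream_timestamp.enabled".
        Map<?, ?> doc = (Map<?, ?>) parsedTemplateMapping.get("_doc");
        Map<?, ?> timestampMeta = (Map<?, ?>) doc.get("_data_stream_timestamp");
        Boolean enabled = (Boolean) timestampMeta.get("enabled");
        if (enabled == null || enabled == false) {
            throw new IllegalStateException("[_data_stream_timestamp] meta field has been disabled");
        }
        System.out.println("_data_stream_timestamp enabled: " + enabled);
    }
}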
Use of org.opensearch.index.mapper.MetadataFieldMapper in project OpenSearch by opensearch-project.
From the class TransportShardBulkActionTests, method testRetries.
public void testRetries() throws Exception {
    IndexSettings indexSettings = new IndexSettings(indexMetadata(), Settings.EMPTY);
    UpdateRequest writeRequest = new UpdateRequest("index", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value");
    // the beating will continue until success has come.
    writeRequest.retryOnConflict(Integer.MAX_VALUE);
    BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest);
    IndexRequest updateResponse = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "field", "value");
    Exception err = new VersionConflictEngineException(shardId, "id", "I'm conflicted <(;_;)>");
    Engine.IndexResult conflictedResult = new Engine.IndexResult(err, 0);
    Engine.IndexResult mappingUpdate = new Engine.IndexResult(
        new Mapping(null, mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap())
    );
    Translog.Location resultLocation = new Translog.Location(42, 42, 42);
    Engine.IndexResult success = new FakeIndexResult(1, 1, 13, true, resultLocation);
    IndexShard shard = mock(IndexShard.class);
    when(shard.applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean()))
        .thenAnswer(ir -> {
            if (randomBoolean()) {
                return conflictedResult;
            }
            if (randomBoolean()) {
                return mappingUpdate;
            } else {
                return success;
            }
        });
    when(shard.indexSettings()).thenReturn(indexSettings);
    when(shard.shardId()).thenReturn(shardId);
    when(shard.mapperService()).thenReturn(mock(MapperService.class));
    UpdateHelper updateHelper = mock(UpdateHelper.class);
    when(updateHelper.prepare(any(), eq(shard), any())).thenReturn(
        new UpdateHelper.Result(
            updateResponse,
            randomBoolean() ? DocWriteResponse.Result.CREATED : DocWriteResponse.Result.UPDATED,
            Collections.singletonMap("field", "value"),
            Requests.INDEX_CONTENT_TYPE
        )
    );
    BulkItemRequest[] items = new BulkItemRequest[] { primaryRequest };
    BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
    final CountDownLatch latch = new CountDownLatch(1);
    TransportShardBulkAction.performOnPrimary(
        bulkShardRequest,
        shard,
        updateHelper,
        threadPool::absoluteTimeInMillis,
        new NoopMappingUpdatePerformer(),
        listener -> listener.onResponse(null),
        new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(result -> {
            assertThat(((WritePrimaryResult<BulkShardRequest, BulkShardResponse>) result).location, equalTo(resultLocation));
            BulkItemResponse primaryResponse = result.replicaRequest().items()[0].getPrimaryResponse();
            assertThat(primaryResponse.getItemId(), equalTo(0));
            assertThat(primaryResponse.getId(), equalTo("id"));
            assertThat(primaryResponse.getOpType(), equalTo(DocWriteRequest.OpType.UPDATE));
            DocWriteResponse response = primaryResponse.getResponse();
            assertThat(response.status(), equalTo(RestStatus.CREATED));
            assertThat(response.getSeqNo(), equalTo(13L));
        }), latch),
        threadPool,
        Names.WRITE
    );
    latch.await();
}
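The stub above keeps handing back version conflicts and mapping-update results at random until the bulk action finally succeeds, which is what retryOnConflict(Integer.MAX_VALUE) exercises. Below is a minimal sketch of that same Mockito thenAnswer pattern against a hypothetical Engine interface (the interface and the string result values are stand-ins, not OpenSearch types), with a caller that simply retries until it sees success.

import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.concurrent.ThreadLocalRandom;

public class RandomizedRetryStubSketch {
    // Hypothetical stand-in for IndexShard.applyIndexOperationOnPrimary.
    interface Engine {
        String index(long seqNo);
    }

    public static void main(String[] args) {
        Engine engine = mock(Engine.class);
        when(engine.index(anyLong())).thenAnswer(invocation -> {
            if (ThreadLocalRandom.current().nextBoolean()) {
                return "CONFLICT"; // transient failure, the caller is expected to retry
            }
            return ThreadLocalRandom.current().nextBoolean() ? "MAPPING_UPDATE_REQUIRED" : "SUCCESS";
        });
        // Mirror of retryOnConflict(Integer.MAX_VALUE): keep retrying until the stub returns success.
        String result;
        do {
            result = engine.index(0L);
        } while (!"SUCCESS".equals(result));
        System.out.println("finished with: " + result);
    }
}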
Use of org.opensearch.index.mapper.MetadataFieldMapper in project OpenSearch by opensearch-project.
From the class TransportShardBulkActionTests, method testForceExecutionOnRejectionAfterMappingUpdate.
public void testForceExecutionOnRejectionAfterMappingUpdate() throws Exception {
    TestThreadPool rejectingThreadPool = new TestThreadPool(
        "TransportShardBulkActionTests#testForceExecutionOnRejectionAfterMappingUpdate",
        Settings.builder()
            .put("thread_pool." + ThreadPool.Names.WRITE + ".size", 1)
            .put("thread_pool." + ThreadPool.Names.WRITE + ".queue_size", 1)
            .build()
    );
    CyclicBarrier cyclicBarrier = new CyclicBarrier(2);
    rejectingThreadPool.executor(ThreadPool.Names.WRITE).execute(() -> {
        try {
            cyclicBarrier.await();
            logger.info("blocking the write executor");
            cyclicBarrier.await();
            logger.info("unblocked the write executor");
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    });
    try {
        cyclicBarrier.await();
        // Place a task in the queue to block next enqueue
        rejectingThreadPool.executor(ThreadPool.Names.WRITE).execute(() -> {});
        BulkItemRequest[] items = new BulkItemRequest[2];
        DocWriteRequest<IndexRequest> writeRequest1 = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "foo", 1);
        DocWriteRequest<IndexRequest> writeRequest2 = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "foo", "bar");
        items[0] = new BulkItemRequest(0, writeRequest1);
        items[1] = new BulkItemRequest(1, writeRequest2);
        BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
        Engine.IndexResult mappingUpdate = new Engine.IndexResult(
            new Mapping(null, mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap())
        );
        Translog.Location resultLocation1 = new Translog.Location(42, 36, 36);
        Translog.Location resultLocation2 = new Translog.Location(42, 42, 42);
        Engine.IndexResult success1 = new FakeIndexResult(1, 1, 10, true, resultLocation1);
        Engine.IndexResult success2 = new FakeIndexResult(1, 1, 13, true, resultLocation2);
        IndexShard shard = mock(IndexShard.class);
        when(shard.shardId()).thenReturn(shardId);
        when(shard.applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean()))
            .thenReturn(success1, mappingUpdate, success2);
        when(shard.getFailedIndexResult(any(OpenSearchRejectedExecutionException.class), anyLong())).thenCallRealMethod();
        when(shard.mapperService()).thenReturn(mock(MapperService.class));
        randomlySetIgnoredPrimaryResponse(items[0]);
        AtomicInteger updateCalled = new AtomicInteger();
        final CountDownLatch latch = new CountDownLatch(1);
        TransportShardBulkAction.performOnPrimary(
            bulkShardRequest,
            shard,
            null,
            rejectingThreadPool::absoluteTimeInMillis,
            (update, shardId, listener) -> {
                // There should indeed be a mapping update
                assertNotNull(update);
                updateCalled.incrementAndGet();
                listener.onResponse(null);
                try {
                    // Release blocking task now that the continue write execution has been rejected and
                    // the finishRequest execution has been force enqueued
                    cyclicBarrier.await();
                } catch (InterruptedException | BrokenBarrierException e) {
                    throw new IllegalStateException(e);
                }
            },
            listener -> listener.onResponse(null),
            new LatchedActionListener<>(
                ActionTestUtils.assertNoFailureListener(
                    result -> assertThat(((WritePrimaryResult<BulkShardRequest, BulkShardResponse>) result).location, equalTo(resultLocation1))
                ),
                latch
            ),
            rejectingThreadPool,
            Names.WRITE
        );
        latch.await();
        assertThat("mappings were \"updated\" once", updateCalled.get(), equalTo(1));
        verify(shard, times(2)).applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean());
        BulkItemResponse primaryResponse1 = bulkShardRequest.items()[0].getPrimaryResponse();
        assertThat(primaryResponse1.getItemId(), equalTo(0));
        assertThat(primaryResponse1.getId(), equalTo("id"));
        assertThat(primaryResponse1.getOpType(), equalTo(DocWriteRequest.OpType.INDEX));
        assertFalse(primaryResponse1.isFailed());
        assertThat(primaryResponse1.getResponse().status(), equalTo(RestStatus.CREATED));
        assertThat(primaryResponse1.getResponse().getSeqNo(), equalTo(10L));
        BulkItemResponse primaryResponse2 = bulkShardRequest.items()[1].getPrimaryResponse();
        assertThat(primaryResponse2.getItemId(), equalTo(1));
        assertThat(primaryResponse2.getId(), equalTo("id"));
        assertThat(primaryResponse2.getOpType(), equalTo(DocWriteRequest.OpType.INDEX));
        assertTrue(primaryResponse2.isFailed());
        assertNull(primaryResponse2.getResponse());
        assertEquals(primaryResponse2.status(), RestStatus.TOO_MANY_REQUESTS);
        assertThat(primaryResponse2.getFailure().getCause(), instanceOf(OpenSearchRejectedExecutionException.class));
        closeShards(shard);
    } finally {
        rejectingThreadPool.shutdownNow();
    }
}
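The test relies on a WRITE pool sized 1 with a queue of 1: one task is parked on a CyclicBarrier, one no-op fills the queue, and the next submission is rejected. Below is a minimal, self-contained sketch of that saturation pattern using only java.util.concurrent (no OpenSearch thread pool), just to illustrate why the second bulk item ends up with a rejection.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class WriteRejectionSketch {
    public static void main(String[] args) throws Exception {
        // One worker thread, one queue slot: the same shape as the rejecting WRITE pool above.
        ThreadPoolExecutor writeExecutor =
            new ThreadPoolExecutor(1, 1, 0, TimeUnit.SECONDS, new ArrayBlockingQueue<>(1));
        CyclicBarrier barrier = new CyclicBarrier(2);
        // Occupy the single worker until the barrier is tripped a second time.
        writeExecutor.execute(() -> {
            try {
                barrier.await(); // signal: worker is now blocked
                barrier.await(); // wait to be released
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        barrier.await();                 // the worker is busy
        writeExecutor.execute(() -> {}); // fills the single queue slot
        try {
            writeExecutor.execute(() -> {}); // third task: no free thread, no queue space -> rejected
        } catch (RejectedExecutionException e) {
            System.out.println("rejected as expected: " + e);
        }
        barrier.await();                 // release the blocked worker
        writeExecutor.shutdown();
    }
}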
Use of org.opensearch.index.mapper.MetadataFieldMapper in project OpenSearch by opensearch-project.
From the class TransportShardBulkActionTests, method testExecuteBulkIndexRequestWithMappingUpdates.
public void testExecuteBulkIndexRequestWithMappingUpdates() throws Exception {
    BulkItemRequest[] items = new BulkItemRequest[1];
    DocWriteRequest<IndexRequest> writeRequest = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "foo", "bar");
    items[0] = new BulkItemRequest(0, writeRequest);
    BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
    Engine.IndexResult mappingUpdate = new Engine.IndexResult(
        new Mapping(null, mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap())
    );
    Translog.Location resultLocation = new Translog.Location(42, 42, 42);
    Engine.IndexResult success = new FakeIndexResult(1, 1, 13, true, resultLocation);
    IndexShard shard = mock(IndexShard.class);
    when(shard.shardId()).thenReturn(shardId);
    when(shard.applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean()))
        .thenReturn(mappingUpdate);
    when(shard.mapperService()).thenReturn(mock(MapperService.class));
    randomlySetIgnoredPrimaryResponse(items[0]);
    // Pretend the mappings haven't made it to the node yet
    BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard);
    AtomicInteger updateCalled = new AtomicInteger();
    TransportShardBulkAction.executeBulkItemRequest(context, null, threadPool::absoluteTimeInMillis, (update, shardId, listener) -> {
        // There should indeed be a mapping update
        assertNotNull(update);
        updateCalled.incrementAndGet();
        listener.onResponse(null);
    }, listener -> listener.onResponse(null), ASSERTING_DONE_LISTENER);
    assertTrue(context.isInitial());
    assertTrue(context.hasMoreOperationsToExecute());
    assertThat("mappings were \"updated\" once", updateCalled.get(), equalTo(1));
    // Verify that the shard "executed" the operation once
    verify(shard, times(1)).applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean());
    when(shard.applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean()))
        .thenReturn(success);
    TransportShardBulkAction.executeBulkItemRequest(
        context,
        null,
        threadPool::absoluteTimeInMillis,
        (update, shardId, listener) -> fail("should not have had to update the mappings"),
        listener -> {},
        ASSERTING_DONE_LISTENER
    );
    // Verify that the shard "executed" the operation only once more (1 for the previous invocation plus
    // 1 for this execution)
    verify(shard, times(2)).applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean());
    BulkItemResponse primaryResponse = bulkShardRequest.items()[0].getPrimaryResponse();
    assertThat(primaryResponse.getItemId(), equalTo(0));
    assertThat(primaryResponse.getId(), equalTo("id"));
    assertThat(primaryResponse.getOpType(), equalTo(writeRequest.opType()));
    assertFalse(primaryResponse.isFailed());
    closeShards(shard);
}
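The flow above is: the first executeBulkItemRequest call hits the mapping-update result, the mapping updater runs once, the stub is then changed to return success, and the retried execution completes without another mapping update. A minimal sketch of that re-stub-then-verify Mockito pattern, against a hypothetical Indexer interface rather than IndexShard, might look like this.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

public class RestubAndVerifySketch {
    // Hypothetical stand-in for IndexShard.applyIndexOperationOnPrimary.
    interface Indexer {
        String apply(String doc);
    }

    public static void main(String[] args) {
        Indexer indexer = mock(Indexer.class);
        when(indexer.apply("doc")).thenReturn("MAPPING_UPDATE_REQUIRED");
        String first = indexer.apply("doc");              // caller would now push the mapping update
        verify(indexer, times(1)).apply("doc");

        when(indexer.apply("doc")).thenReturn("SUCCESS"); // mappings are now "available" on the node
        String second = indexer.apply("doc");             // retry of the same item
        verify(indexer, times(2)).apply("doc");           // stubbing calls are not counted as interactions
        System.out.println(first + " -> " + second);
    }
}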
Use of org.opensearch.index.mapper.MetadataFieldMapper in project OpenSearch by opensearch-project.
From the class MappingUpdatedActionTests, method testSendUpdateMappingUsingAutoPutMappingAction.
public void testSendUpdateMappingUsingAutoPutMappingAction() {
    DiscoveryNodes nodes = DiscoveryNodes.builder()
        .add(new DiscoveryNode("first", buildNewFakeTransportAddress(), LegacyESVersion.V_7_9_0))
        .build();
    ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).nodes(nodes).build();
    ClusterService clusterService = mock(ClusterService.class);
    when(clusterService.state()).thenReturn(clusterState);
    IndicesAdminClient indicesAdminClient = mock(IndicesAdminClient.class);
    AdminClient adminClient = mock(AdminClient.class);
    when(adminClient.indices()).thenReturn(indicesAdminClient);
    Client client = mock(Client.class);
    when(client.admin()).thenReturn(adminClient);
    MappingUpdatedAction mua = new MappingUpdatedAction(
        Settings.EMPTY,
        new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
        clusterService
    );
    mua.setClient(client);
    Settings indexSettings = Settings.builder().put(SETTING_VERSION_CREATED, Version.CURRENT).build();
    final Mapper.BuilderContext context = new Mapper.BuilderContext(indexSettings, new ContentPath());
    RootObjectMapper rootObjectMapper = new RootObjectMapper.Builder("name").build(context);
    Mapping update = new Mapping(LegacyESVersion.V_7_9_0, rootObjectMapper, new MetadataFieldMapper[0], Map.of());
    mua.sendUpdateMapping(new Index("name", "uuid"), update, ActionListener.wrap(() -> {}));
    verify(indicesAdminClient).execute(eq(AutoPutMappingAction.INSTANCE), any(), any());
}
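The final verification depends on Mockito's argument-matcher rule: because eq(AutoPutMappingAction.INSTANCE) is a matcher, the remaining arguments must be matchers too (any()). A minimal sketch of that rule with a hypothetical Client interface, not the OpenSearch client API, is shown below.

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

public class MatcherRuleSketch {
    // Hypothetical stand-in for IndicesAdminClient.execute(action, request, listener).
    interface Client {
        void execute(String action, Object request, Object listener);
    }

    public static void main(String[] args) {
        Client client = mock(Client.class);
        client.execute("auto_put_mapping", new Object(), new Object());
        // Correct: eq() for the argument under test, any() for the rest.
        verify(client).execute(eq("auto_put_mapping"), any(), any());
        // Mixing a raw value with matchers, e.g. verify(client).execute("auto_put_mapping", any(), any()),
        // would throw InvalidUseOfMatchersException.
        System.out.println("verified");
    }
}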