Use of org.opensearch.common.xcontent.NamedXContentRegistry in project OpenSearch by opensearch-project.
Class WildflyIT, method testRestClient.
public void testRestClient() throws URISyntaxException, IOException {
    final String baseUrl = buildBaseUrl();
    try (CloseableHttpClient client = HttpClientBuilder.create().build()) {
        final String endpoint = baseUrl + "/employees/1";
        logger.info("Connecting to uri: " + baseUrl);
        final HttpPut put = new HttpPut(new URI(endpoint));
        final String body = "{"
            + " \"first_name\": \"John\","
            + " \"last_name\": \"Smith\","
            + " \"age\": 25,"
            + " \"about\": \"I love to go rock climbing\","
            + " \"interests\": ["
            + " \"sports\","
            + " \"music\""
            + " ]"
            + "}";
        put.setEntity(new StringEntity(body, ContentType.APPLICATION_JSON));
        try (CloseableHttpResponse response = client.execute(put)) {
            int status = response.getStatusLine().getStatusCode();
            assertThat(
                "expected a 201 response but got: " + status + " - body: " + EntityUtils.toString(response.getEntity()),
                status,
                equalTo(201)
            );
        }
        logger.info("Fetching resource at " + endpoint);
        final HttpGet get = new HttpGet(new URI(endpoint));
        try (
            CloseableHttpResponse response = client.execute(get);
            XContentParser parser = JsonXContent.jsonXContent.createParser(
                new NamedXContentRegistry(ClusterModule.getNamedXWriteables()),
                DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
                response.getEntity().getContent()
            )
        ) {
            final Map<String, Object> map = parser.map();
            assertThat(map.get("first_name"), equalTo("John"));
            assertThat(map.get("last_name"), equalTo("Smith"));
            assertThat(map.get("age"), equalTo(25));
            assertThat(map.get("about"), equalTo("I love to go rock climbing"));
            final Object interests = map.get("interests");
            assertThat(interests, instanceOf(List.class));
            @SuppressWarnings("unchecked")
            final List<String> interestsAsList = (List<String>) interests;
            assertThat(interestsAsList, containsInAnyOrder("sports", "music"));
        }
    }
}
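The GET response above is decoded through an XContentParser backed by a NamedXContentRegistry. As a standalone sketch of just that decoding step (assuming the pre-2.x org.opensearch.common.xcontent package layout used by the test; the class and helper below are hypothetical, and an empty registry suffices because parsing into a plain map resolves no named objects):

import java.util.Map;
import org.opensearch.common.xcontent.DeprecationHandler;
import org.opensearch.common.xcontent.NamedXContentRegistry;
import org.opensearch.common.xcontent.XContentParser;
import org.opensearch.common.xcontent.json.JsonXContent;

public final class ParseJsonBodySketch {

    // Hypothetical helper: parse a raw JSON document into a Map using an empty registry.
    static Map<String, Object> parseToMap(String json) throws Exception {
        try (
            XContentParser parser = JsonXContent.jsonXContent.createParser(
                NamedXContentRegistry.EMPTY,
                DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
                json
            )
        ) {
            return parser.map();
        }
    }

    public static void main(String[] args) throws Exception {
        Map<String, Object> doc = parseToMap("{\"first_name\": \"John\", \"age\": 25}");
        System.out.println(doc.get("first_name")); // John
    }
}

The test passes new NamedXContentRegistry(ClusterModule.getNamedXWriteables()) instead of the empty registry; for parser.map() either works, since the registry only matters when named objects must be resolved during parsing.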
Use of org.opensearch.common.xcontent.NamedXContentRegistry in project OpenSearch by opensearch-project.
Class BlobStoreRepository, method writeUpdatedShardMetaDataAndComputeDeletes.
// Updates the shard state metadata for shards of a snapshot that is to be deleted. Also computes the files to be cleaned up.
private void writeUpdatedShardMetaDataAndComputeDeletes(
    Collection<SnapshotId> snapshotIds,
    RepositoryData oldRepositoryData,
    boolean useUUIDs,
    ActionListener<Collection<ShardSnapshotMetaDeleteResult>> onAllShardsCompleted
) {
    final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
    final List<IndexId> indices = oldRepositoryData.indicesToUpdateAfterRemovingSnapshot(snapshotIds);
    if (indices.isEmpty()) {
        onAllShardsCompleted.onResponse(Collections.emptyList());
        return;
    }
    // Listener that flattens out the delete results for each index
    final ActionListener<Collection<ShardSnapshotMetaDeleteResult>> deleteIndexMetadataListener = new GroupedActionListener<>(
        ActionListener.map(onAllShardsCompleted, res -> res.stream().flatMap(Collection::stream).collect(Collectors.toList())),
        indices.size()
    );
    for (IndexId indexId : indices) {
        final Set<SnapshotId> survivingSnapshots = oldRepositoryData.getSnapshots(indexId)
            .stream()
            .filter(id -> snapshotIds.contains(id) == false)
            .collect(Collectors.toSet());
        final StepListener<Collection<Integer>> shardCountListener = new StepListener<>();
        final Collection<String> indexMetaGenerations = snapshotIds.stream()
            .map(id -> oldRepositoryData.indexMetaDataGenerations().indexMetaBlobId(id, indexId))
            .collect(Collectors.toSet());
        final ActionListener<Integer> allShardCountsListener = new GroupedActionListener<>(shardCountListener, indexMetaGenerations.size());
        final BlobContainer indexContainer = indexContainer(indexId);
        for (String indexMetaGeneration : indexMetaGenerations) {
            executor.execute(ActionRunnable.supply(allShardCountsListener, () -> {
                try {
                    return INDEX_METADATA_FORMAT.read(indexContainer, indexMetaGeneration, namedXContentRegistry).getNumberOfShards();
                } catch (Exception ex) {
                    logger.warn(
                        () -> new ParameterizedMessage("[{}] [{}] failed to read metadata for index", indexMetaGeneration, indexId.getName()),
                        ex
                    );
                    // Return null to count down the listener and let the stale-data cleanup deal with this index later.
                    return null;
                }
            }));
        }
        shardCountListener.whenComplete(counts -> {
            final int shardCount = counts.stream().mapToInt(i -> i).max().orElse(0);
            if (shardCount == 0) {
                deleteIndexMetadataListener.onResponse(null);
                return;
            }
            // Listener for collecting the results of removing the snapshot from each shard's metadata in the current index
            final ActionListener<ShardSnapshotMetaDeleteResult> allShardsListener = new GroupedActionListener<>(
                deleteIndexMetadataListener,
                shardCount
            );
            for (int shardId = 0; shardId < shardCount; shardId++) {
                final int finalShardId = shardId;
                executor.execute(new AbstractRunnable() {

                    @Override
                    protected void doRun() throws Exception {
                        final BlobContainer shardContainer = shardContainer(indexId, finalShardId);
                        final Set<String> blobs = shardContainer.listBlobs().keySet();
                        final BlobStoreIndexShardSnapshots blobStoreIndexShardSnapshots;
                        final long newGen;
                        if (useUUIDs) {
                            newGen = -1L;
                            blobStoreIndexShardSnapshots = buildBlobStoreIndexShardSnapshots(
                                blobs,
                                shardContainer,
                                oldRepositoryData.shardGenerations().getShardGen(indexId, finalShardId)
                            ).v1();
                        } else {
                            Tuple<BlobStoreIndexShardSnapshots, Long> tuple = buildBlobStoreIndexShardSnapshots(blobs, shardContainer);
                            newGen = tuple.v2() + 1;
                            blobStoreIndexShardSnapshots = tuple.v1();
                        }
                        allShardsListener.onResponse(
                            deleteFromShardSnapshotMeta(
                                survivingSnapshots,
                                indexId,
                                finalShardId,
                                snapshotIds,
                                shardContainer,
                                blobs,
                                blobStoreIndexShardSnapshots,
                                newGen
                            )
                        );
                    }

                    @Override
                    public void onFailure(Exception ex) {
                        logger.warn(
                            () -> new ParameterizedMessage("{} failed to delete shard data for shard [{}][{}]", snapshotIds, indexId.getName(), finalShardId),
                            ex
                        );
                        // Just passing null here to count down the listener instead of failing it; the stale data left behind
                        // here will be retried in the next delete or repository cleanup
                        allShardsListener.onResponse(null);
                    }
                });
            }
        }, deleteIndexMetadataListener::onFailure);
    }
}
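The method fans work out (one task per index-metadata blob, then one per shard) and fans the results back in through nested GroupedActionListener instances. Below is a stripped-down sketch of just that listener wiring; the per-index "work" is a hypothetical inline placeholder, and only the composition mirrors the method above:

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import org.opensearch.action.ActionListener;
import org.opensearch.action.support.GroupedActionListener;

public final class GroupedListenerSketch {

    public static void main(String[] args) {
        List<String> indices = Arrays.asList("index-a", "index-b", "index-c");

        // Downstream listener: fires exactly once, with all per-index results collected.
        ActionListener<Collection<Integer>> onAllIndicesCompleted = ActionListener.wrap(
            results -> System.out.println("all done: " + results),
            e -> System.err.println("failed: " + e)
        );

        // Grouped listener: invokes the downstream listener only after indices.size() responses.
        ActionListener<Integer> perIndexListener = new GroupedActionListener<>(onAllIndicesCompleted, indices.size());

        for (String index : indices) {
            // In writeUpdatedShardMetaDataAndComputeDeletes this work runs on the SNAPSHOT
            // thread pool via ActionRunnable.supply; here it is done inline for brevity.
            perIndexListener.onResponse(index.length());
        }
    }
}

Responding with null (as the method does on failure) still counts a task down, which is why a failed shard only leaves stale data behind for a later cleanup instead of failing the whole delete.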
Use of org.opensearch.common.xcontent.NamedXContentRegistry in project OpenSearch by opensearch-project.
Class MetadataIndexTemplateServiceTests, method testResolveMappings.
public void testResolveMappings() throws Exception {
    final MetadataIndexTemplateService service = getMetadataIndexTemplateService();
    ClusterState state = ClusterState.EMPTY_STATE;
    ComponentTemplate ct1 = new ComponentTemplate(
        new Template(
            null,
            new CompressedXContent("{\n" + " \"properties\": {\n" + " \"field1\": {\n" + " \"type\": \"keyword\"\n" + " }\n" + " }\n" + " }"),
            null
        ),
        null,
        null
    );
    ComponentTemplate ct2 = new ComponentTemplate(
        new Template(
            null,
            new CompressedXContent("{\n" + " \"properties\": {\n" + " \"field2\": {\n" + " \"type\": \"text\"\n" + " }\n" + " }\n" + " }"),
            null
        ),
        null,
        null
    );
    state = service.addComponentTemplate(state, true, "ct_high", ct1);
    state = service.addComponentTemplate(state, true, "ct_low", ct2);
    ComposableIndexTemplate it = new ComposableIndexTemplate(
        Arrays.asList("i*"),
        new Template(
            null,
            new CompressedXContent("{\n" + " \"properties\": {\n" + " \"field3\": {\n" + " \"type\": \"integer\"\n" + " }\n" + " }\n" + " }"),
            null
        ),
        Arrays.asList("ct_low", "ct_high"),
        0L,
        1L,
        null,
        null
    );
    state = service.addIndexTemplateV2(state, true, "my-template", it);
    List<CompressedXContent> mappings = MetadataIndexTemplateService.collectMappings(state, "my-template", "my-index");
    assertNotNull(mappings);
    assertThat(mappings.size(), equalTo(3));
    List<Map<String, Object>> parsedMappings = mappings.stream().map(m -> {
        try {
            return MapperService.parseMapping(new NamedXContentRegistry(Collections.emptyList()), m.string());
        } catch (Exception e) {
            logger.error(e);
            fail("failed to parse mappings: " + m.string());
            return null;
        }
    }).collect(Collectors.toList());
    assertThat(
        parsedMappings.get(0),
        equalTo(Collections.singletonMap("_doc", Collections.singletonMap("properties", Collections.singletonMap("field2", Collections.singletonMap("type", "text")))))
    );
    assertThat(
        parsedMappings.get(1),
        equalTo(Collections.singletonMap("_doc", Collections.singletonMap("properties", Collections.singletonMap("field1", Collections.singletonMap("type", "keyword")))))
    );
    assertThat(
        parsedMappings.get(2),
        equalTo(Collections.singletonMap("_doc", Collections.singletonMap("properties", Collections.singletonMap("field3", Collections.singletonMap("type", "integer")))))
    );
}
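parseMapping here only converts each mapping source into a nested Map; the "_doc" wrapper the assertions expect is already present in the strings returned by collectMappings. A minimal sketch of that conversion (a hypothetical standalone class, using the same static MapperService.parseMapping(NamedXContentRegistry, String) call as the test):

import java.util.Collections;
import java.util.Map;
import org.opensearch.common.xcontent.NamedXContentRegistry;
import org.opensearch.index.mapper.MapperService;

public final class ParseMappingSketch {

    public static void main(String[] args) throws Exception {
        String mapping = "{\"_doc\":{\"properties\":{\"field1\":{\"type\":\"keyword\"}}}}";
        // An empty registry is enough: turning a mapping into a Map resolves no named objects.
        Map<String, Object> parsed = MapperService.parseMapping(new NamedXContentRegistry(Collections.emptyList()), mapping);
        System.out.println(parsed); // {_doc={properties={field1={type=keyword}}}}
    }
}

The ordering assertions then rely on collectMappings returning the component-template mappings first (ct_low, then ct_high, in composed_of order) and the index template's own mapping last.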
Use of org.opensearch.common.xcontent.NamedXContentRegistry in project OpenSearch by opensearch-project.
Class MetadataIndexTemplateServiceTests, method testResolveConflictingMappings.
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/57393")
public void testResolveConflictingMappings() throws Exception {
    final MetadataIndexTemplateService service = getMetadataIndexTemplateService();
    ClusterState state = ClusterState.EMPTY_STATE;
    ComponentTemplate ct1 = new ComponentTemplate(
        new Template(
            null,
            new CompressedXContent("{\n" + " \"properties\": {\n" + " \"field2\": {\n" + " \"type\": \"keyword\"\n" + " }\n" + " }\n" + " }"),
            null
        ),
        null,
        null
    );
    ComponentTemplate ct2 = new ComponentTemplate(
        new Template(
            null,
            new CompressedXContent("{\n" + " \"properties\": {\n" + " \"field2\": {\n" + " \"type\": \"text\"\n" + " }\n" + " }\n" + " }"),
            null
        ),
        null,
        null
    );
    state = service.addComponentTemplate(state, true, "ct_high", ct1);
    state = service.addComponentTemplate(state, true, "ct_low", ct2);
    ComposableIndexTemplate it = new ComposableIndexTemplate(
        Collections.singletonList("i*"),
        new Template(
            null,
            new CompressedXContent("{\n" + " \"properties\": {\n" + " \"field\": {\n" + " \"type\": \"keyword\"\n" + " }\n" + " }\n" + " }"),
            null
        ),
        Arrays.asList("ct_low", "ct_high"),
        0L,
        1L,
        null,
        null
    );
    state = service.addIndexTemplateV2(state, true, "my-template", it);
    List<CompressedXContent> mappings = MetadataIndexTemplateService.collectMappings(state, "my-template", "my-index");
    assertNotNull(mappings);
    assertThat(mappings.size(), equalTo(3));
    List<Map<String, Object>> parsedMappings = mappings.stream().map(m -> {
        try {
            return MapperService.parseMapping(new NamedXContentRegistry(Collections.emptyList()), m.string());
        } catch (Exception e) {
            logger.error(e);
            fail("failed to parse mappings: " + m.string());
            return null;
        }
    }).collect(Collectors.toList());
    // The order of mappings should be:
    // - ct_low
    // - ct_high
    // - index template
    // Because the first elements when merging mappings have the lowest precedence
    assertThat(
        parsedMappings.get(0),
        equalTo(Collections.singletonMap("_doc", Collections.singletonMap("properties", Collections.singletonMap("field2", Collections.singletonMap("type", "text")))))
    );
    assertThat(
        parsedMappings.get(1),
        equalTo(Collections.singletonMap("_doc", Collections.singletonMap("properties", Collections.singletonMap("field2", Collections.singletonMap("type", "keyword")))))
    );
    assertThat(
        parsedMappings.get(2),
        equalTo(Collections.singletonMap("_doc", Collections.singletonMap("properties", Collections.singletonMap("field", Collections.singletonMap("type", "keyword")))))
    );
}
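The precedence comment boils down to: mappings are merged in list order, so later entries win on conflicting fields. A toy illustration of that merge rule in plain Java (not the actual template-merging code in MetadataIndexTemplateService):

import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

public final class MergeOrderSketch {

    public static void main(String[] args) {
        Map<String, String> ctLow = Collections.singletonMap("field2", "text");
        Map<String, String> ctHigh = Collections.singletonMap("field2", "keyword");
        Map<String, String> template = Collections.singletonMap("field", "keyword");

        // Lowest precedence first; each later map overwrites conflicting keys.
        Map<String, String> merged = new LinkedHashMap<>();
        for (Map<String, String> m : Arrays.asList(ctLow, ctHigh, template)) {
            merged.putAll(m);
        }
        System.out.println(merged); // {field2=keyword, field=keyword}
    }
}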
Use of org.opensearch.common.xcontent.NamedXContentRegistry in project OpenSearch by opensearch-project.
Class NetworkModuleTests, method testOverrideDefault.
public void testOverrideDefault() {
    Settings settings = Settings.builder()
        .put(NetworkModule.HTTP_TYPE_SETTING.getKey(), "custom")
        .put(NetworkModule.HTTP_DEFAULT_TYPE_SETTING.getKey(), "default_custom")
        .put(NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.getKey(), "local")
        .put(NetworkModule.TRANSPORT_TYPE_KEY, "default_custom")
        .build();
    // Content doesn't matter; we check reference equality.
    Supplier<Transport> customTransport = () -> null;
    Supplier<HttpServerTransport> custom = FakeHttpTransport::new;
    Supplier<HttpServerTransport> def = FakeHttpTransport::new;
    NetworkModule module = newNetworkModule(settings, new NetworkPlugin() {

        @Override
        public Map<String, Supplier<Transport>> getTransports(
            Settings settings,
            ThreadPool threadPool,
            PageCacheRecycler pageCacheRecycler,
            CircuitBreakerService circuitBreakerService,
            NamedWriteableRegistry namedWriteableRegistry,
            NetworkService networkService
        ) {
            return Collections.singletonMap("default_custom", customTransport);
        }

        @Override
        public Map<String, Supplier<HttpServerTransport>> getHttpTransports(
            Settings settings,
            ThreadPool threadPool,
            BigArrays bigArrays,
            PageCacheRecycler pageCacheRecycler,
            CircuitBreakerService circuitBreakerService,
            NamedXContentRegistry xContentRegistry,
            NetworkService networkService,
            HttpServerTransport.Dispatcher requestDispatcher,
            ClusterSettings clusterSettings
        ) {
            Map<String, Supplier<HttpServerTransport>> supplierMap = new HashMap<>();
            supplierMap.put("custom", custom);
            supplierMap.put("default_custom", def);
            return supplierMap;
        }
    });
    assertSame(custom, module.getHttpServerTransportSupplier());
    assertSame(customTransport, module.getTransportSupplier());
}
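The assertions hinge on how the type settings interact: an explicitly configured http.type or transport.type wins over the corresponding *.type.default setting, which is why the "custom" HTTP supplier and the "default_custom" transport supplier are selected. A minimal sketch of just the settings side (same values as the test; the printout only shows what is configured, while the test above verifies what NetworkModule actually selects):

import org.opensearch.common.network.NetworkModule;
import org.opensearch.common.settings.Settings;

public final class NetworkTypeSettingsSketch {

    public static void main(String[] args) {
        Settings settings = Settings.builder()
            // Explicit HTTP type: takes precedence over http.type.default.
            .put(NetworkModule.HTTP_TYPE_SETTING.getKey(), "custom")
            // Default HTTP type: only consulted when no explicit http.type is set.
            .put(NetworkModule.HTTP_DEFAULT_TYPE_SETTING.getKey(), "default_custom")
            .build();

        System.out.println(NetworkModule.HTTP_TYPE_SETTING.get(settings));         // custom
        System.out.println(NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings)); // default_custom
    }
}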