use of org.opensearch.action.admin.indices.stats.IndexStats in project OpenSearch by opensearch-project.
the class BlobStoreIncrementalityIT method testForceMergeCausesFullSnapshot.
public void testForceMergeCausesFullSnapshot() throws Exception {
    internalCluster().startMasterOnlyNode();
    internalCluster().ensureAtLeastNumDataNodes(2);
    final String indexName = "test-index";
    createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build());
    ensureGreen(indexName);
    logger.info("--> adding some documents to test index and flush in between to get at least two segments");
    for (int j = 0; j < 2; j++) {
        final BulkRequest bulkRequest = new BulkRequest();
        for (int i = 0; i < scaledRandomIntBetween(1, 100); ++i) {
            bulkRequest.add(new IndexRequest(indexName).source("foo" + j, "bar" + i));
        }
        client().bulk(bulkRequest).get();
        flushAndRefresh(indexName);
    }
    final IndexStats indexStats = client().admin().indices().prepareStats(indexName).get().getIndex(indexName);
    assertThat(indexStats.getIndexShards().get(0).getPrimary().getSegments().getCount(), greaterThan(1L));
    final String snapshot1 = "snap-1";
    final String repo = "test-repo";
    createRepository(repo, "fs");
    logger.info("--> creating snapshot 1");
    client().admin().cluster().prepareCreateSnapshot(repo, snapshot1).setIndices(indexName).setWaitForCompletion(true).get();
    logger.info("--> force merging down to a single segment");
    final ForceMergeResponse forceMergeResponse = client().admin()
        .indices()
        .prepareForceMerge(indexName)
        .setMaxNumSegments(1)
        .setFlush(true)
        .get();
    assertThat(forceMergeResponse.getFailedShards(), is(0));
    final String snapshot2 = "snap-2";
    logger.info("--> creating snapshot 2");
    client().admin().cluster().prepareCreateSnapshot(repo, snapshot2).setIndices(indexName).setWaitForCompletion(true).get();
    logger.info("--> asserting that the two snapshots refer to different files in the repository");
    final SnapshotStats secondSnapshotShardStatus = getStats(repo, snapshot2).getIndices()
        .get(indexName)
        .getShards()
        .get(0)
        .getStats();
    assertThat(secondSnapshotShardStatus.getIncrementalFileCount(), greaterThan(0));
}
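The same stats API the test reads can be queried on its own to check how many segments a shard currently has. A minimal sketch, assuming an existing index named "my-index" and the default stats flags (which include segment stats); the index name is hypothetical and not part of the test above:

    final IndexStats stats = client().admin().indices().prepareStats("my-index").get().getIndex("my-index");
    // segment count of shard 0's primary copy, the same value the test asserts is > 1 before force merging
    final long segmentCount = stats.getIndexShards().get(0).getPrimary().getSegments().getCount();
    logger.info("--> shard 0 primary currently has {} segments", segmentCount);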
use of org.opensearch.action.admin.indices.stats.IndexStats in project OpenSearch by opensearch-project.
the class RestShardsActionTests method testBuildTable.
public void testBuildTable() {
    final int numShards = randomIntBetween(1, 5);
    DiscoveryNode localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT);
    List<ShardRouting> shardRoutings = new ArrayList<>(numShards);
    Map<ShardRouting, ShardStats> shardStatsMap = new HashMap<>();
    String index = "index";
    for (int i = 0; i < numShards; i++) {
        ShardRoutingState shardRoutingState = ShardRoutingState.fromValue((byte) randomIntBetween(2, 3));
        ShardRouting shardRouting = TestShardRouting.newShardRouting(index, i, localNode.getId(), randomBoolean(), shardRoutingState);
        Path path = createTempDir().resolve("indices")
            .resolve(shardRouting.shardId().getIndex().getUUID())
            .resolve(String.valueOf(shardRouting.shardId().id()));
        ShardStats shardStats = new ShardStats(shardRouting, new ShardPath(false, path, path, shardRouting.shardId()), null, null, null, null);
        shardStatsMap.put(shardRouting, shardStats);
        shardRoutings.add(shardRouting);
    }

    IndexStats indexStats = mock(IndexStats.class);
    when(indexStats.getPrimaries()).thenReturn(new CommonStats());
    when(indexStats.getTotal()).thenReturn(new CommonStats());

    IndicesStatsResponse stats = mock(IndicesStatsResponse.class);
    when(stats.asMap()).thenReturn(shardStatsMap);

    DiscoveryNodes discoveryNodes = mock(DiscoveryNodes.class);
    when(discoveryNodes.get(localNode.getId())).thenReturn(localNode);

    ClusterStateResponse state = mock(ClusterStateResponse.class);
    RoutingTable routingTable = mock(RoutingTable.class);
    when(routingTable.allShards()).thenReturn(shardRoutings);
    ClusterState clusterState = mock(ClusterState.class);
    when(clusterState.routingTable()).thenReturn(routingTable);
    when(clusterState.nodes()).thenReturn(discoveryNodes);
    when(state.getState()).thenReturn(clusterState);

    final RestShardsAction action = new RestShardsAction();
    final Table table = action.buildTable(new FakeRestRequest(), state, stats);

    // now, verify the table is correct
    List<Table.Cell> headers = table.getHeaders();
    assertThat(headers.get(0).value, equalTo("index"));
    assertThat(headers.get(1).value, equalTo("shard"));
    assertThat(headers.get(2).value, equalTo("prirep"));
    assertThat(headers.get(3).value, equalTo("state"));
    assertThat(headers.get(4).value, equalTo("docs"));
    assertThat(headers.get(5).value, equalTo("store"));
    assertThat(headers.get(6).value, equalTo("ip"));
    assertThat(headers.get(7).value, equalTo("id"));
    assertThat(headers.get(8).value, equalTo("node"));

    final List<List<Table.Cell>> rows = table.getRows();
    assertThat(rows.size(), equalTo(numShards));

    Iterator<ShardRouting> shardRoutingsIt = shardRoutings.iterator();
    for (final List<Table.Cell> row : rows) {
        ShardRouting shardRouting = shardRoutingsIt.next();
        ShardStats shardStats = shardStatsMap.get(shardRouting);
        assertThat(row.get(0).value, equalTo(shardRouting.getIndexName()));
        assertThat(row.get(1).value, equalTo(shardRouting.getId()));
        assertThat(row.get(2).value, equalTo(shardRouting.primary() ? "p" : "r"));
        assertThat(row.get(3).value, equalTo(shardRouting.state()));
        assertThat(row.get(6).value, equalTo(localNode.getHostAddress()));
        assertThat(row.get(7).value, equalTo(localNode.getId()));
        assertThat(row.get(69).value, equalTo(shardStats.getDataPath()));
        assertThat(row.get(70).value, equalTo(shardStats.getStatePath()));
    }
}
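The per-shard lookup that buildTable relies on goes through IndicesStatsResponse.asMap(), which keys ShardStats by ShardRouting. A minimal sketch of that lookup pattern, reusing the shardRoutings list and the stats mock from the test above (not additional assertions from the original test):

    // the mocked asMap() lets the action resolve stats for each routing entry it renders
    for (ShardRouting routing : shardRoutings) {
        ShardStats perShard = stats.asMap().get(routing);
        assertNotNull(perShard);
        assertEquals(routing, perShard.getShardRouting());
    }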
use of org.opensearch.action.admin.indices.stats.IndexStats in project OpenSearch by opensearch-project.
the class ExceptionRetryIT method testRetryDueToExceptionOnNetworkLayer.
/**
 * Tests the retry mechanism when indexing. If an exception occurs during indexing, the request is retried before finally
 * failing. If auto-generated ids are used, this must not lead to duplicate ids.
 * See https://github.com/elastic/elasticsearch/issues/8788
 */
public void testRetryDueToExceptionOnNetworkLayer() throws ExecutionException, InterruptedException, IOException {
    final AtomicBoolean exceptionThrown = new AtomicBoolean(false);
    int numDocs = scaledRandomIntBetween(100, 1000);
    Client client = internalCluster().coordOnlyNodeClient();
    NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
    NodeStats unluckyNode = randomFrom(nodeStats.getNodes().stream().filter((s) -> s.getNode().isDataNode()).collect(Collectors.toList()));
    assertAcked(
        client().admin()
            .indices()
            .prepareCreate("index")
            .setSettings(Settings.builder().put("index.number_of_replicas", 1).put("index.number_of_shards", 5))
    );
    ensureGreen("index");
    logger.info("unlucky node: {}", unluckyNode.getNode());

    // create a transport service that throws a ConnectTransportException for one bulk request and therefore triggers a retry.
    for (NodeStats dataNode : nodeStats.getNodes()) {
        MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(
            TransportService.class,
            dataNode.getNode().getName()
        ));
        mockTransportService.addSendBehavior(
            internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()),
            (connection, requestId, action, request, options) -> {
                connection.sendRequest(requestId, action, request, options);
                if (action.equals(TransportShardBulkAction.ACTION_NAME) && exceptionThrown.compareAndSet(false, true)) {
                    logger.debug("Throw ConnectTransportException");
                    throw new ConnectTransportException(connection.getNode(), action);
                }
            }
        );
    }

    BulkRequestBuilder bulkBuilder = client.prepareBulk();
    for (int i = 0; i < numDocs; i++) {
        XContentBuilder doc = jsonBuilder().startObject().field("foo", "bar").endObject();
        bulkBuilder.add(client.prepareIndex("index").setSource(doc));
    }

    BulkResponse response = bulkBuilder.get();
    if (response.hasFailures()) {
        for (BulkItemResponse singleIndexResponse : response.getItems()) {
            if (singleIndexResponse.isFailed()) {
                fail("None of the bulk items should fail but got " + singleIndexResponse.getFailureMessage());
            }
        }
    }

    refresh();
    SearchResponse searchResponse = client().prepareSearch("index").setSize(numDocs * 2).addStoredField("_id").get();

    Set<String> uniqueIds = new HashSet<>();
    long dupCounter = 0;
    boolean foundDuplicateAlready = false;
    for (int i = 0; i < searchResponse.getHits().getHits().length; i++) {
        if (!uniqueIds.add(searchResponse.getHits().getHits()[i].getId())) {
            if (!foundDuplicateAlready) {
                SearchResponse dupIdResponse = client().prepareSearch("index")
                    .setQuery(termQuery("_id", searchResponse.getHits().getHits()[i].getId()))
                    .setExplain(true)
                    .get();
                assertThat(dupIdResponse.getHits().getTotalHits().value, greaterThan(1L));
                logger.info("found a duplicate id:");
                for (SearchHit hit : dupIdResponse.getHits()) {
                    logger.info("Doc {} was found on shard {}", hit.getId(), hit.getShard().getShardId());
                }
                logger.info("will not print anymore in case more duplicates are found.");
                foundDuplicateAlready = true;
            }
            dupCounter++;
        }
    }
    assertSearchResponse(searchResponse);
    assertThat(dupCounter, equalTo(0L));
    assertHitCount(searchResponse, numDocs);

    IndicesStatsResponse index = client().admin().indices().prepareStats("index").clear().setSegments(true).get();
    IndexStats indexStats = index.getIndex("index");
    long maxUnsafeAutoIdTimestamp = Long.MIN_VALUE;
    for (IndexShardStats indexShardStats : indexStats) {
        for (ShardStats shardStats : indexShardStats) {
            SegmentsStats segments = shardStats.getStats().getSegments();
            maxUnsafeAutoIdTimestamp = Math.max(maxUnsafeAutoIdTimestamp, segments.getMaxUnsafeAutoIdTimestamp());
        }
    }
    assertTrue("exception must have been thrown otherwise setup is broken", exceptionThrown.get());
    assertTrue("maxUnsafeAutoIdTimestamp must be >= 0, since we expect at least one retry", maxUnsafeAutoIdTimestamp > -1);
}
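The nested iteration at the end (IndexStats over IndexShardStats over ShardStats) is the general way to fold any per-shard statistic into an index-wide value. A minimal sketch under the same assumptions as the test (an index named "index" with segment stats requested), here summing segment counts across all shard copies rather than taking a timestamp maximum:

    long totalSegments = 0;
    for (IndexShardStats shardLevel : indexStats) {
        for (ShardStats shardCopy : shardLevel) {
            // getStats() exposes the CommonStats for this shard copy; segments were requested above
            totalSegments += shardCopy.getStats().getSegments().getCount();
        }
    }
    logger.info("index has {} segments across all shard copies", totalSegments);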
use of org.opensearch.action.admin.indices.stats.IndexStats in project OpenSearch by opensearch-project.
the class IndexStatsIT method assertCumulativeQueryCacheStats.
private void assertCumulativeQueryCacheStats(IndicesStatsResponse response) {
    assertAllSuccessful(response);
    QueryCacheStats total = response.getTotal().queryCache;
    QueryCacheStats indexTotal = new QueryCacheStats();
    QueryCacheStats shardTotal = new QueryCacheStats();
    for (IndexStats indexStats : response.getIndices().values()) {
        indexTotal.add(indexStats.getTotal().queryCache);
        // only add the shards of the current index, otherwise shards are counted once per index
        for (ShardStats shardStats : indexStats.getShards()) {
            shardTotal.add(shardStats.getStats().queryCache);
        }
    }
    assertEquals(total, indexTotal);
    assertEquals(total, shardTotal);
}
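A hedged usage sketch for the helper above, assuming an index already exists and the stats request explicitly enables query cache stats via the builder's setQueryCache flag; the index name "index" is only illustrative:

    IndicesStatsResponse response = client().admin().indices().prepareStats("index").setQueryCache(true).get();
    assertCumulativeQueryCacheStats(response);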
use of org.opensearch.action.admin.indices.stats.IndexStats in project OpenSearch by opensearch-project.
the class TransportRolloverActionTests method createIndicesStatResponse.
private IndicesStatsResponse createIndicesStatResponse(String indexName, long totalDocs, long primariesDocs) {
    final CommonStats primaryStats = mock(CommonStats.class);
    when(primaryStats.getDocs()).thenReturn(new DocsStats(primariesDocs, 0, between(1, 10000)));
    final CommonStats totalStats = mock(CommonStats.class);
    when(totalStats.getDocs()).thenReturn(new DocsStats(totalDocs, 0, between(1, 10000)));
    final IndicesStatsResponse response = mock(IndicesStatsResponse.class);
    when(response.getPrimaries()).thenReturn(primaryStats);
    when(response.getTotal()).thenReturn(totalStats);
    final IndexStats indexStats = mock(IndexStats.class);
    when(response.getIndex(indexName)).thenReturn(indexStats);
    when(indexStats.getPrimaries()).thenReturn(primaryStats);
    when(indexStats.getTotal()).thenReturn(totalStats);
    return response;
}
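A hedged usage sketch for this factory method; "logs-000001" and the doc counts are made-up values, shown only to illustrate that the mocked response echoes back whatever counts were injected:

    IndicesStatsResponse statsResponse = createIndicesStatResponse("logs-000001", 200L, 100L);
    assertEquals(100L, statsResponse.getIndex("logs-000001").getPrimaries().getDocs().getCount());
    assertEquals(200L, statsResponse.getTotal().getDocs().getCount());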