Use of com.carrotsearch.hppc.ObjectLongMap in project elasticsearch by elastic.
The class ReplicaShardAllocator, method findMatchingNodes.
private MatchingNodes findMatchingNodes(ShardRouting shard, RoutingAllocation allocation, TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore, AsyncShardFetch.FetchResult<NodeStoreFilesMetaData> data, boolean explain) {
    ObjectLongMap<DiscoveryNode> nodesToSize = new ObjectLongHashMap<>();
    Map<String, NodeAllocationResult> nodeDecisions = explain ? new HashMap<>() : null;
    for (Map.Entry<DiscoveryNode, NodeStoreFilesMetaData> nodeStoreEntry : data.getData().entrySet()) {
        DiscoveryNode discoNode = nodeStoreEntry.getKey();
        TransportNodesListShardStoreMetaData.StoreFilesMetaData storeFilesMetaData = nodeStoreEntry.getValue().storeFilesMetaData();
        // we don't have any files at all, it is an empty index
        if (storeFilesMetaData.isEmpty()) {
            continue;
        }
        RoutingNode node = allocation.routingNodes().node(discoNode.getId());
        if (node == null) {
            continue;
        }
        // check if we can allocate on that node...
        // we only check for NO, since if this node is THROTTLING and it has enough "same data"
        // then we will try and assign it next time
        Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
        long matchingBytes = -1;
        if (explain) {
            matchingBytes = computeMatchingBytes(primaryStore, storeFilesMetaData);
            ShardStoreInfo shardStoreInfo = new ShardStoreInfo(matchingBytes);
            nodeDecisions.put(node.nodeId(), new NodeAllocationResult(discoNode, shardStoreInfo, decision));
        }
        if (decision.type() == Decision.Type.NO) {
            continue;
        }
        if (matchingBytes < 0) {
            matchingBytes = computeMatchingBytes(primaryStore, storeFilesMetaData);
        }
        nodesToSize.put(discoNode, matchingBytes);
        if (logger.isTraceEnabled()) {
            if (matchingBytes == Long.MAX_VALUE) {
                logger.trace("{}: node [{}] has same sync id {} as primary", shard, discoNode.getName(), storeFilesMetaData.syncId());
            } else {
                logger.trace("{}: node [{}] has [{}/{}] bytes of re-usable data", shard, discoNode.getName(), new ByteSizeValue(matchingBytes), matchingBytes);
            }
        }
    }
    return new MatchingNodes(nodesToSize, nodeDecisions);
}
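The hppc-specific piece of the method above is nodesToSize: ObjectLongHashMap stores the per-node byte counts as primitive longs instead of boxed Long values, and iteration goes through cursors rather than Map.Entry objects. A minimal standalone sketch of that pattern, assuming nothing from the Elasticsearch source beyond the hppc API (the class name and keys below are illustrative):

import com.carrotsearch.hppc.ObjectLongHashMap;
import com.carrotsearch.hppc.ObjectLongMap;
import com.carrotsearch.hppc.cursors.ObjectLongCursor;

public class MatchingBytesSketch {
    public static void main(String[] args) {
        // Accumulate a primitive long per key, as findMatchingNodes does for the
        // number of re-usable bytes per DiscoveryNode (String keys are stand-ins).
        ObjectLongMap<String> nodesToSize = new ObjectLongHashMap<>();
        nodesToSize.put("node-a", 1_048_576L);
        nodesToSize.put("node-b", Long.MAX_VALUE); // sentinel used above for "same sync id as primary"

        // hppc iteration yields reusable ObjectLongCursor objects, not Map.Entry.
        for (ObjectLongCursor<String> cursor : nodesToSize) {
            System.out.println(cursor.key + " -> " + cursor.value + " matching bytes");
        }
    }
}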
Use of com.carrotsearch.hppc.ObjectLongMap in project crate by crate.
The class TableStatsServiceTest, method testRowsToTableStatConversion.
@Test
public void testRowsToTableStatConversion() throws InterruptedException, ExecutionException, TimeoutException {
    CompletableFuture<ObjectLongMap<TableIdent>> statsFuture = new CompletableFuture<>();
    TableStatsService.TableStatsResultReceiver receiver = new TableStatsService.TableStatsResultReceiver(statsFuture::complete);
    receiver.setNextRow(new RowN(new Object[] { 1L, "custom", "foo" }));
    receiver.setNextRow(new RowN(new Object[] { 2L, "doc", "foo" }));
    receiver.setNextRow(new RowN(new Object[] { 3L, "bar", "foo" }));
    receiver.allFinished(false);
    ObjectLongMap<TableIdent> stats = statsFuture.get(10, TimeUnit.SECONDS);
    assertThat(stats.size(), is(3));
    assertThat(stats.get(new TableIdent("bar", "foo")), is(3L));
}
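The assertions rely on ObjectLongMap returning primitive longs from get and size. A short sketch of the same lookup behavior, using plain String keys in place of Crate's TableIdent (table names copied from the test, everything else illustrative):

import com.carrotsearch.hppc.ObjectLongHashMap;
import com.carrotsearch.hppc.ObjectLongMap;

public class TableStatsLookupSketch {
    public static void main(String[] args) {
        // One entry per (rowCount, schema, table) row, keyed here by "schema.table".
        ObjectLongMap<String> stats = new ObjectLongHashMap<>();
        stats.put("custom.foo", 1L);
        stats.put("doc.foo", 2L);
        stats.put("bar.foo", 3L);

        System.out.println(stats.size());         // 3
        System.out.println(stats.get("bar.foo")); // 3
        // Absent keys come back as the primitive default (0L) rather than null;
        // containsKey distinguishes "missing" from "stored 0".
        System.out.println(stats.containsKey("missing.table")); // false
    }
}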
Use of com.carrotsearch.hppc.ObjectLongMap in project crate by crate.
The class IndexShard, method maybeSyncGlobalCheckpoint.
/**
 * Syncs the global checkpoint to the replicas if the global checkpoint on at least one replica is behind the global checkpoint on the
 * primary.
 */
public void maybeSyncGlobalCheckpoint(final String reason) {
    verifyNotClosed();
    assert shardRouting.primary() : "only call maybeSyncGlobalCheckpoint on primary shard";
    if (replicationTracker.isPrimaryMode() == false) {
        return;
    }
    assert assertPrimaryMode();
    // only sync if there are no operations in flight, or when using async durability
    final SeqNoStats stats = getEngine().getSeqNoStats(replicationTracker.getGlobalCheckpoint());
    final boolean asyncDurability = indexSettings().getTranslogDurability() == Translog.Durability.ASYNC;
    if (stats.getMaxSeqNo() == stats.getGlobalCheckpoint() || asyncDurability) {
        final ObjectLongMap<String> globalCheckpoints = getInSyncGlobalCheckpoints();
        final long globalCheckpoint = replicationTracker.getGlobalCheckpoint();
        // async durability means that the local checkpoint might lag (as it is only advanced on fsync)
        // periodically ask for the newest local checkpoint by syncing the global checkpoint, so that ultimately the global
        // checkpoint can be synced. Also take into account that a shard might be pending sync, which means that it isn't
        // in the in-sync set just yet but might be blocked on waiting for its persisted local checkpoint to catch up to
        // the global checkpoint.
        final boolean syncNeeded =
            (asyncDurability && (stats.getGlobalCheckpoint() < stats.getMaxSeqNo() || replicationTracker.pendingInSync()))
                // check if the persisted global checkpoint
                || StreamSupport.stream(globalCheckpoints.values().spliterator(), false).anyMatch(v -> v.value < globalCheckpoint);
        // only sync if index is not closed and there is a shard lagging the primary
        if (syncNeeded && indexSettings.getIndexMetadata().getState() == IndexMetadata.State.OPEN) {
            logger.trace("syncing global checkpoint for [{}]", reason);
            globalCheckpointSyncer.run();
        }
    }
}
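The syncNeeded expression streams over the map's primitive values: ObjectLongMap.values() is iterable over LongCursor objects, which is what lets StreamSupport wrap it. A small sketch of just that pattern in isolation, with made-up allocation ids and checkpoint values:

import com.carrotsearch.hppc.ObjectLongHashMap;
import com.carrotsearch.hppc.ObjectLongMap;
import java.util.stream.StreamSupport;

public class LaggingCopySketch {
    public static void main(String[] args) {
        // Per-allocation-id global checkpoints, analogous to getInSyncGlobalCheckpoints().
        ObjectLongMap<String> globalCheckpoints = new ObjectLongHashMap<>();
        globalCheckpoints.put("alloc-1", 42L);
        globalCheckpoints.put("alloc-2", 17L);

        final long globalCheckpoint = 42L; // stands in for the primary's global checkpoint

        // values() yields LongCursor elements, so v.value is the primitive long,
        // exactly as in the anyMatch call inside maybeSyncGlobalCheckpoint.
        boolean copyLagging = StreamSupport.stream(globalCheckpoints.values().spliterator(), false)
            .anyMatch(v -> v.value < globalCheckpoint);

        System.out.println(copyLagging); // true: alloc-2 is behind
    }
}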