Use of com.carrotsearch.hppc.ObjectLongHashMap in project elasticsearch by elastic.
The class ReplicaShardAllocator, method findMatchingNodes.
    private MatchingNodes findMatchingNodes(ShardRouting shard, RoutingAllocation allocation,
                                            TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore,
                                            AsyncShardFetch.FetchResult<NodeStoreFilesMetaData> data,
                                            boolean explain) {
        ObjectLongMap<DiscoveryNode> nodesToSize = new ObjectLongHashMap<>();
        Map<String, NodeAllocationResult> nodeDecisions = explain ? new HashMap<>() : null;
        for (Map.Entry<DiscoveryNode, NodeStoreFilesMetaData> nodeStoreEntry : data.getData().entrySet()) {
            DiscoveryNode discoNode = nodeStoreEntry.getKey();
            TransportNodesListShardStoreMetaData.StoreFilesMetaData storeFilesMetaData = nodeStoreEntry.getValue().storeFilesMetaData();
            // we don't have any files at all, it is an empty index
            if (storeFilesMetaData.isEmpty()) {
                continue;
            }
            RoutingNode node = allocation.routingNodes().node(discoNode.getId());
            if (node == null) {
                continue;
            }
            // check if we can allocate on that node...
            // we only check for NO, since if this node is THROTTLING and it has enough "same data"
            // then we will try and assign it next time
            Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
            long matchingBytes = -1;
            if (explain) {
                matchingBytes = computeMatchingBytes(primaryStore, storeFilesMetaData);
                ShardStoreInfo shardStoreInfo = new ShardStoreInfo(matchingBytes);
                nodeDecisions.put(node.nodeId(), new NodeAllocationResult(discoNode, shardStoreInfo, decision));
            }
            if (decision.type() == Decision.Type.NO) {
                continue;
            }
            if (matchingBytes < 0) {
                matchingBytes = computeMatchingBytes(primaryStore, storeFilesMetaData);
            }
            nodesToSize.put(discoNode, matchingBytes);
            if (logger.isTraceEnabled()) {
                if (matchingBytes == Long.MAX_VALUE) {
                    logger.trace("{}: node [{}] has same sync id {} as primary", shard, discoNode.getName(), storeFilesMetaData.syncId());
                } else {
                    logger.trace("{}: node [{}] has [{}/{}] bytes of re-usable data", shard, discoNode.getName(), new ByteSizeValue(matchingBytes), matchingBytes);
                }
            }
        }
        return new MatchingNodes(nodesToSize, nodeDecisions);
    }
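In this snippet, ObjectLongHashMap acts as a primitive-valued map from each candidate DiscoveryNode to the number of bytes it already shares with the primary shard, avoiding boxed Long values on a hot allocation path. The following minimal sketch isolates that pattern: it fills an ObjectLongHashMap and then scans it with HPPC cursors to pick the entry with the largest value, which is roughly what MatchingNodes derives from the map it receives. It is not the Elasticsearch implementation; the class name NodesToSizeSketch, the string node ids, and the byte counts are made up for illustration.

    import com.carrotsearch.hppc.ObjectLongHashMap;
    import com.carrotsearch.hppc.ObjectLongMap;
    import com.carrotsearch.hppc.cursors.ObjectLongCursor;

    public class NodesToSizeSketch {
        public static void main(String[] args) {
            // Hypothetical node identifiers standing in for DiscoveryNode instances.
            ObjectLongMap<String> nodesToSize = new ObjectLongHashMap<>();
            nodesToSize.put("node-a", 1_048_576L);     // 1 MB of re-usable data
            nodesToSize.put("node-b", 52_428_800L);    // 50 MB of re-usable data
            nodesToSize.put("node-c", Long.MAX_VALUE); // sentinel: same sync id as the primary

            // Scan the map with HPPC cursors and keep the node with the most matching bytes.
            String bestNode = null;
            long bestBytes = -1;
            for (ObjectLongCursor<String> cursor : nodesToSize) {
                if (cursor.value > bestBytes) {
                    bestBytes = cursor.value;
                    bestNode = cursor.key;
                }
            }
            System.out.println("best node: " + bestNode + " with " + bestBytes + " matching bytes");
        }
    }

The cursor-based loop iterates key/value pairs without allocating an entry object per element, which is the main reason the allocator uses an HPPC map here instead of a java.util.Map<DiscoveryNode, Long>.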
Use of com.carrotsearch.hppc.ObjectLongHashMap in project elasticsearch by elastic.
The class BytesRefHashTests, method testDuell.
    public void testDuell() {
        final int len = randomIntBetween(1, 100000);
        final BytesRef[] values = new BytesRef[len];
        for (int i = 0; i < values.length; ++i) {
            values[i] = new BytesRef(randomAsciiOfLength(5));
        }
        // reference mapping from value to the id that BytesRefHash is expected to assign
        final ObjectLongMap<BytesRef> valueToId = new ObjectLongHashMap<>();
        final BytesRef[] idToValue = new BytesRef[values.length];
        final int iters = randomInt(1000000);
        for (int i = 0; i < iters; ++i) {
            final BytesRef value = randomFrom(values);
            if (valueToId.containsKey(value)) {
                // already present: add returns -1 - id for existing keys
                assertEquals(-1 - valueToId.get(value), hash.add(value, value.hashCode()));
            } else {
                // new key: the assigned id equals the current number of distinct keys
                assertEquals(valueToId.size(), hash.add(value, value.hashCode()));
                idToValue[valueToId.size()] = value;
                valueToId.put(value, valueToId.size());
            }
        }
        assertEquals(valueToId.size(), hash.size());
        for (Iterator<ObjectLongCursor<BytesRef>> iterator = valueToId.iterator(); iterator.hasNext(); ) {
            final ObjectLongCursor<BytesRef> next = iterator.next();
            assertEquals(next.value, hash.find(next.key, next.key.hashCode()));
        }
        for (long i = 0; i < hash.capacity(); ++i) {
            final long id = hash.id(i);
            BytesRef spare = new BytesRef();
            if (id >= 0) {
                hash.get(id, spare);
                assertEquals(idToValue[(int) id], spare);
            }
        }
        hash.close();
    }
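Here ObjectLongHashMap serves as the test's oracle: it records which long id each distinct BytesRef was assigned, and cursor iteration then replays those pairs against BytesRefHash#find. The sketch below shows just that map usage, stripped of BytesRefHash and the randomized test harness, so it can run on its own. The class name ValueToIdSketch and the sample strings are invented for illustration and do not come from the test.

    import com.carrotsearch.hppc.ObjectLongHashMap;
    import com.carrotsearch.hppc.ObjectLongMap;
    import com.carrotsearch.hppc.cursors.ObjectLongCursor;
    import org.apache.lucene.util.BytesRef;

    public class ValueToIdSketch {
        public static void main(String[] args) {
            // Map each distinct value to the id it was assigned, as the test's oracle does.
            ObjectLongMap<BytesRef> valueToId = new ObjectLongHashMap<>();
            String[] inputs = {"foo", "bar", "foo", "baz", "bar"};
            for (String s : inputs) {
                BytesRef value = new BytesRef(s);
                if (!valueToId.containsKey(value)) {
                    // New values get ids 0, 1, 2, ... in insertion order.
                    valueToId.put(value, valueToId.size());
                }
            }
            // Walk key/value pairs through cursors, mirroring the verification loop above.
            for (ObjectLongCursor<BytesRef> cursor : valueToId) {
                System.out.println(cursor.key.utf8ToString() + " -> id " + cursor.value);
            }
        }
    }

BytesRef implements equals and hashCode over its byte content, which is what makes it usable as a key in an HPPC object-to-long map without any extra wrapping.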