Use of com.carrotsearch.hppc.ObjectFloatHashMap in project elasticsearch by elastic.
In class Gateway, method performStateRecovery:
public void performStateRecovery(final GatewayStateRecoveredListener listener) throws GatewayException {
    String[] nodesIds = clusterService.state().nodes().getMasterNodes().keys().toArray(String.class);
    logger.trace("performing state recovery from {}", Arrays.toString(nodesIds));
    TransportNodesListGatewayMetaState.NodesGatewayMetaState nodesState = listGatewayMetaState.list(nodesIds, null).actionGet();
    int requiredAllocation = Math.max(1, minimumMasterNodesProvider.get());
    if (nodesState.hasFailures()) {
        for (FailedNodeException failedNodeException : nodesState.failures()) {
            logger.warn("failed to fetch state from node", failedNodeException);
        }
    }
    ObjectFloatHashMap<Index> indices = new ObjectFloatHashMap<>();
    MetaData electedGlobalState = null;
    int found = 0;
    for (TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState.getNodes()) {
        if (nodeState.metaData() == null) {
            continue;
        }
        found++;
        if (electedGlobalState == null) {
            electedGlobalState = nodeState.metaData();
        } else if (nodeState.metaData().version() > electedGlobalState.version()) {
            electedGlobalState = nodeState.metaData();
        }
        for (ObjectCursor<IndexMetaData> cursor : nodeState.metaData().indices().values()) {
            indices.addTo(cursor.value.getIndex(), 1);
        }
    }
    if (found < requiredAllocation) {
        listener.onFailure("found [" + found + "] metadata states, required [" + requiredAllocation + "]");
        return;
    }
    // update the global state, and clean the indices, we elect them in the next phase
    MetaData.Builder metaDataBuilder = MetaData.builder(electedGlobalState).removeAllIndices();
    assert !indices.containsKey(null);
    final Object[] keys = indices.keys;
    for (int i = 0; i < keys.length; i++) {
        if (keys[i] != null) {
            Index index = (Index) keys[i];
            IndexMetaData electedIndexMetaData = null;
            int indexMetaDataCount = 0;
            for (TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState.getNodes()) {
                if (nodeState.metaData() == null) {
                    continue;
                }
                IndexMetaData indexMetaData = nodeState.metaData().index(index);
                if (indexMetaData == null) {
                    continue;
                }
                if (electedIndexMetaData == null) {
                    electedIndexMetaData = indexMetaData;
                } else if (indexMetaData.getVersion() > electedIndexMetaData.getVersion()) {
                    electedIndexMetaData = indexMetaData;
                }
                indexMetaDataCount++;
            }
            if (electedIndexMetaData != null) {
                if (indexMetaDataCount < requiredAllocation) {
                    logger.debug("[{}] found [{}], required [{}], not adding", index, indexMetaDataCount, requiredAllocation);
                }
                // TODO if this logging statement is correct then we are missing an else here
                try {
                    if (electedIndexMetaData.getState() == IndexMetaData.State.OPEN) {
                        // verify that we can actually create this index - if not we recover it as closed with lots of warn logs
                        indicesService.verifyIndexMetadata(electedIndexMetaData, electedIndexMetaData);
                    }
                } catch (Exception e) {
                    final Index electedIndex = electedIndexMetaData.getIndex();
                    logger.warn((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("recovering index {} failed - recovering as closed", electedIndex), e);
                    electedIndexMetaData = IndexMetaData.builder(electedIndexMetaData).state(IndexMetaData.State.CLOSE).build();
                }
                metaDataBuilder.put(electedIndexMetaData, false);
            }
        }
    }
    final ClusterSettings clusterSettings = clusterService.getClusterSettings();
    metaDataBuilder.persistentSettings(clusterSettings.archiveUnknownOrInvalidSettings(metaDataBuilder.persistentSettings(), e -> logUnknownSetting("persistent", e), (e, ex) -> logInvalidSetting("persistent", e, ex)));
    metaDataBuilder.transientSettings(clusterSettings.archiveUnknownOrInvalidSettings(metaDataBuilder.transientSettings(), e -> logUnknownSetting("transient", e), (e, ex) -> logInvalidSetting("transient", e, ex)));
    ClusterState.Builder builder = ClusterState.builder(clusterService.getClusterName());
    builder.metaData(metaDataBuilder);
    listener.onSuccess(builder.build());
}
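The hppc detail worth noting here is addTo: the map stores primitive float values, and each addTo(index, 1) either inserts the key with a value of 1 or bumps the stored count, so the Gateway can tally how many node states reported each index without boxing. A minimal, self-contained sketch of that counting pattern, with invented index names standing in for the real Index objects:

import com.carrotsearch.hppc.ObjectFloatHashMap;
import com.carrotsearch.hppc.cursors.ObjectFloatCursor;

public class AddToCountingSketch {

    public static void main(String[] args) {
        // one entry per node state that reported the index, mirroring the Gateway loop (names invented)
        String[] reportedIndices = { "logs-2023", "metrics", "logs-2023", "logs-2023" };
        ObjectFloatHashMap<String> counts = new ObjectFloatHashMap<>();
        for (String index : reportedIndices) {
            // inserts the key with 1 if absent, otherwise adds 1 to the stored value
            counts.addTo(index, 1);
        }
        // iteration yields cursors holding the key and the primitive float value
        for (ObjectFloatCursor<String> cursor : counts) {
            System.out.println(cursor.key + " reported by " + (int) cursor.value + " node(s)");
        }
    }
}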
Use of com.carrotsearch.hppc.ObjectFloatHashMap in project drill by axbaretto.
In class AffinityCreator, method getAffinityMap:
public static <T extends CompleteWork> List<EndpointAffinity> getAffinityMap(List<T> work) {
    Stopwatch watch = Stopwatch.createStarted();
    long totalBytes = 0;
    for (CompleteWork entry : work) {
        totalBytes += entry.getTotalBytes();
    }
    ObjectFloatHashMap<DrillbitEndpoint> affinities = new ObjectFloatHashMap<DrillbitEndpoint>();
    for (CompleteWork entry : work) {
        for (ObjectLongCursor<DrillbitEndpoint> cursor : entry.getByteMap()) {
            long bytes = cursor.value;
            float affinity = (float) bytes / (float) totalBytes;
            affinities.putOrAdd(cursor.key, affinity, affinity);
        }
    }
    List<EndpointAffinity> affinityList = Lists.newLinkedList();
    for (ObjectFloatCursor<DrillbitEndpoint> d : affinities) {
        logger.debug("Endpoint {} has affinity {}", d.key.getAddress(), d.value);
        affinityList.add(new EndpointAffinity(d.key, d.value));
    }
    logger.debug("Took {} ms to get operator affinity", watch.elapsed(TimeUnit.MILLISECONDS));
    return affinityList;
}
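Both Drill variants rely on putOrAdd(key, putValue, incrementValue), which stores putValue for a new key and adds incrementValue to the existing value otherwise; since both arguments are the same per-chunk fraction, the map ends up holding the summed affinity per endpoint. A small sketch of that accumulation, using plain host-name strings as hypothetical stand-ins for DrillbitEndpoint:

import com.carrotsearch.hppc.ObjectFloatHashMap;
import com.carrotsearch.hppc.cursors.ObjectFloatCursor;

public class PutOrAddAffinitySketch {

    public static void main(String[] args) {
        // (endpoint, bytes) pairs for the chunks of one scan, invented for illustration
        String[] endpoints = { "node-a", "node-b", "node-a" };
        long[] bytes = { 600L, 200L, 200L };
        long totalBytes = 600L + 200L + 200L;

        ObjectFloatHashMap<String> affinities = new ObjectFloatHashMap<>();
        for (int i = 0; i < endpoints.length; i++) {
            float affinity = (float) bytes[i] / (float) totalBytes;
            // put the fraction for a new endpoint, or add it to the running total
            affinities.putOrAdd(endpoints[i], affinity, affinity);
        }
        for (ObjectFloatCursor<String> cursor : affinities) {
            System.out.println(cursor.key + " -> " + cursor.value); // node-a -> 0.8, node-b -> 0.2
        }
    }
}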
Use of com.carrotsearch.hppc.ObjectFloatHashMap in project drill by apache.
In class AffinityCreator, method getAffinityMap:
public static <T extends CompleteWork> List<EndpointAffinity> getAffinityMap(List<T> work) {
    Stopwatch watch = logger.isDebugEnabled() ? Stopwatch.createStarted() : null;
    long totalBytes = work.stream().mapToLong(CompleteWork::getTotalBytes).sum();
    ObjectFloatHashMap<DrillbitEndpoint> affinities = new ObjectFloatHashMap<>();
    for (CompleteWork entry : work) {
        for (ObjectLongCursor<DrillbitEndpoint> cursor : entry.getByteMap()) {
            long bytes = cursor.value;
            float affinity = totalBytes == 0 ? 0.0F : (float) bytes / (float) totalBytes;
            affinities.putOrAdd(cursor.key, affinity, affinity);
        }
    }
    List<EndpointAffinity> affinityList = new LinkedList<>();
    for (ObjectFloatCursor<DrillbitEndpoint> d : affinities) {
        logger.debug("Endpoint {} has affinity {}", d.key.getAddress(), d.value);
        affinityList.add(new EndpointAffinity(d.key, d.value));
    }
    if (watch != null) {
        logger.debug("Took {} ms to get operator affinity", watch.elapsed(TimeUnit.MILLISECONDS));
    }
    return affinityList;
}
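The apache variant differs from the older fork in two small but deliberate ways: the Stopwatch is only created when debug logging is enabled, and the division is guarded so that a zero-byte work list produces an affinity of 0 instead of a NaN from dividing by zero. Reading the resulting map back can also stay boxing-free; a tiny sketch, assuming the map's getOrDefault accessor and using hypothetical host names again:

import com.carrotsearch.hppc.ObjectFloatHashMap;

public class ReadBackSketch {

    public static void main(String[] args) {
        ObjectFloatHashMap<String> affinities = new ObjectFloatHashMap<>();
        affinities.put("node-a", 0.8f);

        // returns the stored value, or the supplied default when the key is absent
        float known = affinities.getOrDefault("node-a", 0.0f);
        float unknown = affinities.getOrDefault("node-z", 0.0f);
        System.out.println(known + " / " + unknown); // 0.8 / 0.0
    }
}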
Use of com.carrotsearch.hppc.ObjectFloatHashMap in project crate by crate.
In class Gateway, method performStateRecovery:
public void performStateRecovery(final GatewayStateRecoveredListener listener) throws GatewayException {
    final DiscoveryNode[] nodes = clusterService.state().nodes().getMasterNodes().values().toArray(DiscoveryNode.class);
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace("performing state recovery from {}", Arrays.toString(nodes));
    }
    var request = new TransportNodesListGatewayMetaState.Request(nodes);
    PlainActionFuture<TransportNodesListGatewayMetaState.NodesGatewayMetaState> future = PlainActionFuture.newFuture();
    client.executeLocally(TransportNodesListGatewayMetaState.TYPE, request, future);
    final TransportNodesListGatewayMetaState.NodesGatewayMetaState nodesState = future.actionGet();
    final int requiredAllocation = 1;
    if (nodesState.hasFailures()) {
        for (final FailedNodeException failedNodeException : nodesState.failures()) {
            LOGGER.warn("failed to fetch state from node", failedNodeException);
        }
    }
    final ObjectFloatHashMap<Index> indices = new ObjectFloatHashMap<>();
    Metadata electedGlobalState = null;
    int found = 0;
    for (final TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState.getNodes()) {
        if (nodeState.metadata() == null) {
            continue;
        }
        found++;
        if (electedGlobalState == null) {
            electedGlobalState = nodeState.metadata();
        } else if (nodeState.metadata().version() > electedGlobalState.version()) {
            electedGlobalState = nodeState.metadata();
        }
        for (final ObjectCursor<IndexMetadata> cursor : nodeState.metadata().indices().values()) {
            indices.addTo(cursor.value.getIndex(), 1);
        }
    }
    if (found < requiredAllocation) {
        listener.onFailure("found [" + found + "] metadata states, required [" + requiredAllocation + "]");
        return;
    }
    // update the global state, and clean the indices, we elect them in the next phase
    final Metadata.Builder metadataBuilder = Metadata.builder(electedGlobalState).removeAllIndices();
    assert !indices.containsKey(null);
    final Object[] keys = indices.keys;
    for (int i = 0; i < keys.length; i++) {
        if (keys[i] != null) {
            final Index index = (Index) keys[i];
            IndexMetadata electedIndexMetadata = null;
            int indexMetadataCount = 0;
            for (final TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState.getNodes()) {
                if (nodeState.metadata() == null) {
                    continue;
                }
                final IndexMetadata indexMetadata = nodeState.metadata().index(index);
                if (indexMetadata == null) {
                    continue;
                }
                if (electedIndexMetadata == null) {
                    electedIndexMetadata = indexMetadata;
                } else if (indexMetadata.getVersion() > electedIndexMetadata.getVersion()) {
                    electedIndexMetadata = indexMetadata;
                }
                indexMetadataCount++;
            }
            if (electedIndexMetadata != null) {
                if (indexMetadataCount < requiredAllocation) {
                    LOGGER.debug("[{}] found [{}], required [{}], not adding", index, indexMetadataCount, requiredAllocation);
                }
                // TODO if this logging statement is correct then we are missing an else here
                metadataBuilder.put(electedIndexMetadata, false);
            }
        }
    }
    ClusterState recoveredState = Function.<ClusterState>identity().andThen(state -> ClusterStateUpdaters.upgradeAndArchiveUnknownOrInvalidSettings(state, clusterService.getClusterSettings())).apply(ClusterState.builder(clusterService.getClusterName()).metadata(metadataBuilder).build());
    listener.onSuccess(recoveredState);
}
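Unlike the Drill examples, both Gateway implementations walk the map's backing keys array directly instead of iterating with cursors: empty hash slots hold null, so every non-null slot is a live key, which is also why the assert !indices.containsKey(null) guard matters. A small sketch contrasting the two equivalent iteration styles over the same map:

import com.carrotsearch.hppc.ObjectFloatHashMap;
import com.carrotsearch.hppc.cursors.ObjectFloatCursor;

public class KeysArrayScanSketch {

    public static void main(String[] args) {
        ObjectFloatHashMap<String> indices = new ObjectFloatHashMap<>();
        indices.addTo("logs-2023", 1);
        indices.addTo("metrics", 1);
        indices.addTo("logs-2023", 1);

        // cursor-based iteration: only live entries are visited
        for (ObjectFloatCursor<String> cursor : indices) {
            System.out.println(cursor.key + " -> " + cursor.value);
        }

        // raw scan of the backing array, as the Gateway code does: empty slots are null,
        // so this pattern is only safe when null is never used as a key
        final Object[] keys = indices.keys;
        for (int i = 0; i < keys.length; i++) {
            if (keys[i] != null) {
                String key = (String) keys[i];
                System.out.println(key + " -> " + indices.get(key));
            }
        }
    }
}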