use of com.carrotsearch.hppc.IntHashSet in project crate by crate.
the class NodeFetchRequestTest method testStreaming.
@Test
public void testStreaming() throws Exception {
    IntObjectHashMap<IntContainer> toFetch = new IntObjectHashMap<>();
    IntHashSet docIds = new IntHashSet(3);
    toFetch.put(1, docIds);
    NodeFetchRequest orig = new NodeFetchRequest(UUID.randomUUID(), 1, true, toFetch);
    BytesStreamOutput out = new BytesStreamOutput();
    orig.writeTo(out);
    StreamInput in = StreamInput.wrap(out.bytes());
    NodeFetchRequest streamed = new NodeFetchRequest();
    streamed.readFrom(in);
    assertThat(orig.jobId(), is(streamed.jobId()));
    assertThat(orig.fetchPhaseId(), is(streamed.fetchPhaseId()));
    assertThat(orig.isCloseContext(), is(streamed.isCloseContext()));
    assertThat(orig.toFetch().toString(), is(streamed.toFetch().toString()));
}
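The request only round-trips whatever HPPC structures were handed to it, so the core pattern here is the IntObjectHashMap<IntContainer>/IntHashSet combination itself. Below is a minimal, self-contained sketch of that pattern outside the Crate test harness; the class name and the reader/doc-id values are purely illustrative. HPPC stores primitive ints directly, so there is no Integer boxing, and iteration is done through reusable cursors rather than per-element objects.

import com.carrotsearch.hppc.IntContainer;
import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.IntObjectHashMap;
import com.carrotsearch.hppc.cursors.IntCursor;
import com.carrotsearch.hppc.cursors.IntObjectCursor;

public class ToFetchSketch {
    public static void main(String[] args) {
        // Same shape as the test's "toFetch": reader ordinal -> set of doc ids.
        IntObjectHashMap<IntContainer> toFetch = new IntObjectHashMap<>();
        // The constructor argument is an expected-elements hint, not a fixed capacity.
        IntHashSet docIds = new IntHashSet(3);
        docIds.add(10);
        docIds.add(42);
        docIds.add(42); // duplicate is ignored - set semantics over primitive ints
        toFetch.put(1, docIds);

        // HPPC iteration is cursor-based: the cursor exposes primitive key/value fields.
        for (IntObjectCursor<IntContainer> reader : toFetch) {
            for (IntCursor doc : reader.value) {
                System.out.println("reader " + reader.key + " -> doc " + doc.value);
            }
        }
    }
}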
use of com.carrotsearch.hppc.IntHashSet in project elasticsearch by elastic.
the class DuelScrollIT method create.
private TestContext create(SearchType... searchTypes) throws Exception {
    assertAcked(prepareCreate("index").addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties")
        .startObject("field1").field("type", "long").endObject()
        .startObject("field2").field("type", "keyword").endObject()
        .startObject("nested").field("type", "nested").startObject("properties")
            .startObject("field3").field("type", "long").endObject()
            .startObject("field4").field("type", "keyword").endObject()
        .endObject().endObject()
        .endObject().endObject().endObject()));
    int numDocs = 2 + randomInt(512);
    int scrollRequestSize = randomIntBetween(1, rarely() ? numDocs : numDocs / 2);
    boolean unevenRouting = randomBoolean();
    int numMissingDocs = scaledRandomIntBetween(0, numDocs / 100);
    IntHashSet missingDocs = new IntHashSet(numMissingDocs);
    for (int i = 0; i < numMissingDocs; i++) {
        while (!missingDocs.add(randomInt(numDocs))) {
        }
    }
    for (int i = 1; i <= numDocs; i++) {
        IndexRequestBuilder indexRequestBuilder = client().prepareIndex("index", "type", String.valueOf(i));
        if (missingDocs.contains(i)) {
            indexRequestBuilder.setSource("x", "y");
        } else {
            indexRequestBuilder.setSource(jsonBuilder().startObject()
                .field("field1", i).field("field2", String.valueOf(i))
                .startObject("nested").field("field3", i).field("field4", String.valueOf(i)).endObject()
                .endObject());
        }
        if (unevenRouting && randomInt(3) <= 2) {
            indexRequestBuilder.setRouting("a");
        }
        indexRandom(false, indexRequestBuilder);
    }
    refresh();
    final SortBuilder sort;
    if (randomBoolean()) {
        if (randomBoolean()) {
            sort = SortBuilders.fieldSort("field1").missing(1);
        } else {
            sort = SortBuilders.fieldSort("field2").missing("1");
        }
    } else {
        if (randomBoolean()) {
            sort = SortBuilders.fieldSort("nested.field3").missing(1);
        } else {
            sort = SortBuilders.fieldSort("nested.field4").missing("1");
        }
    }
    sort.order(randomBoolean() ? SortOrder.ASC : SortOrder.DESC);
    SearchType searchType = RandomPicks.randomFrom(random(), Arrays.asList(searchTypes));
    logger.info("numDocs={}, scrollRequestSize={}, sort={}, searchType={}", numDocs, scrollRequestSize, sort, searchType);
    return new TestContext(numDocs, scrollRequestSize, sort, searchType);
}
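The missingDocs loop above relies on IntHashSet.add(int) returning false when the value is already present, which gives a compact way to draw distinct random ids by rejection sampling. Here is a standalone sketch of that same pattern, using java.util.Random in place of the test framework's randomInt; the class and method names are illustrative only.

import com.carrotsearch.hppc.IntHashSet;
import java.util.Random;

public class DistinctRandomInts {
    // Draw 'count' distinct ints in [0, bound]. add() returns false when the value is
    // already in the set, so the inner loop simply retries until a fresh value is accepted.
    // Assumes count <= bound + 1, otherwise the loop cannot terminate.
    static IntHashSet drawDistinct(Random random, int count, int bound) {
        IntHashSet picked = new IntHashSet(count);
        for (int i = 0; i < count; i++) {
            while (!picked.add(random.nextInt(bound + 1))) {
                // collision - try another value
            }
        }
        return picked;
    }

    public static void main(String[] args) {
        System.out.println(drawDistinct(new Random(), 5, 20));
    }
}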
use of com.carrotsearch.hppc.IntHashSet in project elasticsearch by elastic.
the class UnassignedInfoTests method testNewIndexRestored.
public void testNewIndexRestored() {
    MetaData metaData = MetaData.builder()
        .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3)))
        .build();
    ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
        .metaData(metaData)
        .routingTable(RoutingTable.builder()
            .addAsNewRestore(metaData.index("test"), new SnapshotRecoverySource(new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), Version.CURRENT, "test"), new IntHashSet())
            .build())
        .build();
    for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)) {
        assertThat(shard.unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.NEW_INDEX_RESTORED));
    }
}
use of com.carrotsearch.hppc.IntHashSet in project elasticsearch by elastic.
the class RestoreService method restoreSnapshot.
/**
* Restores the snapshot specified in the restore request.
*
* @param request restore request
* @param listener restore listener
*/
public void restoreSnapshot(final RestoreRequest request, final ActionListener<RestoreCompletionResponse> listener) {
try {
// Read snapshot info and metadata from the repository
Repository repository = repositoriesService.repository(request.repositoryName);
final RepositoryData repositoryData = repository.getRepositoryData();
final Optional<SnapshotId> incompatibleSnapshotId = repositoryData.getIncompatibleSnapshotIds().stream().filter(s -> request.snapshotName.equals(s.getName())).findFirst();
if (incompatibleSnapshotId.isPresent()) {
throw new SnapshotRestoreException(request.repositoryName, request.snapshotName, "cannot restore incompatible snapshot");
}
final Optional<SnapshotId> matchingSnapshotId = repositoryData.getSnapshotIds().stream().filter(s -> request.snapshotName.equals(s.getName())).findFirst();
if (matchingSnapshotId.isPresent() == false) {
throw new SnapshotRestoreException(request.repositoryName, request.snapshotName, "snapshot does not exist");
}
final SnapshotId snapshotId = matchingSnapshotId.get();
final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotId);
final Snapshot snapshot = new Snapshot(request.repositoryName, snapshotId);
List<String> filteredIndices = SnapshotUtils.filterIndices(snapshotInfo.indices(), request.indices(), request.indicesOptions());
MetaData metaData = repository.getSnapshotMetaData(snapshotInfo, repositoryData.resolveIndices(filteredIndices));
// Make sure that we can restore from this snapshot
validateSnapshotRestorable(request.repositoryName, snapshotInfo);
// Find list of indices that we need to restore
final Map<String, String> renamedIndices = renamedIndices(request, filteredIndices);
// Now we can start the actual restore process by adding shards to be recovered in the cluster state
// and updating cluster metadata (global and index) as needed
clusterService.submitStateUpdateTask(request.cause(), new ClusterStateUpdateTask() {
RestoreInfo restoreInfo = null;
@Override
public ClusterState execute(ClusterState currentState) {
// Check if another restore process is already running - cannot run two restore processes at the
// same time
RestoreInProgress restoreInProgress = currentState.custom(RestoreInProgress.TYPE);
if (restoreInProgress != null && !restoreInProgress.entries().isEmpty()) {
throw new ConcurrentSnapshotExecutionException(snapshot, "Restore process is already running in this cluster");
}
// Check if the snapshot to restore is currently being deleted
SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE);
if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) {
throw new ConcurrentSnapshotExecutionException(snapshot, "cannot restore a snapshot while a snapshot deletion is in-progress [" + deletionsInProgress.getEntries().get(0).getSnapshot() + "]");
}
// Updating cluster state
ClusterState.Builder builder = ClusterState.builder(currentState);
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable());
ImmutableOpenMap<ShardId, RestoreInProgress.ShardRestoreStatus> shards;
Set<String> aliases = new HashSet<>();
if (!renamedIndices.isEmpty()) {
// We have some indices to restore
ImmutableOpenMap.Builder<ShardId, RestoreInProgress.ShardRestoreStatus> shardsBuilder = ImmutableOpenMap.builder();
final Version minIndexCompatibilityVersion = currentState.getNodes().getMaxNodeVersion().minimumIndexCompatibilityVersion();
for (Map.Entry<String, String> indexEntry : renamedIndices.entrySet()) {
String index = indexEntry.getValue();
boolean partial = checkPartial(index);
SnapshotRecoverySource recoverySource = new SnapshotRecoverySource(snapshot, snapshotInfo.version(), index);
String renamedIndexName = indexEntry.getKey();
IndexMetaData snapshotIndexMetaData = metaData.index(index);
snapshotIndexMetaData = updateIndexSettings(snapshotIndexMetaData, request.indexSettings, request.ignoreIndexSettings);
try {
snapshotIndexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(snapshotIndexMetaData, minIndexCompatibilityVersion);
} catch (Exception ex) {
throw new SnapshotRestoreException(snapshot, "cannot restore index [" + index + "] because it cannot be upgraded", ex);
}
// Check that the index is closed or doesn't exist
IndexMetaData currentIndexMetaData = currentState.metaData().index(renamedIndexName);
IntSet ignoreShards = new IntHashSet();
final Index renamedIndex;
if (currentIndexMetaData == null) {
// Index doesn't exist - create it and start recovery
// Make sure that the index we are about to create has a valid name
MetaDataCreateIndexService.validateIndexName(renamedIndexName, currentState);
createIndexService.validateIndexSettings(renamedIndexName, snapshotIndexMetaData.getSettings());
IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN).index(renamedIndexName);
indexMdBuilder.settings(Settings.builder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()));
if (!request.includeAliases() && !snapshotIndexMetaData.getAliases().isEmpty()) {
// Remove all aliases - they shouldn't be restored
indexMdBuilder.removeAllAliases();
} else {
for (ObjectCursor<String> alias : snapshotIndexMetaData.getAliases().keys()) {
aliases.add(alias.value);
}
}
IndexMetaData updatedIndexMetaData = indexMdBuilder.build();
if (partial) {
populateIgnoredShards(index, ignoreShards);
}
rtBuilder.addAsNewRestore(updatedIndexMetaData, recoverySource, ignoreShards);
blocks.addBlocks(updatedIndexMetaData);
mdBuilder.put(updatedIndexMetaData, true);
renamedIndex = updatedIndexMetaData.getIndex();
} else {
validateExistingIndex(currentIndexMetaData, snapshotIndexMetaData, renamedIndexName, partial);
// Index exists and it's closed - open it in metadata and start recovery
IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN);
indexMdBuilder.version(Math.max(snapshotIndexMetaData.getVersion(), currentIndexMetaData.getVersion() + 1));
if (!request.includeAliases()) {
// Remove all snapshot aliases
if (!snapshotIndexMetaData.getAliases().isEmpty()) {
indexMdBuilder.removeAllAliases();
}
// Add existing aliases
for (ObjectCursor<AliasMetaData> alias : currentIndexMetaData.getAliases().values()) {
indexMdBuilder.putAlias(alias.value);
}
} else {
for (ObjectCursor<String> alias : snapshotIndexMetaData.getAliases().keys()) {
aliases.add(alias.value);
}
}
indexMdBuilder.settings(Settings.builder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, currentIndexMetaData.getIndexUUID()));
IndexMetaData updatedIndexMetaData = indexMdBuilder.index(renamedIndexName).build();
rtBuilder.addAsRestore(updatedIndexMetaData, recoverySource);
blocks.updateBlocks(updatedIndexMetaData);
mdBuilder.put(updatedIndexMetaData, true);
renamedIndex = updatedIndexMetaData.getIndex();
}
for (int shard = 0; shard < snapshotIndexMetaData.getNumberOfShards(); shard++) {
if (!ignoreShards.contains(shard)) {
shardsBuilder.put(new ShardId(renamedIndex, shard), new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId()));
} else {
shardsBuilder.put(new ShardId(renamedIndex, shard), new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId(), RestoreInProgress.State.FAILURE));
}
}
}
shards = shardsBuilder.build();
RestoreInProgress.Entry restoreEntry = new RestoreInProgress.Entry(snapshot, overallState(RestoreInProgress.State.INIT, shards), Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), shards);
builder.putCustom(RestoreInProgress.TYPE, new RestoreInProgress(restoreEntry));
} else {
shards = ImmutableOpenMap.of();
}
checkAliasNameConflicts(renamedIndices, aliases);
// Restore global state if needed
restoreGlobalStateIfRequested(mdBuilder);
if (completed(shards)) {
// We don't have any indices to restore - we are done
restoreInfo = new RestoreInfo(snapshotId.getName(), Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), shards.size(), shards.size() - failedShards(shards));
}
RoutingTable rt = rtBuilder.build();
ClusterState updatedState = builder.metaData(mdBuilder).blocks(blocks).routingTable(rt).build();
return allocationService.reroute(updatedState, "restored snapshot [" + snapshot + "]");
}
private void checkAliasNameConflicts(Map<String, String> renamedIndices, Set<String> aliases) {
for (Map.Entry<String, String> renamedIndex : renamedIndices.entrySet()) {
if (aliases.contains(renamedIndex.getKey())) {
throw new SnapshotRestoreException(snapshot, "cannot rename index [" + renamedIndex.getValue() + "] into [" + renamedIndex.getKey() + "] because of conflict with an alias with the same name");
}
}
}
private void populateIgnoredShards(String index, IntSet ignoreShards) {
for (SnapshotShardFailure failure : snapshotInfo.shardFailures()) {
if (index.equals(failure.index())) {
ignoreShards.add(failure.shardId());
}
}
}
private boolean checkPartial(String index) {
// Make sure that the index was fully snapshotted
if (failed(snapshotInfo, index)) {
if (request.partial()) {
return true;
} else {
throw new SnapshotRestoreException(snapshot, "index [" + index + "] wasn't fully snapshotted - cannot restore");
}
} else {
return false;
}
}
private void validateExistingIndex(IndexMetaData currentIndexMetaData, IndexMetaData snapshotIndexMetaData, String renamedIndex, boolean partial) {
// Index exists - checking that it's closed
if (currentIndexMetaData.getState() != IndexMetaData.State.CLOSE) {
// TODO: Enable restore for open indices
throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] because it's open");
}
// Index exists - checking whether this is a partial restore
if (partial) {
throw new SnapshotRestoreException(snapshot, "cannot restore partial index [" + renamedIndex + "] because such index already exists");
}
// Make sure that the number of shards is the same. That's the only thing that we cannot change
if (currentIndexMetaData.getNumberOfShards() != snapshotIndexMetaData.getNumberOfShards()) {
throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() + "] shard from snapshot with [" + snapshotIndexMetaData.getNumberOfShards() + "] shards");
}
}
/**
* Optionally updates index settings in indexMetaData by removing settings listed in ignoreSettings and
* merging them with settings in changeSettings.
*/
private IndexMetaData updateIndexSettings(IndexMetaData indexMetaData, Settings changeSettings, String[] ignoreSettings) {
if (changeSettings.names().isEmpty() && ignoreSettings.length == 0) {
return indexMetaData;
}
Settings normalizedChangeSettings = Settings.builder().put(changeSettings).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build();
IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
Map<String, String> settingsMap = new HashMap<>(indexMetaData.getSettings().getAsMap());
List<String> simpleMatchPatterns = new ArrayList<>();
for (String ignoredSetting : ignoreSettings) {
if (!Regex.isSimpleMatchPattern(ignoredSetting)) {
if (UNREMOVABLE_SETTINGS.contains(ignoredSetting)) {
throw new SnapshotRestoreException(snapshot, "cannot remove setting [" + ignoredSetting + "] on restore");
} else {
settingsMap.remove(ignoredSetting);
}
} else {
simpleMatchPatterns.add(ignoredSetting);
}
}
if (!simpleMatchPatterns.isEmpty()) {
String[] removePatterns = simpleMatchPatterns.toArray(new String[simpleMatchPatterns.size()]);
Iterator<Map.Entry<String, String>> iterator = settingsMap.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, String> entry = iterator.next();
if (UNREMOVABLE_SETTINGS.contains(entry.getKey()) == false) {
if (Regex.simpleMatch(removePatterns, entry.getKey())) {
iterator.remove();
}
}
}
}
for (Map.Entry<String, String> entry : normalizedChangeSettings.getAsMap().entrySet()) {
if (UNMODIFIABLE_SETTINGS.contains(entry.getKey())) {
throw new SnapshotRestoreException(snapshot, "cannot modify setting [" + entry.getKey() + "] on restore");
} else {
settingsMap.put(entry.getKey(), entry.getValue());
}
}
return builder.settings(Settings.builder().put(settingsMap)).build();
}
private void restoreGlobalStateIfRequested(MetaData.Builder mdBuilder) {
if (request.includeGlobalState()) {
if (metaData.persistentSettings() != null) {
Settings settings = metaData.persistentSettings();
clusterSettings.validateUpdate(settings);
mdBuilder.persistentSettings(settings);
}
if (metaData.templates() != null) {
// TODO: Should all existing templates be deleted first?
for (ObjectCursor<IndexTemplateMetaData> cursor : metaData.templates().values()) {
mdBuilder.put(cursor.value);
}
}
if (metaData.customs() != null) {
for (ObjectObjectCursor<String, MetaData.Custom> cursor : metaData.customs()) {
if (!RepositoriesMetaData.TYPE.equals(cursor.key)) {
// Don't restore repositories while we are working with them
// TODO: Should we restore them at the end?
mdBuilder.putCustom(cursor.key, cursor.value);
}
}
}
}
}
@Override
public void onFailure(String source, Exception e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e);
listener.onFailure(e);
}
@Override
public TimeValue timeout() {
return request.masterNodeTimeout();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
listener.onResponse(new RestoreCompletionResponse(snapshot, restoreInfo));
}
});
} catch (Exception e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e);
listener.onFailure(e);
}
}
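In this method the IntHashSet serves as the ignoreShards set for a partial restore: shard ids that failed during the snapshot are collected once by populateIgnoredShards() and then consulted with contains() while the per-shard restore statuses are built. The following is a simplified, self-contained sketch of that decision using only HPPC types; the failedShardIds array and the printed statuses are stand-ins for snapshotInfo.shardFailures() and RestoreInProgress.ShardRestoreStatus, not the Elasticsearch API.

import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.IntSet;

public class IgnoreShardsSketch {
    public static void main(String[] args) {
        int numberOfShards = 6;
        // Stand-in for snapshotInfo.shardFailures(): shard ids that were not snapshotted.
        int[] failedShardIds = { 2, 5 };

        // Mirrors populateIgnoredShards(): collect the failed shards of a partial snapshot.
        IntSet ignoreShards = new IntHashSet();
        for (int failedShard : failedShardIds) {
            ignoreShards.add(failedShard);
        }

        // Mirrors the per-shard loop: ignored shards are marked as failed up front,
        // all other shards are queued for recovery from the snapshot.
        for (int shard = 0; shard < numberOfShards; shard++) {
            if (!ignoreShards.contains(shard)) {
                System.out.println("shard " + shard + " -> restore");
            } else {
                System.out.println("shard " + shard + " -> FAILURE (missing from snapshot)");
            }
        }
    }
}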
use of com.carrotsearch.hppc.IntHashSet in project elasticsearch by elastic.
the class ThrottlingAllocationTests method createRecoveryStateAndInitalizeAllocations.
private ClusterState createRecoveryStateAndInitalizeAllocations(MetaData metaData, TestGatewayAllocator gatewayAllocator) {
    DiscoveryNode node1 = newNode("node1");
    MetaData.Builder metaDataBuilder = new MetaData.Builder(metaData);
    RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
    for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
        Index index = cursor.value.getIndex();
        IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(cursor.value);
        final int recoveryType = randomInt(5);
        if (recoveryType <= 4) {
            addInSyncAllocationIds(index, indexMetaDataBuilder, gatewayAllocator, node1);
        }
        IndexMetaData indexMetaData = indexMetaDataBuilder.build();
        metaDataBuilder.put(indexMetaData, false);
        switch (recoveryType) {
            case 0:
                routingTableBuilder.addAsRecovery(indexMetaData);
                break;
            case 1:
                routingTableBuilder.addAsFromCloseToOpen(indexMetaData);
                break;
            case 2:
                routingTableBuilder.addAsFromDangling(indexMetaData);
                break;
            case 3:
                routingTableBuilder.addAsNewRestore(indexMetaData, new SnapshotRecoverySource(new Snapshot("repo", new SnapshotId("snap", "randomId")), Version.CURRENT, indexMetaData.getIndex().getName()), new IntHashSet());
                break;
            case 4:
                routingTableBuilder.addAsRestore(indexMetaData, new SnapshotRecoverySource(new Snapshot("repo", new SnapshotId("snap", "randomId")), Version.CURRENT, indexMetaData.getIndex().getName()));
                break;
            case 5:
                routingTableBuilder.addAsNew(indexMetaData);
                break;
            default:
                throw new IndexOutOfBoundsException();
        }
    }
    return ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(DiscoveryNodes.builder().add(node1)).metaData(metaDataBuilder.build()).routingTable(routingTableBuilder.build()).build();
}