Use of org.elasticsearch.ElasticsearchParseException in project elasticsearch by elastic.
In class MultiMatchQuery, method blendTerms:
static Query blendTerms(QueryShardContext context, BytesRef[] values, Float commonTermsCutoff, float tieBreaker, FieldAndFieldType... blendedFields) {
    List<Query> queries = new ArrayList<>();
    Term[] terms = new Term[blendedFields.length * values.length];
    float[] blendedBoost = new float[blendedFields.length * values.length];
    int i = 0;
    for (FieldAndFieldType ft : blendedFields) {
        for (BytesRef term : values) {
            Query query;
            try {
                query = ft.fieldType.termQuery(term, context);
            } catch (IllegalArgumentException e) {
                // the field type could not build a term query for this value; skip the field
                continue;
            } catch (ElasticsearchParseException parseException) {
                // the case where the field type wraps the underlying IllegalArgumentException
                // in an ElasticsearchParseException: treat it the same way and skip the field
                if (parseException.getCause() instanceof IllegalArgumentException) {
                    continue;
                }
                throw parseException;
            }
            float boost = ft.boost;
            while (query instanceof BoostQuery) {
                BoostQuery bq = (BoostQuery) query;
                query = bq.getQuery();
                boost *= bq.getBoost();
            }
            if (query.getClass() == TermQuery.class) {
                terms[i] = ((TermQuery) query).getTerm();
                blendedBoost[i] = boost;
                i++;
            } else {
                if (boost != 1f) {
                    query = new BoostQuery(query, boost);
                }
                queries.add(query);
            }
        }
    }
    if (i > 0) {
        terms = Arrays.copyOf(terms, i);
        blendedBoost = Arrays.copyOf(blendedBoost, i);
        if (commonTermsCutoff != null) {
            queries.add(BlendedTermQuery.commonTermsBlendedQuery(terms, blendedBoost, false, commonTermsCutoff));
        } else if (tieBreaker == 1.0f) {
            queries.add(BlendedTermQuery.booleanBlendedQuery(terms, blendedBoost, false));
        } else {
            queries.add(BlendedTermQuery.dismaxBlendedQuery(terms, blendedBoost, tieBreaker));
        }
    }
    if (queries.size() == 1) {
        return queries.get(0);
    } else {
        // best effort: add clauses that are not term queries so that they have an opportunity to match
        // however their score contribution will be different
        // TODO: can we improve this?
        BooleanQuery.Builder bq = new BooleanQuery.Builder();
        bq.setDisableCoord(true);
        for (Query query : queries) {
            bq.add(query, Occur.SHOULD);
        }
        return bq.build();
    }
}
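The boost-unwrapping loop above is a small but important detail: termQuery() may hand back a TermQuery wrapped in one or more BoostQuery layers, and the blended terms array needs the bare term plus a single combined boost. A minimal, self-contained sketch of that idiom against the plain Lucene API (the field and term here are made up for illustration):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

public class BoostUnwrapExample {
    public static void main(String[] args) {
        // a TermQuery wrapped twice; the effective boost is 2.0f * 3.0f = 6.0f
        Query query = new BoostQuery(new BoostQuery(new TermQuery(new Term("title", "fox")), 3.0f), 2.0f);
        float boost = 1.0f;
        while (query instanceof BoostQuery) {
            BoostQuery bq = (BoostQuery) query;
            query = bq.getQuery();   // strip one wrapper
            boost *= bq.getBoost();  // fold its boost into the running product
        }
        System.out.println(query + " with boost " + boost); // prints: title:fox with boost 6.0
    }
}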
Use of org.elasticsearch.ElasticsearchParseException in project elasticsearch by elastic.
In class BlobStoreIndexShardSnapshot, method fromXContent:
/**
 * Parses shard snapshot metadata
 *
 * @param parser parser
 * @return shard snapshot metadata
 */
public static BlobStoreIndexShardSnapshot fromXContent(XContentParser parser) throws IOException {
    String snapshot = null;
    long indexVersion = -1;
    long startTime = 0;
    long time = 0;
    int numberOfFiles = 0;
    long totalSize = 0;
    List<FileInfo> indexFiles = new ArrayList<>();
    if (parser.currentToken() == null) {
        // fresh parser? move to the first token
        parser.nextToken();
    }
    XContentParser.Token token = parser.currentToken();
    if (token == XContentParser.Token.START_OBJECT) {
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                String currentFieldName = parser.currentName();
                token = parser.nextToken();
                if (token.isValue()) {
                    if (PARSE_NAME.match(currentFieldName)) {
                        snapshot = parser.text();
                    } else if (PARSE_INDEX_VERSION.match(currentFieldName)) {
                        // The index-version is needed for backward compatibility with v 1.0
                        indexVersion = parser.longValue();
                    } else if (PARSE_START_TIME.match(currentFieldName)) {
                        startTime = parser.longValue();
                    } else if (PARSE_TIME.match(currentFieldName)) {
                        time = parser.longValue();
                    } else if (PARSE_NUMBER_OF_FILES.match(currentFieldName)) {
                        numberOfFiles = parser.intValue();
                    } else if (PARSE_TOTAL_SIZE.match(currentFieldName)) {
                        totalSize = parser.longValue();
                    } else {
                        throw new ElasticsearchParseException("unknown parameter [{}]", currentFieldName);
                    }
                } else if (token == XContentParser.Token.START_ARRAY) {
                    if (PARSE_FILES.match(currentFieldName)) {
                        while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                            indexFiles.add(FileInfo.fromXContent(parser));
                        }
                    } else {
                        throw new ElasticsearchParseException("unknown parameter [{}]", currentFieldName);
                    }
                } else {
                    throw new ElasticsearchParseException("unexpected token [{}]", token);
                }
            } else {
                throw new ElasticsearchParseException("unexpected token [{}]", token);
            }
        }
    }
    return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, Collections.unmodifiableList(indexFiles), startTime, time, numberOfFiles, totalSize);
}
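The method follows the standard pull-parser pattern: advance to a FIELD_NAME token, step onto its value, and dispatch on the field name, rejecting anything unknown. XContentParser's JSON implementation is backed by Jackson, so the same pattern can be shown with Jackson's streaming API alone. This is a sketch, not Elasticsearch code; the field names and sample values are invented to mirror the snapshot format:

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;

public class PullParseExample {
    public static void main(String[] args) throws Exception {
        String json = "{\"name\":\"snap-1\",\"start_time\":1490000000,\"number_of_files\":3}";
        try (JsonParser parser = new JsonFactory().createParser(json)) {
            if (parser.nextToken() != JsonToken.START_OBJECT) {
                throw new IllegalStateException("expected a start object token");
            }
            while (parser.nextToken() != JsonToken.END_OBJECT) {
                String field = parser.getCurrentName();
                parser.nextToken(); // advance from the field name to its value
                if ("name".equals(field)) {
                    System.out.println("name = " + parser.getText());
                } else if ("start_time".equals(field)) {
                    System.out.println("start = " + parser.getLongValue());
                } else if ("number_of_files".equals(field)) {
                    System.out.println("files = " + parser.getIntValue());
                } else {
                    throw new IllegalStateException("unknown parameter [" + field + "]");
                }
            }
        }
    }
}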
Use of org.elasticsearch.ElasticsearchParseException in project elasticsearch by elastic.
In class BlobStoreRepository, method deleteSnapshot:
@Override
public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) {
    if (isReadOnly()) {
        throw new RepositoryException(metadata.name(), "cannot delete snapshot from a readonly repository");
    }
    final RepositoryData repositoryData = getRepositoryData();
    List<String> indices = Collections.emptyList();
    SnapshotInfo snapshot = null;
    try {
        snapshot = getSnapshotInfo(snapshotId);
        indices = snapshot.indices();
    } catch (SnapshotMissingException ex) {
        throw ex;
    } catch (IllegalStateException | SnapshotException | ElasticsearchParseException ex) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), ex);
    }
    MetaData metaData = null;
    try {
        if (snapshot != null) {
            metaData = readSnapshotMetaData(snapshotId, snapshot.version(), repositoryData.resolveIndices(indices), true);
        } else {
            metaData = readSnapshotMetaData(snapshotId, null, repositoryData.resolveIndices(indices), true);
        }
    } catch (IOException | SnapshotException ex) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("cannot read metadata for snapshot [{}]", snapshotId), ex);
    }
    try {
        // Delete snapshot from the index file, since it is the maintainer of truth of active snapshots
        final RepositoryData updatedRepositoryData = repositoryData.removeSnapshot(snapshotId);
        writeIndexGen(updatedRepositoryData, repositoryStateId);
        // delete the snapshot file
        safeSnapshotBlobDelete(snapshot, snapshotId.getUUID());
        // delete the global metadata file
        safeGlobalMetaDataBlobDelete(snapshot, snapshotId.getUUID());
        // Now delete all indices
        for (String index : indices) {
            final IndexId indexId = repositoryData.resolveIndexId(index);
            BlobPath indexPath = basePath().add("indices").add(indexId.getId());
            BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath);
            try {
                indexMetaDataFormat.delete(indexMetaDataBlobContainer, snapshotId.getUUID());
            } catch (IOException ex) {
                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, index), ex);
            }
            if (metaData != null) {
                IndexMetaData indexMetaData = metaData.index(index);
                if (indexMetaData != null) {
                    for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) {
                        try {
                            delete(snapshotId, snapshot.version(), indexId, new ShardId(indexMetaData.getIndex(), shardId));
                        } catch (SnapshotException ex) {
                            final int finalShardId = shardId;
                            logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", snapshotId, index, finalShardId), ex);
                        }
                    }
                }
            }
        }
        // cleanup indices that are no longer part of the repository
        final Collection<IndexId> indicesToCleanUp = Sets.newHashSet(repositoryData.getIndices().values());
        indicesToCleanUp.removeAll(updatedRepositoryData.getIndices().values());
        final BlobContainer indicesBlobContainer = blobStore().blobContainer(basePath().add("indices"));
        for (final IndexId indexId : indicesToCleanUp) {
            try {
                indicesBlobContainer.deleteBlob(indexId.getId());
            } catch (DirectoryNotEmptyException dnee) {
                // if the directory isn't empty for some reason, it will fail to clean up;
                // we'll ignore that and accept that cleanup didn't fully succeed.
                // since we are using UUIDs for path names, this won't be an issue for
                // snapshotting indices of the same name
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + "its index folder due to the directory not being empty.", metadata.name(), indexId), dnee);
            } catch (IOException ioe) {
                // a different IOException occurred while trying to delete - will just log the issue for now
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + "its index folder.", metadata.name(), indexId), ioe);
            }
        }
    } catch (IOException ex) {
        throw new RepositoryException(metadata.name(), "failed to update snapshot in repository", ex);
    }
}
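The orphaned-index cleanup near the end is a plain set difference: every index referenced before the snapshot was removed, minus every index still referenced afterwards, is what can be deleted. A minimal sketch of that idiom with standard collections (the index ids are made up):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class OrphanIndexDiffExample {
    public static void main(String[] args) {
        // index ids referenced by the repository before the snapshot was removed
        Set<String> before = new HashSet<>(Arrays.asList("idx-a", "idx-b", "idx-c"));
        // index ids still referenced by the updated repository data
        Set<String> after = new HashSet<>(Arrays.asList("idx-a", "idx-c"));
        Set<String> toCleanUp = new HashSet<>(before);
        toCleanUp.removeAll(after);
        System.out.println(toCleanUp); // [idx-b] -> its folder is no longer needed by any snapshot
    }
}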
Use of org.elasticsearch.ElasticsearchParseException in project elasticsearch by elastic.
In class RepositoryData, method snapshotsFromXContent:
/**
 * Reads an instance of {@link RepositoryData} from x-content, loading the snapshots and indices metadata.
 */
public static RepositoryData snapshotsFromXContent(final XContentParser parser, long genId) throws IOException {
    List<SnapshotId> snapshots = new ArrayList<>();
    Map<IndexId, Set<SnapshotId>> indexSnapshots = new HashMap<>();
    if (parser.nextToken() == XContentParser.Token.START_OBJECT) {
        while (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
            String currentFieldName = parser.currentName();
            if (SNAPSHOTS.equals(currentFieldName)) {
                if (parser.nextToken() == XContentParser.Token.START_ARRAY) {
                    while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
                        snapshots.add(SnapshotId.fromXContent(parser));
                    }
                } else {
                    throw new ElasticsearchParseException("expected array for [" + currentFieldName + "]");
                }
            } else if (INDICES.equals(currentFieldName)) {
                if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
                    throw new ElasticsearchParseException("start object expected [indices]");
                }
                while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
                    String indexName = parser.currentName();
                    String indexId = null;
                    Set<SnapshotId> snapshotIds = new LinkedHashSet<>();
                    if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
                        throw new ElasticsearchParseException("start object expected index[" + indexName + "]");
                    }
                    while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
                        String indexMetaFieldName = parser.currentName();
                        parser.nextToken();
                        if (INDEX_ID.equals(indexMetaFieldName)) {
                            indexId = parser.text();
                        } else if (SNAPSHOTS.equals(indexMetaFieldName)) {
                            if (parser.currentToken() != XContentParser.Token.START_ARRAY) {
                                throw new ElasticsearchParseException("start array expected [snapshots]");
                            }
                            while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
                                snapshotIds.add(SnapshotId.fromXContent(parser));
                            }
                        }
                    }
                    assert indexId != null;
                    indexSnapshots.put(new IndexId(indexName, indexId), snapshotIds);
                }
            } else {
                throw new ElasticsearchParseException("unknown field name [" + currentFieldName + "]");
            }
        }
    } else {
        throw new ElasticsearchParseException("start object expected");
    }
    return new RepositoryData(genId, snapshots, indexSnapshots, Collections.emptyList());
}
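Reading the branches back out, the blob this method consumes is an object with a top-level snapshots array and an indices object keyed by index name, each entry carrying an id and its own snapshots array. Roughly the following shape; this is a hand-written sketch that assumes the SNAPSHOTS, INDICES, and INDEX_ID constants resolve to "snapshots", "indices", and "id", and that SnapshotId serializes as a name/uuid pair (the names and uuids are invented):

{
  "snapshots": [
    { "name": "snap-1", "uuid": "lR50DsJtSqie3tZLJ1sfrA" }
  ],
  "indices": {
    "my-index": {
      "id": "7gHvSHVuRzWtpqAJGBy8zQ",
      "snapshots": [
        { "name": "snap-1", "uuid": "lR50DsJtSqie3tZLJ1sfrA" }
      ]
    }
  }
}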
Use of org.elasticsearch.ElasticsearchParseException in project elasticsearch by elastic.
In class BlobStoreRepository, method readSnapshotMetaData:
private MetaData readSnapshotMetaData(SnapshotId snapshotId, Version snapshotVersion, List<IndexId> indices, boolean ignoreIndexErrors) throws IOException {
    MetaData metaData;
    if (snapshotVersion == null) {
        // We can try detecting the version based on the metadata file format
        assert ignoreIndexErrors;
        if (globalMetaDataFormat.exists(snapshotsBlobContainer, snapshotId.getUUID())) {
            snapshotVersion = Version.CURRENT;
        } else {
            throw new SnapshotMissingException(metadata.name(), snapshotId);
        }
    }
    try {
        metaData = globalMetaDataFormat.read(snapshotsBlobContainer, snapshotId.getUUID());
    } catch (NoSuchFileException ex) {
        throw new SnapshotMissingException(metadata.name(), snapshotId, ex);
    } catch (IOException ex) {
        throw new SnapshotException(metadata.name(), snapshotId, "failed to get snapshots", ex);
    }
    MetaData.Builder metaDataBuilder = MetaData.builder(metaData);
    for (IndexId index : indices) {
        BlobPath indexPath = basePath().add("indices").add(index.getId());
        BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath);
        try {
            metaDataBuilder.put(indexMetaDataFormat.read(indexMetaDataBlobContainer, snapshotId.getUUID()), false);
        } catch (ElasticsearchParseException | IOException ex) {
            if (ignoreIndexErrors) {
                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] [{}] failed to read metadata for index", snapshotId, index.getName()), ex);
            } else {
                throw ex;
            }
        }
    }
    return metaDataBuilder.build();
}
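The per-index catch block encodes a partial-failure policy: with ignoreIndexErrors set, a corrupt index metadata blob is logged and skipped so the rest of the snapshot can still be read; otherwise the first failure propagates. The policy in isolation, with hypothetical names standing in for the blob-format machinery:

import java.io.IOException;
import java.util.List;

public class LenientReadExample {
    interface BlobReader {
        String read(String name) throws IOException;
    }

    static void readAll(List<String> names, BlobReader reader, boolean ignoreErrors) throws IOException {
        for (String name : names) {
            try {
                System.out.println(reader.read(name));
            } catch (IOException ex) {
                if (ignoreErrors) {
                    // lenient mode: log and keep going so one bad blob
                    // does not block reading the rest
                    System.err.println("failed to read [" + name + "], skipping: " + ex.getMessage());
                } else {
                    throw ex; // strict mode: surface the first failure
                }
            }
        }
    }
}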