Use of java.util.HashMap in project elasticsearch by elastic.
Class ParentChildIndexFieldData, method localGlobalDirect.
@Override
public IndexParentChildFieldData localGlobalDirect(DirectoryReader indexReader) throws Exception {
    final long startTime = System.nanoTime();
    long ramBytesUsed = 0;
    final Map<String, OrdinalMapAndAtomicFieldData> perType = new HashMap<>();
    for (String type : parentTypes) {
        final AtomicParentChildFieldData[] fieldData = new AtomicParentChildFieldData[indexReader.leaves().size()];
        for (LeafReaderContext context : indexReader.leaves()) {
            fieldData[context.ord] = load(context);
        }
        final OrdinalMap ordMap = buildOrdinalMap(fieldData, type);
        ramBytesUsed += ordMap.ramBytesUsed();
        perType.put(type, new OrdinalMapAndAtomicFieldData(ordMap, fieldData));
    }
    final AtomicParentChildFieldData[] fielddata = new AtomicParentChildFieldData[indexReader.leaves().size()];
    for (int i = 0; i < fielddata.length; ++i) {
        fielddata[i] = new GlobalAtomicFieldData(parentTypes, perType, i);
    }
    breakerService.getBreaker(CircuitBreaker.FIELDDATA).addWithoutBreaking(ramBytesUsed);
    if (logger.isDebugEnabled()) {
        logger.debug("global-ordinals [_parent] took [{}]", new TimeValue(System.nanoTime() - startTime, TimeUnit.NANOSECONDS));
    }
    return new GlobalFieldData(indexReader, fielddata, ramBytesUsed, perType);
}
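The HashMap here serves as a per-type registry: each parent type maps to its ordinal map plus per-segment field data, while a running byte total is accumulated and only then reported to the field-data circuit breaker. A minimal, self-contained sketch of that accumulate-then-account pattern (TypeData and the size estimate are illustrative stand-ins, not Elasticsearch types):

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch of the per-type aggregation pattern above: build one entry per type,
// sum the memory estimates, and account for the total once at the end.
public class PerTypeRegistry {
    // Hypothetical stand-in for OrdinalMapAndAtomicFieldData.
    record TypeData(long ramBytesUsed) {}

    public static void main(String[] args) {
        List<String> parentTypes = List.of("question", "blog");
        Map<String, TypeData> perType = new HashMap<>();
        long ramBytesUsed = 0;
        for (String type : parentTypes) {
            TypeData data = new TypeData(type.length() * 16L); // fake size estimate
            ramBytesUsed += data.ramBytesUsed();
            perType.put(type, data);
        }
        // in the real method this total would go to the circuit breaker
        System.out.println("types=" + perType.keySet() + ", ram=" + ramBytesUsed + "b");
    }
}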
Use of java.util.HashMap in project elasticsearch by elastic.
Class GetResult, method fromXContentEmbedded.
public static GetResult fromXContentEmbedded(XContentParser parser) throws IOException {
    XContentParser.Token token = parser.nextToken();
    ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);
    String currentFieldName = parser.currentName();
    String index = null, type = null, id = null;
    long version = -1;
    boolean found = false;
    BytesReference source = null;
    Map<String, GetField> fields = new HashMap<>();
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token.isValue()) {
            if (_INDEX.equals(currentFieldName)) {
                index = parser.text();
            } else if (_TYPE.equals(currentFieldName)) {
                type = parser.text();
            } else if (_ID.equals(currentFieldName)) {
                id = parser.text();
            } else if (_VERSION.equals(currentFieldName)) {
                version = parser.longValue();
            } else if (FOUND.equals(currentFieldName)) {
                found = parser.booleanValue();
            } else {
                // unknown top-level value fields are preserved as single-valued GetFields
                fields.put(currentFieldName, new GetField(currentFieldName, Collections.singletonList(parser.objectText())));
            }
        } else if (token == XContentParser.Token.START_OBJECT) {
            if (SourceFieldMapper.NAME.equals(currentFieldName)) {
                try (XContentBuilder builder = XContentBuilder.builder(parser.contentType().xContent())) {
                    // the original document gets slightly modified: whitespace and pretty printing
                    // are not preserved; it all depends on the current builder settings
                    builder.copyCurrentStructure(parser);
                    source = builder.bytes();
                }
            } else if (FIELDS.equals(currentFieldName)) {
                while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
                    GetField getField = GetField.fromXContent(parser);
                    fields.put(getField.getName(), getField);
                }
            } else {
                throwUnknownField(currentFieldName, parser.getTokenLocation());
            }
        }
    }
    return new GetResult(index, type, id, version, found, source, fields);
}
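The parsing loop dispatches known metadata keys (_index, _type, _id, _version, found) to dedicated variables and lets everything else fall through into the fields HashMap. A minimal sketch of that dispatch-with-fallback pattern, using a plain map of pre-tokenized key/value pairs in place of a real XContentParser:

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

// Sketch of the dispatch pattern above: known metadata keys are bound to
// dedicated variables, anything else falls through into the fields map.
public class EmbeddedGetParse {
    public static void main(String[] args) {
        Map<String, Object> tokens = new LinkedHashMap<>();
        tokens.put("_index", "twitter");
        tokens.put("_id", "1");
        tokens.put("rating", 4); // not a known metadata key

        String index = null, id = null;
        Map<String, Object> fields = new HashMap<>();
        for (Map.Entry<String, Object> e : tokens.entrySet()) {
            switch (e.getKey()) {
                case "_index" -> index = (String) e.getValue();
                case "_id" -> id = (String) e.getValue();
                default -> fields.put(e.getKey(), e.getValue());
            }
        }
        System.out.println(index + "/" + id + " extra=" + fields);
    }
}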
Use of java.util.HashMap in project elasticsearch by elastic.
Class ShardGetService, method innerGetLoadFromStoredFields.
private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields, FetchSourceContext fetchSourceContext, Engine.GetResult get, MapperService mapperService) {
    Map<String, GetField> fields = null;
    BytesReference source = null;
    Versions.DocIdAndVersion docIdAndVersion = get.docIdAndVersion();
    FieldsVisitor fieldVisitor = buildFieldsVisitors(gFields, fetchSourceContext);
    if (fieldVisitor != null) {
        try {
            docIdAndVersion.context.reader().document(docIdAndVersion.docId, fieldVisitor);
        } catch (IOException e) {
            throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "]", e);
        }
        source = fieldVisitor.source();
        if (!fieldVisitor.fields().isEmpty()) {
            fieldVisitor.postProcess(mapperService);
            fields = new HashMap<>(fieldVisitor.fields().size());
            for (Map.Entry<String, List<Object>> entry : fieldVisitor.fields().entrySet()) {
                fields.put(entry.getKey(), new GetField(entry.getKey(), entry.getValue()));
            }
        }
    }
    DocumentMapper docMapper = mapperService.documentMapper(type);
    if (docMapper.parentFieldMapper().active()) {
        String parentId = ParentFieldSubFetchPhase.getParentId(docMapper.parentFieldMapper(), docIdAndVersion.context.reader(), docIdAndVersion.docId);
        if (fields == null) {
            fields = new HashMap<>(1);
        }
        fields.put(ParentFieldMapper.NAME, new GetField(ParentFieldMapper.NAME, Collections.singletonList(parentId)));
    }
    if (gFields != null && gFields.length > 0) {
        for (String field : gFields) {
            FieldMapper fieldMapper = docMapper.mappers().smartNameFieldMapper(field);
            if (fieldMapper == null) {
                if (docMapper.objectMappers().get(field) != null) {
                    // Only fail if we know it is an object field; missing paths / fields shouldn't fail.
                    throw new IllegalArgumentException("field [" + field + "] isn't a leaf field");
                }
            }
        }
    }
    if (!fetchSourceContext.fetchSource()) {
        source = null;
    } else if (fetchSourceContext.includes().length > 0 || fetchSourceContext.excludes().length > 0) {
        Map<String, Object> sourceAsMap;
        XContentType sourceContentType = null;
        // TODO: The source might be parsed and available in the sourceLookup, but that one
        // uses unordered maps, so it is different. Do we care?
        Tuple<XContentType, Map<String, Object>> typeMapTuple = XContentHelper.convertToMap(source, true);
        sourceContentType = typeMapTuple.v1();
        sourceAsMap = typeMapTuple.v2();
        sourceAsMap = XContentMapValues.filter(sourceAsMap, fetchSourceContext.includes(), fetchSourceContext.excludes());
        try {
            source = XContentFactory.contentBuilder(sourceContentType).map(sourceAsMap).bytes();
        } catch (IOException e) {
            throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "] with includes/excludes set", e);
        }
    }
    return new GetResult(shardId.getIndexName(), type, id, get.version(), get.exists(), source, fields);
}
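Note that fields starts out null and is only allocated (presized to the known entry count) once there is actually something to store, so documents with no stored fields and no _parent never pay for the map. A small sketch of that lazy-allocation pattern (addField is a hypothetical helper, not a ShardGetService method):

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch of the lazy-allocation pattern above: the map stays null until the
// first field actually needs storing, and is presized when the count is known.
public class LazyFields {
    static Map<String, List<Object>> addField(Map<String, List<Object>> fields, String name, Object value) {
        if (fields == null) {
            fields = new HashMap<>(1); // allocate only on first use
        }
        fields.put(name, Collections.singletonList(value));
        return fields;
    }

    public static void main(String[] args) {
        Map<String, List<Object>> fields = null; // no allocation yet
        fields = addField(fields, "_parent", "p1");
        System.out.println(fields);
    }
}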
Use of java.util.HashMap in project elasticsearch by elastic.
Class CompletionFieldMapper, method parse.
/**
 * Parses and indexes inputs
 *
 * Parsing:
 * Acceptable format:
 * "STRING" - interpreted as field value (input)
 * "ARRAY" - each element can be one of "OBJECT" (see below)
 * "OBJECT" - { "input": STRING|ARRAY, "weight": STRING|INT, "contexts": ARRAY|OBJECT }
 *
 * Indexing:
 * if context mappings are defined, delegates to {@link ContextMappings#addField(ParseContext.Document, String, String, int, Map)}
 * else adds inputs as a {@link org.apache.lucene.search.suggest.document.SuggestField}
 */
@Override
public Mapper parse(ParseContext context) throws IOException {
    // parse
    XContentParser parser = context.parser();
    Token token = parser.currentToken();
    Map<String, CompletionInputMetaData> inputMap = new HashMap<>(1);
    if (token == Token.VALUE_NULL) {
        throw new MapperParsingException("completion field [" + fieldType().name() + "] does not support null values");
    } else if (token == Token.START_ARRAY) {
        while ((token = parser.nextToken()) != Token.END_ARRAY) {
            parse(context, token, parser, inputMap);
        }
    } else {
        parse(context, token, parser, inputMap);
    }
    // index
    for (Map.Entry<String, CompletionInputMetaData> completionInput : inputMap.entrySet()) {
        String input = completionInput.getKey();
        // truncate input
        if (input.length() > maxInputLength) {
            int len = Math.min(maxInputLength, input.length());
            if (Character.isHighSurrogate(input.charAt(len - 1))) {
                assert input.length() >= len + 1 && Character.isLowSurrogate(input.charAt(len));
                len += 1;
            }
            input = input.substring(0, len);
        }
        CompletionInputMetaData metaData = completionInput.getValue();
        if (fieldType().hasContextMappings()) {
            fieldType().getContextMappings().addField(context.doc(), fieldType().name(), input, metaData.weight, metaData.contexts);
        } else {
            context.doc().add(new SuggestField(fieldType().name(), input, metaData.weight));
        }
    }
    multiFields.parse(this, context);
    return null;
}
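The truncation step is careful with Unicode: if cutting at maxInputLength would land between a high and a low surrogate, the cut point is extended by one char so the pair stays intact. A standalone sketch of the same check, assuming nothing beyond java.lang:

// Sketch of the surrogate-safe truncation above: if the cut would split a
// surrogate pair, extend by one char so the pair survives intact.
public class SafeTruncate {
    static String truncate(String input, int maxLength) {
        if (input.length() <= maxLength) {
            return input;
        }
        int len = maxLength;
        if (Character.isHighSurrogate(input.charAt(len - 1))) {
            len += 1; // keep the low surrogate with its high surrogate
        }
        return input.substring(0, len);
    }

    public static void main(String[] args) {
        String s = "ab\uD83D\uDE00cd"; // "ab😀cd"; the emoji is a surrogate pair
        System.out.println(truncate(s, 3)); // prints "ab😀", not a broken half-pair
    }
}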
Use of java.util.HashMap in project elasticsearch by elastic.
Class FsProbe, method ioStats.
final FsInfo.IoStats ioStats(final Set<Tuple<Integer, Integer>> devicesNumbers, final FsInfo previous) {
    try {
        final Map<Tuple<Integer, Integer>, FsInfo.DeviceStats> deviceMap = new HashMap<>();
        if (previous != null && previous.getIoStats() != null && previous.getIoStats().devicesStats != null) {
            // index the previous sample by (major, minor) device number so each new
            // sample can be paired with its predecessor for delta computation
            for (int i = 0; i < previous.getIoStats().devicesStats.length; i++) {
                FsInfo.DeviceStats deviceStats = previous.getIoStats().devicesStats[i];
                deviceMap.put(Tuple.tuple(deviceStats.majorDeviceNumber, deviceStats.minorDeviceNumber), deviceStats);
            }
        }
        List<FsInfo.DeviceStats> devicesStats = new ArrayList<>();
        List<String> lines = readProcDiskStats();
        if (!lines.isEmpty()) {
            for (String line : lines) {
                String[] fields = line.trim().split("\\s+");
                final int majorDeviceNumber = Integer.parseInt(fields[0]);
                final int minorDeviceNumber = Integer.parseInt(fields[1]);
                if (!devicesNumbers.contains(Tuple.tuple(majorDeviceNumber, minorDeviceNumber))) {
                    continue;
                }
                final String deviceName = fields[2];
                final long readsCompleted = Long.parseLong(fields[3]);
                final long sectorsRead = Long.parseLong(fields[5]);
                final long writesCompleted = Long.parseLong(fields[7]);
                final long sectorsWritten = Long.parseLong(fields[9]);
                final FsInfo.DeviceStats deviceStats =
                        new FsInfo.DeviceStats(majorDeviceNumber, minorDeviceNumber, deviceName,
                                readsCompleted, sectorsRead, writesCompleted, sectorsWritten,
                                deviceMap.get(Tuple.tuple(majorDeviceNumber, minorDeviceNumber)));
                devicesStats.add(deviceStats);
            }
        }
        return new FsInfo.IoStats(devicesStats.toArray(new FsInfo.DeviceStats[devicesStats.size()]));
    } catch (Exception e) {
        // do not fail Elasticsearch if something unexpected happens here
        logger.debug((Supplier<?>) () -> new ParameterizedMessage("unexpected exception processing /proc/diskstats for devices {}", devicesNumbers), e);
        return null;
    }
}
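The HashMap key is the (major, minor) device-number pair, which identifies a block device consistently across samples of /proc/diskstats. A self-contained sketch of that keying pattern; DeviceKey is a hypothetical record standing in for Elasticsearch's Tuple, and the diskstats line is abbreviated:

import java.util.HashMap;
import java.util.Map;

// Sketch of the device lookup above: key a HashMap by (major, minor) device
// numbers so a current sample can be paired with the previous one.
public class DiskStatsDelta {
    // Records provide equals/hashCode, so they work as HashMap keys out of the box.
    record DeviceKey(int major, int minor) {}

    public static void main(String[] args) {
        // Abbreviated /proc/diskstats line: major minor name readsCompleted ...
        String line = "253 0 dm-0 287716 0 7184666 0";
        String[] fields = line.trim().split("\\s+");
        DeviceKey key = new DeviceKey(Integer.parseInt(fields[0]), Integer.parseInt(fields[1]));

        Map<DeviceKey, Long> previousReads = new HashMap<>();
        previousReads.put(new DeviceKey(253, 0), 287000L); // prior sample

        long readsCompleted = Long.parseLong(fields[3]);
        Long previous = previousReads.get(key);
        long delta = previous == null ? readsCompleted : readsCompleted - previous;
        System.out.println(fields[2] + " reads since last sample: " + delta);
    }
}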