Example 36 with HashMap

Use of java.util.HashMap in project elasticsearch by elastic.

From the class ParentChildIndexFieldData, method localGlobalDirect.

@Override
public IndexParentChildFieldData localGlobalDirect(DirectoryReader indexReader) throws Exception {
    final long startTime = System.nanoTime();
    long ramBytesUsed = 0;
    final Map<String, OrdinalMapAndAtomicFieldData> perType = new HashMap<>();
    for (String type : parentTypes) {
        final AtomicParentChildFieldData[] fieldData = new AtomicParentChildFieldData[indexReader.leaves().size()];
        for (LeafReaderContext context : indexReader.leaves()) {
            fieldData[context.ord] = load(context);
        }
        final OrdinalMap ordMap = buildOrdinalMap(fieldData, type);
        ramBytesUsed += ordMap.ramBytesUsed();
        perType.put(type, new OrdinalMapAndAtomicFieldData(ordMap, fieldData));
    }
    final AtomicParentChildFieldData[] fielddata = new AtomicParentChildFieldData[indexReader.leaves().size()];
    for (int i = 0; i < fielddata.length; ++i) {
        fielddata[i] = new GlobalAtomicFieldData(parentTypes, perType, i);
    }
    breakerService.getBreaker(CircuitBreaker.FIELDDATA).addWithoutBreaking(ramBytesUsed);
    if (logger.isDebugEnabled()) {
        logger.debug("global-ordinals [_parent] took [{}]", new TimeValue(System.nanoTime() - startTime, TimeUnit.NANOSECONDS));
    }
    return new GlobalFieldData(indexReader, fielddata, ramBytesUsed, perType);
}
Also used: HashMap(java.util.HashMap), AtomicParentChildFieldData(org.elasticsearch.index.fielddata.AtomicParentChildFieldData), OrdinalMap(org.apache.lucene.index.MultiDocValues.OrdinalMap), LeafReaderContext(org.apache.lucene.index.LeafReaderContext), TimeValue(org.elasticsearch.common.unit.TimeValue)
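
A side note on the per-segment loop above: it fills an array indexed by LeafReaderContext.ord, the leaf's position within indexReader.leaves(). A minimal sketch of that pattern, where loadForSegment is a hypothetical placeholder standing in for the load(context) call:

Object[] perSegment = new Object[indexReader.leaves().size()];
for (LeafReaderContext context : indexReader.leaves()) {
    // context.ord is the index of this leaf within indexReader.leaves()
    perSegment[context.ord] = loadForSegment(context);
}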

Example 37 with HashMap

Use of java.util.HashMap in project elasticsearch by elastic.

From the class GetResult, method fromXContentEmbedded.

public static GetResult fromXContentEmbedded(XContentParser parser) throws IOException {
    XContentParser.Token token = parser.nextToken();
    ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);
    String currentFieldName = parser.currentName();
    String index = null, type = null, id = null;
    long version = -1;
    boolean found = false;
    BytesReference source = null;
    Map<String, GetField> fields = new HashMap<>();
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token.isValue()) {
            if (_INDEX.equals(currentFieldName)) {
                index = parser.text();
            } else if (_TYPE.equals(currentFieldName)) {
                type = parser.text();
            } else if (_ID.equals(currentFieldName)) {
                id = parser.text();
            } else if (_VERSION.equals(currentFieldName)) {
                version = parser.longValue();
            } else if (FOUND.equals(currentFieldName)) {
                found = parser.booleanValue();
            } else {
                fields.put(currentFieldName, new GetField(currentFieldName, Collections.singletonList(parser.objectText())));
            }
        } else if (token == XContentParser.Token.START_OBJECT) {
            if (SourceFieldMapper.NAME.equals(currentFieldName)) {
                try (XContentBuilder builder = XContentBuilder.builder(parser.contentType().xContent())) {
                    // The original document gets slightly modified: whitespace and pretty
                    // printing are not preserved; it all depends on the current builder settings.
                    builder.copyCurrentStructure(parser);
                    source = builder.bytes();
                }
            } else if (FIELDS.equals(currentFieldName)) {
                while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
                    GetField getField = GetField.fromXContent(parser);
                    fields.put(getField.getName(), getField);
                }
            } else {
                throwUnknownField(currentFieldName, parser.getTokenLocation());
            }
        }
    }
    return new GetResult(index, type, id, version, found, source, fields);
}
Also used: BytesReference(org.elasticsearch.common.bytes.BytesReference), GetField.readGetField(org.elasticsearch.index.get.GetField.readGetField), HashMap(java.util.HashMap), XContentParser(org.elasticsearch.common.xcontent.XContentParser), XContentBuilder(org.elasticsearch.common.xcontent.XContentBuilder)
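
For orientation, here is an illustrative input for fromXContentEmbedded; the field names follow the constants used above (_index, _type, _id, _version, found, _source, fields), while the document values are assumptions, not taken from the original:

// Illustrative embedded get-response; the values are made up for the example.
String json = "{"
        + " \"_index\": \"twitter\", \"_type\": \"tweet\", \"_id\": \"1\","
        + " \"_version\": 2, \"found\": true,"
        + " \"_source\": { \"user\": \"kimchy\" },"
        + " \"fields\": { \"user\": [ \"kimchy\" ] }"
        + "}";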

Example 38 with HashMap

Use of java.util.HashMap in project elasticsearch by elastic.

From the class ShardGetService, method innerGetLoadFromStoredFields.

private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields, FetchSourceContext fetchSourceContext, Engine.GetResult get, MapperService mapperService) {
    Map<String, GetField> fields = null;
    BytesReference source = null;
    Versions.DocIdAndVersion docIdAndVersion = get.docIdAndVersion();
    FieldsVisitor fieldVisitor = buildFieldsVisitors(gFields, fetchSourceContext);
    if (fieldVisitor != null) {
        try {
            docIdAndVersion.context.reader().document(docIdAndVersion.docId, fieldVisitor);
        } catch (IOException e) {
            throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "]", e);
        }
        source = fieldVisitor.source();
        if (!fieldVisitor.fields().isEmpty()) {
            fieldVisitor.postProcess(mapperService);
            fields = new HashMap<>(fieldVisitor.fields().size());
            for (Map.Entry<String, List<Object>> entry : fieldVisitor.fields().entrySet()) {
                fields.put(entry.getKey(), new GetField(entry.getKey(), entry.getValue()));
            }
        }
    }
    DocumentMapper docMapper = mapperService.documentMapper(type);
    if (docMapper.parentFieldMapper().active()) {
        String parentId = ParentFieldSubFetchPhase.getParentId(docMapper.parentFieldMapper(), docIdAndVersion.context.reader(), docIdAndVersion.docId);
        if (fields == null) {
            fields = new HashMap<>(1);
        }
        fields.put(ParentFieldMapper.NAME, new GetField(ParentFieldMapper.NAME, Collections.singletonList(parentId)));
    }
    if (gFields != null && gFields.length > 0) {
        for (String field : gFields) {
            FieldMapper fieldMapper = docMapper.mappers().smartNameFieldMapper(field);
            if (fieldMapper == null) {
                if (docMapper.objectMappers().get(field) != null) {
                    // Only fail if we know it is an object field; missing paths / fields shouldn't fail.
                    throw new IllegalArgumentException("field [" + field + "] isn't a leaf field");
                }
            }
        }
    }
    if (!fetchSourceContext.fetchSource()) {
        source = null;
    } else if (fetchSourceContext.includes().length > 0 || fetchSourceContext.excludes().length > 0) {
        Map<String, Object> sourceAsMap;
        XContentType sourceContentType = null;
        // TODO: The source might be parsed and available in the sourceLookup, but that one uses unordered maps, so it differs. Do we care?
        Tuple<XContentType, Map<String, Object>> typeMapTuple = XContentHelper.convertToMap(source, true);
        sourceContentType = typeMapTuple.v1();
        sourceAsMap = typeMapTuple.v2();
        sourceAsMap = XContentMapValues.filter(sourceAsMap, fetchSourceContext.includes(), fetchSourceContext.excludes());
        try {
            source = XContentFactory.contentBuilder(sourceContentType).map(sourceAsMap).bytes();
        } catch (IOException e) {
            throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "] with includes/excludes set", e);
        }
    }
    return new GetResult(shardId.getIndexName(), type, id, get.version(), get.exists(), source, fields);
}
Also used: BytesReference(org.elasticsearch.common.bytes.BytesReference), FieldsVisitor(org.elasticsearch.index.fieldvisitor.FieldsVisitor), CustomFieldsVisitor(org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor), DocumentMapper(org.elasticsearch.index.mapper.DocumentMapper), IOException(java.io.IOException), ElasticsearchException(org.elasticsearch.ElasticsearchException), Versions(org.elasticsearch.common.lucene.uid.Versions), XContentType(org.elasticsearch.common.xcontent.XContentType), List(java.util.List), HashMap(java.util.HashMap), Map(java.util.Map), UidFieldMapper(org.elasticsearch.index.mapper.UidFieldMapper), FieldMapper(org.elasticsearch.index.mapper.FieldMapper), ParentFieldMapper(org.elasticsearch.index.mapper.ParentFieldMapper), SourceFieldMapper(org.elasticsearch.index.mapper.SourceFieldMapper), Tuple(org.elasticsearch.common.collect.Tuple)
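
The include/exclude branch above delegates to XContentMapValues.filter, the same call the method makes on sourceAsMap. A minimal usage sketch; the map keys and values are illustrative assumptions:

Map<String, Object> sourceAsMap = new HashMap<>();
sourceAsMap.put("user", "kimchy");
sourceAsMap.put("message", "trying out Elasticsearch");
// keep only keys matching the includes; the empty excludes array removes nothing
Map<String, Object> filtered = XContentMapValues.filter(
        sourceAsMap, new String[] { "user" }, new String[0]);
// filtered now contains only the "user" entry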

Example 39 with HashMap

Use of java.util.HashMap in project elasticsearch by elastic.

From the class CompletionFieldMapper, method parse.

/**
 * Parses and indexes inputs.
 *
 * Parsing:
 *  Acceptable formats:
 *   "STRING" - interpreted as field value (input)
 *   "ARRAY" - each element can be one of "OBJECT" (see below)
 *   "OBJECT" - { "input": STRING|ARRAY, "weight": STRING|INT, "contexts": ARRAY|OBJECT }
 *
 * Indexing:
 *  if context mappings are defined, delegates to {@link ContextMappings#addField(ParseContext.Document, String, String, int, Map)};
 *  else adds inputs as a {@link org.apache.lucene.search.suggest.document.SuggestField}
 */
@Override
public Mapper parse(ParseContext context) throws IOException {
    // parse
    XContentParser parser = context.parser();
    Token token = parser.currentToken();
    Map<String, CompletionInputMetaData> inputMap = new HashMap<>(1);
    if (token == Token.VALUE_NULL) {
        throw new MapperParsingException("completion field [" + fieldType().name() + "] does not support null values");
    } else if (token == Token.START_ARRAY) {
        while ((token = parser.nextToken()) != Token.END_ARRAY) {
            parse(context, token, parser, inputMap);
        }
    } else {
        parse(context, token, parser, inputMap);
    }
    // index
    for (Map.Entry<String, CompletionInputMetaData> completionInput : inputMap.entrySet()) {
        String input = completionInput.getKey();
        // truncate input
        if (input.length() > maxInputLength) {
            int len = Math.min(maxInputLength, input.length());
            if (Character.isHighSurrogate(input.charAt(len - 1))) {
                assert input.length() >= len + 1 && Character.isLowSurrogate(input.charAt(len));
                len += 1;
            }
            input = input.substring(0, len);
        }
        CompletionInputMetaData metaData = completionInput.getValue();
        if (fieldType().hasContextMappings()) {
            fieldType().getContextMappings().addField(context.doc(), fieldType().name(), input, metaData.weight, metaData.contexts);
        } else {
            context.doc().add(new SuggestField(fieldType().name(), input, metaData.weight));
        }
    }
    multiFields.parse(this, context);
    return null;
}
Also used: SuggestField(org.apache.lucene.search.suggest.document.SuggestField), HashMap(java.util.HashMap), Token(org.elasticsearch.common.xcontent.XContentParser.Token), Map(java.util.Map), XContentParser(org.elasticsearch.common.xcontent.XContentParser)
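
Two quick illustrations for this example: the value shapes accepted by parse(), per the Javadoc, and a self-contained sketch of the surrogate-safe truncation. The suggestion strings are assumptions, and truncate is a hypothetical helper, not part of CompletionFieldMapper:

// Value shapes the parser accepts (illustrative):
//   "Nevermind"                                           STRING
//   [ { "input": "Nevermind", "weight": 34 } ]            ARRAY
//   { "input": ["Nevermind", "Nirvana"], "weight": 34 }   OBJECT

// Surrogate-safe truncation, mirroring the loop above: never cut between
// a high and a low surrogate, so the final code point stays intact.
static String truncate(String input, int maxInputLength) {
    if (input.length() <= maxInputLength) {
        return input;
    }
    int len = maxInputLength;
    if (Character.isHighSurrogate(input.charAt(len - 1))) {
        len += 1; // include the low surrogate that follows
    }
    return input.substring(0, len);
}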

Example 40 with HashMap

Use of java.util.HashMap in project elasticsearch by elastic.

From the class FsProbe, method ioStats.

final FsInfo.IoStats ioStats(final Set<Tuple<Integer, Integer>> devicesNumbers, final FsInfo previous) {
    try {
        final Map<Tuple<Integer, Integer>, FsInfo.DeviceStats> deviceMap = new HashMap<>();
        if (previous != null && previous.getIoStats() != null && previous.getIoStats().devicesStats != null) {
            for (int i = 0; i < previous.getIoStats().devicesStats.length; i++) {
                FsInfo.DeviceStats deviceStats = previous.getIoStats().devicesStats[i];
                deviceMap.put(Tuple.tuple(deviceStats.majorDeviceNumber, deviceStats.minorDeviceNumber), deviceStats);
            }
        }
        List<FsInfo.DeviceStats> devicesStats = new ArrayList<>();
        List<String> lines = readProcDiskStats();
        if (!lines.isEmpty()) {
            for (String line : lines) {
                String[] fields = line.trim().split("\\s+");
                final int majorDeviceNumber = Integer.parseInt(fields[0]);
                final int minorDeviceNumber = Integer.parseInt(fields[1]);
                if (!devicesNumbers.contains(Tuple.tuple(majorDeviceNumber, minorDeviceNumber))) {
                    continue;
                }
                final String deviceName = fields[2];
                final long readsCompleted = Long.parseLong(fields[3]);
                final long sectorsRead = Long.parseLong(fields[5]);
                final long writesCompleted = Long.parseLong(fields[7]);
                final long sectorsWritten = Long.parseLong(fields[9]);
                final FsInfo.DeviceStats deviceStats = new FsInfo.DeviceStats(majorDeviceNumber, minorDeviceNumber, deviceName, readsCompleted, sectorsRead, writesCompleted, sectorsWritten, deviceMap.get(Tuple.tuple(majorDeviceNumber, minorDeviceNumber)));
                devicesStats.add(deviceStats);
            }
        }
        return new FsInfo.IoStats(devicesStats.toArray(new FsInfo.DeviceStats[devicesStats.size()]));
    } catch (Exception e) {
        // do not fail Elasticsearch if something unexpected
        // happens here
        logger.debug((Supplier<?>) () -> new ParameterizedMessage("unexpected exception processing /proc/diskstats for devices {}", devicesNumbers), e);
        return null;
    }
}
Also used: HashMap(java.util.HashMap), ArrayList(java.util.ArrayList), IOException(java.io.IOException), Supplier(org.apache.logging.log4j.util.Supplier), ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage), Tuple(org.elasticsearch.common.collect.Tuple)
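
The field indices above follow the Linux /proc/diskstats layout: major and minor device numbers, the device name, then the I/O counters. A sketch against a sample line; the sample values are illustrative, and real input comes from readProcDiskStats():

String line = "   8       0 sda 76044 1432 1664110 12080 26367 68472 1468180 32024 0 14856 44100";
String[] fields = line.trim().split("\\s+");
int majorDeviceNumber = Integer.parseInt(fields[0]);   // major device number
int minorDeviceNumber = Integer.parseInt(fields[1]);   // minor device number
String deviceName = fields[2];                         // device name
long readsCompleted = Long.parseLong(fields[3]);       // reads completed
long sectorsRead = Long.parseLong(fields[5]);          // sectors read
long writesCompleted = Long.parseLong(fields[7]);      // writes completed
long sectorsWritten = Long.parseLong(fields[9]);       // sectors written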

Aggregations

HashMap (java.util.HashMap): 69230
Test (org.junit.Test): 16584
ArrayList (java.util.ArrayList): 16269
Map (java.util.Map): 14814
List (java.util.List): 8655
IOException (java.io.IOException): 5791
HashSet (java.util.HashSet): 5215
LinkedHashMap (java.util.LinkedHashMap): 3834
File (java.io.File): 3597
Set (java.util.Set): 3468
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 1946
Iterator (java.util.Iterator): 1890
Date (java.util.Date): 1815
Test (org.junit.jupiter.api.Test): 1788
Test (org.testng.annotations.Test): 1747
LinkedList (java.util.LinkedList): 1641
URI (java.net.URI): 1558
Collection (java.util.Collection): 1173
Properties (java.util.Properties): 1072
InputStream (java.io.InputStream): 1067