Example 1 with EmbeddedSortField

Use of org.apache.lucene.document.EmbeddedSortField in project Solbase by Photobucket.

From the class DocumentLoader, method processDocument:

private ParsedDoc processDocument(Document newDoc, Document oldDoc, String indexName, int docNumber, SolbaseIndexUtil indexUtil, IndexWriter writer, boolean updateStore) {
    try {
        @SuppressWarnings("unchecked") List<Fieldable> newFields = newDoc.getFields();
        boolean termVectorChanged = false;
        for (Fieldable field : newFields) {
            if (field.isIndexed() || field instanceof EmbeddedSortField) {
                termVectorChanged = true;
                break;
            }
        }
        // do diff on terms
        if (termVectorChanged) {
            Field docIdField = oldDoc.getField("docId");
            // cloning old doc, so it won't conflict with read
            oldDoc = new Document(oldDoc);
            oldDoc.removeField("docId");
            // parsing old doc to get all terms
            try {
                ParsedDoc oldParsedDoc = writer.parseDoc(oldDoc, schema.getAnalyzer(), indexName, docNumber, indexUtil.getSortFieldNames());
                List<Term> oldTerms = oldParsedDoc.getAllTerms();
                List<TermDocMetadata> oldTermDocMetas = oldParsedDoc.getTermDocMetadatas();
                Document mergedDoc = mergeOldAndNew(oldDoc, newDoc);
                ParsedDoc parsedDoc = writer.parseDoc(mergedDoc, schema.getAnalyzer(), indexName, docNumber, indexUtil.getSortFieldNames());
                List<TermDocMetadata> newTermDocMetas = parsedDoc.getTermDocMetadatas();
                List<Term> newTerms = parsedDoc.getAllTerms();
                // diff old vs. new terms: update = intersection, delete = old only, add = new only
                // (the ArrayList copy constructor already copies the elements)
                List<Term> updateList = new ArrayList<Term>(oldTerms);
                List<Term> deleteList = new ArrayList<Term>(oldTerms);
                List<Term> addList = new ArrayList<Term>(newTerms);
                updateList.retainAll(newTerms);
                deleteList.removeAll(newTerms);
                addList.removeAll(oldTerms);
                int shardNum = SolbaseShardUtil.getShardNum(indexName);
                int startDocId = SolbaseShardUtil.getStartDocId(shardNum);
                int endDocId = SolbaseShardUtil.getEndDocId(shardNum);
                // update term vectors (tv) first
                for (TermDocMetadata termDocMeta : newTermDocMetas) {
                    Term term = termDocMeta.getTerm();
                    if (updateList.contains(term)) {
                        logger.debug("updating this term: " + term.toString());
                        ReaderCache.updateTermDocsMetadata(term, termDocMeta, indexName, writer, LayeredCache.ModificationType.UPDATE, updateStore, startDocId, endDocId);
                    } else if (addList.contains(term)) {
                        ReaderCache.updateTermDocsMetadata(term, termDocMeta, indexName, writer, LayeredCache.ModificationType.ADD, updateStore, startDocId, endDocId);
                    }
                }
                // clean up deletes
                if (deleteList.size() > 0) {
                    for (TermDocMetadata termDocMeta : oldTermDocMetas) {
                        Term term = termDocMeta.getTerm();
                        if (deleteList.contains(term)) {
                            ReaderCache.updateTermDocsMetadata(term, termDocMeta, indexName, writer, LayeredCache.ModificationType.DELETE, updateStore, startDocId, endDocId);
                        }
                    }
                }
                parsedDoc.getDocument().add(docIdField);
                return parsedDoc;
            } catch (NullPointerException e) {
                return null;
            }
        } else {
            Document mergedDoc = mergeOldAndNew(oldDoc, newDoc);
            ParsedDoc parsedDoc = writer.parseDoc(mergedDoc, schema.getAnalyzer(), indexName, docNumber, indexUtil.getSortFieldNames());
            return parsedDoc;
        }
    } catch (IOException e) {
        logger.error("failed to process document", e);
    } catch (InterruptedException e) {
        // restore the interrupt flag instead of swallowing it
        Thread.currentThread().interrupt();
        logger.error("interrupted while processing document", e);
    } catch (MemcachedException e) {
        logger.error("failed to process document", e);
    } catch (TimeoutException e) {
        logger.error("failed to process document", e);
    }
    return null;
}
Also used: ArrayList(java.util.ArrayList) EmbeddedSortField(org.apache.lucene.document.EmbeddedSortField) Term(org.apache.lucene.index.Term) IOException(java.io.IOException) Document(org.apache.lucene.document.Document) SolrInputDocument(org.apache.solr.common.SolrInputDocument) EmbeddedIndexedIntField(org.apache.solr.schema.EmbeddedIndexedIntField) SchemaField(org.apache.solr.schema.SchemaField) Field(org.apache.lucene.document.Field) ParsedDoc(org.solbase.indexer.ParsedDoc) Fieldable(org.apache.lucene.document.Fieldable) MemcachedException(net.rubyeye.xmemcached.exception.MemcachedException) TimeoutException(java.util.concurrent.TimeoutException)
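
The update/delete/add lists in processDocument implement a three-way diff with plain java.util.List operations: retainAll keeps the intersection (terms to UPDATE), removeAll on the old-terms copy leaves terms only present before (DELETE), and removeAll on the new-terms copy leaves terms only present now (ADD). A minimal, self-contained sketch of the same pattern, using String in place of org.apache.lucene.index.Term:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class TermDiffSketch {
    public static void main(String[] args) {
        List<String> oldTerms = Arrays.asList("a", "b", "c");
        List<String> newTerms = Arrays.asList("b", "c", "d");
        // copy first: retainAll/removeAll mutate the receiving list
        List<String> updateList = new ArrayList<String>(oldTerms);
        List<String> deleteList = new ArrayList<String>(oldTerms);
        List<String> addList = new ArrayList<String>(newTerms);
        updateList.retainAll(newTerms); // in both old and new -> UPDATE
        deleteList.removeAll(newTerms); // only in old -> DELETE
        addList.removeAll(oldTerms);    // only in new -> ADD
        System.out.println("update=" + updateList); // [b, c]
        System.out.println("delete=" + deleteList); // [a]
        System.out.println("add=" + addList);       // [d]
    }
}

Note that these operations rely on equals, and each membership check is O(n) on an ArrayList; backing the lookups with a HashSet would make them O(1) at the cost of losing order.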

Example 2 with EmbeddedSortField

Use of org.apache.lucene.document.EmbeddedSortField in project Solbase by Photobucket.

From the class DocumentLoader, method loadObject:

public CachedObjectWrapper<Document, Long> loadObject(Integer docNum, int start, int end, LayeredCache<Integer, Document, Long, ParsedDoc> cache) throws IOException {
    Document document = new Document();
    Get documentGet = new Get(SolbaseUtil.randomize(docNum));
    if (fieldNames == null || fieldNames.isEmpty()) {
        // fetch the entire "field" column family (this skips the meta columns)
        documentGet.addFamily(Bytes.toBytes("field"));
    } else {
        for (byte[] fieldName : fieldNames) {
            documentGet.addColumn(Bytes.toBytes("field"), fieldName);
        }
    }
    Result documentResult = null;
    // if docTable is set up, reuse instance, otherwise create brand new one and close after done
    if (this.docTable == null) {
        HTableInterface docTable = null;
        try {
            docTable = SolbaseUtil.getDocTable();
            documentResult = docTable.get(documentGet);
        } finally {
            SolbaseUtil.releaseTable(docTable);
        }
    } else {
        documentResult = this.docTable.get(documentGet);
    }
    if (documentResult == null || documentResult.isEmpty()) {
        return null;
    }
    // TODO, get from result
    Long versionIdentifier = 0L;
    NavigableMap<byte[], byte[]> familyMap = documentResult.getFamilyMap(Bytes.toBytes("field"));
    for (Map.Entry<byte[], byte[]> fieldColumn : familyMap.entrySet()) {
        Field field = null;
        String fieldName = Bytes.toString(fieldColumn.getKey());
        byte[] value;
        ByteBuffer v = ByteBuffer.wrap(fieldColumn.getValue());
        int vlimit = v.limit() + v.arrayOffset();
        if (v.array()[vlimit - 1] != Byte.MAX_VALUE && v.array()[vlimit - 1] != Byte.MIN_VALUE) {
            throw new CorruptIndexException("Solbase field is not properly encoded: " + docNum + "(" + fieldName + ")");
        } else if (v.array()[vlimit - 1] == Byte.MAX_VALUE) {
            // Binary
            value = new byte[vlimit - 1];
            System.arraycopy(v.array(), v.position() + v.arrayOffset(), value, 0, vlimit - 1);
            field = new Field(fieldName, value, Store.YES);
            document.add(field);
        } else if (v.array()[vlimit - 1] == Byte.MIN_VALUE) {
            // String
            value = new byte[vlimit - 1];
            System.arraycopy(v.array(), v.position() + v.arrayOffset(), value, 0, vlimit - 1);
            // Check for multi-fields
            String fieldString = new String(value, "UTF-8");
            if (fieldString.indexOf(Bytes.toString(SolbaseUtil.delimiter)) >= 0) {
                StringTokenizer tok = new StringTokenizer(fieldString, Bytes.toString(SolbaseUtil.delimiter));
                while (tok.hasMoreTokens()) {
                    // update logic
                    if (schema != null) {
                        SchemaField sfield = schema.getFieldOrNull(fieldName);
                        if (sfield.getType() instanceof EmbeddedIndexedIntField) {
                            EmbeddedIndexedIntField eiif = (EmbeddedIndexedIntField) sfield.getType();
                            EmbeddedSortField sf = new EmbeddedSortField(fieldName, tok.nextToken(), Field.Store.YES, Field.Index.NO, eiif.getFieldNumber());
                            document.add(sf);
                        } else {
                            Field f = sfield.createField(tok.nextToken(), 1.0f);
                            if (f != null) {
                                // null fields are not added
                                document.add(f);
                            }
                        }
                    } else {
                        field = new Field(fieldName, tok.nextToken(), Store.YES, Index.ANALYZED);
                        document.add(field);
                    }
                }
            } else {
                // update logic
                if (schema != null) {
                    SchemaField sfield = schema.getFieldOrNull(fieldName);
                    if (sfield.getType() instanceof EmbeddedIndexedIntField) {
                        EmbeddedIndexedIntField eiif = (EmbeddedIndexedIntField) sfield.getType();
                        EmbeddedSortField sf = new EmbeddedSortField(fieldName, fieldString, Field.Store.YES, Field.Index.NO, eiif.getFieldNumber());
                        document.add(sf);
                    } else {
                        Field f = sfield.createField(fieldString, 1.0f);
                        if (f != null) {
                            // null fields are not added
                            document.add(f);
                        }
                    }
                } else {
                    field = new Field(fieldName, fieldString, Store.YES, Index.ANALYZED);
                    document.add(field);
                }
            }
        }
    }
    return new CachedObjectWrapper<Document, Long>(document, versionIdentifier, System.currentTimeMillis());
}
Also used: CachedObjectWrapper(org.solbase.cache.CachedObjectWrapper) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) EmbeddedSortField(org.apache.lucene.document.EmbeddedSortField) Document(org.apache.lucene.document.Document) SolrInputDocument(org.apache.solr.common.SolrInputDocument) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) ByteBuffer(java.nio.ByteBuffer) Result(org.apache.hadoop.hbase.client.Result) SchemaField(org.apache.solr.schema.SchemaField) EmbeddedIndexedIntField(org.apache.solr.schema.EmbeddedIndexedIntField) Field(org.apache.lucene.document.Field) StringTokenizer(java.util.StringTokenizer) Get(org.apache.hadoop.hbase.client.Get) Map(java.util.Map) NavigableMap(java.util.NavigableMap)
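
loadObject recovers the type of each stored cell from its trailing byte: Byte.MAX_VALUE marks a binary value, Byte.MIN_VALUE a UTF-8 string, and multi-valued string fields are joined with SolbaseUtil.delimiter. A minimal sketch of that decode step, assuming a single-valued cell (the delimiter tokenizing is omitted):

import java.io.UnsupportedEncodingException;

public class FieldValueDecodeSketch {
    static final byte BINARY_FLAG = Byte.MAX_VALUE;
    static final byte STRING_FLAG = Byte.MIN_VALUE;

    // returns byte[] for binary cells, String for string cells
    static Object decode(byte[] cell) throws UnsupportedEncodingException {
        byte flag = cell[cell.length - 1];
        byte[] payload = new byte[cell.length - 1];
        System.arraycopy(cell, 0, payload, 0, payload.length);
        if (flag == BINARY_FLAG) {
            return payload;                      // raw binary value
        } else if (flag == STRING_FLAG) {
            return new String(payload, "UTF-8"); // string value
        }
        // loadObject raises CorruptIndexException in this case
        throw new IllegalStateException("cell is not properly encoded");
    }

    public static void main(String[] args) throws Exception {
        byte[] stored = { 'h', 'i', STRING_FLAG };
        System.out.println(decode(stored)); // prints "hi"
    }
}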

Example 3 with EmbeddedSortField

Use of org.apache.lucene.document.EmbeddedSortField in project Solbase by Photobucket.

From the class IndexWriter, method parseDoc:

@SuppressWarnings("unchecked")
public ParsedDoc parseDoc(Document doc, Analyzer analyzer, String indexName, int docNumber, List<String> sortFieldNames) throws CorruptIndexException, IOException {
    // given a doc, collect all the terms we indexed
    List<Term> allIndexedTerms = new ArrayList<Term>();
    Map<String, byte[]> fieldCache = new HashMap<String, byte[]>(1024);
    // hold onto each TermDocMetadata so we can return them all
    List<TermDocMetadata> metadatas = new ArrayList<TermDocMetadata>();
    byte[] docId = Bytes.toBytes(docNumber);
    int position = 0;
    for (Fieldable field : (List<Fieldable>) doc.getFields()) {
        // Indexed field
        if (field.isIndexed() && field.isTokenized()) {
            TokenStream tokens = field.tokenStreamValue();
            if (tokens == null) {
                tokens = analyzer.tokenStream(field.name(), new StringReader(field.stringValue()));
            }
            // collect term information per field
            Map<Term, Map<ByteBuffer, List<Number>>> allTermInformation = new ConcurrentSkipListMap<Term, Map<ByteBuffer, List<Number>>>();
            int lastOffset = 0;
            if (position > 0) {
                position += analyzer.getPositionIncrementGap(field.name());
            }
            // reset the TokenStream to the first token
            tokens.reset();
            // offsets
            OffsetAttribute offsetAttribute = null;
            if (field.isStoreOffsetWithTermVector())
                offsetAttribute = (OffsetAttribute) tokens.addAttribute(OffsetAttribute.class);
            // positions
            PositionIncrementAttribute posIncrAttribute = null;
            if (field.isStorePositionWithTermVector())
                posIncrAttribute = (PositionIncrementAttribute) tokens.addAttribute(PositionIncrementAttribute.class);
            TermAttribute termAttribute = (TermAttribute) tokens.addAttribute(TermAttribute.class);
            // store field norms per term per document rather than per field:
            // this adds more writes but fewer reads on the other side
            int tokensInField = 0;
            while (tokens.incrementToken()) {
                tokensInField++;
                Term term = new Term(field.name(), termAttribute.term());
                allIndexedTerms.add(term);
                // fetch all collected information for this term
                Map<ByteBuffer, List<Number>> termInfo = allTermInformation.get(term);
                if (termInfo == null) {
                    termInfo = new ConcurrentSkipListMap<ByteBuffer, List<Number>>();
                    allTermInformation.put(term, termInfo);
                }
                // term frequency
                List<Number> termFrequency = termInfo.get(TermDocMetadata.termFrequencyKeyBytes);
                if (termFrequency == null) {
                    termFrequency = new ArrayList<Number>();
                    termFrequency.add(new Integer(0));
                    termInfo.put(TermDocMetadata.termFrequencyKeyBytes, termFrequency);
                }
                // increment
                termFrequency.set(0, termFrequency.get(0).intValue() + 1);
                // position vector
                if (field.isStorePositionWithTermVector()) {
                    position += (posIncrAttribute.getPositionIncrement() - 1);
                    List<Number> positionVector = termInfo.get(TermDocMetadata.positionVectorKeyBytes);
                    if (positionVector == null) {
                        positionVector = new ArrayList<Number>();
                        termInfo.put(TermDocMetadata.positionVectorKeyBytes, positionVector);
                    }
                    positionVector.add(++position);
                }
                // term offsets
                if (field.isStoreOffsetWithTermVector()) {
                    List<Number> offsetVector = termInfo.get(TermDocMetadata.offsetVectorKeyBytes);
                    if (offsetVector == null) {
                        offsetVector = new ArrayList<Number>();
                        termInfo.put(TermDocMetadata.offsetVectorKeyBytes, offsetVector);
                    }
                    offsetVector.add(lastOffset + offsetAttribute.startOffset());
                    offsetVector.add(lastOffset + offsetAttribute.endOffset());
                }
                List<Number> sortValues = new ArrayList<Number>();
                // init sortValues
                for (int i = 0; i < Scorer.numSort; i++) {
                    sortValues.add(new Integer(-1));
                }
                int order = 0;
                // extract sort field value and store it in term doc metadata obj
                for (String fieldName : sortFieldNames) {
                    Fieldable fieldable = doc.getFieldable(fieldName);
                    if (fieldable instanceof EmbeddedSortField) {
                        EmbeddedSortField sortField = (EmbeddedSortField) fieldable;
                        int value = -1;
                        if (sortField.stringValue() != null) {
                            value = Integer.parseInt(sortField.stringValue());
                        }
                        int sortSlot = sortField.getSortSlot();
                        sortValues.set(sortSlot - 1, new Integer(value));
                    } else {
                        // TODO: this path is used for real-time indexing; hacky,
                        // it depends on the order of sort field names in the array
                        int value = -1;
                        if (fieldable.stringValue() != null) {
                            value = Integer.parseInt(fieldable.stringValue());
                        }
                        sortValues.set(order++, new Integer(value));
                    }
                }
                termInfo.put(TermDocMetadata.sortFieldKeyBytes, sortValues);
            }
            List<Number> bnorm = null;
            if (!field.getOmitNorms()) {
                bnorm = new ArrayList<Number>();
                float norm = doc.getBoost();
                norm *= field.getBoost();
                norm *= similarity.lengthNorm(field.name(), tokensInField);
                bnorm.add(Similarity.encodeNorm(norm));
            }
            for (Map.Entry<Term, Map<ByteBuffer, List<Number>>> term : allTermInformation.entrySet()) {
                Term tempTerm = term.getKey();
                byte[] fieldTermKeyBytes = SolbaseUtil.generateTermKey(tempTerm);
                // more writes but faster on read side.
                if (!field.getOmitNorms()) {
                    term.getValue().put(TermDocMetadata.normsKeyBytes, bnorm);
                }
                TermDocMetadata data = new TermDocMetadata(docNumber, term.getValue(), fieldTermKeyBytes, tempTerm);
                metadatas.add(data);
            }
        }
        // Untokenized fields go in without a termPosition
        if (field.isIndexed() && !field.isTokenized()) {
            Term term = new Term(field.name(), field.stringValue());
            allIndexedTerms.add(term);
            byte[] fieldTermKeyBytes = SolbaseUtil.generateTermKey(term);
            Map<ByteBuffer, List<Number>> termMap = new ConcurrentSkipListMap<ByteBuffer, List<Number>>();
            termMap.put(TermDocMetadata.termFrequencyKeyBytes, Arrays.asList(new Number[] {}));
            termMap.put(TermDocMetadata.positionVectorKeyBytes, Arrays.asList(new Number[] {}));
            TermDocMetadata data = new TermDocMetadata(docNumber, termMap, fieldTermKeyBytes, term);
            metadatas.add(data);
        }
        // Stores each field as a column under this doc key
        if (field.isStored()) {
            byte[] _value = field.isBinary() ? field.getBinaryValue() : Bytes.toBytes(field.stringValue());
            // first byte flags if binary or not
            byte[] value = new byte[_value.length + 1];
            System.arraycopy(_value, 0, value, 0, _value.length);
            value[value.length - 1] = (byte) (field.isBinary() ? Byte.MAX_VALUE : Byte.MIN_VALUE);
            // logic to handle multiple fields w/ same name
            byte[] currentValue = fieldCache.get(field.name());
            if (currentValue == null) {
                fieldCache.put(field.name(), value);
            } else {
                // append new data
                byte[] newValue = new byte[currentValue.length + SolbaseUtil.delimiter.length + value.length - 1];
                System.arraycopy(currentValue, 0, newValue, 0, currentValue.length - 1);
                System.arraycopy(SolbaseUtil.delimiter, 0, newValue, currentValue.length - 1, SolbaseUtil.delimiter.length);
                System.arraycopy(value, 0, newValue, currentValue.length + SolbaseUtil.delimiter.length - 1, value.length);
                fieldCache.put(field.name(), newValue);
            }
        }
    }
    Put documentPut = new Put(SolbaseUtil.randomize(docNumber));
    // Store each field as a column under this docId
    for (Map.Entry<String, byte[]> field : fieldCache.entrySet()) {
        documentPut.add(Bytes.toBytes("field"), Bytes.toBytes(field.getKey()), field.getValue());
    }
    // in case of real time update, we need to add back docId field
    if (!documentPut.has(Bytes.toBytes("field"), Bytes.toBytes("docId"))) {
        byte[] docIdStr = Bytes.toBytes(Integer.toString(docNumber));
        // first byte flags if binary or not
        byte[] value = new byte[docIdStr.length + 1];
        System.arraycopy(docIdStr, 0, value, 0, docIdStr.length);
        value[value.length - 1] = (byte) (Byte.MIN_VALUE);
        documentPut.add(Bytes.toBytes("field"), Bytes.toBytes("docId"), value);
    }
    // finally, store meta-data so we can delete this document later
    documentPut.add(Bytes.toBytes("allTerms"), Bytes.toBytes("allTerms"), SolbaseUtil.toBytes(allIndexedTerms).array());
    ParsedDoc parsedDoc = new ParsedDoc(metadatas, doc, documentPut, fieldCache.entrySet(), allIndexedTerms);
    return parsedDoc;
}
Also used: TokenStream(org.apache.lucene.analysis.TokenStream) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) EmbeddedSortField(org.apache.lucene.document.EmbeddedSortField) Fieldable(org.apache.lucene.document.Fieldable) StringReader(java.io.StringReader) List(java.util.List) TermAttribute(org.apache.lucene.analysis.tokenattributes.TermAttribute) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap) Term(org.apache.lucene.index.Term) ByteBuffer(java.nio.ByteBuffer) Put(org.apache.hadoop.hbase.client.Put) PositionIncrementAttribute(org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute) ParsedDoc(org.solbase.indexer.ParsedDoc) OffsetAttribute(org.apache.lucene.analysis.tokenattributes.OffsetAttribute) Map(java.util.Map)
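
The stored-field branch of parseDoc is the write side of the encoding that Example 2 decodes: each value gets a one-byte type flag appended, and repeated values for the same field name are concatenated with SolbaseUtil.delimiter, keeping only the final flag byte. A minimal sketch under the assumption of string values and a hypothetical one-byte '|' delimiter (Solbase's real delimiter lives in SolbaseUtil.delimiter):

public class StoredFieldEncodeSketch {
    static final byte STRING_FLAG = Byte.MIN_VALUE;
    static final byte[] DELIMITER = { '|' }; // stand-in, not Solbase's real delimiter

    static byte[] encode(String s) throws Exception {
        byte[] raw = s.getBytes("UTF-8");
        byte[] value = new byte[raw.length + 1];
        System.arraycopy(raw, 0, value, 0, raw.length);
        value[value.length - 1] = STRING_FLAG; // trailing flag marks a string
        return value;
    }

    // append `value` to `current`, dropping current's flag byte first so
    // the merged cell ends with exactly one flag byte
    static byte[] append(byte[] current, byte[] value) {
        byte[] merged = new byte[current.length - 1 + DELIMITER.length + value.length];
        System.arraycopy(current, 0, merged, 0, current.length - 1);
        System.arraycopy(DELIMITER, 0, merged, current.length - 1, DELIMITER.length);
        System.arraycopy(value, 0, merged, current.length - 1 + DELIMITER.length, value.length);
        return merged;
    }

    public static void main(String[] args) throws Exception {
        byte[] cell = append(encode("red"), encode("blue"));
        // cell now holds "red|blue" followed by one STRING_FLAG byte
        System.out.println(new String(cell, 0, cell.length - 1, "UTF-8"));
    }
}

This layout keeps one HBase column per field name while still round-tripping multi-valued fields; the reader in Example 2 splits on the delimiter with a StringTokenizer.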

Aggregations

EmbeddedSortField (org.apache.lucene.document.EmbeddedSortField): 3
ByteBuffer (java.nio.ByteBuffer): 2
ArrayList (java.util.ArrayList): 2
Map (java.util.Map): 2
Document (org.apache.lucene.document.Document): 2
Field (org.apache.lucene.document.Field): 2
Fieldable (org.apache.lucene.document.Fieldable): 2
Term (org.apache.lucene.index.Term): 2
SolrInputDocument (org.apache.solr.common.SolrInputDocument): 2
EmbeddedIndexedIntField (org.apache.solr.schema.EmbeddedIndexedIntField): 2
SchemaField (org.apache.solr.schema.SchemaField): 2
ParsedDoc (org.solbase.indexer.ParsedDoc): 2
IOException (java.io.IOException): 1
StringReader (java.io.StringReader): 1
HashMap (java.util.HashMap): 1
List (java.util.List): 1
NavigableMap (java.util.NavigableMap): 1
StringTokenizer (java.util.StringTokenizer): 1
ConcurrentSkipListMap (java.util.concurrent.ConcurrentSkipListMap): 1
TimeoutException (java.util.concurrent.TimeoutException): 1