
Example 11 with FlushInfo

Use of org.apache.lucene.store.FlushInfo in project lucene-solr by apache.

From the class ReadersAndUpdates, method handleBinaryDVUpdates:

@SuppressWarnings("synthetic-access")
private void handleBinaryDVUpdates(FieldInfos infos, Map<String, BinaryDocValuesFieldUpdates> updates,
                                   TrackingDirectoryWrapper dir, DocValuesFormat dvFormat,
                                   final SegmentReader reader, Map<Integer, Set<String>> fieldFiles) throws IOException {
    for (Entry<String, BinaryDocValuesFieldUpdates> e : updates.entrySet()) {
        final String field = e.getKey();
        final BinaryDocValuesFieldUpdates fieldUpdates = e.getValue();
        final long nextDocValuesGen = info.getNextDocValuesGen();
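        // encode the gen as the file-name suffix in base 36 (Character.MAX_RADIX)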
        final String segmentSuffix = Long.toString(nextDocValuesGen, Character.MAX_RADIX);
        final long estUpdatesSize = fieldUpdates.ramBytesPerDoc() * info.info.maxDoc();
        final IOContext updatesContext = new IOContext(new FlushInfo(info.info.maxDoc(), estUpdatesSize));
        final FieldInfo fieldInfo = infos.fieldInfo(field);
        assert fieldInfo != null;
        fieldInfo.setDocValuesGen(nextDocValuesGen);
        final FieldInfos fieldInfos = new FieldInfos(new FieldInfo[] { fieldInfo });
        // separately also track which files were created for this gen
        final TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
        final SegmentWriteState state = new SegmentWriteState(null, trackingDir, info.info, fieldInfos, null, updatesContext, segmentSuffix);
        try (final DocValuesConsumer fieldsConsumer = dvFormat.fieldsConsumer(state)) {
            // write the binary updates to a new gen'd docvalues file
            fieldsConsumer.addBinaryField(fieldInfo, new EmptyDocValuesProducer() {

                @Override
                public BinaryDocValues getBinary(FieldInfo fieldInfoIn) throws IOException {
                    if (fieldInfoIn != fieldInfo) {
                        throw new IllegalArgumentException("wrong fieldInfo");
                    }
                    final int maxDoc = reader.maxDoc();
                    final BinaryDocValuesFieldUpdates.Iterator updatesIter = fieldUpdates.iterator();
                    updatesIter.reset();
                    final BinaryDocValues currentValues = reader.getBinaryDocValues(field);
                    // Merge sort of the original doc values with updated doc values:
                    return new BinaryDocValues() {

                        // merged docID
                        private int docIDOut = -1;

                        // docID from our original doc values
                        private int docIDIn = -1;

                        // docID from our updates
                        private int updateDocID = -1;

                        private BytesRef value;

                        @Override
                        public int docID() {
                            return docIDOut;
                        }

                        @Override
                        public int advance(int target) {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public boolean advanceExact(int target) throws IOException {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public long cost() {
                            return currentValues.cost();
                        }

                        @Override
                        public BytesRef binaryValue() {
                            return value;
                        }

                        @Override
                        public int nextDoc() throws IOException {
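                            // Advance whichever cursor was consumed at the previous merged
                            // docID, then emit the smaller of the two; on a tie the update
                            // replaces the original value.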
                            if (docIDIn == docIDOut) {
                                if (currentValues == null) {
                                    docIDIn = NO_MORE_DOCS;
                                } else {
                                    docIDIn = currentValues.nextDoc();
                                }
                            }
                            if (updateDocID == docIDOut) {
                                updateDocID = updatesIter.nextDoc();
                            }
                            if (docIDIn < updateDocID) {
                                // no update to this doc
                                docIDOut = docIDIn;
                                value = currentValues.binaryValue();
                            } else {
                                docIDOut = updateDocID;
                                if (docIDOut != NO_MORE_DOCS) {
                                    value = updatesIter.value();
                                }
                            }
                            return docIDOut;
                        }
                    };
                }
            });
        }
        info.advanceDocValuesGen();
        assert !fieldFiles.containsKey(fieldInfo.number);
        fieldFiles.put(fieldInfo.number, trackingDir.getCreatedFiles());
    }
}
Also used: DocValuesConsumer (org.apache.lucene.codecs.DocValuesConsumer), IOException (java.io.IOException), TrackingDirectoryWrapper (org.apache.lucene.store.TrackingDirectoryWrapper), IOContext (org.apache.lucene.store.IOContext), FlushInfo (org.apache.lucene.store.FlushInfo), BytesRef (org.apache.lucene.util.BytesRef)
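
The anonymous BinaryDocValues above is a two-cursor merge: it walks the original values and the updates in docID order, and when both cursors land on the same document the updated value replaces the original one. Below is a minimal, self-contained sketch of that merge logic in plain Java; it is not the Lucene API, and the class and method names are made up for illustration.

import java.util.ArrayList;
import java.util.List;

public class DocValuesMergeSketch {

    // Merges base (docID, value) pairs with update pairs; both inputs must be
    // sorted by docID. On a tie the update wins, mirroring nextDoc() above.
    static List<String> merge(int[] baseDocs, String[] baseVals, int[] updDocs, String[] updVals) {
        List<String> out = new ArrayList<>();
        int b = 0, u = 0;
        while (b < baseDocs.length || u < updDocs.length) {
            int baseDoc = b < baseDocs.length ? baseDocs[b] : Integer.MAX_VALUE;
            int updDoc = u < updDocs.length ? updDocs[u] : Integer.MAX_VALUE;
            if (baseDoc < updDoc) {
                // no update for this doc: keep the original value
                out.add(baseDoc + "=" + baseVals[b++]);
            } else {
                // updated value wins; skip the stale base value on a tie
                out.add(updDoc + "=" + updVals[u++]);
                if (baseDoc == updDoc) {
                    b++;
                }
            }
        }
        return out;
    }

    public static void main(String[] args) {
        // docs 0, 2 and 5 have original values; docs 2 and 7 were updated
        System.out.println(merge(new int[] { 0, 2, 5 }, new String[] { "a", "b", "c" },
                new int[] { 2, 7 }, new String[] { "B", "D" }));
        // prints [0=a, 2=B, 5=c, 7=D]
    }
}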

Example 12 with FlushInfo

Use of org.apache.lucene.store.FlushInfo in project lucene-solr by apache.

From the class ReadersAndUpdates, method writeFieldInfosGen:

private Set<String> writeFieldInfosGen(FieldInfos fieldInfos, Directory dir, DocValuesFormat dvFormat, FieldInfosFormat infosFormat) throws IOException {
    final long nextFieldInfosGen = info.getNextFieldInfosGen();
    final String segmentSuffix = Long.toString(nextFieldInfosGen, Character.MAX_RADIX);
    // we write approximately that many bytes (based on Lucene46DVF):
    // HEADER + FOOTER: 40
    // 90 bytes per-field (over estimating long name and attributes map)
    final long estInfosSize = 40 + 90 * fieldInfos.size();
    final IOContext infosContext = new IOContext(new FlushInfo(info.info.maxDoc(), estInfosSize));
    // separately also track which files were created for this gen
    final TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
    infosFormat.write(trackingDir, info.info, segmentSuffix, fieldInfos, infosContext);
    info.advanceFieldInfosGen();
    return trackingDir.getCreatedFiles();
}
Also used: IOContext (org.apache.lucene.store.IOContext), FlushInfo (org.apache.lucene.store.FlushInfo), TrackingDirectoryWrapper (org.apache.lucene.store.TrackingDirectoryWrapper)
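
Both examples share the same preparation before writing gen'd files: estimate how many bytes the flush will write, wrap the estimate in a FlushInfo-backed IOContext so the Directory implementation can act on it (for example when sizing buffers), and derive the per-generation file suffix by encoding the gen in base 36. A minimal sketch of just that pattern follows; it assumes lucene-core on the classpath, and the gen and maxDoc values are hypothetical placeholders.

import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;

public class FlushContextSketch {

    public static void main(String[] args) {
        // hypothetical inputs, standing in for info.getNextFieldInfosGen() and info.info.maxDoc()
        long nextGen = 3;
        int maxDoc = 1000;
        // same shape as the estimate above: header/footer plus ~90 bytes per field (here, 12 fields)
        long estBytes = 40 + 90L * 12;

        // base-36 suffix, matching Lucene's gen'd file naming (gen 3 -> "3", gen 36 -> "10")
        String segmentSuffix = Long.toString(nextGen, Character.MAX_RADIX);

        // the FlushInfo carries the doc count and size estimate into the directory layer
        IOContext flushContext = new IOContext(new FlushInfo(maxDoc, estBytes));

        System.out.println(segmentSuffix + " " + flushContext.context); // prints: 3 FLUSH
    }
}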

Aggregations

FlushInfo (org.apache.lucene.store.FlushInfo): 12 uses
IOContext (org.apache.lucene.store.IOContext): 12 uses
TrackingDirectoryWrapper (org.apache.lucene.store.TrackingDirectoryWrapper): 4 uses
IOException (java.io.IOException): 3 uses
DocValuesConsumer (org.apache.lucene.codecs.DocValuesConsumer): 3 uses
Directory (org.apache.lucene.store.Directory): 3 uses
Codec (org.apache.lucene.codecs.Codec): 2 uses
FieldsConsumer (org.apache.lucene.codecs.FieldsConsumer): 2 uses
BytesRef (org.apache.lucene.util.BytesRef): 2 uses
ArrayList (java.util.ArrayList): 1 use
HashMap (java.util.HashMap): 1 use
MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer): 1 use
DocValuesProducer (org.apache.lucene.codecs.DocValuesProducer): 1 use
FieldsProducer (org.apache.lucene.codecs.FieldsProducer): 1 use
NormsConsumer (org.apache.lucene.codecs.NormsConsumer): 1 use
NormsProducer (org.apache.lucene.codecs.NormsProducer): 1 use
StoredFieldsReader (org.apache.lucene.codecs.StoredFieldsReader): 1 use
StoredFieldsWriter (org.apache.lucene.codecs.StoredFieldsWriter): 1 use
TermVectorsReader (org.apache.lucene.codecs.TermVectorsReader): 1 use
TermVectorsWriter (org.apache.lucene.codecs.TermVectorsWriter): 1 use