Example 41 with IndexOutput

Use of org.apache.lucene.store.IndexOutput in the apache/lucene-solr project.

From class TestDirectMonotonic, method testSimple:

public void testSimple() throws IOException {
    Directory dir = newDirectory();
    final int blockShift = 2;
    List<Long> actualValues = Arrays.asList(1L, 2L, 5L, 7L, 8L, 100L);
    final int numValues = actualValues.size();
    final long dataLength;
    // Write phase: encode the values with DirectMonotonicWriter
    try (IndexOutput metaOut = dir.createOutput("meta", IOContext.DEFAULT);
        IndexOutput dataOut = dir.createOutput("data", IOContext.DEFAULT)) {
        DirectMonotonicWriter w = DirectMonotonicWriter.getInstance(metaOut, dataOut, numValues, blockShift);
        for (long v : actualValues) {
            w.add(v);
        }
        w.finish();
        dataLength = dataOut.getFilePointer();
    }
    // Read phase: load the metadata and verify each value round-trips
    try (IndexInput metaIn = dir.openInput("meta", IOContext.READONCE);
        IndexInput dataIn = dir.openInput("data", IOContext.DEFAULT)) {
        DirectMonotonicReader.Meta meta = DirectMonotonicReader.loadMeta(metaIn, numValues, blockShift);
        LongValues values = DirectMonotonicReader.getInstance(meta, dataIn.randomAccessSlice(0, dataLength));
        for (int i = 0; i < numValues; ++i) {
            final long v = values.get(i);
            assertEquals(actualValues.get(i).longValue(), v);
        }
    }
    dir.close();
}
Also used: IndexInput (org.apache.lucene.store.IndexInput), LongValues (org.apache.lucene.util.LongValues), IndexOutput (org.apache.lucene.store.IndexOutput), Directory (org.apache.lucene.store.Directory)
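
For orientation, here is a minimal sketch (not from the project) of the bare write/read round trip that the test above builds on. It assumes a RAMDirectory, but any Directory implementation behaves the same way; the file name "demo" is illustrative.

Directory dir = new RAMDirectory();
try (IndexOutput out = dir.createOutput("demo", IOContext.DEFAULT)) {
    out.writeVLong(42L);
    // writeFooter appends the codec magic and a checksum of everything written so far
    CodecUtil.writeFooter(out);
}
try (ChecksumIndexInput in = dir.openChecksumInput("demo", IOContext.READONCE)) {
    long v = in.readVLong();
    // checkFooter recomputes the checksum and compares it to the footer written above
    CodecUtil.checkFooter(in);
    assertEquals(42L, v);
}
dir.close();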

Example 42 with IndexOutput

Use of org.apache.lucene.store.IndexOutput in the apache/lucene-solr project.

From class Lucene60PointsWriter, method finish:

@Override
public void finish() throws IOException {
    if (finished) {
        throw new IllegalStateException("already finished");
    }
    finished = true;
    CodecUtil.writeFooter(dataOut);
    String indexFileName = IndexFileNames.segmentFileName(writeState.segmentInfo.name, writeState.segmentSuffix, Lucene60PointsFormat.INDEX_EXTENSION);
    // Write index file
    try (IndexOutput indexOut = writeState.directory.createOutput(indexFileName, writeState.context)) {
        CodecUtil.writeIndexHeader(indexOut, Lucene60PointsFormat.META_CODEC_NAME, Lucene60PointsFormat.INDEX_VERSION_CURRENT, writeState.segmentInfo.getId(), writeState.segmentSuffix);
        int count = indexFPs.size();
        indexOut.writeVInt(count);
        for (Map.Entry<String, Long> ent : indexFPs.entrySet()) {
            FieldInfo fieldInfo = writeState.fieldInfos.fieldInfo(ent.getKey());
            if (fieldInfo == null) {
                throw new IllegalStateException("wrote field=\"" + ent.getKey() + "\" but that field doesn't exist in FieldInfos");
            }
            indexOut.writeVInt(fieldInfo.number);
            indexOut.writeVLong(ent.getValue());
        }
        CodecUtil.writeFooter(indexOut);
    }
}
Also used: IndexOutput (org.apache.lucene.store.IndexOutput), HashMap (java.util.HashMap), Map (java.util.Map), FieldInfo (org.apache.lucene.index.FieldInfo)
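
As a hedged sketch of the matching read side (a simplification of what Lucene60PointsReader does; readState and indexFileName are assumed to mirror the writer's writeState, and the version constants are package-private, so treat this as illustrative rather than copy-paste code):

try (ChecksumIndexInput indexIn = readState.directory.openChecksumInput(indexFileName, readState.context)) {
    CodecUtil.checkIndexHeader(indexIn, Lucene60PointsFormat.META_CODEC_NAME, Lucene60PointsFormat.INDEX_VERSION_START, Lucene60PointsFormat.INDEX_VERSION_CURRENT, readState.segmentInfo.getId(), readState.segmentSuffix);
    int count = indexIn.readVInt();
    for (int i = 0; i < count; i++) {
        int fieldNumber = indexIn.readVInt();
        // file pointer into the data file where this field's point index starts
        long fp = indexIn.readVLong();
    }
    CodecUtil.checkFooter(indexIn);
}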

Example 43 with IndexOutput

Use of org.apache.lucene.store.IndexOutput in the apache/lucene-solr project.

From class OfflineSorter, method sort:

/**
 * Sort input to a new temp file, returning its name.
 */
public String sort(String inputFileName) throws IOException {
    sortInfo = new SortInfo();
    long startMS = System.currentTimeMillis();
    List<Future<Partition>> segments = new ArrayList<>();
    int[] levelCounts = new int[1];
    // So we can remove any partially written temp files on exception:
    TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
    boolean success = false;
    try (ByteSequencesReader is = getReader(dir.openChecksumInput(inputFileName, IOContext.READONCE), inputFileName)) {
        while (true) {
            Partition part = readPartition(is);
            if (part.count == 0) {
                if (partitionsInRAM != null) {
                    partitionsInRAM.release();
                }
                assert part.exhausted;
                break;
            }
            Callable<Partition> job = new SortPartitionTask(trackingDir, part);
            segments.add(exec.submit(job));
            sortInfo.tempMergeFiles++;
            sortInfo.lineCount += part.count;
            levelCounts[0]++;
            // Handle intermediate merges; we need a while loop to "cascade" the merge when necessary:
            int mergeLevel = 0;
            while (levelCounts[mergeLevel] == maxTempFiles) {
                mergePartitions(trackingDir, segments);
                if (mergeLevel + 2 > levelCounts.length) {
                    levelCounts = ArrayUtil.grow(levelCounts, mergeLevel + 2);
                }
                levelCounts[mergeLevel + 1]++;
                levelCounts[mergeLevel] = 0;
                mergeLevel++;
            }
            if (part.exhausted) {
                break;
            }
        }
        // Merge all partitions down to 1 (basically a forceMerge(1)):
        while (segments.size() > 1) {
            mergePartitions(trackingDir, segments);
        }
        String result;
        if (segments.isEmpty()) {
            try (IndexOutput out = trackingDir.createTempOutput(tempFileNamePrefix, "sort", IOContext.DEFAULT)) {
                // Write empty file footer
                CodecUtil.writeFooter(out);
                result = out.getName();
            }
        } else {
            result = getPartition(segments.get(0)).fileName;
        }
        // We should be explicitly removing all intermediate files ourselves unless there is an exception:
        assert trackingDir.getCreatedFiles().size() == 1 && trackingDir.getCreatedFiles().contains(result);
        sortInfo.totalTimeMS = System.currentTimeMillis() - startMS;
        CodecUtil.checkFooter(is.in);
        success = true;
        return result;
    } catch (InterruptedException ie) {
        throw new ThreadInterruptedException(ie);
    } finally {
        if (success == false) {
            IOUtils.deleteFilesIgnoringExceptions(trackingDir, trackingDir.getCreatedFiles());
        }
    }
}
Also used: ArrayList (java.util.ArrayList), IndexOutput (org.apache.lucene.store.IndexOutput), TrackingDirectoryWrapper (org.apache.lucene.store.TrackingDirectoryWrapper), Future (java.util.concurrent.Future)
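
A hedged usage sketch (not from the project) of how sort is typically driven: the input is written with OfflineSorter.ByteSequencesWriter and must end with a codec footer, because sort verifies it via CodecUtil.checkFooter. The path and the "demo" prefix are illustrative.

Directory dir = FSDirectory.open(Paths.get("/tmp/offline-sort"));
String unsorted;
try (IndexOutput out = dir.createTempOutput("demo", "unsorted", IOContext.DEFAULT)) {
    unsorted = out.getName();
    OfflineSorter.ByteSequencesWriter writer = new OfflineSorter.ByteSequencesWriter(out);
    // length-prefixed byte sequences; sorted in unsigned byte order by default
    writer.write(new BytesRef("banana"));
    writer.write(new BytesRef("apple"));
    // sort() calls CodecUtil.checkFooter on its input, so the footer is mandatory
    CodecUtil.writeFooter(out);
}
String sorted = new OfflineSorter(dir, "demo").sort(unsorted);
dir.close();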

Example 44 with IndexOutput

Use of org.apache.lucene.store.IndexOutput in the apache/lucene-solr project.

From class HdfsDirectoryFactoryTest, method testLocalityReporter:

@Test
public void testLocalityReporter() throws Exception {
    Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
    conf.set("dfs.permissions.enabled", "false");
    Random r = random();
    HdfsDirectoryFactory factory = new HdfsDirectoryFactory();
    SolrMetricManager metricManager = new SolrMetricManager();
    String registry = TestUtil.randomSimpleString(r, 2, 10);
    String scope = TestUtil.randomSimpleString(r, 2, 10);
    Map<String, String> props = new HashMap<String, String>();
    props.put(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr");
    props.put(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "false");
    props.put(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_ENABLE, "false");
    props.put(HdfsDirectoryFactory.LOCALITYMETRICS_ENABLED, "true");
    factory.init(new NamedList<>(props));
    factory.initializeMetrics(metricManager, registry, scope);
    // get the metrics map for the locality bean
    MetricsMap metrics = (MetricsMap) metricManager.registry(registry).getMetrics().get("OTHER." + scope + ".hdfsLocality");
    // We haven't done anything, so there should be no data
    Map<String, Object> statistics = metrics.getValue();
    assertEquals("Saw bytes that were not written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL), 0l, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL));
    assertEquals("Counted bytes as local when none written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_RATIO), 0, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_RATIO));
    // create a directory and a file
    String path = HdfsTestUtil.getURI(dfsCluster) + "/solr3/";
    Directory dir = factory.create(path, NoLockFactory.INSTANCE, DirContext.DEFAULT);
    try (IndexOutput writer = dir.createOutput("output", null)) {
        writer.writeLong(42L);
    }
    final long long_bytes = Long.SIZE / Byte.SIZE;
    // no locality because hostname not set
    factory.setHost("bogus");
    statistics = metrics.getValue();
    assertEquals("Wrong number of total bytes counted: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL), long_bytes, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL));
    assertEquals("Wrong number of total blocks counted: " + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL), 1, statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL));
    assertEquals("Counted block as local when bad hostname set: " + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_LOCAL), 0, statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_LOCAL));
    // set hostname and check again
    factory.setHost("127.0.0.1");
    statistics = metrics.getValue();
    assertEquals("Did not count block as local after setting hostname: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_LOCAL), long_bytes, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_LOCAL));
    factory.close();
}
Also used: MetricsMap (org.apache.solr.metrics.MetricsMap), Configuration (org.apache.hadoop.conf.Configuration), HashMap (java.util.HashMap), IndexOutput (org.apache.lucene.store.IndexOutput), Random (java.util.Random), SolrMetricManager (org.apache.solr.metrics.SolrMetricManager), Directory (org.apache.lucene.store.Directory), Test (org.junit.Test)
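
As a small hedged follow-up (not part of the test): placed before factory.close(), a read-back through the same Directory would confirm the eight bytes that the locality reporter counted.

try (IndexInput in = dir.openInput("output", IOContext.DEFAULT)) {
    assertEquals(42L, in.readLong());
}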

Example 45 with IndexOutput

Use of org.apache.lucene.store.IndexOutput in the apache/lucene-solr project.

From class HdfsDirectoryTest, method testRename:

public void testRename() throws IOException {
    String[] listAll = directory.listAll();
    for (String file : listAll) {
        directory.deleteFile(file);
    }
    IndexOutput output = directory.createOutput("testing.test", new IOContext());
    output.writeInt(12345);
    output.close();
    directory.rename("testing.test", "testing.test.renamed");
    assertFalse(slowFileExists(directory, "testing.test"));
    assertTrue(slowFileExists(directory, "testing.test.renamed"));
    IndexInput input = directory.openInput("testing.test.renamed", new IOContext());
    assertEquals(12345, input.readInt());
    assertEquals(input.getFilePointer(), input.length());
    input.close();
    directory.deleteFile("testing.test.renamed");
    assertFalse(slowFileExists(directory, "testing.test.renamed"));
}
Also used: IOContext (org.apache.lucene.store.IOContext), IndexInput (org.apache.lucene.store.IndexInput), IndexOutput (org.apache.lucene.store.IndexOutput)
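
The rename tested above is the primitive that Lucene's commit protocol relies on. Here is a hedged sketch (file names illustrative, not from the project) of the usual write-to-temp-then-rename pattern:

String tmpName;
try (IndexOutput out = directory.createTempOutput("pending_segments", "tmp", IOContext.DEFAULT)) {
    tmpName = out.getName();
    // stand-in payload
    out.writeInt(42);
}
// make the bytes durable, then publish atomically under the final name
directory.sync(Collections.singleton(tmpName));
directory.rename(tmpName, "segments_demo");
directory.syncMetaData();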

Aggregations

IndexOutput (org.apache.lucene.store.IndexOutput): 182
Directory (org.apache.lucene.store.Directory): 79
IndexInput (org.apache.lucene.store.IndexInput): 76
RAMDirectory (org.apache.lucene.store.RAMDirectory): 36
FilterDirectory (org.apache.lucene.store.FilterDirectory): 34
CorruptIndexException (org.apache.lucene.index.CorruptIndexException): 27
ChecksumIndexInput (org.apache.lucene.store.ChecksumIndexInput): 27
BytesRef (org.apache.lucene.util.BytesRef): 26
IOException (java.io.IOException): 20
CorruptingIndexOutput (org.apache.lucene.store.CorruptingIndexOutput): 18
RAMFile (org.apache.lucene.store.RAMFile): 16
RAMOutputStream (org.apache.lucene.store.RAMOutputStream): 16
IndexFormatTooNewException (org.apache.lucene.index.IndexFormatTooNewException): 14
IndexFormatTooOldException (org.apache.lucene.index.IndexFormatTooOldException): 14
IOContext (org.apache.lucene.store.IOContext): 13
ArrayList (java.util.ArrayList): 11
BufferedChecksumIndexInput (org.apache.lucene.store.BufferedChecksumIndexInput): 11
RAMInputStream (org.apache.lucene.store.RAMInputStream): 11
NIOFSDirectory (org.apache.lucene.store.NIOFSDirectory): 10
NRTCachingDirectory (org.apache.lucene.store.NRTCachingDirectory): 10