Use of org.apache.lucene.store.IndexOutput in project lucene-solr by apache.
The class TestDirectMonotonic, method testSimple.
public void testSimple() throws IOException {
  Directory dir = newDirectory();
  final int blockShift = 2;
  List<Long> actualValues = Arrays.asList(1L, 2L, 5L, 7L, 8L, 100L);
  final int numValues = actualValues.size();
  final long dataLength;
  try (IndexOutput metaOut = dir.createOutput("meta", IOContext.DEFAULT);
      IndexOutput dataOut = dir.createOutput("data", IOContext.DEFAULT)) {
    DirectMonotonicWriter w = DirectMonotonicWriter.getInstance(metaOut, dataOut, numValues, blockShift);
    for (long v : actualValues) {
      w.add(v);
    }
    w.finish();
    dataLength = dataOut.getFilePointer();
  }
  try (IndexInput metaIn = dir.openInput("meta", IOContext.READONCE);
      IndexInput dataIn = dir.openInput("data", IOContext.DEFAULT)) {
    DirectMonotonicReader.Meta meta = DirectMonotonicReader.loadMeta(metaIn, numValues, blockShift);
    LongValues values = DirectMonotonicReader.getInstance(meta, dataIn.randomAccessSlice(0, dataLength));
    for (int i = 0; i < numValues; ++i) {
      final long v = values.get(i);
      assertEquals(actualValues.get(i).longValue(), v);
    }
  }
  dir.close();
}
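The test round-trips a small monotonically increasing sequence: DirectMonotonicWriter packs the values into blocks of 1 << blockShift = 4 entries across a "meta" and a "data" file, and DirectMonotonicReader then serves random access over a slice of the data file. Underneath it is the general IndexOutput discipline: values come back only in exactly the order and width they were written. A minimal standalone sketch of that write-then-read discipline, assuming an FSDirectory under /tmp (the path and file name are arbitrary choices for illustration):

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;

public class IndexOutputRoundTrip {
  public static void main(String[] args) throws IOException {
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/index-output-demo"))) {
      // Write a few primitives through IndexOutput.
      try (IndexOutput out = dir.createOutput("demo.bin", IOContext.DEFAULT)) {
        out.writeVInt(42); // variable-length int
        out.writeLong(123456789L); // fixed-width 8-byte long
        out.writeString("hello"); // length-prefixed UTF-8
      }
      // Read the same values back, in the same order and with the same widths.
      try (IndexInput in = dir.openInput("demo.bin", IOContext.READONCE)) {
        System.out.println(in.readVInt() + " " + in.readLong() + " " + in.readString());
      }
      dir.deleteFile("demo.bin");
    }
  }
}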
Use of org.apache.lucene.store.IndexOutput in project lucene-solr by apache.
The class Lucene60PointsWriter, method finish.
@Override
public void finish() throws IOException {
  if (finished) {
    throw new IllegalStateException("already finished");
  }
  finished = true;
  CodecUtil.writeFooter(dataOut);
  String indexFileName = IndexFileNames.segmentFileName(writeState.segmentInfo.name, writeState.segmentSuffix, Lucene60PointsFormat.INDEX_EXTENSION);
  // Write index file
  try (IndexOutput indexOut = writeState.directory.createOutput(indexFileName, writeState.context)) {
    CodecUtil.writeIndexHeader(indexOut, Lucene60PointsFormat.META_CODEC_NAME, Lucene60PointsFormat.INDEX_VERSION_CURRENT, writeState.segmentInfo.getId(), writeState.segmentSuffix);
    int count = indexFPs.size();
    indexOut.writeVInt(count);
    for (Map.Entry<String, Long> ent : indexFPs.entrySet()) {
      FieldInfo fieldInfo = writeState.fieldInfos.fieldInfo(ent.getKey());
      if (fieldInfo == null) {
        throw new IllegalStateException("wrote field=\"" + ent.getKey() + "\" but that field doesn't exist in FieldInfos");
      }
      indexOut.writeVInt(fieldInfo.number);
      indexOut.writeVLong(ent.getValue());
    }
    CodecUtil.writeFooter(indexOut);
  }
}
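finish() seals the points data file with a checksum footer, then writes a companion index file: a codec index header, a vInt field count, and for each field its FieldInfos number (vInt) followed by the file pointer (vLong) where that field's tree starts in the data file, all sealed with a footer of its own. A minimal sketch of the same header / vInt map / footer layout, assuming a Lucene 8.x classpath for ByteBuffersDirectory; the codec name "DemoFieldIndex" and the field-number-to-pointer entries are invented for illustration:

import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.StringHelper;

public class FieldPointerIndexDemo {
  static final String CODEC = "DemoFieldIndex"; // hypothetical codec name
  static final int VERSION = 0;

  public static void main(String[] args) throws IOException {
    byte[] segmentId = StringHelper.randomId(); // 16-byte id, like a real segment's
    Map<Integer, Long> fps = new LinkedHashMap<>();
    fps.put(0, 128L); // invented field number -> file pointer pairs
    fps.put(3, 4096L);
    try (Directory dir = new ByteBuffersDirectory()) {
      try (IndexOutput out = dir.createOutput("demo.idx", IOContext.DEFAULT)) {
        CodecUtil.writeIndexHeader(out, CODEC, VERSION, segmentId, "");
        out.writeVInt(fps.size());
        for (Map.Entry<Integer, Long> e : fps.entrySet()) {
          out.writeVInt(e.getKey()); // field number
          out.writeVLong(e.getValue()); // file pointer into the data file
        }
        CodecUtil.writeFooter(out);
      }
      // Reading side: validate the header, decode the map, verify the checksum.
      try (ChecksumIndexInput in = dir.openChecksumInput("demo.idx", IOContext.READONCE)) {
        CodecUtil.checkIndexHeader(in, CODEC, VERSION, VERSION, segmentId, "");
        int count = in.readVInt();
        for (int i = 0; i < count; i++) {
          System.out.println("field " + in.readVInt() + " -> fp " + in.readVLong());
        }
        CodecUtil.checkFooter(in);
      }
    }
  }
}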
Use of org.apache.lucene.store.IndexOutput in project lucene-solr by apache.
The class OfflineSorter, method sort.
/**
 * Sort input to a new temp file, returning its name.
 */
public String sort(String inputFileName) throws IOException {
  sortInfo = new SortInfo();
  long startMS = System.currentTimeMillis();
  List<Future<Partition>> segments = new ArrayList<>();
  int[] levelCounts = new int[1];
  // So we can remove any partially written temp files on exception:
  TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
  boolean success = false;
  try (ByteSequencesReader is = getReader(dir.openChecksumInput(inputFileName, IOContext.READONCE), inputFileName)) {
    while (true) {
      Partition part = readPartition(is);
      if (part.count == 0) {
        if (partitionsInRAM != null) {
          partitionsInRAM.release();
        }
        assert part.exhausted;
        break;
      }
      Callable<Partition> job = new SortPartitionTask(trackingDir, part);
      segments.add(exec.submit(job));
      sortInfo.tempMergeFiles++;
      sortInfo.lineCount += part.count;
      levelCounts[0]++;
      // Handle intermediate merges; we need a while loop to "cascade" the merge when necessary:
      int mergeLevel = 0;
      while (levelCounts[mergeLevel] == maxTempFiles) {
        mergePartitions(trackingDir, segments);
        if (mergeLevel + 2 > levelCounts.length) {
          levelCounts = ArrayUtil.grow(levelCounts, mergeLevel + 2);
        }
        levelCounts[mergeLevel + 1]++;
        levelCounts[mergeLevel] = 0;
        mergeLevel++;
      }
      if (part.exhausted) {
        break;
      }
    }
    // Merge all partitions down to 1 (basically a forceMerge(1)):
    while (segments.size() > 1) {
      mergePartitions(trackingDir, segments);
    }
    String result;
    if (segments.isEmpty()) {
      try (IndexOutput out = trackingDir.createTempOutput(tempFileNamePrefix, "sort", IOContext.DEFAULT)) {
        // Write empty file footer
        CodecUtil.writeFooter(out);
        result = out.getName();
      }
    } else {
      result = getPartition(segments.get(0)).fileName;
    }
    // We should be explicitly removing all intermediate files ourselves unless there is an exception:
    assert trackingDir.getCreatedFiles().size() == 1 && trackingDir.getCreatedFiles().contains(result);
    sortInfo.totalTimeMS = System.currentTimeMillis() - startMS;
    CodecUtil.checkFooter(is.in);
    success = true;
    return result;
  } catch (InterruptedException ie) {
    throw new ThreadInterruptedException(ie);
  } finally {
    if (success == false) {
      IOUtils.deleteFilesIgnoringExceptions(trackingDir, trackingDir.getCreatedFiles());
    }
  }
}
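sort() streams the input into partitions that are sorted concurrently on the executor, cascades an intermediate merge whenever maxTempFiles partitions pile up at one level, then merges everything down to a single temp file; the checkFooter call at the end verifies the input's checksum, and the tracking wrapper deletes every temp file on failure. A minimal sketch of driving OfflineSorter end to end, assuming the two-argument constructor with its default unsigned-byte-order comparator and a Lucene 8.x ByteBuffersDirectory; the "demo" prefix and sample strings are arbitrary:

import java.io.IOException;

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.OfflineSorter;

public class OfflineSorterDemo {
  public static void main(String[] args) throws IOException {
    try (Directory dir = new ByteBuffersDirectory()) {
      // Write unsorted input; sort() expects a codec footer at the end.
      IndexOutput unsorted = dir.createTempOutput("demo", "unsorted", IOContext.DEFAULT);
      try (OfflineSorter.ByteSequencesWriter w = new OfflineSorter.ByteSequencesWriter(unsorted)) {
        for (String s : new String[] { "pear", "apple", "orange" }) {
          w.write(new BytesRef(s));
        }
        CodecUtil.writeFooter(unsorted);
      } // closing the writer closes the underlying IndexOutput
      String sortedName = new OfflineSorter(dir, "demo").sort(unsorted.getName());
      // Stream the sorted sequences back: apple, orange, pear.
      try (OfflineSorter.ByteSequencesReader r = new OfflineSorter.ByteSequencesReader(
          dir.openChecksumInput(sortedName, IOContext.READONCE), sortedName)) {
        for (BytesRef ref = r.next(); ref != null; ref = r.next()) {
          System.out.println(ref.utf8ToString());
        }
      }
    }
  }
}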
Use of org.apache.lucene.store.IndexOutput in project lucene-solr by apache.
The class HdfsDirectoryFactoryTest, method testLocalityReporter.
@Test
public void testLocalityReporter() throws Exception {
  Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
  conf.set("dfs.permissions.enabled", "false");
  Random r = random();
  HdfsDirectoryFactory factory = new HdfsDirectoryFactory();
  SolrMetricManager metricManager = new SolrMetricManager();
  String registry = TestUtil.randomSimpleString(r, 2, 10);
  String scope = TestUtil.randomSimpleString(r, 2, 10);
  Map<String, String> props = new HashMap<String, String>();
  props.put(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr");
  props.put(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "false");
  props.put(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_ENABLE, "false");
  props.put(HdfsDirectoryFactory.LOCALITYMETRICS_ENABLED, "true");
  factory.init(new NamedList<>(props));
  factory.initializeMetrics(metricManager, registry, scope);
  // Get the metrics map for the locality bean.
  MetricsMap metrics = (MetricsMap) metricManager.registry(registry).getMetrics().get("OTHER." + scope + ".hdfsLocality");
  // We haven't done anything, so there should be no data.
  Map<String, Object> statistics = metrics.getValue();
  assertEquals("Saw bytes that were not written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL), 0L, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL));
  assertEquals("Counted bytes as local when none written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_RATIO), 0, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_RATIO));
  // Create a directory and a file.
  String path = HdfsTestUtil.getURI(dfsCluster) + "/solr3/";
  Directory dir = factory.create(path, NoLockFactory.INSTANCE, DirContext.DEFAULT);
  try (IndexOutput writer = dir.createOutput("output", null)) {
    writer.writeLong(42L);
  }
  final long long_bytes = Long.SIZE / Byte.SIZE;
  // No locality yet because the configured hostname matches no datanode.
  factory.setHost("bogus");
  statistics = metrics.getValue();
  assertEquals("Wrong number of total bytes counted: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL), long_bytes, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL));
  assertEquals("Wrong number of total blocks counted: " + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL), 1, statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL));
  assertEquals("Counted block as local when bad hostname set: " + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_LOCAL), 0, statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_LOCAL));
  // Set a matching hostname and check again.
  factory.setHost("127.0.0.1");
  statistics = metrics.getValue();
  assertEquals("Did not count block as local after setting hostname: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_LOCAL), long_bytes, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_LOCAL));
  factory.close();
}
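Writing the single long puts Long.SIZE / Byte.SIZE = 8 bytes into one HDFS block, which is why the totals report 8 bytes and 1 block; the "local" counters only move once the reporter's hostname matches a datanode, so "bogus" yields zero locality while "127.0.0.1" makes all 8 bytes local. A small sketch of inspecting the same gauge outside of assertions, using only calls that already appear in the test (registry and scope are whatever was passed to initializeMetrics):

import java.util.Map;

import org.apache.solr.metrics.MetricsMap;
import org.apache.solr.metrics.SolrMetricManager;

public class LocalityMetricsDump {
  /** Print every statistic exposed by the hdfsLocality gauge. */
  static void dump(SolrMetricManager metricManager, String registry, String scope) {
    MetricsMap metrics = (MetricsMap) metricManager.registry(registry)
        .getMetrics().get("OTHER." + scope + ".hdfsLocality");
    for (Map.Entry<String, Object> e : metrics.getValue().entrySet()) {
      System.out.println(e.getKey() + " = " + e.getValue());
    }
  }
}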
Use of org.apache.lucene.store.IndexOutput in project lucene-solr by apache.
The class HdfsDirectoryTest, method testRename.
public void testRename() throws IOException {
  String[] listAll = directory.listAll();
  for (String file : listAll) {
    directory.deleteFile(file);
  }
  IndexOutput output = directory.createOutput("testing.test", new IOContext());
  output.writeInt(12345);
  output.close();
  directory.rename("testing.test", "testing.test.renamed");
  assertFalse(slowFileExists(directory, "testing.test"));
  assertTrue(slowFileExists(directory, "testing.test.renamed"));
  IndexInput input = directory.openInput("testing.test.renamed", new IOContext());
  assertEquals(12345, input.readInt());
  assertEquals(input.getFilePointer(), input.length());
  input.close();
  directory.deleteFile("testing.test.renamed");
  assertFalse(slowFileExists(directory, "testing.test.renamed"));
}
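Directory.rename requires that the destination not already exist and guarantees that, once the destination becomes visible, it exposes the complete source content; that contract is what makes it the backbone of the usual write-to-temp-then-rename publish pattern (IndexWriter publishes segments_N this way). A minimal sketch of that pattern against any Directory honoring the contract; publish, finalName, and payload are hypothetical names introduced for illustration:

import java.io.IOException;
import java.util.Collections;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;

public class AtomicPublish {
  /** Write to a temp name, sync, then rename into place so readers never see a partial file. */
  static void publish(Directory dir, String finalName, int payload) throws IOException {
    String tempName;
    try (IndexOutput out = dir.createTempOutput(finalName, "tmp", IOContext.DEFAULT)) {
      out.writeInt(payload);
      tempName = out.getName();
    }
    dir.sync(Collections.singleton(tempName)); // durability before the rename
    dir.rename(tempName, finalName);
    dir.syncMetaData(); // persist the directory entry itself
  }
}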