Example use of org.apache.lucene.store.IndexOutput in the Apache lucene-solr project:
method testWritingAndReadingAFile of class HdfsDirectoryTest.
@Test
public void testWritingAndReadingAFile() throws IOException {
  // Start from an empty directory so the listAll() assertions below are meaningful.
  String[] listAll = directory.listAll();
  for (String file : listAll) {
    directory.deleteFile(file);
  }
  // Write a single int, then read it back. try-with-resources guarantees the
  // streams are closed even when an assertion or I/O call throws mid-test
  // (the original leaked the output/input on failure).
  try (IndexOutput output = directory.createOutput("testing.test", new IOContext())) {
    output.writeInt(12345);
  }
  try (IndexInput input = directory.openInput("testing.test", new IOContext())) {
    assertEquals(12345, input.readInt());
  }
  // Exactly one file must remain, and an int is 4 bytes on disk.
  listAll = directory.listAll();
  assertEquals(1, listAll.length);
  assertEquals("testing.test", listAll[0]);
  assertEquals(4, directory.fileLength("testing.test"));
  // A clone must read independently without disturbing its parent input.
  IndexInput input1 = directory.openInput("testing.test", new IOContext());
  try {
    IndexInput input2 = (IndexInput) input1.clone();
    assertEquals(12345, input2.readInt());
    input2.close();
    assertEquals(12345, input1.readInt());
  } finally {
    input1.close();
  }
  assertFalse(slowFileExists(directory, "testing.test.other"));
  assertTrue(slowFileExists(directory, "testing.test"));
  directory.deleteFile("testing.test");
  assertFalse(slowFileExists(directory, "testing.test"));
}
Example use of org.apache.lucene.store.IndexOutput in the Apache lucene-solr project:
method createFile of class HdfsDirectoryTest.
/**
 * Writes the same random byte sequence to {@code name} in both directories, so the
 * resulting HDFS file can later be compared byte-for-byte against the local FS copy.
 *
 * @param name  file name to create in both directories
 * @param fsDir local (reference) directory
 * @param hdfs  HDFS-backed directory under test
 * @throws IOException if either directory fails to create or write the file
 */
private void createFile(String name, Directory fsDir, HdfsDirectory hdfs) throws IOException {
  int writes = random.nextInt(MAX_NUMBER_OF_WRITES);
  // fileLength only caps the per-write buffer size below; it is not the final file size.
  int fileLength = random.nextInt(MAX_FILE_SIZE - MIN_FILE_SIZE) + MIN_FILE_SIZE;
  // try-with-resources closes both outputs even if a write throws; the original
  // leaked them on failure, which can mask the real cause in test reports.
  try (IndexOutput fsOutput = fsDir.createOutput(name, new IOContext());
       IndexOutput hdfsOutput = hdfs.createOutput(name, new IOContext())) {
    for (int i = 0; i < writes; i++) {
      byte[] buf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength)) + MIN_BUFFER_SIZE];
      random.nextBytes(buf);
      // offset < buf.length, so the nextInt bound below is always >= 1.
      int offset = random.nextInt(buf.length);
      int length = random.nextInt(buf.length - offset);
      // Mirror the identical slice into both files.
      fsOutput.writeBytes(buf, offset, length);
      hdfsOutput.writeBytes(buf, offset, length);
    }
  }
}
Example use of org.apache.lucene.store.IndexOutput in the Apache lucene-solr project:
method createFile of class BlockDirectoryTest.
/**
 * Writes the same random byte sequence to {@code name} in both directories, so the
 * block-directory copy can later be compared byte-for-byte against the local FS copy.
 *
 * @param name  file name to create in both directories
 * @param fsDir local (reference) directory
 * @param hdfs  directory under test
 * @throws IOException if either directory fails to create or write the file
 */
private void createFile(String name, Directory fsDir, Directory hdfs) throws IOException {
  int writes = random.nextInt(MAX_NUMBER_OF_WRITES);
  // fileLength only caps the per-write buffer size below; it is not the final file size.
  int fileLength = random.nextInt(MAX_FILE_SIZE - MIN_FILE_SIZE) + MIN_FILE_SIZE;
  // try-with-resources closes both outputs even if a write throws; the original
  // leaked them on failure, which can mask the real cause in test reports.
  try (IndexOutput fsOutput = fsDir.createOutput(name, IOContext.DEFAULT);
       IndexOutput hdfsOutput = hdfs.createOutput(name, IOContext.DEFAULT)) {
    for (int i = 0; i < writes; i++) {
      byte[] buf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength)) + MIN_BUFFER_SIZE];
      random.nextBytes(buf);
      // offset < buf.length, so the nextInt bound below is always >= 1.
      int offset = random.nextInt(buf.length);
      int length = random.nextInt(buf.length - offset);
      // Mirror the identical slice into both files.
      fsOutput.writeBytes(buf, offset, length);
      hdfsOutput.writeBytes(buf, offset, length);
    }
  }
}
Example use of org.apache.lucene.store.IndexOutput in the Apache lucene-solr project:
method write of class SimpleTextCompoundFormat.
@Override
public void write(Directory dir, SegmentInfo si, IOContext context) throws IOException {
  // Concatenates every file of the segment into a single plain-text compound
  // file, then appends a table of contents mapping each file name to its byte
  // range, followed by a fixed-width pointer to that table.
  String compoundName = IndexFileNames.segmentFileName(si.name, "", DATA_EXTENSION);
  int fileCount = si.files().size();
  String[] sortedNames = si.files().toArray(new String[fileCount]);
  Arrays.sort(sortedNames);
  long[] fileStarts = new long[fileCount];
  long[] fileEnds = new long[fileCount];
  BytesRefBuilder spare = new BytesRefBuilder();
  try (IndexOutput out = dir.createOutput(compoundName, context)) {
    int idx = 0;
    for (String fileName : sortedNames) {
      // Per-file header line, then the raw bytes of that file.
      SimpleTextUtil.write(out, HEADER);
      SimpleTextUtil.write(out, fileName, spare);
      SimpleTextUtil.writeNewline(out);
      fileStarts[idx] = out.getFilePointer();
      try (IndexInput source = dir.openInput(fileName, IOContext.READONCE)) {
        out.copyBytes(source, source.length());
      }
      fileEnds[idx] = out.getFilePointer();
      idx++;
    }
    // Table of contents: entry count, then one name/start/end triple per file.
    long tablePointer = out.getFilePointer();
    SimpleTextUtil.write(out, TABLE);
    SimpleTextUtil.write(out, Integer.toString(fileCount), spare);
    SimpleTextUtil.writeNewline(out);
    for (int i = 0; i < fileCount; i++) {
      SimpleTextUtil.write(out, TABLENAME);
      SimpleTextUtil.write(out, sortedNames[i], spare);
      SimpleTextUtil.writeNewline(out);
      SimpleTextUtil.write(out, TABLESTART);
      SimpleTextUtil.write(out, Long.toString(fileStarts[i]), spare);
      SimpleTextUtil.writeNewline(out);
      SimpleTextUtil.write(out, TABLEEND);
      SimpleTextUtil.write(out, Long.toString(fileEnds[i]), spare);
      SimpleTextUtil.writeNewline(out);
    }
    // The table position is padded via OFFSETPATTERN so a reader can find it
    // at a fixed distance from the end of the file.
    DecimalFormat tocFormat = new DecimalFormat(OFFSETPATTERN, DecimalFormatSymbols.getInstance(Locale.ROOT));
    SimpleTextUtil.write(out, TABLEPOS);
    SimpleTextUtil.write(out, tocFormat.format(tablePointer), spare);
    SimpleTextUtil.writeNewline(out);
  }
}
Example use of org.apache.lucene.store.IndexOutput in the Apache lucene-solr project:
method sort of class SimpleTextBKDWriter.
/**
 * Sorts all buffered points by the value of dimension {@code dim}, returning a
 * {@link PointWriter} over the sorted points. Uses an in-heap sort when the points
 * never spilled to disk, otherwise an {@link OfflineSorter} over the temp file.
 */
private PointWriter sort(int dim) throws IOException {
assert dim >= 0 && dim < numDims;
if (heapPointWriter != null) {
assert tempInput == null;
// We never spilled the incoming points to disk, so now we sort in heap:
HeapPointWriter sorted;
if (dim == 0) {
// First dim can re-use the current heap writer
sorted = heapPointWriter;
} else {
// Subsequent dims need a private copy
sorted = new HeapPointWriter((int) pointCount, (int) pointCount, packedBytesLength, longOrds, singleValuePerDoc);
sorted.copyFrom(heapPointWriter);
}
//long t0 = System.nanoTime();
sortHeapPointWriter(sorted, dim);
//long t1 = System.nanoTime();
//System.out.println("BKD: sort took " + ((t1-t0)/1000000.0) + " msec");
sorted.close();
return sorted;
} else {
// Offline sort:
assert tempInput != null;
// Byte offset of dimension `dim` inside each packed value.
final int offset = bytesPerDim * dim;
Comparator<BytesRef> cmp;
if (dim == numDims - 1) {
// in that case the bytes for the dimension and for the doc id are contiguous,
// so we don't need a branch
cmp = new BytesRefComparator(bytesPerDim + Integer.BYTES) {
@Override
protected int byteAt(BytesRef ref, int i) {
return ref.bytes[ref.offset + offset + i] & 0xff;
}
};
} else {
// Compare the dimension's bytes first; past bytesPerDim, fall through to the
// docID bytes stored after the full packed value (ties broken by docID).
cmp = new BytesRefComparator(bytesPerDim + Integer.BYTES) {
@Override
protected int byteAt(BytesRef ref, int i) {
if (i < bytesPerDim) {
return ref.bytes[ref.offset + offset + i] & 0xff;
} else {
return ref.bytes[ref.offset + packedBytesLength + i - bytesPerDim] & 0xff;
}
}
};
}
// Sort the temp file externally; writer/reader are overridden because entries
// are fixed-width (bytesPerDoc) with no per-record length prefix.
OfflineSorter sorter = new OfflineSorter(tempDir, tempFileNamePrefix + "_bkd" + dim, cmp, offlineSorterBufferMB, offlineSorterMaxTempFiles, bytesPerDoc, null, 0) {
/** We write/read fixed-byte-width file that {@link OfflinePointReader} can read. */
@Override
protected ByteSequencesWriter getWriter(IndexOutput out, long count) {
return new ByteSequencesWriter(out) {
@Override
public void write(byte[] bytes, int off, int len) throws IOException {
assert len == bytesPerDoc : "len=" + len + " bytesPerDoc=" + bytesPerDoc;
out.writeBytes(bytes, off, len);
}
};
}
/** We write/read fixed-byte-width file that {@link OfflinePointReader} can read. */
@Override
protected ByteSequencesReader getReader(ChecksumIndexInput in, String name) throws IOException {
return new ByteSequencesReader(in, name) {
// Reused across next() calls; callers must consume before the next call.
final BytesRef scratch = new BytesRef(new byte[bytesPerDoc]);
@Override
public BytesRef next() throws IOException {
if (in.getFilePointer() >= end) {
return null;
}
in.readBytes(scratch.bytes, 0, bytesPerDoc);
return scratch;
}
};
}
};
String name = sorter.sort(tempInput.getName());
return new OfflinePointWriter(tempDir, name, packedBytesLength, pointCount, longOrds, singleValuePerDoc);
}
}
Aggregations