Usage example of org.apache.lucene.store.IndexInput in the Elasticsearch project (by elastic): class InputStreamIndexInputTests, method testSingleReadTwoBytesLimit.
/**
 * Verifies that {@code InputStreamIndexInput} with a 2-byte limit exposes exactly
 * two bytes per stream and advances the shared underlying {@code IndexInput}, so
 * three consecutive 2-byte windows walk a 6-byte file ([1,1,1,2,2,2]) to EOF and a
 * fourth window at EOF reports nothing to read.
 */
public void testSingleReadTwoBytesLimit() throws IOException {
    RAMDirectory dir = new RAMDirectory();
    IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
    // Payload: three 1-bytes followed by three 2-bytes.
    for (int i = 0; i < 3; i++) {
        output.writeByte((byte) 1);
    }
    for (int i = 0; i < 3; i++) {
        output.writeByte((byte) 2);
    }
    output.close();
    IndexInput input = dir.openInput("test", IOContext.DEFAULT);
    assertThat(input.getFilePointer(), lessThan(input.length()));
    // Window 1: bytes [1, 1], then EOF of the window.
    InputStreamIndexInput is = new InputStreamIndexInput(input, 2);
    assertThat(is.actualSizeToRead(), equalTo(2L));
    assertThat(is.read(), equalTo(1));
    assertThat(is.read(), equalTo(1));
    assertThat(is.read(), equalTo(-1));
    assertThat(input.getFilePointer(), lessThan(input.length()));
    // Window 2 straddles the payload boundary: bytes [1, 2].
    is = new InputStreamIndexInput(input, 2);
    assertThat(is.actualSizeToRead(), equalTo(2L));
    assertThat(is.read(), equalTo(1));
    assertThat(is.read(), equalTo(2));
    assertThat(is.read(), equalTo(-1));
    assertThat(input.getFilePointer(), lessThan(input.length()));
    // Window 3: bytes [2, 2]; the underlying input is now at EOF.
    is = new InputStreamIndexInput(input, 2);
    assertThat(is.actualSizeToRead(), equalTo(2L));
    assertThat(is.read(), equalTo(2));
    assertThat(is.read(), equalTo(2));
    assertThat(is.read(), equalTo(-1));
    assertThat(input.getFilePointer(), equalTo(input.length()));
    // Window 4 at EOF: nothing left to read.
    is = new InputStreamIndexInput(input, 2);
    assertThat(is.actualSizeToRead(), equalTo(0L));
    assertThat(is.read(), equalTo(-1));
    // FIX: the original leaked both the open IndexInput and the directory.
    input.close();
    dir.close();
}
Usage example of org.apache.lucene.store.IndexInput in the Elasticsearch project (by elastic): class StoreTests, method testVerifyingIndexOutput.
// Streams a checksummed file byte-for-byte through Store.LuceneVerifyingIndexOutput and
// checks that (a) a faithful copy passes Store.verify and (b) appending random garbage
// afterwards makes verification fail with a corruption-style exception.
public void testVerifyingIndexOutput() throws IOException {
Directory dir = newDirectory();
IndexOutput output = dir.createOutput("foo.bar", IOContext.DEFAULT);
// Build a payload of 10-100 random realistic unicode strings, then a codec footer
// so the file carries a checksum that can be retrieved and verified.
int iters = scaledRandomIntBetween(10, 100);
for (int i = 0; i < iters; i++) {
BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
}
CodecUtil.writeFooter(output);
output.close();
IndexInput indexInput = dir.openInput("foo.bar", IOContext.DEFAULT);
// Grab the expected checksum, then rewind: retrieveChecksum moves the file pointer.
String checksum = Store.digestToString(CodecUtil.retrieveChecksum(indexInput));
indexInput.seek(0);
BytesRef ref = new BytesRef(scaledRandomIntBetween(1, 1024));
long length = indexInput.length();
IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo1.bar", length, checksum), dir.createOutput("foo1.bar", IOContext.DEFAULT));
// Copy the whole file into the verifying output, randomly mixing single-byte and
// bulk writes to exercise both writeByte and writeBytes verification paths.
while (length > 0) {
if (random().nextInt(10) == 0) {
verifyingOutput.writeByte(indexInput.readByte());
length--;
} else {
int min = (int) Math.min(length, ref.bytes.length);
indexInput.readBytes(ref.bytes, ref.offset, min);
verifyingOutput.writeBytes(ref.bytes, ref.offset, min);
length -= min;
}
}
// The uncorrupted copy must verify cleanly.
Store.verify(verifyingOutput);
// Appending extra data past the footer must be detected, either immediately…
try {
appendRandomData(verifyingOutput);
fail("should be a corrupted index");
} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
// ok
}
// …or at the latest on an explicit re-verification.
try {
Store.verify(verifyingOutput);
fail("should be a corrupted index");
} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
// ok
}
IOUtils.close(indexInput, verifyingOutput, dir);
}
Usage example of org.apache.lucene.store.IndexInput in the Elasticsearch project (by elastic): class StoreTests, method corruptFile.
/**
 * Copies {@code fileIn} to {@code fileOut} inside {@code dir}, XOR-flipping exactly one
 * randomly chosen byte so the copy no longer matches its recorded checksum.
 */
private void corruptFile(Directory dir, String fileIn, String fileOut) throws IOException {
    IndexInput input = dir.openInput(fileIn, IOContext.READONCE);
    IndexOutput output = dir.createOutput(fileOut, IOContext.DEFAULT);
    final long fileLength = input.length();
    final byte[] buffer = new byte[1024];
    // Absolute position of the single byte to corrupt.
    final long corruptAt = randomInt((int) fileLength - 1);
    long written = 0;
    while (written < fileLength) {
        final int chunk = (int) Math.min(input.length() - written, buffer.length);
        input.readBytes(buffer, 0, chunk);
        final long offsetInChunk = corruptAt - written;
        if (offsetInChunk >= 0 && offsetInChunk < chunk) {
            // x ^ 42 always differs from x (42 != 0), so the byte is guaranteed to change.
            buffer[(int) offsetInChunk] ^= 42;
        }
        output.writeBytes(buffer, chunk);
        written += chunk;
    }
    IOUtils.close(input, output);
}
Usage example of org.apache.lucene.store.IndexInput in the Elasticsearch project (by elastic): class StoreTests, method testNewChecksums.
// Indexes random documents with the default codec, then checks that the committed
// Store metadata snapshot carries per-file checksums matching CodecUtil's, records the
// current Lucene version, and includes a content hash for .si and segments_N files.
public void testNewChecksums() throws IOException {
final ShardId shardId = new ShardId("index", "_na_", 1);
DirectoryService directoryService = new LuceneManagedDirectoryService(random());
Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
// set default codec - all segments need checksums
IndexWriter writer = new IndexWriter(store.directory(), newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()));
// Index 1-100 documents with random stored/unstored fields and a doc value.
int docs = 1 + random().nextInt(100);
for (int i = 0; i < docs; i++) {
Document doc = new Document();
doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
writer.addDocument(doc);
}
// Randomly update a subset of the documents to create deletes/new segments.
if (random().nextBoolean()) {
for (int i = 0; i < docs; i++) {
if (random().nextBoolean()) {
Document doc = new Document();
doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
writer.updateDocument(new Term("id", "" + i), doc);
}
}
}
if (random().nextBoolean()) {
// flush
DirectoryReader.open(writer).close();
}
Store.MetadataSnapshot metadata;
// check before we committed
try {
store.getMetadata(null);
fail("no index present - expected exception");
} catch (IndexNotFoundException ex) {
// expected
}
writer.commit();
writer.close();
metadata = store.getMetadata(null);
assertThat(metadata.asMap().isEmpty(), is(false));
// Every file's metadata checksum must match the footer checksum CodecUtil reads back.
for (StoreFileMetaData meta : metadata) {
try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
assertThat("File: " + meta.name() + " has a different checksum", meta.checksum(), equalTo(checksum));
assertThat(meta.writtenBy(), equalTo(Version.LATEST));
// Segment-info and commit files additionally carry a non-empty content hash.
if (meta.name().endsWith(".si") || meta.name().startsWith("segments_")) {
assertThat(meta.hash().length, greaterThan(0));
}
}
}
assertConsistent(store, metadata);
TestUtil.checkIndex(store.directory());
assertDeleteContent(store, directoryService);
IOUtils.close(store);
}
Usage example of org.apache.lucene.store.IndexInput in the Lucene-Solr project (by apache): class HdfsDirectoryTest, method testWritingAndReadingAFile.
/**
 * Round-trips a single int through the HDFS directory: write it, read it back (also
 * through a clone sharing the same underlying file), check listing/length bookkeeping,
 * and verify the file disappears after deletion.
 */
@Test
public void testWritingAndReadingAFile() throws IOException {
    // Start from an empty directory so the later listAll assertion is deterministic.
    for (String file : directory.listAll()) {
        directory.deleteFile(file);
    }
    // FIX: try-with-resources so the streams are closed even when an assertion fails
    // mid-test; the original leaked every open input/output on failure.
    try (IndexOutput output = directory.createOutput("testing.test", new IOContext())) {
        output.writeInt(12345);
    }
    try (IndexInput input = directory.openInput("testing.test", new IOContext())) {
        assertEquals(12345, input.readInt());
    }
    String[] listAll = directory.listAll();
    assertEquals(1, listAll.length);
    assertEquals("testing.test", listAll[0]);
    assertEquals(4, directory.fileLength("testing.test"));
    try (IndexInput input1 = directory.openInput("testing.test", new IOContext())) {
        // A clone shares the file but has its own position; reading it must not
        // advance input1.
        IndexInput input2 = (IndexInput) input1.clone();
        assertEquals(12345, input2.readInt());
        // NOTE(review): per IndexInput#clone, clones need not be closed; kept to
        // match the original test's behavior.
        input2.close();
        assertEquals(12345, input1.readInt());
    }
    assertFalse(slowFileExists(directory, "testing.test.other"));
    assertTrue(slowFileExists(directory, "testing.test"));
    directory.deleteFile("testing.test");
    assertFalse(slowFileExists(directory, "testing.test"));
}
End of aggregated IndexInput usage examples.