Use of org.apache.lucene.store.IndexInput in the lucene-solr project (Apache): class SolrSnapshotMetaDataManager, method loadFromSnapshotMetadataFile.
/**
 * Reads the snapshot meta-data information from the given {@link Directory}.
 *
 * <p>Scans {@code dir} for files named {@code SNAPSHOTS_PREFIX + <generation>},
 * loads the mapping from the highest generation encountered, installs it into
 * {@code nameToDetailsMapping}, deletes stale snapshot files, and advances
 * {@code nextWriteGen} past the loaded generation.
 *
 * @throws IOException if no snapshot file could be loaded and a read failed
 */
private synchronized void loadFromSnapshotMetadataFile() throws IOException {
  log.debug("Loading from snapshot metadata file...");
  long genLoaded = -1;
  IOException ioe = null;
  List<String> snapshotFiles = new ArrayList<>();
  for (String file : dir.listAll()) {
    if (file.startsWith(SNAPSHOTS_PREFIX)) {
      long gen = Long.parseLong(file.substring(SNAPSHOTS_PREFIX.length()));
      if (genLoaded == -1 || gen > genLoaded) {
        snapshotFiles.add(file);
        Map<String, SnapshotMetaData> snapshotMetaDataMapping = new HashMap<>();
        // try-with-resources replaces the old manual finally { in.close(); }:
        // previously an IOException thrown by close() escaped the method
        // immediately, bypassing the save-first-exception logic below. Now a
        // close failure is routed into the same catch and handled uniformly.
        try (IndexInput in = dir.openInput(file, IOContext.DEFAULT)) {
          CodecUtil.checkHeader(in, CODEC_NAME, VERSION_START, VERSION_START);
          int count = in.readVInt();
          for (int i = 0; i < count; i++) {
            String name = in.readString();
            String indexDirPath = in.readString();
            long commitGen = in.readVLong();
            snapshotMetaDataMapping.put(name, new SnapshotMetaData(name, indexDirPath, commitGen));
          }
        } catch (IOException ioe2) {
          // Save first exception & throw in the end
          if (ioe == null) {
            ioe = ioe2;
          }
        }
        // NOTE: as in the original, a partially-read mapping is still
        // installed; only a total failure (genLoaded == -1) rethrows.
        genLoaded = gen;
        nameToDetailsMapping.clear();
        nameToDetailsMapping.putAll(snapshotMetaDataMapping);
      }
    }
  }
  if (genLoaded == -1) {
    // Nothing was loaded...
    if (ioe != null) {
      // ... not for lack of trying:
      throw ioe;
    }
  } else {
    if (snapshotFiles.size() > 1) {
      // Remove any broken / old snapshot files:
      String curFileName = SNAPSHOTS_PREFIX + genLoaded;
      for (String file : snapshotFiles) {
        if (!curFileName.equals(file)) {
          IOUtils.deleteFilesIgnoringExceptions(dir, file);
        }
      }
    }
    nextWriteGen = 1 + genLoaded;
  }
}
Use of org.apache.lucene.store.IndexInput in the lucene-solr project (Apache): class BackupManager, method readCollectionState.
/**
 * Reads the meta-data information for the backed-up collection.
 *
 * @param backupLoc The base path used to store the backup data.
 * @param backupId The unique name for the backup.
 * @param collectionName The name of the collection whose meta-data is to be returned.
 * @return the meta-data information for the backed-up collection.
 * @throws IOException in case of errors.
 */
public DocCollection readCollectionState(URI backupLoc, String backupId, String collectionName) throws IOException {
  Objects.requireNonNull(collectionName);
  URI zkStateDir = repository.resolve(backupLoc, backupId, ZK_STATE_DIR);
  try (IndexInput is = repository.openInput(zkStateDir, COLLECTION_PROPS_FILE, IOContext.DEFAULT)) {
    // Buffering the whole file is fine here: the JSON state file is small.
    int len = (int) is.length();
    byte[] data = new byte[len];
    is.readBytes(data, 0, len);
    return ClusterState.load(-1, data, Collections.emptySet()).getCollection(collectionName);
  }
}
Use of org.apache.lucene.store.IndexInput in the lucene-solr project (Apache): class TestIndexWriterExceptions, method testSimulatedCorruptIndex1.
// Simulate a corrupt index by removing last byte of
// latest segments file and make sure we get an
// IOException trying to open the index:
public void testSimulatedCorruptIndex1() throws IOException {
  BaseDirectoryWrapper dir = newDirectory();
  // we are corrupting it!
  dir.setCheckIndexOnClose(false);
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
  // add 100 documents
  for (int i = 0; i < 100; i++) {
    addDoc(writer);
  }
  // close
  writer.close();
  long gen = SegmentInfos.getLastCommitGeneration(dir);
  assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
  String fileNameIn = SegmentInfos.getLastCommitSegmentsFileName(dir);
  String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1 + gen);
  // Copy all but the last byte into the next-generation segments file.
  // try-with-resources fixes a leak in the old version, where a failure in
  // the byte-copy loop left both the IndexInput and IndexOutput open;
  // copyBytes replaces the manual per-byte loop.
  try (IndexInput in = dir.openInput(fileNameIn, newIOContext(random()));
       IndexOutput out = dir.createOutput(fileNameOut, newIOContext(random()))) {
    out.copyBytes(in, in.length() - 1);
  }
  dir.deleteFile(fileNameIn);
  // Opening the truncated index must fail:
  expectThrows(Exception.class, () -> {
    DirectoryReader.open(dir);
  });
  dir.close();
}
Use of org.apache.lucene.store.IndexInput in the lucene-solr project (Apache): class TestIndexWriter, method testWithPendingDeletions.
// Verifies IndexWriter refuses to start on a directory that still has
// pending (not-yet-completed) file deletions. NOTE: statement order is
// load-bearing here — the IndexInput opened on segments_1 must remain open
// across w.close() so the simulated-Windows filesystem turns the delete of
// segments_1 into a *pending* delete instead of completing it.
public void testWithPendingDeletions() throws Exception {
// irony: currently we don't emulate windows well enough to work on windows!
assumeFalse("windows is not supported", Constants.WINDOWS);
Path path = createTempDir();
// Use WindowsFS to prevent open files from being deleted:
FileSystem fs = new WindowsFS(path.getFileSystem()).getFileSystem(URI.create("file:///"));
Path root = new FilterPath(path, fs);
// MMapDirectory doesn't work because it closes its file handles after mapping!
try (FSDirectory dir = new SimpleFSDirectory(root)) {
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
w.commit();
// Hold segments_1 open so its deletion (during the next commit's cleanup)
// can only be queued, not completed:
IndexInput in = dir.openInput("segments_1", IOContext.DEFAULT);
w.addDocument(new Document());
w.close();
assertTrue(dir.checkPendingDeletions());
// make sure we get NFSF if we try to delete and already-pending-delete file:
expectThrows(NoSuchFileException.class, () -> {
dir.deleteFile("segments_1");
});
// A new writer must refuse to initialize while deletes are still pending:
IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
});
assertTrue(expected.getMessage().contains("still has pending deleted files; cannot initialize IndexWriter"));
// Only now release the handle that was pinning segments_1:
in.close();
}
}
Use of org.apache.lucene.store.IndexInput in the lucene-solr project (Apache): class Lucene54DocValuesProducer, method getFixedBinary.
/**
 * Returns doc values over fixed-width binary records stored contiguously in
 * the data file: record {@code id} lives at byte offset {@code id * maxLength}
 * within the slice.
 *
 * <p>NOTE: a single scratch {@link BytesRef} (and its backing array) is reused
 * across {@code get()} calls, so callers must not hold onto returned refs.
 */
private LegacyBinaryDocValues getFixedBinary(FieldInfo field, final BinaryEntry bytes) throws IOException {
  final IndexInput slice = this.data.slice("fixed-binary", bytes.offset, bytes.count * bytes.maxLength);
  final BytesRef scratch = new BytesRef(bytes.maxLength);
  final byte[] dest = scratch.bytes;
  final int recordLen = scratch.length = bytes.maxLength;
  return new LongBinaryDocValues() {
    @Override
    public BytesRef get(long id) {
      try {
        slice.seek(id * recordLen);
        slice.readBytes(dest, 0, dest.length);
        return scratch;
      } catch (IOException e) {
        // The DocValues API does not declare IOException; surface as unchecked.
        throw new RuntimeException(e);
      }
    }
  };
}
Aggregations