Example usage of org.apache.lucene.store.IndexInput in the Apache lucene-solr project: class SolrCore, method getNewIndexDir.
/**
 * Returns the index directory as given in {@code index.properties}. If {@code index.properties}
 * exists in dataDir and contains an <i>index</i> property, the directory it names (relative to
 * dataDir) is returned; otherwise {@code dataDir + "index/"} is returned. Only called when
 * creating new IndexSearchers and IndexWriters; use {@link #getIndexDir()} to query the
 * currently active index directory.
 *
 * @return the index directory as given in index.properties, or the default {@code dataDir/index/}
 */
public String getNewIndexDir() {
  // Default used when index.properties is absent or unreadable.
  String result = dataDir + "index/";
  Properties p = new Properties();
  Directory dir = null;
  try {
    dir = getDirectoryFactory().get(getDataDir(), DirContext.META_DATA, getSolrConfig().indexConfig.lockType);
    IndexInput input;
    try {
      input = dir.openInput(IndexFetcher.INDEX_PROPERTIES, IOContext.DEFAULT);
    } catch (FileNotFoundException | NoSuchFileException e) {
      // A missing index.properties is the normal case: just use the default dir.
      input = null;
    }
    if (input != null) {
      final InputStream is = new PropertiesInputStream(input);
      try {
        p.load(new InputStreamReader(is, StandardCharsets.UTF_8));
        String s = p.getProperty("index");
        if (s != null && s.trim().length() > 0) {
          result = dataDir + s;
        }
      } catch (Exception e) {
        // Best effort: a corrupt properties file must not prevent startup,
        // so log and fall back to the default directory.
        // Parameterized SLF4J logging instead of string concatenation.
        log.error("Unable to load {}", IndexFetcher.INDEX_PROPERTIES, e);
      } finally {
        IOUtils.closeQuietly(is); // also closes the wrapped IndexInput
      }
    }
  } catch (IOException e) {
    SolrException.log(log, "", e);
  } finally {
    if (dir != null) {
      try {
        // Always hand the Directory back to the factory, even on failure.
        getDirectoryFactory().release(dir);
      } catch (IOException e) {
        SolrException.log(log, "", e);
      }
    }
  }
  if (!result.equals(lastNewIndexDir)) {
    // Parameterized logging also avoids building the message when debug is disabled.
    log.debug("New index directory detected: old={} new={}", lastNewIndexDir, result);
  }
  lastNewIndexDir = result;
  return result;
}
Example usage of org.apache.lucene.store.IndexInput in the Apache lucene-solr project: class BackupManager, method uploadToZk.
/**
 * Recursively uploads all files under {@code sourceDir} in the backup repository to ZooKeeper,
 * mirroring the directory structure beneath {@code destZkPath}.
 *
 * <p>Each regular file is read fully into memory before being written as a ZK node, which is
 * acceptable since configuration files are expected to be small. Directories whose names start
 * with "." are skipped.
 *
 * @param zkClient   client used to create the ZK nodes
 * @param sourceDir  repository directory to upload; must exist and be a directory
 * @param destZkPath ZK path under which the contents are mirrored
 * @throws IOException if the repository cannot be read, or if the ZooKeeper write fails
 *         ({@code KeeperException}/{@code InterruptedException} are wrapped)
 */
private void uploadToZk(SolrZkClient zkClient, URI sourceDir, String destZkPath) throws IOException {
  // FIX: Guava Preconditions uses printf-style %s placeholders, not SLF4J-style {},
  // so the previous "{}" templates were never substituted into the error message.
  Preconditions.checkArgument(repository.exists(sourceDir), "Path %s does not exist", sourceDir);
  Preconditions.checkArgument(repository.getPathType(sourceDir) == PathType.DIRECTORY,
      "Path %s is not a directory", sourceDir);
  for (String file : repository.listAll(sourceDir)) {
    String zkNodePath = destZkPath + "/" + file;
    URI path = repository.resolve(sourceDir, file);
    PathType t = repository.getPathType(path);
    switch (t) {
      case FILE:
      {
        try (IndexInput is = repository.openInput(sourceDir, file, IOContext.DEFAULT)) {
          // Reading the whole file at once is probably ok since config files should be small.
          byte[] arr = new byte[(int) is.length()];
          is.readBytes(arr, 0, (int) is.length());
          zkClient.makePath(zkNodePath, arr, true);
        } catch (InterruptedException e) {
          // Restore the interrupt status before wrapping, per standard practice.
          Thread.currentThread().interrupt();
          throw new IOException(e);
        } catch (KeeperException e) {
          throw new IOException(e);
        }
        break;
      }
      case DIRECTORY:
      {
        // Skip hidden directories (e.g. ".snapshot" style entries).
        if (!file.startsWith(".")) {
          uploadToZk(zkClient, path, zkNodePath);
        }
        break;
      }
      default:
        throw new IllegalStateException("Unknown path type " + t);
    }
  }
}
Example usage of org.apache.lucene.store.IndexInput in the Apache lucene-solr project: class SimplePrimaryNode, method handleFetchFiles.
/**
 * Called when another node (replica) wants to copy files from us.
 *
 * <p>Wire protocol: the replica first sends a vInt replica id, then one byte — 0 means the
 * caller already holds a CopyState, 1 means we must pull the latest CopyState and serialize it
 * back.  Then, in a loop, the replica sends a "done" byte (1 = finished, 0 = another file
 * request follows), a file name and a vLong start offset; we reply with the file length (vLong)
 * followed by the raw file bytes from that offset to EOF, in 16 KB chunks.
 *
 * @return true if all requested files were sent; false if any error occurred, in which case
 *         the socket has been closed
 */
private boolean handleFetchFiles(Random random, Socket socket, DataInput destIn, DataOutput destOut, BufferedOutputStream bos) throws IOException {
  Thread.currentThread().setName("send");
  int replicaID = destIn.readVInt();
  message("top: start fetch for R" + replicaID + " socket=" + socket);
  byte b = destIn.readByte();
  CopyState copyState;
  if (b == 0) {
    // Caller already has CopyState
    copyState = null;
  } else if (b == 1) {
    // Caller does not have CopyState; we pull the latest one:
    copyState = getCopyState();
    Thread.currentThread().setName("send-R" + replicaID + "-" + copyState.version);
  } else {
    // Protocol error:
    throw new IllegalArgumentException("invalid CopyState byte=" + b);
  }
  try {
    if (copyState != null) {
      // Serialize CopyState on the wire to the client:
      writeCopyState(copyState, destOut);
      bos.flush();
    }
    byte[] buffer = new byte[16384];
    int fileCount = 0;
    long totBytesSent = 0;
    while (true) {
      // One request per iteration: 1 terminates the loop, 0 means a file request follows.
      byte done = destIn.readByte();
      if (done == 1) {
        break;
      } else if (done != 0) {
        throw new IllegalArgumentException("expected 0 or 1 byte but got " + done);
      }
      // Name of the file the replica wants us to send:
      String fileName = destIn.readString();
      // Starting offset in the file we should start sending bytes from:
      long fpStart = destIn.readVLong();
      try (IndexInput in = dir.openInput(fileName, IOContext.DEFAULT)) {
        long len = in.length();
        //message("fetch " + fileName + ": send len=" + len);
        destOut.writeVLong(len);
        in.seek(fpStart);
        long upto = fpStart;
        while (upto < len) {
          int chunk = (int) Math.min(buffer.length, (len - upto));
          in.readBytes(buffer, 0, chunk);
          if (doFlipBitsDuringCopy) {
            // Test-only fault injection: rarely corrupt one bit of at most one chunk per file,
            // so downstream checksum verification can be exercised.
            if (random.nextInt(3000) == 17 && bitFlipped.contains(fileName) == false) {
              bitFlipped.add(fileName);
              message("file " + fileName + " to R" + replicaID + ": now randomly flipping a bit at byte=" + upto);
              int x = random.nextInt(chunk);
              int bit = random.nextInt(8);
              buffer[x] ^= 1 << bit;
            }
          }
          destOut.writeBytes(buffer, 0, chunk);
          upto += chunk;
          totBytesSent += chunk;
        }
      }
      fileCount++;
    }
    message("top: done fetch files for R" + replicaID + ": sent " + fileCount + " files; sent " + totBytesSent + " bytes");
  } catch (Throwable t) {
    // Any failure mid-transfer leaves the stream in an undefined state; just close the socket.
    message("top: exception during fetch: " + t.getMessage() + "; now close socket");
    socket.close();
    return false;
  } finally {
    if (copyState != null) {
      // Release the CopyState we pulled above (if any), even when the transfer failed.
      message("top: fetch: now release CopyState");
      releaseCopyState(copyState);
    }
  }
  return true;
}
Example usage of org.apache.lucene.store.IndexInput in the Apache lucene-solr project: class BaseCompoundFormatTestCase, method testManySubFiles.
/**
 * Ensures that reading a CFS with many sub-files never consumes more than one
 * underlying file descriptor: all sub-file inputs are slices of the single
 * compound file handle.
 */
public void testManySubFiles() throws IOException {
  final MockDirectoryWrapper dir = newMockFSDirectory(createTempDir("CFSManySubFiles"));
  final int subFileCount = atLeast(500);
  List<String> files = new ArrayList<>();
  SegmentInfo si = newSegmentInfo(dir, "_123");
  // Write each sub-file: a codec header, one distinguishing byte, and a footer.
  for (int i = 0; i < subFileCount; i++) {
    String name = "_123." + i;
    files.add(name);
    try (IndexOutput out = dir.createOutput(name, newIOContext(random()))) {
      CodecUtil.writeIndexHeader(out, "Foo", 0, si.getId(), "suffix");
      out.writeByte((byte) i);
      CodecUtil.writeFooter(out);
    }
  }
  assertEquals(0, dir.getFileHandleCount());
  si.setFiles(files);
  si.getCodec().compoundFormat().write(dir, si, IOContext.DEFAULT);
  Directory cfs = si.getCodec().compoundFormat().getCompoundReader(dir, si, IOContext.DEFAULT);
  // Open every sub-file; all opens must be satisfied by one shared descriptor.
  final IndexInput[] inputs = new IndexInput[subFileCount];
  for (int i = 0; i < subFileCount; i++) {
    inputs[i] = cfs.openInput("_123." + i, newIOContext(random()));
    CodecUtil.checkIndexHeader(inputs[i], "Foo", 0, 0, si.getId(), "suffix");
  }
  assertEquals(1, dir.getFileHandleCount());
  // Each sub-file's payload byte must round-trip through the compound reader.
  for (int i = 0; i < subFileCount; i++) {
    assertEquals((byte) i, inputs[i].readByte());
  }
  assertEquals(1, dir.getFileHandleCount());
  for (IndexInput input : inputs) {
    input.close();
  }
  cfs.close();
  dir.close();
}
Example usage of org.apache.lucene.store.IndexInput in the Apache lucene-solr project: class BaseCompoundFormatTestCase, method testReadPastEOF.
/**
 * Verifies that reading exactly up to the end of a CFS sub-file succeeds, while both a
 * single-byte read and a block read that run past EOF throw {@link IOException}.
 */
public void testReadPastEOF() throws IOException {
  Directory dir = newDirectory();
  Directory cr = createLargeCFS(dir);
  IndexInput in = cr.openInput("_123.f2", newIOContext(random()));
  byte[] scratch = new byte[100];
  // Consuming the final 10 bytes exactly is legal...
  in.seek(in.length() - 10);
  in.readBytes(scratch, 0, 10);
  // ...but one more byte past EOF must fail.
  expectThrows(IOException.class, in::readByte);
  // A block read spilling past EOF must fail as well.
  in.seek(in.length() - 10);
  expectThrows(IOException.class, () -> in.readBytes(scratch, 0, 50));
  in.close();
  cr.close();
  dir.close();
}
Aggregations