Use of org.apache.lucene.store.IOContext in project jackrabbit-oak by Apache: class IndexCopierTest, method cowFailureInCopy.
@Test
public void cowFailureInCopy() throws Exception {
    ExecutorService executorService = Executors.newFixedThreadPool(2);
    Directory baseDir = new CloseSafeDir();
    IndexDefinition defn = new IndexDefinition(root, builder.getNodeState(), "/foo");
    IndexCopier copier = new RAMIndexCopier(baseDir, executorService, getWorkDir());

    final Set<String> toFail = Sets.newHashSet();
    Directory remote = new CloseSafeDir() {
        @Override
        public IndexOutput createOutput(String name, IOContext context) throws IOException {
            // Simulate a remote failure while copying selected files
            if (toFail.contains(name)) {
                throw new RuntimeException("Failing copy for " + name);
            }
            return super.createOutput(name, context);
        }
    };

    final Directory local = copier.wrapForWrite(defn, remote, false, INDEX_DATA_CHILD_NAME);
    toFail.add("t2");
    byte[] t1 = writeFile(local, "t1");
    byte[] t2 = writeFile(local, "t2");

    // Closing the COW directory flushes pending copies; the injected
    // failure for t2 must surface as an IOException
    try {
        local.close();
        fail();
    } catch (IOException ignore) {
    }

    executorService.shutdown();
}
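The anonymous subclass above intercepts createOutput(String, IOContext) to inject failures. That pattern can be extracted into a reusable wrapper; the following is a minimal sketch assuming only lucene-core on the classpath. FailingDirectory is a hypothetical name, not part of the Oak test suite, and it throws IOException directly rather than the RuntimeException used in the test.

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;

// Hypothetical helper: fails createOutput for selected file names while
// forwarding the IOContext and all other operations to the wrapped Directory.
class FailingDirectory extends FilterDirectory {
    private final Set<String> toFail = new HashSet<String>();

    FailingDirectory(Directory in) {
        super(in);
    }

    void failOn(String name) {
        toFail.add(name);
    }

    @Override
    public IndexOutput createOutput(String name, IOContext context) throws IOException {
        if (toFail.contains(name)) {
            throw new IOException("Failing copy for " + name);
        }
        // The IOContext is passed through unchanged
        return super.createOutput(name, context);
    }
}

With such a wrapper the failure set can be shared across tests instead of re-declaring an anonymous subclass each time.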
Use of org.apache.lucene.store.IOContext in project jackrabbit-oak by Apache: class IndexCopierTest, method cowConcurrentAccess.
/**
 * Test the interaction between COR (copy-on-read) and COW (copy-on-write)
 * using the same underlying directory.
 */
@Test
public void cowConcurrentAccess() throws Exception {
    CollectingExecutor executor = new CollectingExecutor();
    ExecutorService executorService = Executors.newFixedThreadPool(2);
    executor.setForwardingExecutor(executorService);

    Directory baseDir = new CloseSafeDir();
    IndexDefinition defn = new IndexDefinition(root, builder.getNodeState(), indexPath);
    IndexCopier copier = new RAMIndexCopier(baseDir, executor, getWorkDir(), true);

    Directory remote = new CloseSafeDir();
    byte[] f1 = writeFile(remote, "f1");

    Directory cor1 = copier.wrapForRead(indexPath, defn, remote, INDEX_DATA_CHILD_NAME);
    readAndAssert(cor1, "f1", f1);
    cor1.close();

    final CountDownLatch pauseCopyLatch = new CountDownLatch(1);
    Directory remote2 = new FilterDirectory(remote) {
        @Override
        public IndexOutput createOutput(String name, IOContext context) throws IOException {
            try {
                pauseCopyLatch.await();
            } catch (InterruptedException ignore) {
            }
            return super.createOutput(name, context);
        }
    };

    // Start copying a file to remote via COW
    Directory cow1 = copier.wrapForWrite(defn, remote2, false, INDEX_DATA_CHILD_NAME);
    byte[] f2 = writeFile(cow1, "f2");

    // Before the copy reaches remote, delete f1 from remote, then open a COR
    // and close it so that it triggers the deletion of f1
    remote.deleteFile("f1");
    Directory cor2 = copier.wrapForRead(indexPath, defn, remote, INDEX_DATA_CHILD_NAME);

    // Ensure that the deletion task submitted to the executor gets processed immediately
    executor.enableImmediateExecution();
    cor2.close();
    executor.enableDelayedExecution();

    assertFalse(baseDir.fileExists("f1"));
    assertFalse("f2 should not have been copied to remote so far", remote.fileExists("f2"));
    assertTrue("f2 should exist", baseDir.fileExists("f2"));

    pauseCopyLatch.countDown();
    cow1.close();
    assertTrue("f2 should exist", remote.fileExists("f2"));

    executorService.shutdown();
}
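The write side is not the only place IOContext appears: Directory.openInput(String, IOContext) takes one as well, which is what the COR path exercises. As a rough illustration of how both paths flow through a wrapper, the sketch below logs the context kind of every read and write. ContextLoggingDirectory is a hypothetical name, not part of the test class.

import java.io.IOException;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;

// Hypothetical helper: logs the IOContext kind for every read and write,
// then delegates to the wrapped Directory.
class ContextLoggingDirectory extends FilterDirectory {
    ContextLoggingDirectory(Directory in) {
        super(in);
    }

    @Override
    public IndexOutput createOutput(String name, IOContext context) throws IOException {
        System.out.println("write " + name + " context=" + context.context);
        return super.createOutput(name, context);
    }

    @Override
    public IndexInput openInput(String name, IOContext context) throws IOException {
        System.out.println("read " + name + " context=" + context.context);
        return super.openInput(name, context);
    }
}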
Use of org.apache.lucene.store.IOContext in project jackrabbit-oak by Apache: class IndexCopierTest, method cowPoolClosedWithTaskInQueue.
@Test
public void cowPoolClosedWithTaskInQueue() throws Exception {
    ExecutorService executorService = Executors.newFixedThreadPool(2);
    Directory baseDir = new CloseSafeDir();
    IndexDefinition defn = new IndexDefinition(root, builder.getNodeState(), "/foo");
    IndexCopier copier = new RAMIndexCopier(baseDir, executorService, getWorkDir());

    final Set<String> toPause = Sets.newHashSet();
    final CountDownLatch pauseCopyLatch = new CountDownLatch(1);
    Directory remote = new CloseSafeDir() {
        @Override
        public IndexOutput createOutput(String name, IOContext context) throws IOException {
            // Block the copy of selected files until the latch is released
            if (toPause.contains(name)) {
                try {
                    pauseCopyLatch.await();
                } catch (InterruptedException ignore) {
                }
            }
            return super.createOutput(name, context);
        }
    };

    final Directory local = copier.wrapForWrite(defn, remote, false, INDEX_DATA_CHILD_NAME);
    toPause.add("t2");
    byte[] t1 = writeFile(local, "t1");
    byte[] t2 = writeFile(local, "t2");
    byte[] t3 = writeFile(local, "t3");
    byte[] t4 = writeFile(local, "t4");

    // Close the COW directory on a separate thread; it blocks on the paused copy
    final AtomicReference<Throwable> error = new AtomicReference<Throwable>();
    Thread closer = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                local.close();
            } catch (Throwable e) {
                e.printStackTrace();
                error.set(e);
            }
        }
    });
    closer.start();

    // Shut down the copier and the pool while the copy task is still queued
    copier.close();
    executorService.shutdown();
    executorService.awaitTermination(100, TimeUnit.MILLISECONDS);

    pauseCopyLatch.countDown();
    closer.join();
    assertNotNull("Close should have thrown an exception", error.get());
}
Use of org.apache.lucene.store.IOContext in project lucene-solr by Apache: class ReadersAndUpdates, method handleBinaryDVUpdates.
@SuppressWarnings("synthetic-access")
private void handleBinaryDVUpdates(FieldInfos infos, Map<String, BinaryDocValuesFieldUpdates> updates, TrackingDirectoryWrapper dir, DocValuesFormat dvFormat, final SegmentReader reader, Map<Integer, Set<String>> fieldFiles) throws IOException {
    for (Entry<String, BinaryDocValuesFieldUpdates> e : updates.entrySet()) {
        final String field = e.getKey();
        final BinaryDocValuesFieldUpdates fieldUpdates = e.getValue();

        final long nextDocValuesGen = info.getNextDocValuesGen();
        final String segmentSuffix = Long.toString(nextDocValuesGen, Character.MAX_RADIX);
        final long estUpdatesSize = fieldUpdates.ramBytesPerDoc() * info.info.maxDoc();
        final IOContext updatesContext = new IOContext(new FlushInfo(info.info.maxDoc(), estUpdatesSize));
        final FieldInfo fieldInfo = infos.fieldInfo(field);
        assert fieldInfo != null;
        fieldInfo.setDocValuesGen(nextDocValuesGen);
        final FieldInfos fieldInfos = new FieldInfos(new FieldInfo[] { fieldInfo });

        // separately also track which files were created for this gen
        final TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
        final SegmentWriteState state = new SegmentWriteState(null, trackingDir, info.info, fieldInfos, null, updatesContext, segmentSuffix);

        try (final DocValuesConsumer fieldsConsumer = dvFormat.fieldsConsumer(state)) {
            // write the binary updates to a new gen'd docvalues file
            fieldsConsumer.addBinaryField(fieldInfo, new EmptyDocValuesProducer() {
                @Override
                public BinaryDocValues getBinary(FieldInfo fieldInfoIn) throws IOException {
                    if (fieldInfoIn != fieldInfo) {
                        throw new IllegalArgumentException("wrong fieldInfo");
                    }
                    final int maxDoc = reader.maxDoc();
                    final BinaryDocValuesFieldUpdates.Iterator updatesIter = fieldUpdates.iterator();
                    updatesIter.reset();
                    final BinaryDocValues currentValues = reader.getBinaryDocValues(field);

                    // Merge sort of the original doc values with updated doc values:
                    return new BinaryDocValues() {

                        // merged docID
                        private int docIDOut = -1;

                        // docID from our original doc values
                        private int docIDIn = -1;

                        // docID from our updates
                        private int updateDocID = -1;

                        private BytesRef value;

                        @Override
                        public int docID() {
                            return docIDOut;
                        }

                        @Override
                        public int advance(int target) {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public boolean advanceExact(int target) throws IOException {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public long cost() {
                            return currentValues.cost();
                        }

                        @Override
                        public BytesRef binaryValue() {
                            return value;
                        }

                        @Override
                        public int nextDoc() throws IOException {
                            if (docIDIn == docIDOut) {
                                if (currentValues == null) {
                                    docIDIn = NO_MORE_DOCS;
                                } else {
                                    docIDIn = currentValues.nextDoc();
                                }
                            }
                            if (updateDocID == docIDOut) {
                                updateDocID = updatesIter.nextDoc();
                            }
                            if (docIDIn < updateDocID) {
                                // no update to this doc
                                docIDOut = docIDIn;
                                value = currentValues.binaryValue();
                            } else {
                                docIDOut = updateDocID;
                                if (docIDOut != NO_MORE_DOCS) {
                                    value = updatesIter.value();
                                }
                            }
                            return docIDOut;
                        }
                    };
                }
            });
        }

        info.advanceDocValuesGen();
        assert !fieldFiles.containsKey(fieldInfo.number);
        fieldFiles.put(fieldInfo.number, trackingDir.getCreatedFiles());
    }
}
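The IOContext construction used above is worth isolating: a FlushInfo carries the expected doc count and the estimated size of the file about to be written, which lets Directory implementations tune buffering for the write. Below is a minimal, self-contained sketch assuming only lucene-core; the file name and size numbers are illustrative placeholders, not values from ReadersAndUpdates.

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;

public class FlushContextExample {
    public static void main(String[] args) throws Exception {
        int maxDoc = 1000;     // hypothetical doc count
        long bytesPerDoc = 8;  // hypothetical per-doc size estimate
        // FlushInfo(numDocs, estimatedSegmentSize) describes the upcoming flush
        IOContext ctx = new IOContext(new FlushInfo(maxDoc, maxDoc * bytesPerDoc));
        try (Directory dir = new RAMDirectory()) {
            // The context travels with the createOutput call, just as
            // updatesContext does in handleBinaryDVUpdates above
            try (IndexOutput out = dir.createOutput("example.dat", ctx)) {
                out.writeLong(42L);
            }
        }
    }
}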
Use of org.apache.lucene.store.IOContext in project lucene-solr by Apache: class ReadersAndUpdates, method writeFieldInfosGen.
private Set<String> writeFieldInfosGen(FieldInfos fieldInfos, Directory dir, DocValuesFormat dvFormat, FieldInfosFormat infosFormat) throws IOException {
    final long nextFieldInfosGen = info.getNextFieldInfosGen();
    final String segmentSuffix = Long.toString(nextFieldInfosGen, Character.MAX_RADIX);
    // we write approximately that many bytes (based on Lucene46DVF):
    // HEADER + FOOTER: 40
    // 90 bytes per-field (over estimating long name and attributes map)
    final long estInfosSize = 40 + 90 * fieldInfos.size();
    final IOContext infosContext = new IOContext(new FlushInfo(info.info.maxDoc(), estInfosSize));
    // separately also track which files were created for this gen
    final TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
    infosFormat.write(trackingDir, info.info, segmentSuffix, fieldInfos, infosContext);
    info.advanceFieldInfosGen();
    return trackingDir.getCreatedFiles();
}