Use of org.apache.nifi.provenance.index.EventIndexWriter in project nifi by apache.
The class TestPersistentProvenanceRepository, method testModifyIndexWhileSearching:
@Test(timeout = 10000)
public void testModifyIndexWhileSearching() throws IOException, InterruptedException, ParseException {
    assumeFalse(isWindowsEnvironment());

    final RepositoryConfiguration config = createConfiguration();
    config.setMaxRecordLife(30, TimeUnit.SECONDS);
    config.setMaxStorageCapacity(1024L * 1024L * 10);
    config.setMaxEventFileLife(500, TimeUnit.MILLISECONDS);
    config.setMaxEventFileCapacity(1024L * 1024L * 10);
    config.setSearchableFields(new ArrayList<>(SearchableFields.getStandardFields()));

    final CountDownLatch obtainIndexSearcherLatch = new CountDownLatch(2);
    repo = new PersistentProvenanceRepository(config, DEFAULT_ROLLOVER_MILLIS) {
        private CachingIndexManager wrappedManager = null;

        // Create an IndexManager that adds a delay before returning the Index Searcher.
        @Override
        protected synchronized CachingIndexManager getIndexManager() {
            if (wrappedManager == null) {
                final IndexManager mgr = super.getIndexManager();
                final Logger logger = LoggerFactory.getLogger("IndexManager");

                wrappedManager = new CachingIndexManager() {
                    final AtomicInteger indexSearcherCount = new AtomicInteger(0);

                    @Override
                    public EventIndexSearcher borrowIndexSearcher(File indexDir) throws IOException {
                        final EventIndexSearcher searcher = mgr.borrowIndexSearcher(indexDir);
                        final int idx = indexSearcherCount.incrementAndGet();
                        obtainIndexSearcherLatch.countDown();
                        // Sleep so that the index is updated while this searcher (and the one held
                        // by the second thread) is still checked out.
                        try {
                            if (idx == 1) {
                                Thread.sleep(3000L);
                            } else {
                                Thread.sleep(5000L);
                            }
                        } catch (InterruptedException e) {
                            throw new IOException("Interrupted", e);
                        }

                        logger.info("Releasing index searcher");
                        return searcher;
                    }

                    @Override
                    public EventIndexWriter borrowIndexWriter(File indexingDirectory) throws IOException {
                        return mgr.borrowIndexWriter(indexingDirectory);
                    }

                    @Override
                    public void close() throws IOException {
                        mgr.close();
                    }

                    @Override
                    public boolean removeIndex(File indexDirectory) {
                        mgr.removeIndex(indexDirectory);
                        return true;
                    }

                    @Override
                    public void returnIndexSearcher(EventIndexSearcher searcher) {
                        mgr.returnIndexSearcher(searcher);
                    }

                    @Override
                    public void returnIndexWriter(EventIndexWriter writer) {
                        mgr.returnIndexWriter(writer);
                    }
                };
            }

            return wrappedManager;
        }
    };
    repo.initialize(getEventReporter(), null, null, IdentifierLookup.EMPTY);

    final String uuid = "10000000-0000-0000-0000-000000000000";
    final Map<String, String> attributes = new HashMap<>();
    attributes.put("abc", "xyz");
    attributes.put("xyz", "abc");
    attributes.put("filename", "file-" + uuid);

    final ProvenanceEventBuilder builder = new StandardProvenanceEventRecord.Builder();
    builder.setEventTime(System.currentTimeMillis());
    builder.setEventType(ProvenanceEventType.RECEIVE);
    builder.setTransitUri("nifi://unit-test");
    attributes.put("uuid", uuid);
    builder.fromFlowFile(createFlowFile(3L, 3000L, attributes));
    builder.setComponentId("1234");
    builder.setComponentType("dummy processor");

    for (int i = 0; i < 10; i++) {
        builder.fromFlowFile(createFlowFile(i, 3000L, attributes));
        attributes.put("uuid", "00000000-0000-0000-0000-00000000000" + i);
        repo.registerEvent(builder.build());
    }
    repo.waitForRollover();

    // Perform a query. This will ensure that an IndexSearcher is created and cached.
    final Query query = new Query(UUID.randomUUID().toString());
    query.addSearchTerm(SearchTerms.newSearchTerm(SearchableFields.Filename, "file-*"));
    query.addSearchTerm(SearchTerms.newSearchTerm(SearchableFields.ComponentID, "12?4"));
    query.addSearchTerm(SearchTerms.newSearchTerm(SearchableFields.TransitURI, "nifi://*"));
    query.setMaxResults(100);

    // Run a query in a background thread. When this thread goes to obtain the IndexSearcher, it will have a 5 second delay.
    // That delay will occur as the main thread is updating the index. This should result in the search creating a new Index Reader
    // that can properly query the index.
    final int numThreads = 2;
    final CountDownLatch performSearchLatch = new CountDownLatch(numThreads);
    final Runnable searchRunnable = new Runnable() {
        @Override
        public void run() {
            QueryResult result;
            try {
                result = repo.queryEvents(query, createUser());
            } catch (IOException e) {
                e.printStackTrace();
                Assert.fail(e.toString());
                return;
            }

            System.out.println("Finished search: " + result);
            performSearchLatch.countDown();
        }
    };

    // Kick off the searcher threads
    for (int i = 0; i < numThreads; i++) {
        final Thread searchThread = new Thread(searchRunnable);
        searchThread.start();
    }

    // Wait until we've obtained the Index Searchers before modifying the index.
    obtainIndexSearcherLatch.await();

    // add more events to the repo
    for (int i = 0; i < 10; i++) {
        builder.fromFlowFile(createFlowFile(i, 3000L, attributes));
        attributes.put("uuid", "00000000-0000-0000-0000-00000000000" + i);
        repo.registerEvent(builder.build());
    }

    // Force a rollover to occur. This will modify the index.
    repo.rolloverWithLock(true);

    // Wait for the repository to roll over.
    repo.waitForRollover();

    // Wait for the searches to complete.
    performSearchLatch.await();
}
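The anonymous CachingIndexManager above overrides only borrowIndexSearcher, adding a sleep so that the index is rolled over while the searchers are still checked out; every other method simply delegates to the wrapped manager. Outside of a test, the borrow/return calls it wraps would normally be paired in a try/finally so a searcher is always handed back. The helper below is a minimal sketch of that pairing, using only the method signatures that appear in this section; the method name withIndexSearcher and the empty query body are illustrative, and imports are omitted as in the surrounding snippets.

// Illustrative helper (not part of NiFi): always pair borrowIndexSearcher with
// returnIndexSearcher so the manager can reuse or eventually close the underlying reader.
void withIndexSearcher(final IndexManager indexManager, final File indexDir) throws IOException {
    final EventIndexSearcher searcher = indexManager.borrowIndexSearcher(indexDir);
    try {
        // ... run the query against the borrowed searcher here ...
    } finally {
        indexManager.returnIndexSearcher(searcher);
    }
}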
Use of org.apache.nifi.provenance.index.EventIndexWriter in project nifi by apache.
The class TestEventIndexTask, method testIndexWriterCommittedWhenAppropriate:
@Test(timeout = 5000)
public void testIndexWriterCommittedWhenAppropriate() throws IOException, InterruptedException {
    final BlockingQueue<StoredDocument> docQueue = new LinkedBlockingQueue<>();
    final RepositoryConfiguration repoConfig = new RepositoryConfiguration();
    final File storageDir = new File("target/storage/TestEventIndexTask/1");
    repoConfig.addStorageDirectory("1", storageDir);
    final AtomicInteger commitCount = new AtomicInteger(0);

    // Mock out an IndexWriter and keep track of the number of events that are indexed.
    final IndexWriter indexWriter = Mockito.mock(IndexWriter.class);
    final EventIndexWriter eventIndexWriter = new LuceneEventIndexWriter(indexWriter, storageDir);
    final IndexManager indexManager = Mockito.mock(IndexManager.class);
    Mockito.when(indexManager.borrowIndexWriter(Mockito.any(File.class))).thenReturn(eventIndexWriter);
    final IndexDirectoryManager directoryManager = new IndexDirectoryManager(repoConfig);

    // Create an EventIndexTask and override the commit(IndexWriter) method so that we can keep track of how
    // many times the index writer gets committed.
    final EventIndexTask task = new EventIndexTask(docQueue, repoConfig, indexManager, directoryManager, 201, EventReporter.NO_OP) {
        @Override
        protected void commit(EventIndexWriter indexWriter) throws IOException {
            commitCount.incrementAndGet();
        }
    };

    // Create 4 threads, each one a daemon thread running the EventIndexTask.
    for (int i = 0; i < 4; i++) {
        final Thread t = new Thread(task);
        t.setDaemon(true);
        t.start();
    }
    assertEquals(0, commitCount.get());

    // Index 100 documents with a storage filename of "0.0.prov".
    for (int i = 0; i < 100; i++) {
        final Document document = new Document();
        document.add(new LongField(SearchableFields.EventTime.getSearchableFieldName(), System.currentTimeMillis(), Store.NO));
        final StorageSummary location = new StorageSummary(1L, "0.0.prov", "1", 0, 1000L, 1000L);
        final StoredDocument storedDoc = new StoredDocument(document, location);
        docQueue.add(storedDoc);
    }
    assertEquals(0, commitCount.get());
    // Index another 100 documents.
    for (int i = 0; i < 100; i++) {
        final Document document = new Document();
        document.add(new LongField(SearchableFields.EventTime.getSearchableFieldName(), System.currentTimeMillis(), Store.NO));
        final StorageSummary location = new StorageSummary(1L, "0.0.prov", "1", 0, 1000L, 1000L);
        final StoredDocument storedDoc = new StoredDocument(document, location);
        docQueue.add(storedDoc);
    }

    // Wait until we've indexed all 200 events.
    while (eventIndexWriter.getEventsIndexed() < 200) {
        Thread.sleep(10L);
    }

    // Wait a bit and make sure that we still haven't committed the index writer.
    Thread.sleep(100L);
    assertEquals(0, commitCount.get());

    // Add another document.
    final Document document = new Document();
    document.add(new LongField(SearchableFields.EventTime.getSearchableFieldName(), System.currentTimeMillis(), Store.NO));
    final StorageSummary location = new StorageSummary(1L, "0.0.prov", "1", 0, 1000L, 1000L);
    StoredDocument storedDoc = new StoredDocument(document, location);
    docQueue.add(storedDoc);

    // Wait until index writer is committed.
    while (commitCount.get() == 0) {
        Thread.sleep(10L);
    }
    assertEquals(1, commitCount.get());
    // Add another StoredDocument. The commit threshold has not been reached again,
    // so the writer should not be committed a second time.
    storedDoc = new StoredDocument(document, location);
    docQueue.add(storedDoc);
    Thread.sleep(100L);
    assertEquals(1, commitCount.get());
    // Add one more StoredDocument. The index writer should still not be committed again.
    storedDoc = new StoredDocument(document, location);
    docQueue.add(storedDoc);
    Thread.sleep(100L);
    assertEquals(1, commitCount.get());
}
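Two pieces of wiring are worth calling out: the EventIndexWriter here is just a LuceneEventIndexWriter wrapping a (mocked) Lucene IndexWriter, and the EventIndexTask pulls StoredDocuments off the queue and commits once enough events have accumulated (201 in this test, per the constructor argument). The sketch below shows the same wiring against a real SimpleIndexManager instead of the Mockito mock. It assumes SimpleIndexManager satisfies the IndexManager interface that EventIndexTask expects, which these snippets do not show directly; the storage path and the two consumer threads are arbitrary choices.

// Sketch only: EventIndexTask fed by real components rather than mocks. The assumption
// that SimpleIndexManager can stand in for the mocked IndexManager is not verified here.
final RepositoryConfiguration repoConfig = new RepositoryConfiguration();
repoConfig.addStorageDirectory("1", new File("target/storage/sketch/1"));

final BlockingQueue<StoredDocument> docQueue = new LinkedBlockingQueue<>();
final IndexManager indexManager = new SimpleIndexManager(repoConfig);
final IndexDirectoryManager directoryManager = new IndexDirectoryManager(repoConfig);

// Same constructor arguments as the test above; the 201 appears to control when commits happen.
final EventIndexTask task = new EventIndexTask(docQueue, repoConfig, indexManager, directoryManager, 201, EventReporter.NO_OP);
for (int i = 0; i < 2; i++) {
    final Thread indexThread = new Thread(task);
    indexThread.setDaemon(true);
    indexThread.start();
}
// StoredDocuments offered to docQueue are now indexed in the background and the
// writer is committed once the threshold is crossed.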
Use of org.apache.nifi.provenance.index.EventIndexWriter in project nifi by apache.
The class TestCachingIndexManager, method test:
@Test
public void test() throws IOException {
    // Create an IndexWriter and add a document to the index, then return the writer.
    // This gives us something that we can query.
    final EventIndexWriter writer = manager.borrowIndexWriter(indexDir);
    final Document doc = new Document();
    doc.add(new StringField("unit test", "true", Store.YES));
    writer.index(doc, 1000);
    manager.returnIndexWriter(writer);

    // Get an Index Searcher that we can use to query the index.
    final EventIndexSearcher cachedSearcher = manager.borrowIndexSearcher(indexDir);

    // Ensure that we get the expected results.
    assertCount(cachedSearcher, 1);

    // While we already have an Index Searcher, get a writer for the same index.
    // This will cause the Index Searcher to be marked as poisoned.
    final EventIndexWriter writer2 = manager.borrowIndexWriter(indexDir);

    // Obtain a new Index Searcher with the writer open. This Index Searcher should *NOT*
    // be the same as the previous searcher because the new one will be a Near-Real-Time Index Searcher
    // while the other is not.
    final EventIndexSearcher nrtSearcher = manager.borrowIndexSearcher(indexDir);
    assertNotSame(cachedSearcher, nrtSearcher);

    // Ensure that we get the expected query results.
    assertCount(nrtSearcher, 1);

    // Return the writer, so that there is no longer an active writer for the index.
    manager.returnIndexWriter(writer2);

    // Ensure that we still get the same result.
    assertCount(cachedSearcher, 1);
    manager.returnIndexSearcher(cachedSearcher);

    // Ensure that our near-real-time index searcher still gets the same result.
    assertCount(nrtSearcher, 1);
    manager.returnIndexSearcher(nrtSearcher);
}
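Stripped of the assertions, the flow this test exercises is: borrow a writer, index a document, return the writer, then borrow a searcher and query, with a second near-real-time searcher appearing once a writer is open again. The condensed sketch below keeps each borrow paired with a return in a finally block; it uses only calls that appear in this section and assumes manager and indexDir are set up as in the test.

// Sketch of the write-then-search flow, with borrow/return pairs kept in try/finally.
final EventIndexWriter writer = manager.borrowIndexWriter(indexDir);
try {
    final Document doc = new Document();
    doc.add(new StringField("unit test", "true", Store.YES));
    writer.index(doc, 1000);
} finally {
    manager.returnIndexWriter(writer);
}

final EventIndexSearcher searcher = manager.borrowIndexSearcher(indexDir);
try {
    // Query here; if a writer is open for the same directory, the borrowed searcher
    // is a near-real-time searcher, as the assertions above demonstrate.
} finally {
    manager.returnIndexSearcher(searcher);
}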
Use of org.apache.nifi.provenance.index.EventIndexWriter in project nifi by apache.
The class TestSimpleIndexManager, method testWriterCloseIfPreviouslyMarkedCloseable:
@Test
public void testWriterCloseIfPreviouslyMarkedCloseable() throws IOException {
    final AtomicInteger closeCount = new AtomicInteger(0);
    final SimpleIndexManager mgr = new SimpleIndexManager(new RepositoryConfiguration()) {
        @Override
        protected void close(IndexWriterCount count) throws IOException {
            closeCount.incrementAndGet();
        }
    };

    final File dir = new File("target/" + UUID.randomUUID().toString());
    final EventIndexWriter writer1 = mgr.borrowIndexWriter(dir);
    final EventIndexWriter writer2 = mgr.borrowIndexWriter(dir);
    assertTrue(writer1 == writer2);

    mgr.returnIndexWriter(writer1, true, true);
    assertEquals(0, closeCount.get());

    final EventIndexWriter[] writers = new EventIndexWriter[10];
    for (int i = 0; i < writers.length; i++) {
        writers[i] = mgr.borrowIndexWriter(dir);
        assertTrue(writers[i] == writer1);
    }

    for (int i = 0; i < writers.length; i++) {
        mgr.returnIndexWriter(writers[i], true, false);
        assertEquals(0, closeCount.get());
        assertEquals(1, mgr.getWriterCount());
    }

    // This should close the index writer even though 'false' is passed in
    // because the previous call marked the writer as closeable and this is
    // the last reference to the writer.
    mgr.returnIndexWriter(writer2, false, false);
    assertEquals(1, closeCount.get());
    assertEquals(0, mgr.getWriterCount());
}
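What this test pins down is reference counting: borrowing the same directory twice hands back the same writer, and the writer is only closed when the last reference is returned and some earlier return has marked it closeable. The condensed sketch below restates that sequence, assuming a SimpleIndexManager mgr and directory dir as created in the test above; the names used for the two boolean arguments ("commit" and "closeable") are inferred from the assertions, not taken from the API.

// Sketch of the reference-counting behavior asserted above. The boolean parameter
// names in the comments are inferred, not authoritative.
final EventIndexWriter first = mgr.borrowIndexWriter(dir);   // reference count becomes 1
final EventIndexWriter second = mgr.borrowIndexWriter(dir);  // same writer, count becomes 2

mgr.returnIndexWriter(first, true, true);    // count 2 -> 1; writer marked closeable
mgr.returnIndexWriter(second, false, false); // last reference returned: the writer is
                                             // closed because it was marked closeable earlier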
Use of org.apache.nifi.provenance.index.EventIndexWriter in project nifi by apache.
The class TestSimpleIndexManager, method testWriterLeftOpenIfNotCloseable:
@Test
public void testWriterLeftOpenIfNotCloseable() throws IOException {
    final AtomicInteger closeCount = new AtomicInteger(0);
    final SimpleIndexManager mgr = new SimpleIndexManager(new RepositoryConfiguration()) {
        @Override
        protected void close(IndexWriterCount count) throws IOException {
            closeCount.incrementAndGet();
        }
    };

    final File dir = new File("target/" + UUID.randomUUID().toString());
    final EventIndexWriter writer = mgr.borrowIndexWriter(dir);
    mgr.returnIndexWriter(writer, true, false);
    assertEquals(0, closeCount.get());
}
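For contrast with the test above, the sketch below uses the same closeCount override to show the case where the writer is expected to be closed on return: a single borrow followed by a return that marks the writer closeable. That this final return actually closes the writer is inferred from the two SimpleIndexManager tests in this section rather than asserted by them directly.

// Sketch (inferred behavior): one borrow, then a return that commits and marks the
// writer closeable, which should release the last reference and close the writer.
final AtomicInteger closeCount = new AtomicInteger(0);
final SimpleIndexManager mgr = new SimpleIndexManager(new RepositoryConfiguration()) {
    @Override
    protected void close(IndexWriterCount count) throws IOException {
        closeCount.incrementAndGet();
    }
};

final File dir = new File("target/" + UUID.randomUUID().toString());
final EventIndexWriter writer = mgr.borrowIndexWriter(dir);
mgr.returnIndexWriter(writer, true, true);
// Expected (inferred, not asserted above): closeCount is now 1 and mgr.getWriterCount() is 0.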