Use of org.apache.nifi.processor.FlowFileFilter in project nifi by apache:
the class TestWriteAheadFlowFileRepository, method testUpdatePerformance.
@Test
@Ignore("Intended only for local performance testing before/after making changes")
public void testUpdatePerformance() throws IOException, InterruptedException {
    final FlowFileQueue queue = new FlowFileQueue() {
        @Override
        public String getIdentifier() {
            return "4444";
        }
        @Override
        public List<FlowFilePrioritizer> getPriorities() {
            return null;
        }
        @Override
        public SwapSummary recoverSwappedFlowFiles() {
            return null;
        }
        @Override
        public void purgeSwapFiles() {
        }
        @Override
        public int getSwapFileCount() {
            return 0;
        }
        @Override
        public void setPriorities(List<FlowFilePrioritizer> newPriorities) {
        }
        @Override
        public void setBackPressureObjectThreshold(long maxQueueSize) {
        }
        @Override
        public long getBackPressureObjectThreshold() {
            return 0;
        }
        @Override
        public void setBackPressureDataSizeThreshold(String maxDataSize) {
        }
        @Override
        public String getBackPressureDataSizeThreshold() {
            return null;
        }
        @Override
        public QueueSize size() {
            return null;
        }
        @Override
        public boolean isEmpty() {
            return false;
        }
        @Override
        public boolean isActiveQueueEmpty() {
            return false;
        }
        @Override
        public QueueSize getUnacknowledgedQueueSize() {
            return null;
        }
        @Override
        public QueueSize getActiveQueueSize() {
            return size();
        }
        @Override
        public QueueSize getSwapQueueSize() {
            return null;
        }
        @Override
        public void acknowledge(FlowFileRecord flowFile) {
        }
        @Override
        public void acknowledge(Collection<FlowFileRecord> flowFiles) {
        }
        @Override
        public boolean isAllActiveFlowFilesPenalized() {
            return false;
        }
        @Override
        public boolean isAnyActiveFlowFilePenalized() {
            return false;
        }
        @Override
        public boolean isFull() {
            return false;
        }
        @Override
        public void put(FlowFileRecord file) {
        }
        @Override
        public void putAll(Collection<FlowFileRecord> files) {
        }
        @Override
        public FlowFileRecord poll(Set<FlowFileRecord> expiredRecords) {
            return null;
        }
        @Override
        public List<FlowFileRecord> poll(int maxResults, Set<FlowFileRecord> expiredRecords) {
            return null;
        }
        @Override
        public long drainQueue(Queue<FlowFileRecord> sourceQueue, List<FlowFileRecord> destination, int maxResults, Set<FlowFileRecord> expiredRecords) {
            return 0;
        }
        @Override
        public List<FlowFileRecord> poll(FlowFileFilter filter, Set<FlowFileRecord> expiredRecords) {
            return null;
        }
        @Override
        public String getFlowFileExpiration() {
            return null;
        }
        @Override
        public int getFlowFileExpiration(TimeUnit timeUnit) {
            return 0;
        }
        @Override
        public void setFlowFileExpiration(String flowExpirationPeriod) {
        }
        @Override
        public DropFlowFileStatus dropFlowFiles(String requestIdentifier, String requestor) {
            return null;
        }
        @Override
        public DropFlowFileStatus getDropFlowFileStatus(String requestIdentifier) {
            return null;
        }
        @Override
        public DropFlowFileStatus cancelDropFlowFileRequest(String requestIdentifier) {
            return null;
        }
        @Override
        public ListFlowFileStatus listFlowFiles(String requestIdentifier, int maxResults) {
            return null;
        }
        @Override
        public ListFlowFileStatus getListFlowFileStatus(String requestIdentifier) {
            return null;
        }
        @Override
        public ListFlowFileStatus cancelListFlowFileRequest(String requestIdentifier) {
            return null;
        }
        @Override
        public FlowFileRecord getFlowFile(String flowFileUuid) throws IOException {
            return null;
        }
        @Override
        public void verifyCanList() throws IllegalStateException {
        }
    };
    final int numPartitions = 16;
    final int numThreads = 8;
    final int totalUpdates = 160_000_000;
    final int batchSize = 10;
    final Path path = Paths.get("target/minimal-locking-repo");
    deleteRecursively(path.toFile());
    assertTrue(path.toFile().mkdirs());
    final ResourceClaimManager claimManager = new StandardResourceClaimManager();
    final RepositoryRecordSerdeFactory serdeFactory = new RepositoryRecordSerdeFactory(claimManager);
    final WriteAheadRepository<RepositoryRecord> repo = new MinimalLockingWriteAheadLog<>(path, numPartitions, serdeFactory, null);
    final Collection<RepositoryRecord> initialRecs = repo.recoverRecords();
    assertTrue(initialRecs.isEmpty());
    final int updateCountPerThread = totalUpdates / numThreads;
    final Thread[] threads = new Thread[numThreads];
    for (int j = 0; j < 2; j++) {
        for (int i = 0; i < numThreads; i++) {
            final Thread t = new Thread(new Runnable() {
                @Override
                public void run() {
                    final List<RepositoryRecord> records = new ArrayList<>();
                    final int numBatches = updateCountPerThread / batchSize;
                    final MockFlowFile baseFlowFile = new MockFlowFile(0L);
                    for (int i = 0; i < numBatches; i++) {
                        records.clear();
                        for (int k = 0; k < batchSize; k++) {
                            final FlowFileRecord flowFile = new MockFlowFile(i % 100_000, baseFlowFile);
                            final String uuid = flowFile.getAttribute("uuid");
                            final StandardRepositoryRecord record = new StandardRepositoryRecord(null, flowFile);
                            record.setDestination(queue);
                            final Map<String, String> updatedAttrs = Collections.singletonMap("uuid", uuid);
                            record.setWorking(flowFile, updatedAttrs);
                            records.add(record);
                        }
                        try {
                            repo.update(records, false);
                        } catch (IOException e) {
                            e.printStackTrace();
                            Assert.fail(e.toString());
                        }
                    }
                }
            });
            t.setDaemon(true);
            threads[i] = t;
        }
        final long start = System.nanoTime();
        for (final Thread t : threads) {
            t.start();
        }
        for (final Thread t : threads) {
            t.join();
        }
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        if (j == 0) {
            System.out.println(millis + " ms to insert " + updateCountPerThread * numThreads + " updates using " + numPartitions + " partitions and " + numThreads + " threads, *as a warmup!*");
        } else {
            System.out.println(millis + " ms to insert " + updateCountPerThread * numThreads + " updates using " + numPartitions + " partitions and " + numThreads + " threads");
        }
    }
}
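The FlowFileQueue stub above only has to satisfy the interface, so its poll(FlowFileFilter, Set) override simply returns null; the selection logic always lives in the FlowFileFilter callback itself. As a hedged, illustrative sketch of what a real implementation of this interface looks like (the byte-budget rule and the class name SizeBudgetFilter are assumptions for illustration, not code from the NiFi test), a filter that accepts FlowFiles until a size budget is exhausted could be written as:

import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.FlowFileFilter;

// Hypothetical filter: accept FlowFiles until a total byte budget is reached,
// then stop pulling further FlowFiles from the queue.
public class SizeBudgetFilter implements FlowFileFilter {
    private final long maxBytes;
    private long acceptedBytes = 0L;

    public SizeBudgetFilter(final long maxBytes) {
        this.maxBytes = maxBytes;
    }

    @Override
    public FlowFileFilterResult filter(final FlowFile flowFile) {
        if (acceptedBytes + flowFile.getSize() <= maxBytes) {
            acceptedBytes += flowFile.getSize();
            return FlowFileFilterResult.ACCEPT_AND_CONTINUE;
        }
        // Budget exceeded: leave this and all remaining FlowFiles on the queue.
        return FlowFileFilterResult.REJECT_AND_TERMINATE;
    }
}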
Use of org.apache.nifi.processor.FlowFileFilter in project kylo by Teradata:
the class SetSavepoint, method getNextFlowFile.
/**
 * Returns the next available FlowFile in the queue that is not in a waiting state.
 *
 * @param context       the process context
 * @param session       the process session
 * @param controller    the savepoint controller
 * @param provider      the savepoint provider
 * @param pvSavepointId the savepoint id property value
 * @return the first FlowFile not in a waiting savepoint state
 */
private Optional<FlowFile> getNextFlowFile(ProcessContext context, ProcessSession session, SavepointController controller, SavepointProvider provider, PropertyValue pvSavepointId) {
    long expirationDuration = context.getProperty(EXPIRATION_DURATION).asTimePeriod(TimeUnit.MILLISECONDS);
    FlowFileFilter flowFileFilter = null;
    try {
        Optional<String> nextFlowFile = controller.getNextFlowFile(getIdentifier());
        flowFileFilter = new FindFirstFlowFileFilter(nextFlowFile, expirationDuration, controller);
        return session.get(flowFileFilter).stream().findFirst();
    } catch (CacheNotInitializedException e) {
        CacheInitializingFilter filter = new CacheInitializingFilter(pvSavepointId, controller, provider, expirationDuration);
        return filter.initializeAndGetNextFlowfile(session);
    }
}
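FindFirstFlowFileFilter and CacheInitializingFilter are Kylo-specific classes whose sources are not shown here. As a rough sketch of the underlying pattern only (pick out a single target FlowFile by its uuid attribute and leave everything else on the queue), a hypothetical filter might look like the following; the attribute name, matching rule, and class name are assumptions, not Kylo's actual logic:

import java.util.Optional;
import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.FlowFileFilter;

// Hypothetical filter: accept only the FlowFile whose uuid matches the target
// (or the first one offered when no target is set), then stop scanning the queue.
public class MatchSingleUuidFilter implements FlowFileFilter {
    private final Optional<String> targetUuid;

    public MatchSingleUuidFilter(final Optional<String> targetUuid) {
        this.targetUuid = targetUuid;
    }

    @Override
    public FlowFileFilterResult filter(final FlowFile flowFile) {
        if (!targetUuid.isPresent() || targetUuid.get().equals(flowFile.getAttribute("uuid"))) {
            return FlowFileFilterResult.ACCEPT_AND_TERMINATE;
        }
        // Not the FlowFile we are looking for; keep it queued and keep scanning.
        return FlowFileFilterResult.REJECT_AND_CONTINUE;
    }
}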
Use of org.apache.nifi.processor.FlowFileFilter in project nifi by apache:
the class TestStandardFlowFileQueue, method testBackPressureAfterPollFilter.
@Test
public void testBackPressureAfterPollFilter() throws InterruptedException {
    queue.setBackPressureObjectThreshold(10);
    queue.setFlowFileExpiration("10 millis");
    for (int i = 0; i < 9; i++) {
        queue.put(new TestFlowFile());
        assertFalse(queue.isFull());
    }
    queue.put(new TestFlowFile());
    assertTrue(queue.isFull());
    Thread.sleep(100L);
    final FlowFileFilter filter = new FlowFileFilter() {
        @Override
        public FlowFileFilterResult filter(final FlowFile flowFile) {
            return FlowFileFilterResult.REJECT_AND_CONTINUE;
        }
    };
    final Set<FlowFileRecord> expiredRecords = new HashSet<>();
    final List<FlowFileRecord> polled = queue.poll(filter, expiredRecords);
    assertTrue(polled.isEmpty());
    assertEquals(10, expiredRecords.size());
    assertFalse(queue.isFull());
    assertTrue(queue.isEmpty());
    assertTrue(queue.isActiveQueueEmpty());
}
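Because the expiration is set to "10 millis" and the test sleeps for 100 ms before polling, every queued FlowFile has aged out by the time poll(filter, expiredRecords) runs: nothing reaches the polled list, all ten records land in expiredRecords, and draining them is what clears the back pressure that isFull() reported. Since FlowFileFilter has a single abstract method, the anonymous class above can also be written as a lambda with the same behavior (assuming FlowFileFilterResult is imported, as it is in this test class):

final FlowFileFilter filter = flowFile -> FlowFileFilterResult.REJECT_AND_CONTINUE;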
Use of org.apache.nifi.processor.FlowFileFilter in project nifi by apache:
the class TestStandardProcessSession, method testRoundRobinOnSessionGetWithFilter.
@Test
@SuppressWarnings("unchecked")
public void testRoundRobinOnSessionGetWithFilter() {
    final List<Connection> connList = new ArrayList<>();
    final Connection conn1 = createConnection();
    final Connection conn2 = createConnection();
    connList.add(conn1);
    connList.add(conn2);
    final FlowFileRecord flowFileRecord = new StandardFlowFileRecord.Builder().id(1000L).addAttribute("uuid", "12345678-1234-1234-1234-123456789012").entryDate(System.currentTimeMillis()).build();
    flowFileQueue.put(flowFileRecord);
    flowFileQueue.put(flowFileRecord);
    when(connectable.getIncomingConnections()).thenReturn(connList);
    final FlowFileFilter filter = ff -> FlowFileFilterResult.ACCEPT_AND_TERMINATE;
    session.get(filter);
    session.get(filter);
    verify(conn1, times(1)).poll(any(FlowFileFilter.class), any(Set.class));
    verify(conn2, times(1)).poll(any(FlowFileFilter.class), any(Set.class));
}
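Outside of tests, the same lambda form is typically handed to ProcessSession.get(FlowFileFilter) from a processor's onTrigger. A minimal hedged sketch of such a call is below; the 1 KB size cutoff is an arbitrary assumption for illustration, and FlowFileFilterResult is assumed to be imported as in the test above:

// Pull the small FlowFiles (< 1 KB) available at the head of the incoming
// queues in a single call; larger ones are skipped but remain queued.
final List<FlowFile> smallFiles = session.get(ff ->
        ff.getSize() < 1024
                ? FlowFileFilterResult.ACCEPT_AND_CONTINUE
                : FlowFileFilterResult.REJECT_AND_CONTINUE);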
Use of org.apache.nifi.processor.FlowFileFilter in project nifi by apache:
the class StandardProcessSession, method expireFlowFiles.
public void expireFlowFiles() {
    final Set<FlowFileRecord> expired = new HashSet<>();
    final FlowFileFilter filter = new FlowFileFilter() {
        @Override
        public FlowFileFilterResult filter(final FlowFile flowFile) {
            return FlowFileFilterResult.REJECT_AND_CONTINUE;
        }
    };
    for (final Connection conn : context.getConnectable().getIncomingConnections()) {
        do {
            expired.clear();
            conn.getFlowFileQueue().poll(filter, expired);
            removeExpired(expired, conn);
        } while (!expired.isEmpty());
    }
}
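Here the filter rejects every FlowFile, so each poll(filter, expired) call selects nothing and only collects FlowFiles that have already expired. The do-while loop keeps polling each incoming connection until a pass turns up no more expired records, at which point removeExpired has been applied to everything on that connection that aged out.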