Use of org.apache.nifi.controller.queue.FlowFileQueue in project nifi by apache: the class StandardConnectionDAO, method deleteFlowFileListingRequest.
@Override
public ListFlowFileStatus deleteFlowFileListingRequest(String connectionId, String listingRequestId) {
    final Connection connection = locateConnection(connectionId);
    final FlowFileQueue queue = connection.getFlowFileQueue();

    // Cancelling the listing request is how the deletion is carried out; a null status means
    // the queue has no listing request with the given id.
    final ListFlowFileStatus listFlowFileStatus = queue.cancelListFlowFileRequest(listingRequestId);
    if (listFlowFileStatus == null) {
        throw new ResourceNotFoundException(String.format("Unable to find listing request with id '%s'.", listingRequestId));
    }

    return listFlowFileStatus;
}
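On the FlowFileQueue side, the listing lifecycle this DAO method relies on is a request/status pair. A minimal sketch using only the interface methods shown later in this section, assuming a FlowFileQueue instance named queue; the request id and maxResults value are arbitrary, illustrative choices:

final String requestId = UUID.randomUUID().toString();
final ListFlowFileStatus started = queue.listFlowFiles(requestId, 100);          // start the listing
final ListFlowFileStatus progress = queue.getListFlowFileStatus(requestId);      // poll for progress
final ListFlowFileStatus cancelled = queue.cancelListFlowFileRequest(requestId); // what the DAO calls on delete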
Use of org.apache.nifi.controller.queue.FlowFileQueue in project nifi by apache: the class TestWriteAheadFlowFileRepository, method testResourceClaimsIncremented.
@Test
public void testResourceClaimsIncremented() throws IOException {
    final ResourceClaimManager claimManager = new StandardResourceClaimManager();

    final TestQueueProvider queueProvider = new TestQueueProvider();
    final Connection connection = Mockito.mock(Connection.class);
    when(connection.getIdentifier()).thenReturn("1234");
    when(connection.getDestination()).thenReturn(Mockito.mock(Connectable.class));

    final FlowFileSwapManager swapMgr = new MockFlowFileSwapManager();
    final FlowFileQueue queue = new StandardFlowFileQueue("1234", connection, null, null, claimManager, null, swapMgr, null, 10000);
    when(connection.getFlowFileQueue()).thenReturn(queue);
    queueProvider.addConnection(connection);

    final ResourceClaim resourceClaim1 = claimManager.newResourceClaim("container", "section", "1", false, false);
    final ContentClaim claim1 = new StandardContentClaim(resourceClaim1, 0L);

    final ResourceClaim resourceClaim2 = claimManager.newResourceClaim("container", "section", "2", false, false);
    final ContentClaim claim2 = new StandardContentClaim(resourceClaim2, 0L);

    // resource claims' counts should be updated for both the swapped-out FlowFile and the non-swapped-out FlowFile
    try (final WriteAheadFlowFileRepository repo = new WriteAheadFlowFileRepository(NiFiProperties.createBasicNiFiProperties(null, null))) {
        repo.initialize(claimManager);
        repo.loadFlowFiles(queueProvider, -1L);

        // Create a Repository Record that indicates that a FlowFile was created
        final FlowFileRecord flowFile1 = new StandardFlowFileRecord.Builder()
            .id(1L)
            .addAttribute("uuid", "11111111-1111-1111-1111-111111111111")
            .contentClaim(claim1)
            .build();
        final StandardRepositoryRecord rec1 = new StandardRepositoryRecord(queue);
        rec1.setWorking(flowFile1);
        rec1.setDestination(queue);

        // Create a Record that we can swap out
        final FlowFileRecord flowFile2 = new StandardFlowFileRecord.Builder()
            .id(2L)
            .addAttribute("uuid", "11111111-1111-1111-1111-111111111112")
            .contentClaim(claim2)
            .build();
        final StandardRepositoryRecord rec2 = new StandardRepositoryRecord(queue);
        rec2.setWorking(flowFile2);
        rec2.setDestination(queue);

        final List<RepositoryRecord> records = new ArrayList<>();
        records.add(rec1);
        records.add(rec2);
        repo.updateRepository(records);

        final String swapLocation = swapMgr.swapOut(Collections.singletonList(flowFile2), queue);
        repo.swapFlowFilesOut(Collections.singletonList(flowFile2), queue, swapLocation);
    }

    final ResourceClaimManager recoveryClaimManager = new StandardResourceClaimManager();
    try (final WriteAheadFlowFileRepository repo = new WriteAheadFlowFileRepository(NiFiProperties.createBasicNiFiProperties(null, null))) {
        repo.initialize(recoveryClaimManager);
        final long largestId = repo.loadFlowFiles(queueProvider, 0L);

        // largest ID known is 1 because this doesn't take into account the FlowFiles that have been swapped out
        assertEquals(1, largestId);
    }

    // resource claim 1 will have a single claimant count while resource claim 2 will have no claimant counts
    // because resource claim 2 is referenced only by FlowFiles that are swapped out.
    assertEquals(1, recoveryClaimManager.getClaimantCount(resourceClaim1));
    assertEquals(0, recoveryClaimManager.getClaimantCount(resourceClaim2));

    final SwapSummary summary = queue.recoverSwappedFlowFiles();
    assertNotNull(summary);
    assertEquals(2, summary.getMaxFlowFileId().intValue());
    assertEquals(new QueueSize(1, 0L), summary.getQueueSize());

    final List<ResourceClaim> swappedOutClaims = summary.getResourceClaims();
    assertNotNull(swappedOutClaims);
    assertEquals(1, swappedOutClaims.size());
    assertEquals(claim2.getResourceClaim(), swappedOutClaims.get(0));
}
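The claimant counts asserted above are maintained by the ResourceClaimManager. A minimal sketch of that bookkeeping, assuming the manager exposes increment/decrement counterparts to the getClaimantCount call used in the test (those two method names are assumptions, not taken from this snippet):

final ResourceClaimManager mgr = new StandardResourceClaimManager();
final ResourceClaim claim = mgr.newResourceClaim("container", "section", "1", false, false);

mgr.incrementClaimantCount(claim);   // a live FlowFile starts referencing the claim's content
// getClaimantCount(claim) would now be 1
mgr.decrementClaimantCount(claim);   // the reference goes away, e.g. the FlowFile is swapped out
// getClaimantCount(claim) would now be 0, which is what the test expects for resourceClaim2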
Use of org.apache.nifi.controller.queue.FlowFileQueue in project nifi by apache: the class TestWriteAheadFlowFileRepository, method testUpdatePerformance.
@Test
@Ignore("Intended only for local performance testing before/after making changes")
public void testUpdatePerformance() throws IOException, InterruptedException {
    // Bare-bones FlowFileQueue stub: only getIdentifier() matters for this benchmark,
    // so every other method returns a default value.
    final FlowFileQueue queue = new FlowFileQueue() {
        @Override
        public String getIdentifier() {
            return "4444";
        }
        @Override
        public List<FlowFilePrioritizer> getPriorities() {
            return null;
        }
        @Override
        public SwapSummary recoverSwappedFlowFiles() {
            return null;
        }
        @Override
        public void purgeSwapFiles() {
        }
        @Override
        public int getSwapFileCount() {
            return 0;
        }
        @Override
        public void setPriorities(List<FlowFilePrioritizer> newPriorities) {
        }
        @Override
        public void setBackPressureObjectThreshold(long maxQueueSize) {
        }
        @Override
        public long getBackPressureObjectThreshold() {
            return 0;
        }
        @Override
        public void setBackPressureDataSizeThreshold(String maxDataSize) {
        }
        @Override
        public String getBackPressureDataSizeThreshold() {
            return null;
        }
        @Override
        public QueueSize size() {
            return null;
        }
        @Override
        public boolean isEmpty() {
            return false;
        }
        @Override
        public boolean isActiveQueueEmpty() {
            return false;
        }
        @Override
        public QueueSize getUnacknowledgedQueueSize() {
            return null;
        }
        @Override
        public QueueSize getActiveQueueSize() {
            return size();
        }
        @Override
        public QueueSize getSwapQueueSize() {
            return null;
        }
        @Override
        public void acknowledge(FlowFileRecord flowFile) {
        }
        @Override
        public void acknowledge(Collection<FlowFileRecord> flowFiles) {
        }
        @Override
        public boolean isAllActiveFlowFilesPenalized() {
            return false;
        }
        @Override
        public boolean isAnyActiveFlowFilePenalized() {
            return false;
        }
        @Override
        public boolean isFull() {
            return false;
        }
        @Override
        public void put(FlowFileRecord file) {
        }
        @Override
        public void putAll(Collection<FlowFileRecord> files) {
        }
        @Override
        public FlowFileRecord poll(Set<FlowFileRecord> expiredRecords) {
            return null;
        }
        @Override
        public List<FlowFileRecord> poll(int maxResults, Set<FlowFileRecord> expiredRecords) {
            return null;
        }
        @Override
        public long drainQueue(Queue<FlowFileRecord> sourceQueue, List<FlowFileRecord> destination, int maxResults, Set<FlowFileRecord> expiredRecords) {
            return 0;
        }
        @Override
        public List<FlowFileRecord> poll(FlowFileFilter filter, Set<FlowFileRecord> expiredRecords) {
            return null;
        }
        @Override
        public String getFlowFileExpiration() {
            return null;
        }
        @Override
        public int getFlowFileExpiration(TimeUnit timeUnit) {
            return 0;
        }
        @Override
        public void setFlowFileExpiration(String flowExpirationPeriod) {
        }
        @Override
        public DropFlowFileStatus dropFlowFiles(String requestIdentifier, String requestor) {
            return null;
        }
        @Override
        public DropFlowFileStatus getDropFlowFileStatus(String requestIdentifier) {
            return null;
        }
        @Override
        public DropFlowFileStatus cancelDropFlowFileRequest(String requestIdentifier) {
            return null;
        }
        @Override
        public ListFlowFileStatus listFlowFiles(String requestIdentifier, int maxResults) {
            return null;
        }
        @Override
        public ListFlowFileStatus getListFlowFileStatus(String requestIdentifier) {
            return null;
        }
        @Override
        public ListFlowFileStatus cancelListFlowFileRequest(String requestIdentifier) {
            return null;
        }
        @Override
        public FlowFileRecord getFlowFile(String flowFileUuid) throws IOException {
            return null;
        }
        @Override
        public void verifyCanList() throws IllegalStateException {
        }
    };
    final int numPartitions = 16;
    final int numThreads = 8;
    final int totalUpdates = 160_000_000;
    final int batchSize = 10;

    final Path path = Paths.get("target/minimal-locking-repo");
    deleteRecursively(path.toFile());
    assertTrue(path.toFile().mkdirs());

    final ResourceClaimManager claimManager = new StandardResourceClaimManager();
    final RepositoryRecordSerdeFactory serdeFactory = new RepositoryRecordSerdeFactory(claimManager);
    final WriteAheadRepository<RepositoryRecord> repo = new MinimalLockingWriteAheadLog<>(path, numPartitions, serdeFactory, null);

    final Collection<RepositoryRecord> initialRecs = repo.recoverRecords();
    assertTrue(initialRecs.isEmpty());

    final int updateCountPerThread = totalUpdates / numThreads;
    final Thread[] threads = new Thread[numThreads];

    // Two passes: the first (j == 0) is a warmup, the second is the measured run.
    for (int j = 0; j < 2; j++) {
        for (int i = 0; i < numThreads; i++) {
            final Thread t = new Thread(new Runnable() {
                @Override
                public void run() {
                    final List<RepositoryRecord> records = new ArrayList<>();
                    final int numBatches = updateCountPerThread / batchSize;
                    final MockFlowFile baseFlowFile = new MockFlowFile(0L);

                    for (int i = 0; i < numBatches; i++) {
                        records.clear();
                        for (int k = 0; k < batchSize; k++) {
                            final FlowFileRecord flowFile = new MockFlowFile(i % 100_000, baseFlowFile);
                            final String uuid = flowFile.getAttribute("uuid");

                            final StandardRepositoryRecord record = new StandardRepositoryRecord(null, flowFile);
                            record.setDestination(queue);

                            final Map<String, String> updatedAttrs = Collections.singletonMap("uuid", uuid);
                            record.setWorking(flowFile, updatedAttrs);

                            records.add(record);
                        }

                        try {
                            repo.update(records, false);
                        } catch (IOException e) {
                            e.printStackTrace();
                            Assert.fail(e.toString());
                        }
                    }
                }
            });
            t.setDaemon(true);
            threads[i] = t;
        }

        final long start = System.nanoTime();
        for (final Thread t : threads) {
            t.start();
        }
        for (final Thread t : threads) {
            t.join();
        }
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);

        if (j == 0) {
            System.out.println(millis + " ms to insert " + updateCountPerThread * numThreads + " updates using " + numPartitions + " partitions and " + numThreads + " threads, *as a warmup!*");
        } else {
            System.out.println(millis + " ms to insert " + updateCountPerThread * numThreads + " updates using " + numPartitions + " partitions and " + numThreads + " threads");
        }
    }
}
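A shorter alternative to the hand-written stub above is a Mockito mock with only the identifier stubbed, which is the pattern the swap-serializer tests below use. A minimal sketch, assuming (as the stub itself suggests) that only getIdentifier() is consulted by the records written in this benchmark:

final FlowFileQueue queue = Mockito.mock(FlowFileQueue.class);
Mockito.when(queue.getIdentifier()).thenReturn("4444");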
Use of org.apache.nifi.controller.queue.FlowFileQueue in project nifi by apache: the class TestSchemaSwapSerializerDeserializer, method testRoundTripSerializeDeserializeFullSwapFile.
@Test
public void testRoundTripSerializeDeserializeFullSwapFile() throws IOException, InterruptedException {
    final ResourceClaimManager resourceClaimManager = new StandardResourceClaimManager();

    final List<FlowFileRecord> toSwap = new ArrayList<>(10000);
    final Map<String, String> attrs = new HashMap<>();
    long size = 0L;
    for (int i = 0; i < 10000; i++) {
        attrs.put("i", String.valueOf(i));
        final FlowFileRecord ff = new MockFlowFile(attrs, i, resourceClaimManager);
        toSwap.add(ff);
        size += i;
    }

    final FlowFileQueue flowFileQueue = Mockito.mock(FlowFileQueue.class);
    Mockito.when(flowFileQueue.getIdentifier()).thenReturn("87bb99fe-412c-49f6-a441-d1b0af4e20b4");

    final String swapLocation = "target/testRoundTrip.swap";
    final File swapFile = new File(swapLocation);
    Files.deleteIfExists(swapFile.toPath());

    final SwapSerializer serializer = new SchemaSwapSerializer();
    try (final OutputStream fos = new FileOutputStream(swapFile);
         final OutputStream out = new BufferedOutputStream(fos)) {
        serializer.serializeFlowFiles(toSwap, flowFileQueue, swapLocation, out);
    }

    final SwapContents contents;
    final SwapDeserializer deserializer = new SchemaSwapDeserializer();
    try (final FileInputStream fis = new FileInputStream(swapFile);
         final InputStream bufferedIn = new BufferedInputStream(fis);
         final DataInputStream dis = new DataInputStream(bufferedIn)) {
        contents = deserializer.deserializeFlowFiles(dis, swapLocation, flowFileQueue, resourceClaimManager);
    }

    final SwapSummary swapSummary = contents.getSummary();
    assertEquals(10000, swapSummary.getQueueSize().getObjectCount());
    assertEquals(size, swapSummary.getQueueSize().getByteCount());
    assertEquals(9999, swapSummary.getMaxFlowFileId().intValue());
    assertEquals(10000, contents.getFlowFiles().size());

    int counter = 0;
    for (final FlowFileRecord flowFile : contents.getFlowFiles()) {
        final int i = counter++;
        assertEquals(String.valueOf(i), flowFile.getAttribute("i"));
        assertEquals(i, flowFile.getSize());
    }
}
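The same serialize/deserialize pairing can be wrapped in a small helper for ad-hoc round trips. A minimal sketch using only the types exercised above; the helper name, the in-memory byte-array streams, and the swap location string are illustrative additions (the java.io stream classes are assumed to be imported):

// Illustrative helper: round-trips FlowFiles through the schema-based swap format in memory.
static SwapContents roundTrip(final List<FlowFileRecord> flowFiles, final FlowFileQueue queue,
        final ResourceClaimManager claimManager) throws IOException {
    final String swapLocation = "in-memory.swap"; // passed through to the (de)serializer; no file is created here
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    new SchemaSwapSerializer().serializeFlowFiles(flowFiles, queue, swapLocation, baos);

    try (final DataInputStream dis = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()))) {
        return new SchemaSwapDeserializer().deserializeFlowFiles(dis, swapLocation, queue, claimManager);
    }
}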
Use of org.apache.nifi.controller.queue.FlowFileQueue in project nifi by apache: the class TestSchemaSwapSerializerDeserializer, method testWritePerformance.
@Test
@Ignore("For manual testing, in order to ensure that changes do not negatively impact performance")
public void testWritePerformance() throws IOException, InterruptedException {
    final ResourceClaimManager resourceClaimManager = new StandardResourceClaimManager();

    final List<FlowFileRecord> toSwap = new ArrayList<>(10000);
    final Map<String, String> attrs = new HashMap<>();
    for (int i = 0; i < 10000; i++) {
        attrs.put("i", String.valueOf(i));
        final FlowFileRecord ff = new MockFlowFile(attrs, i, resourceClaimManager);
        toSwap.add(ff);
    }

    final FlowFileQueue flowFileQueue = Mockito.mock(FlowFileQueue.class);
    Mockito.when(flowFileQueue.getIdentifier()).thenReturn("87bb99fe-412c-49f6-a441-d1b0af4e20b4");

    final String swapLocation = "target/testRoundTrip.swap";
    final int iterations = 1000;

    final long start = System.nanoTime();
    final SwapSerializer serializer = new SchemaSwapSerializer();
    for (int i = 0; i < iterations; i++) {
        try (final OutputStream out = new NullOutputStream()) {
            serializer.serializeFlowFiles(toSwap, flowFileQueue, swapLocation, out);
        }
    }
    final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
    System.out.println("Wrote " + iterations + " Swap Files in " + millis + " millis");
}
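A matching read-side measurement can be sketched the same way: serialize once into a byte array, then time repeated deserialization. The in-memory buffer and the reuse of the same iteration count are illustrative additions, not part of the NiFi test:

// Illustrative read-side counterpart: time repeated deserialization of one serialized swap payload.
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
serializer.serializeFlowFiles(toSwap, flowFileQueue, swapLocation, baos);
final byte[] payload = baos.toByteArray();

final SwapDeserializer deserializer = new SchemaSwapDeserializer();
final long readStart = System.nanoTime();
for (int i = 0; i < iterations; i++) {
    try (final DataInputStream dis = new DataInputStream(new ByteArrayInputStream(payload))) {
        deserializer.deserializeFlowFiles(dis, swapLocation, flowFileQueue, resourceClaimManager);
    }
}
final long readMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - readStart);
System.out.println("Read " + iterations + " Swap Files in " + readMillis + " millis");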