use of org.apache.nifi.controller.queue.QueueSize in project nifi by apache.
In class DtoFactory, method createDropRequestDTO:
/**
* Creates a DropRequestDTO from the specified flow file status.
*
* @param dropRequest dropRequest
* @return dto
*/
public DropRequestDTO createDropRequestDTO(final DropFlowFileStatus dropRequest) {
    final DropRequestDTO dto = new DropRequestDTO();
    dto.setId(dropRequest.getRequestIdentifier());
    dto.setSubmissionTime(new Date(dropRequest.getRequestSubmissionTime()));
    dto.setLastUpdated(new Date(dropRequest.getLastUpdated()));
    dto.setState(dropRequest.getState().toString());
    dto.setFailureReason(dropRequest.getFailureReason());
    dto.setFinished(isDropRequestComplete(dropRequest.getState()));

    final QueueSize dropped = dropRequest.getDroppedSize();
    dto.setDroppedCount(dropped.getObjectCount());
    dto.setDroppedSize(dropped.getByteCount());
    dto.setDropped(FormatUtils.formatCount(dropped.getObjectCount()) + " / " + FormatUtils.formatDataSize(dropped.getByteCount()));

    final QueueSize current = dropRequest.getCurrentSize();
    dto.setCurrentCount(current.getObjectCount());
    dto.setCurrentSize(current.getByteCount());
    dto.setCurrent(FormatUtils.formatCount(current.getObjectCount()) + " / " + FormatUtils.formatDataSize(current.getByteCount()));

    final QueueSize original = dropRequest.getOriginalSize();
    dto.setOriginalCount(original.getObjectCount());
    dto.setOriginalSize(original.getByteCount());
    dto.setOriginal(FormatUtils.formatCount(original.getObjectCount()) + " / " + FormatUtils.formatDataSize(original.getByteCount()));

    if (isDropRequestComplete(dropRequest.getState())) {
        dto.setPercentCompleted(100);
    } else {
        dto.setPercentCompleted((dropped.getObjectCount() * 100) / original.getObjectCount());
    }

    return dto;
}
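To make the progress calculation above concrete, the sketch below reproduces the same integer arithmetic with hypothetical dropped and original sizes. The class and helper-method names are illustrative only and are not part of DtoFactory; like the original, it assumes the original size has a non-zero object count while a drop is still in progress.

import org.apache.nifi.controller.queue.QueueSize;

public class DropRequestProgressSketch {

    // Mirrors the percent-completed arithmetic in createDropRequestDTO above;
    // assumes original.getObjectCount() > 0 while the drop is unfinished, as the source does
    static int percentCompleted(final QueueSize dropped, final QueueSize original, final boolean finished) {
        if (finished) {
            return 100;
        }
        return (dropped.getObjectCount() * 100) / original.getObjectCount();
    }

    public static void main(String[] args) {
        final QueueSize dropped = new QueueSize(250, 1_048_576L);   // 250 FlowFiles, 1 MiB dropped so far
        final QueueSize original = new QueueSize(1000, 4_194_304L); // 1000 FlowFiles, 4 MiB originally queued
        System.out.println(percentCompleted(dropped, original, false)); // prints 25
    }
}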
use of org.apache.nifi.controller.queue.QueueSize in project nifi by apache.
In class ControllerFacade, method getControllerStatus:
/**
* Gets the status of this controller.
*
* @return the status of this controller
*/
public ControllerStatusDTO getControllerStatus() {
    final ProcessGroup rootGroup = flowController.getGroup(flowController.getRootGroupId());
    final QueueSize controllerQueueSize = flowController.getTotalFlowFileCount(rootGroup);

    final ControllerStatusDTO controllerStatus = new ControllerStatusDTO();
    controllerStatus.setActiveThreadCount(flowController.getActiveThreadCount());
    controllerStatus.setQueued(FormatUtils.formatCount(controllerQueueSize.getObjectCount()) + " / " + FormatUtils.formatDataSize(controllerQueueSize.getByteCount()));
    controllerStatus.setBytesQueued(controllerQueueSize.getByteCount());
    controllerStatus.setFlowFilesQueued(controllerQueueSize.getObjectCount());

    final ProcessGroupCounts counts = rootGroup.getCounts();
    controllerStatus.setRunningCount(counts.getRunningCount());
    controllerStatus.setStoppedCount(counts.getStoppedCount());
    controllerStatus.setInvalidCount(counts.getInvalidCount());
    controllerStatus.setDisabledCount(counts.getDisabledCount());
    controllerStatus.setActiveRemotePortCount(counts.getActiveRemotePortCount());
    controllerStatus.setInactiveRemotePortCount(counts.getInactiveRemotePortCount());
    controllerStatus.setUpToDateCount(counts.getUpToDateCount());
    controllerStatus.setLocallyModifiedCount(counts.getLocallyModifiedCount());
    controllerStatus.setStaleCount(counts.getStaleCount());
    controllerStatus.setLocallyModifiedAndStaleCount(counts.getLocallyModifiedAndStaleCount());
    controllerStatus.setSyncFailureCount(counts.getSyncFailureCount());

    return controllerStatus;
}
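The queued summary stored by setQueued(...) is simply the object count and byte count of a QueueSize rendered through FormatUtils. Below is a minimal standalone sketch of that rendering, using only the constructor and accessors already shown in these snippets; the class name, helper method, and example values are made up, and the printed string is indicative only.

import org.apache.nifi.controller.queue.QueueSize;
import org.apache.nifi.util.FormatUtils;

public class QueuedSummarySketch {

    // Builds the same "count / size" summary string the facade stores in the DTO
    static String formatQueued(final QueueSize queueSize) {
        return FormatUtils.formatCount(queueSize.getObjectCount())
                + " / " + FormatUtils.formatDataSize(queueSize.getByteCount());
    }

    public static void main(String[] args) {
        final QueueSize totalQueued = new QueueSize(1_500, 10_485_760L);
        System.out.println(formatQueued(totalQueued)); // e.g. "1,500 / 10 MB"
    }
}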
use of org.apache.nifi.controller.queue.QueueSize in project nifi by apache.
In class TestWriteAheadFlowFileRepository, method testResourceClaimsIncremented:
@Test
public void testResourceClaimsIncremented() throws IOException {
    final ResourceClaimManager claimManager = new StandardResourceClaimManager();

    final TestQueueProvider queueProvider = new TestQueueProvider();
    final Connection connection = Mockito.mock(Connection.class);
    when(connection.getIdentifier()).thenReturn("1234");
    when(connection.getDestination()).thenReturn(Mockito.mock(Connectable.class));

    final FlowFileSwapManager swapMgr = new MockFlowFileSwapManager();
    final FlowFileQueue queue = new StandardFlowFileQueue("1234", connection, null, null, claimManager, null, swapMgr, null, 10000);
    when(connection.getFlowFileQueue()).thenReturn(queue);
    queueProvider.addConnection(connection);

    final ResourceClaim resourceClaim1 = claimManager.newResourceClaim("container", "section", "1", false, false);
    final ContentClaim claim1 = new StandardContentClaim(resourceClaim1, 0L);

    final ResourceClaim resourceClaim2 = claimManager.newResourceClaim("container", "section", "2", false, false);
    final ContentClaim claim2 = new StandardContentClaim(resourceClaim2, 0L);

    // resource claims' counts should be updated for both the swapped out FlowFile and the non-swapped out FlowFile
    try (final WriteAheadFlowFileRepository repo = new WriteAheadFlowFileRepository(NiFiProperties.createBasicNiFiProperties(null, null))) {
        repo.initialize(claimManager);
        repo.loadFlowFiles(queueProvider, -1L);

        // Create a Repository Record that indicates that a FlowFile was created
        final FlowFileRecord flowFile1 = new StandardFlowFileRecord.Builder().id(1L).addAttribute("uuid", "11111111-1111-1111-1111-111111111111").contentClaim(claim1).build();
        final StandardRepositoryRecord rec1 = new StandardRepositoryRecord(queue);
        rec1.setWorking(flowFile1);
        rec1.setDestination(queue);

        // Create a Record that we can swap out
        final FlowFileRecord flowFile2 = new StandardFlowFileRecord.Builder().id(2L).addAttribute("uuid", "11111111-1111-1111-1111-111111111112").contentClaim(claim2).build();
        final StandardRepositoryRecord rec2 = new StandardRepositoryRecord(queue);
        rec2.setWorking(flowFile2);
        rec2.setDestination(queue);

        final List<RepositoryRecord> records = new ArrayList<>();
        records.add(rec1);
        records.add(rec2);
        repo.updateRepository(records);

        final String swapLocation = swapMgr.swapOut(Collections.singletonList(flowFile2), queue);
        repo.swapFlowFilesOut(Collections.singletonList(flowFile2), queue, swapLocation);
    }

    final ResourceClaimManager recoveryClaimManager = new StandardResourceClaimManager();
    try (final WriteAheadFlowFileRepository repo = new WriteAheadFlowFileRepository(NiFiProperties.createBasicNiFiProperties(null, null))) {
        repo.initialize(recoveryClaimManager);
        final long largestId = repo.loadFlowFiles(queueProvider, 0L);

        // largest ID known is 1 because this doesn't take into account the FlowFiles that have been swapped out
        assertEquals(1, largestId);
    }

    // resource claim 1 will have a single claimant count while resource claim 2 will have no claimant counts
    // because resource claim 2 is referenced only by flowfiles that are swapped out.
    assertEquals(1, recoveryClaimManager.getClaimantCount(resourceClaim1));
    assertEquals(0, recoveryClaimManager.getClaimantCount(resourceClaim2));

    final SwapSummary summary = queue.recoverSwappedFlowFiles();
    assertNotNull(summary);
    assertEquals(2, summary.getMaxFlowFileId().intValue());
    assertEquals(new QueueSize(1, 0L), summary.getQueueSize());

    final List<ResourceClaim> swappedOutClaims = summary.getResourceClaims();
    assertNotNull(swappedOutClaims);
    assertEquals(1, swappedOutClaims.size());
    assertEquals(claim2.getResourceClaim(), swappedOutClaims.get(0));
}
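The assertion against a freshly constructed QueueSize(1, 0L) only passes because QueueSize compares by value (object count and byte count), which the recovery check above relies on. A minimal standalone illustration of that property, assuming nothing beyond the constructor and accessors already used in this test; the test class and method names are made up:

import static org.junit.Assert.assertEquals;

import org.apache.nifi.controller.queue.QueueSize;
import org.junit.Test;

public class QueueSizeValueSemanticsSketch {

    @Test
    public void testTwoInstancesWithSameCountsAreEqual() {
        final QueueSize a = new QueueSize(1, 0L);
        final QueueSize b = new QueueSize(1, 0L);

        // Value equality is what lets testResourceClaimsIncremented compare the recovered
        // SwapSummary's size against a newly built QueueSize
        assertEquals(a, b);
        assertEquals(a.getObjectCount(), b.getObjectCount());
        assertEquals(a.getByteCount(), b.getByteCount());
    }
}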
use of org.apache.nifi.controller.queue.QueueSize in project nifi by apache.
In class StandardFlowFileQueue, method isFull:
@Override
public boolean isFull() {
    final MaxQueueSize maxSize = maxQueueSize.get();

    // Check if max size is set
    if (maxSize.getMaxBytes() <= 0 && maxSize.getMaxCount() <= 0) {
        return false;
    }

    final QueueSize queueSize = getQueueSize();
    if (maxSize.getMaxCount() > 0 && queueSize.getObjectCount() >= maxSize.getMaxCount()) {
        return true;
    }

    if (maxSize.getMaxBytes() > 0 && queueSize.getByteCount() >= maxSize.getMaxBytes()) {
        return true;
    }

    return false;
}
use of org.apache.nifi.controller.queue.QueueSize in project nifi by apache.
In class StandardFlowFileQueue, method recoverSwappedFlowFiles:
@Override
public SwapSummary recoverSwappedFlowFiles() {
    int swapFlowFileCount = 0;
    long swapByteCount = 0L;
    Long maxId = null;
    List<ResourceClaim> resourceClaims = new ArrayList<>();

    final long startNanos = System.nanoTime();
    writeLock.lock();
    try {
        final List<String> swapLocations;
        try {
            swapLocations = swapManager.recoverSwapLocations(this);
        } catch (final IOException ioe) {
            logger.error("Failed to determine whether or not any Swap Files exist for FlowFile Queue {}", getIdentifier());
            logger.error("", ioe);
            if (eventReporter != null) {
                eventReporter.reportEvent(Severity.ERROR, "FlowFile Swapping", "Failed to determine whether or not any Swap Files exist for FlowFile Queue " + getIdentifier() + "; see logs for more details");
            }
            return null;
        }

        for (final String swapLocation : swapLocations) {
            try {
                final SwapSummary summary = swapManager.getSwapSummary(swapLocation);
                final QueueSize queueSize = summary.getQueueSize();
                final Long maxSwapRecordId = summary.getMaxFlowFileId();
                if (maxSwapRecordId != null) {
                    if (maxId == null || maxSwapRecordId > maxId) {
                        maxId = maxSwapRecordId;
                    }
                }

                swapFlowFileCount += queueSize.getObjectCount();
                swapByteCount += queueSize.getByteCount();
                resourceClaims.addAll(summary.getResourceClaims());
            } catch (final IOException ioe) {
                logger.error("Failed to recover FlowFiles from Swap File {}; the file appears to be corrupt", swapLocation);
                logger.error("", ioe);
                if (eventReporter != null) {
                    eventReporter.reportEvent(Severity.ERROR, "FlowFile Swapping", "Failed to recover FlowFiles from Swap File " + swapLocation + "; the file appears to be corrupt. See logs for more details");
                }
            }
        }

        incrementSwapQueueSize(swapFlowFileCount, swapByteCount, swapLocations.size());
        this.swapLocations.addAll(swapLocations);

        if (!swapLocations.isEmpty()) {
            final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
            logger.info("Recovered {} swap files for {} in {} millis", swapLocations.size(), this, millis);
        }
    } finally {
        writeLock.unlock("Recover Swap Files");
    }

    return new StandardSwapSummary(new QueueSize(swapFlowFileCount, swapByteCount), maxId, resourceClaims);
}