Usage example of org.apache.nifi.controller.repository.claim.ResourceClaim in the Apache NiFi project: class DtoFactory, method createFlowFileDTO.
/**
 * Builds a {@link FlowFileDTO} describing the given FlowFile record, including its
 * queued/lineage durations and, when a content claim is present, the claim's location.
 *
 * @param record the FlowFile record to convert
 * @return the populated DTO
 */
public FlowFileDTO createFlowFileDTO(final FlowFileRecord record) {
    final FlowFileDTO dto = new FlowFileDTO();
    dto.setUuid(record.getAttribute(CoreAttributes.UUID.key()));
    dto.setFilename(record.getAttribute(CoreAttributes.FILENAME.key()));
    dto.setPenalized(record.isPenalized());
    dto.setSize(record.getSize());
    dto.setAttributes(record.getAttributes());

    // Durations are measured against a single "now" snapshot so both values are consistent.
    final long nowMillis = new Date().getTime();
    dto.setQueuedDuration(nowMillis - record.getLastQueueDate());
    dto.setLineageDuration(nowMillis - record.getLineageStartDate());

    // Content-claim details are populated only when the FlowFile actually has content.
    final ContentClaim claim = record.getContentClaim();
    if (claim != null) {
        final ResourceClaim resource = claim.getResourceClaim();
        dto.setContentClaimSection(resource.getSection());
        dto.setContentClaimContainer(resource.getContainer());
        dto.setContentClaimIdentifier(resource.getId());
        // The FlowFile's offset is relative to the claim, which is itself offset within the resource.
        dto.setContentClaimOffset(claim.getOffset() + record.getContentClaimOffset());
        dto.setContentClaimFileSizeBytes(record.getSize());
        dto.setContentClaimFileSize(FormatUtils.formatDataSize(record.getSize()));
    }
    return dto;
}
Usage example of org.apache.nifi.controller.repository.claim.ResourceClaim in the Apache NiFi project: class TestVolatileContentRepository, method testRedirects.
@Test
public void testRedirects() throws IOException {
// Point NiFi at the test configuration before constructing the repository.
System.setProperty(NiFiProperties.PROPERTIES_FILE_PATH, TestVolatileContentRepository.class.getResource("/conf/nifi.properties").getFile());
final Map<String, String> addProps = new HashMap<>();
// Cap the in-memory repository at 10 MB so it can be filled quickly below.
addProps.put(VolatileContentRepository.MAX_SIZE_PROPERTY, "10 MB");
final NiFiProperties nifiProps = NiFiProperties.createBasicNiFiProperties(null, addProps);
final VolatileContentRepository contentRepo = new VolatileContentRepository(nifiProps);
// NOTE(review): claimManager is a field of the enclosing test class (not visible here).
contentRepo.initialize(claimManager);
final ContentClaim claim = contentRepo.create(true);
final OutputStream out = contentRepo.write(claim);
final byte[] oneK = new byte[1024];
Arrays.fill(oneK, (byte) 55);
// Write 10 MB to the repo
for (int i = 0; i < 10240; i++) {
out.write(oneK);
}
// The repository is now exactly full; any further write must fail because no
// backup repository has been configured yet.
try {
out.write(1);
Assert.fail("Expected to be out of space on content repo");
} catch (final IOException e) {
// expected: repository full and no backup configured
}
// A second attempt must fail the same way (the failed write above must not
// have consumed or corrupted any space).
try {
out.write(1);
Assert.fail("Expected to be out of space on content repo");
} catch (final IOException e) {
// expected: still out of space
}
// Configure a mocked backup repository; subsequent writes should be redirected
// to it instead of failing.
final ContentRepository mockRepo = Mockito.mock(ContentRepository.class);
contentRepo.setBackupRepository(mockRepo);
final ResourceClaim resourceClaim = claimManager.newResourceClaim("container", "section", "1000", true, false);
final ContentClaim contentClaim = new StandardContentClaim(resourceClaim, 0L);
Mockito.when(mockRepo.create(Matchers.anyBoolean())).thenReturn(contentClaim);
// Capture everything redirected to the backup repository in a byte stream.
final ByteArrayOutputStream overflowStream = new ByteArrayOutputStream();
Mockito.when(mockRepo.write(Matchers.any(ContentClaim.class))).thenReturn(overflowStream);
out.write(10);
// The redirect must transfer the full existing 10 MB of content plus the newly
// written byte into the backup repository.
assertEquals(1024 * 1024 * 10 + 1, overflowStream.size());
final byte[] overflowBuffer = overflowStream.toByteArray();
assertEquals(55, overflowBuffer[0]);
// Every byte except the last is the 55-fill; the final byte is the 10 written
// after the backup repository was attached.
for (int i = 0; i < overflowBuffer.length; i++) {
if (i == overflowBuffer.length - 1) {
assertEquals(10, overflowBuffer[i]);
} else {
assertEquals(55, overflowBuffer[i]);
}
}
}
Usage example of org.apache.nifi.controller.repository.claim.ResourceClaim in the Apache NiFi project: class TestWriteAheadFlowFileRepository, method testResourceClaimsIncremented.
@Test
public void testResourceClaimsIncremented() throws IOException {
final ResourceClaimManager claimManager = new StandardResourceClaimManager();
// Build a mock connection whose queue supports swapping via the mock swap manager.
final TestQueueProvider queueProvider = new TestQueueProvider();
final Connection connection = Mockito.mock(Connection.class);
when(connection.getIdentifier()).thenReturn("1234");
when(connection.getDestination()).thenReturn(Mockito.mock(Connectable.class));
final FlowFileSwapManager swapMgr = new MockFlowFileSwapManager();
final FlowFileQueue queue = new StandardFlowFileQueue("1234", connection, null, null, claimManager, null, swapMgr, null, 10000);
when(connection.getFlowFileQueue()).thenReturn(queue);
queueProvider.addConnection(connection);
// Two distinct resource claims: claim1 stays on the queue, claim2 gets swapped out.
final ResourceClaim resourceClaim1 = claimManager.newResourceClaim("container", "section", "1", false, false);
final ContentClaim claim1 = new StandardContentClaim(resourceClaim1, 0L);
final ResourceClaim resourceClaim2 = claimManager.newResourceClaim("container", "section", "2", false, false);
final ContentClaim claim2 = new StandardContentClaim(resourceClaim2, 0L);
// resource claims' counts should be updated for both the swapped out FlowFile and the non-swapped out FlowFile
try (final WriteAheadFlowFileRepository repo = new WriteAheadFlowFileRepository(NiFiProperties.createBasicNiFiProperties(null, null))) {
repo.initialize(claimManager);
repo.loadFlowFiles(queueProvider, -1L);
// Create a Repository Record that indicates that a FlowFile was created
final FlowFileRecord flowFile1 = new StandardFlowFileRecord.Builder().id(1L).addAttribute("uuid", "11111111-1111-1111-1111-111111111111").contentClaim(claim1).build();
final StandardRepositoryRecord rec1 = new StandardRepositoryRecord(queue);
rec1.setWorking(flowFile1);
rec1.setDestination(queue);
// Create a Record that we can swap out
final FlowFileRecord flowFile2 = new StandardFlowFileRecord.Builder().id(2L).addAttribute("uuid", "11111111-1111-1111-1111-111111111112").contentClaim(claim2).build();
final StandardRepositoryRecord rec2 = new StandardRepositoryRecord(queue);
rec2.setWorking(flowFile2);
rec2.setDestination(queue);
final List<RepositoryRecord> records = new ArrayList<>();
records.add(rec1);
records.add(rec2);
repo.updateRepository(records);
// Swap flowFile2 out so its claim is tracked only via the swap file.
final String swapLocation = swapMgr.swapOut(Collections.singletonList(flowFile2), queue);
repo.swapFlowFilesOut(Collections.singletonList(flowFile2), queue, swapLocation);
}
// Recover with a fresh claim manager to verify the claimant counts are rebuilt
// correctly from the repository alone (i.e. without the swapped-out FlowFiles).
final ResourceClaimManager recoveryClaimManager = new StandardResourceClaimManager();
try (final WriteAheadFlowFileRepository repo = new WriteAheadFlowFileRepository(NiFiProperties.createBasicNiFiProperties(null, null))) {
repo.initialize(recoveryClaimManager);
final long largestId = repo.loadFlowFiles(queueProvider, 0L);
// largest ID known is 1 because this doesn't take into account the FlowFiles that have been swapped out
assertEquals(1, largestId);
}
// resource claim 1 will have a single claimant count while resource claim 2 will have no claimant counts
// because resource claim 2 is referenced only by flowfiles that are swapped out.
assertEquals(1, recoveryClaimManager.getClaimantCount(resourceClaim1));
assertEquals(0, recoveryClaimManager.getClaimantCount(resourceClaim2));
// Recovering the swap files should surface the swapped-out FlowFile's id, size,
// and resource claim in the SwapSummary.
final SwapSummary summary = queue.recoverSwappedFlowFiles();
assertNotNull(summary);
assertEquals(2, summary.getMaxFlowFileId().intValue());
assertEquals(new QueueSize(1, 0L), summary.getQueueSize());
final List<ResourceClaim> swappedOutClaims = summary.getResourceClaims();
assertNotNull(swappedOutClaims);
assertEquals(1, swappedOutClaims.size());
assertEquals(claim2.getResourceClaim(), swappedOutClaims.get(0));
}
Usage example of org.apache.nifi.controller.repository.claim.ResourceClaim in the Apache NiFi project: class MockFlowFile, method createContentClaim.
/**
 * Test helper that registers a new ResourceClaim (with one claimant) under the given id
 * and wraps it in a StandardContentClaim constructed with 3L — presumably the claim's
 * offset; confirm against StandardContentClaim's constructor.
 */
public static ContentClaim createContentClaim(final String id, final ResourceClaimManager claimManager) {
    final ResourceClaim backing = claimManager.newResourceClaim("container", "section", id, false, false);
    claimManager.incrementClaimantCount(backing);
    return new StandardContentClaim(backing, 3L);
}
Usage example of org.apache.nifi.controller.repository.claim.ResourceClaim in the Apache NiFi project: class StandardFlowFileQueue, method recoverSwappedFlowFiles.
@Override
public SwapSummary recoverSwappedFlowFiles() {
    int swapFlowFileCount = 0;
    long swapByteCount = 0L;
    Long maxId = null;
    final List<ResourceClaim> resourceClaims = new ArrayList<>();
    final long startNanos = System.nanoTime();

    // Declared outside the locked region: the original declared this inside the try
    // block but referenced it after the finally, which does not compile. All paths
    // that reach the post-lock logging assign it first (the catch returns early).
    List<String> swapLocations;
    writeLock.lock();
    try {
        try {
            swapLocations = swapManager.recoverSwapLocations(this);
        } catch (final IOException ioe) {
            // Pass the Throwable as the final SLF4J argument so the stack trace is logged
            // with the message (replaces the old two-call "" + exception pattern).
            logger.error("Failed to determine whether or not any Swap Files exist for FlowFile Queue {}", getIdentifier(), ioe);
            if (eventReporter != null) {
                eventReporter.reportEvent(Severity.ERROR, "FlowFile Swapping", "Failed to determine whether or not any Swap Files exist for FlowFile Queue "
                    + getIdentifier() + "; see logs for more details");
            }
            return null;
        }

        // Aggregate counts, byte totals, max FlowFile id, and resource claims across
        // every recovered swap file. A corrupt swap file is reported and skipped so
        // the remaining files are still recovered.
        for (final String swapLocation : swapLocations) {
            try {
                final SwapSummary summary = swapManager.getSwapSummary(swapLocation);
                final QueueSize queueSize = summary.getQueueSize();
                final Long maxSwapRecordId = summary.getMaxFlowFileId();
                if (maxSwapRecordId != null) {
                    if (maxId == null || maxSwapRecordId > maxId) {
                        maxId = maxSwapRecordId;
                    }
                }

                swapFlowFileCount += queueSize.getObjectCount();
                swapByteCount += queueSize.getByteCount();
                resourceClaims.addAll(summary.getResourceClaims());
            } catch (final IOException ioe) {
                // Original passed ioe.toString() as an extra arg with no placeholder, so it
                // was silently dropped; passing the Throwable itself logs the stack trace.
                logger.error("Failed to recover FlowFiles from Swap File {}; the file appears to be corrupt", swapLocation, ioe);
                if (eventReporter != null) {
                    eventReporter.reportEvent(Severity.ERROR, "FlowFile Swapping", "Failed to recover FlowFiles from Swap File " + swapLocation
                        + "; the file appears to be corrupt. See logs for more details");
                }
            }
        }

        incrementSwapQueueSize(swapFlowFileCount, swapByteCount, swapLocations.size());
        this.swapLocations.addAll(swapLocations);
    } finally {
        writeLock.unlock("Recover Swap Files");
    }

    if (!swapLocations.isEmpty()) {
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        logger.info("Recovered {} swap files for {} in {} millis", swapLocations.size(), this, millis);
    }

    return new StandardSwapSummary(new QueueSize(swapFlowFileCount, swapByteCount), maxId, resourceClaims);
}
Aggregations