Use of org.apache.nifi.controller.repository.claim.ContentClaim in project nifi by apache.
The class WriteAheadRepositoryRecordSerde, method serializeEdit:
public void serializeEdit(final RepositoryRecord previousRecordState, final RepositoryRecord record, final DataOutputStream out, final boolean forceAttributesWritten) throws IOException {
    if (record.isMarkedForAbort()) {
        logger.warn("Repository Record {} is marked to be aborted; it will be persisted in the FlowFileRepository as a DELETE record", record);
        out.write(ACTION_DELETE);
        out.writeLong(getRecordIdentifier(record));
        serializeContentClaim(record.getCurrentClaim(), record.getCurrentClaimOffset(), out);
        return;
    }

    final UpdateType updateType = getUpdateType(record);

    if (updateType.equals(UpdateType.DELETE)) {
        out.write(ACTION_DELETE);
        out.writeLong(getRecordIdentifier(record));
        serializeContentClaim(record.getCurrentClaim(), record.getCurrentClaimOffset(), out);
        return;
    }

    // If there's a Destination Connection, that's the one that we want to associate with this record.
    // However, on restart, we will restore the FlowFile and set this connection to its "originalConnection".
    // If we then serialize the FlowFile again before it's transferred, it's important to allow this to happen,
    // so we use the originalConnection instead.
    FlowFileQueue associatedQueue = record.getDestination();
    if (associatedQueue == null) {
        associatedQueue = record.getOriginalQueue();
    }

    if (updateType.equals(UpdateType.SWAP_OUT)) {
        out.write(ACTION_SWAPPED_OUT);
        out.writeLong(getRecordIdentifier(record));
        out.writeUTF(associatedQueue.getIdentifier());
        out.writeUTF(getLocation(record));
        return;
    }

    final FlowFile flowFile = record.getCurrent();
    final ContentClaim claim = record.getCurrentClaim();

    switch (updateType) {
        case UPDATE:
            out.write(ACTION_UPDATE);
            break;
        case CREATE:
            out.write(ACTION_CREATE);
            break;
        case SWAP_IN:
            out.write(ACTION_SWAPPED_IN);
            break;
        default:
            throw new AssertionError();
    }

    out.writeLong(getRecordIdentifier(record));
    out.writeLong(flowFile.getEntryDate());
    out.writeLong(flowFile.getLineageStartDate());
    out.writeLong(flowFile.getLineageStartIndex());

    final Long queueDate = flowFile.getLastQueueDate();
    out.writeLong(queueDate == null ? System.currentTimeMillis() : queueDate);
    out.writeLong(flowFile.getQueueDateIndex());
    out.writeLong(flowFile.getSize());

    if (associatedQueue == null) {
        logger.warn("{} Repository Record {} has no Connection associated with it; it will be destroyed on restart", new Object[] {this, record});
        writeString("", out);
    } else {
        writeString(associatedQueue.getIdentifier(), out);
    }

    serializeContentClaim(claim, record.getCurrentClaimOffset(), out);

    if (forceAttributesWritten || record.isAttributesChanged() || updateType == UpdateType.CREATE || updateType == UpdateType.SWAP_IN) {
        // indicate attributes changed
        out.write(1);
        final Map<String, String> attributes = flowFile.getAttributes();
        out.writeInt(attributes.size());
        for (final Map.Entry<String, String> entry : attributes.entrySet()) {
            writeString(entry.getKey(), out);
            writeString(entry.getValue(), out);
        }
    } else {
        // indicate attributes did not change
        out.write(0);
    }

    if (updateType == UpdateType.SWAP_IN) {
        out.writeUTF(record.getSwapLocation());
    }
}
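The action byte written first dictates how the rest of each entry is laid out, so any reader must branch on it before consuming further fields. Below is a minimal read-side sketch of that framing; the ACTION_* values and the trailing field-order comments are assumptions for illustration (NiFi's real constants and full deserialization logic live in WriteAheadRepositoryRecordSerde):

import java.io.DataInputStream;
import java.io.IOException;

// Read-side sketch mirroring the framing of serializeEdit above.
// The ACTION_* values are illustrative placeholders, not NiFi's real constants.
class EditReaderSketch {
    static final int ACTION_CREATE = 0;
    static final int ACTION_UPDATE = 1;
    static final int ACTION_DELETE = 2;
    static final int ACTION_SWAPPED_OUT = 4;
    static final int ACTION_SWAPPED_IN = 5;

    void readEdit(final DataInputStream in) throws IOException {
        final int action = in.read();        // mirrors out.write(ACTION_*)
        final long recordId = in.readLong(); // mirrors out.writeLong(getRecordIdentifier(record))

        if (action == ACTION_DELETE) {
            // next on the wire: the serialized content claim, then the entry ends
            return;
        }
        if (action == ACTION_SWAPPED_OUT) {
            final String queueId = in.readUTF();      // associated queue identifier
            final String swapLocation = in.readUTF(); // swap file location
            return;
        }

        // CREATE / UPDATE / SWAPPED_IN all share the same prefix of long fields.
        final long entryDate = in.readLong();
        final long lineageStartDate = in.readLong();
        final long lineageStartIndex = in.readLong();
        final long lastQueueDate = in.readLong();
        final long queueDateIndex = in.readLong();
        final long size = in.readLong();
        // ... then queue id, content claim, the attribute-change flag, and (for
        // SWAPPED_IN) the swap location, in exactly the order they were written.
    }
}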
Use of org.apache.nifi.controller.repository.claim.ContentClaim in project nifi by apache.
The class FlowFileRecordFieldMap, method getFlowFile:
@SuppressWarnings("unchecked")
public static FlowFileRecord getFlowFile(final Record record, final ResourceClaimManager claimManager) {
    final StandardFlowFileRecord.Builder builder = new StandardFlowFileRecord.Builder();
    builder.id((Long) record.getFieldValue(FlowFileSchema.RECORD_ID));
    builder.entryDate((Long) record.getFieldValue(FlowFileSchema.ENTRY_DATE));
    builder.size((Long) record.getFieldValue(FlowFileSchema.FLOWFILE_SIZE));
    builder.addAttributes((Map<String, String>) record.getFieldValue(FlowFileSchema.ATTRIBUTES));
    builder.lineageStart((Long) record.getFieldValue(FlowFileSchema.LINEAGE_START_DATE), (Long) record.getFieldValue(FlowFileSchema.LINEAGE_START_INDEX));
    builder.lastQueued((Long) record.getFieldValue(FlowFileSchema.QUEUE_DATE), (Long) record.getFieldValue(FlowFileSchema.QUEUE_DATE_INDEX));

    final Record contentClaimRecord = (Record) record.getFieldValue(FlowFileSchema.CONTENT_CLAIM);
    if (contentClaimRecord != null) {
        final ContentClaim claim = ContentClaimFieldMap.getContentClaim(contentClaimRecord, claimManager);
        builder.contentClaim(claim);

        final Long offset = ContentClaimFieldMap.getContentClaimOffset(contentClaimRecord);
        if (offset != null) {
            builder.contentClaimOffset(offset);
        }
    }

    return builder.build();
}
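One thing to watch here: each (Long) cast is auto-unboxed when handed to the builder, so a record missing a required numeric field fails with a bare NullPointerException. A defensive variant could funnel the lookups through a helper like the hypothetical one below; requireLong is not part of NiFi, and it is sketched as if it sat alongside getFlowFile in FlowFileRecordFieldMap, which already has the Record and RecordField types in scope:

// Hypothetical null-safe accessor; not NiFi API. Fails with a descriptive
// message instead of an NPE when a required numeric field is absent.
private static long requireLong(final Record record, final RecordField field) {
    final Long value = (Long) record.getFieldValue(field);
    if (value == null) {
        throw new IllegalStateException("Record is missing required field: " + field);
    }
    return value;
}

With that in place, builder.id((Long) record.getFieldValue(FlowFileSchema.RECORD_ID)) would become builder.id(requireLong(record, FlowFileSchema.RECORD_ID)).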
Use of org.apache.nifi.controller.repository.claim.ContentClaim in project nifi by apache.
The class FlowController, method getReplayFailureReason:
private String getReplayFailureReason(final ProvenanceEventRecord event) {
    // Check that the event is a valid type.
    final ProvenanceEventType type = event.getEventType();
    if (type == ProvenanceEventType.JOIN) {
        return "Cannot replay events that are created from multiple parents";
    }

    // Make sure event has the Content Claim info
    final Long contentSize = event.getPreviousFileSize();
    final String contentClaimId = event.getPreviousContentClaimIdentifier();
    final String contentClaimSection = event.getPreviousContentClaimSection();
    final String contentClaimContainer = event.getPreviousContentClaimContainer();
    if (contentSize == null || contentClaimId == null || contentClaimSection == null || contentClaimContainer == null) {
        return "Cannot replay data from Provenance Event because the event does not contain the required Content Claim";
    }

    try {
        final ResourceClaim resourceClaim = resourceClaimManager.newResourceClaim(contentClaimContainer, contentClaimSection, contentClaimId, false, false);
        final ContentClaim contentClaim = new StandardContentClaim(resourceClaim, event.getPreviousContentClaimOffset());
        if (!contentRepository.isAccessible(contentClaim)) {
            return "Content is no longer available in Content Repository";
        }
    } catch (final IOException ioe) {
        return "Failed to determine whether or not content was available in Content Repository due to " + ioe.toString();
    }

    // Make sure that the source queue exists
    if (event.getSourceQueueIdentifier() == null) {
        return "Cannot replay data from Provenance Event because the event does not specify the Source FlowFile Queue";
    }

    final List<Connection> connections = getGroup(getRootGroupId()).findAllConnections();
    FlowFileQueue queue = null;
    for (final Connection connection : connections) {
        if (event.getSourceQueueIdentifier().equals(connection.getIdentifier())) {
            queue = connection.getFlowFileQueue();
            break;
        }
    }
    if (queue == null) {
        return "Cannot replay data from Provenance Event because the Source FlowFile Queue with ID " + event.getSourceQueueIdentifier() + " no longer exists";
    }

    return null;
}
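Since the method returns null when replay is possible and a human-readable reason otherwise, a caller would typically gate on the result. A minimal sketch of that null-means-ok pattern (the surrounding replay logic is assumed, not shown):

// Gate a replay attempt on the failure reason; null means the event is replayable.
final String failureReason = getReplayFailureReason(event);
if (failureReason != null) {
    throw new IllegalArgumentException("Cannot replay Provenance Event " + event.getEventId() + ": " + failureReason);
}
// ... safe to proceed with the replay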
Use of org.apache.nifi.controller.repository.claim.ContentClaim in project nifi by apache.
The class FlowController, method getContent:
public InputStream getContent(final FlowFileRecord flowFile, final String requestor, final String requestUri) throws IOException {
    requireNonNull(flowFile);
    requireNonNull(requestor);
    requireNonNull(requestUri);

    InputStream stream;
    final ResourceClaim resourceClaim;
    final ContentClaim contentClaim = flowFile.getContentClaim();
    if (contentClaim == null) {
        resourceClaim = null;
        stream = new ByteArrayInputStream(new byte[0]);
    } else {
        resourceClaim = flowFile.getContentClaim().getResourceClaim();
        stream = contentRepository.read(flowFile.getContentClaim());

        final long contentClaimOffset = flowFile.getContentClaimOffset();
        if (contentClaimOffset > 0L) {
            StreamUtils.skip(stream, contentClaimOffset);
        }

        stream = new LimitingInputStream(stream, flowFile.getSize());
    }

    // Register a Provenance Event to indicate that we replayed the data.
    final StandardProvenanceEventRecord.Builder sendEventBuilder = new StandardProvenanceEventRecord.Builder()
        .setEventType(ProvenanceEventType.DOWNLOAD)
        .setFlowFileUUID(flowFile.getAttribute(CoreAttributes.UUID.key()))
        .setAttributes(flowFile.getAttributes(), Collections.emptyMap())
        .setTransitUri(requestUri)
        .setEventTime(System.currentTimeMillis())
        .setFlowFileEntryDate(flowFile.getEntryDate())
        .setLineageStartDate(flowFile.getLineageStartDate())
        .setComponentType(getName())
        .setComponentId(getRootGroupId())
        .setDetails("Download of Content requested by " + requestor + " for " + flowFile);

    if (contentClaim != null) {
        sendEventBuilder.setCurrentContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(),
            contentClaim.getOffset() + flowFile.getContentClaimOffset(), flowFile.getSize());
    }

    final ProvenanceEventRecord sendEvent = sendEventBuilder.build();
    provenanceRepository.registerEvent(sendEvent);

    return stream;
}
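The returned stream is already positioned past the claim offset and limited to flowFile.getSize(), so the caller only needs to copy and close it. A hedged usage sketch; flowController, flowFile, requestUri, and the destination OutputStream are assumed to exist in the caller's context:

// Copy the FlowFile's content to some destination; the DOWNLOAD provenance
// event has already been registered by getContent itself.
try (final InputStream in = flowController.getContent(flowFile, "admin", requestUri)) {
    final byte[] buffer = new byte[8192];
    int len;
    while ((len = in.read(buffer)) != -1) {
        destination.write(buffer, 0, len);
    }
}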
Use of org.apache.nifi.controller.repository.claim.ContentClaim in project nifi by apache.
The class TestFileSystemRepository, method testMarkDestructableDoesNotArchiveIfStreamOpenAndWrittenTo:
@Test
public void testMarkDestructableDoesNotArchiveIfStreamOpenAndWrittenTo() throws IOException, InterruptedException {
    FileSystemRepository repository = null;
    try {
        final List<Path> archivedPaths = Collections.synchronizedList(new ArrayList<Path>());

        // We are creating our own 'local' repository in this test, so shut down the one created in the setup() method
        shutdown();

        repository = new FileSystemRepository(nifiProperties) {
            @Override
            protected boolean archive(Path curPath) throws IOException {
                archivedPaths.add(curPath);
                return true;
            }
        };

        final StandardResourceClaimManager claimManager = new StandardResourceClaimManager();
        repository.initialize(claimManager);
        repository.purge();

        final ContentClaim claim = repository.create(false);

        // Write to the claim and close the stream, which causes the claim to be put back onto the 'writableClaimsQueue'
        try (final OutputStream out = repository.write(claim)) {
            assertEquals(1, claimManager.getClaimantCount(claim.getResourceClaim()));
            out.write("1\n".getBytes());
        }

        assertEquals(1, claimManager.getClaimantCount(claim.getResourceClaim()));

        int claimantCount = claimManager.decrementClaimantCount(claim.getResourceClaim());
        assertEquals(0, claimantCount);
        assertTrue(archivedPaths.isEmpty());

        claimManager.markDestructable(claim.getResourceClaim());

        // Wait for the archive thread to have a chance to run
        Thread.sleep(2000L);

        // Should still be empty because we have a stream open to the file.
        assertTrue(archivedPaths.isEmpty());
        assertEquals(0, claimManager.getClaimantCount(claim.getResourceClaim()));
    } finally {
        if (repository != null) {
            repository.shutdown();
        }
    }
}
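A note on the Thread.sleep(2000L): because this test asserts that archiving does not happen, a fixed wait is the pragmatic choice; there is no positive condition to poll for. For tests that expect a condition to become true, though, a polling helper like the sketch below (an assumption, not how the NiFi test suite is written) avoids both flakiness and needless waiting:

import java.util.function.BooleanSupplier;

// Poll a condition every 100 ms until it holds or the timeout elapses.
static void waitFor(final BooleanSupplier condition, final long timeoutMillis) throws InterruptedException {
    final long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!condition.getAsBoolean()) {
        if (System.currentTimeMillis() > deadline) {
            throw new AssertionError("Condition not met within " + timeoutMillis + " ms");
        }
        Thread.sleep(100L);
    }
}

For example, a test that expects archiving to occur could call waitFor(() -> !archivedPaths.isEmpty(), 5000L) instead of sleeping for a fixed interval.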