Use of org.apache.nifi.controller.queue.FlowFileQueue in project nifi by apache.
The class SchemaSwapSerializer, method serializeFlowFiles.
@Override
public void serializeFlowFiles(final List<FlowFileRecord> toSwap, final FlowFileQueue queue, final String swapLocation, final OutputStream out) throws IOException {
    // Write the schema first so that the swap file is self-describing.
    schema.writeTo(out);

    // Tally the total content size, the highest FlowFile id, and the resource
    // claims so they can be recorded in the swap summary.
    long contentSize = 0L;
    long maxFlowFileId = -1L;
    final List<ResourceClaim> resourceClaims = new ArrayList<>();
    for (final FlowFileRecord flowFile : toSwap) {
        contentSize += flowFile.getSize();
        if (flowFile.getId() > maxFlowFileId) {
            maxFlowFileId = flowFile.getId();
        }

        final ContentClaim contentClaim = flowFile.getContentClaim();
        if (contentClaim != null) {
            resourceClaims.add(contentClaim.getResourceClaim());
        }
    }

    final QueueSize queueSize = new QueueSize(toSwap.size(), contentSize);
    final SwapSummary swapSummary = new StandardSwapSummary(queueSize, maxFlowFileId, resourceClaims);
    final Record summaryRecord = new SwapSummaryFieldMap(swapSummary, queue.getIdentifier(), SwapSchema.SWAP_SUMMARY_SCHEMA_V1);

    final List<Record> flowFileRecords = toSwap.stream()
        .map(flowFile -> new FlowFileRecordFieldMap(flowFile, flowFileSchema))
        .collect(Collectors.toList());

    // Create a simple record to hold the summary and the flowfile contents
    final RecordField summaryField = new SimpleRecordField(SwapSchema.SWAP_SUMMARY, FieldType.COMPLEX, Repetition.EXACTLY_ONE);
    final RecordField contentsField = new ComplexRecordField(SwapSchema.FLOWFILE_CONTENTS, Repetition.ZERO_OR_MORE, FlowFileSchema.FLOWFILE_SCHEMA_V2.getFields());
    final List<RecordField> fields = new ArrayList<>(2);
    fields.add(summaryField);
    fields.add(contentsField);

    final Map<RecordField, Object> swapFileMap = new LinkedHashMap<>();
    swapFileMap.put(summaryField, summaryRecord);
    swapFileMap.put(contentsField, flowFileRecords);
    final Record swapFileRecord = new FieldMapRecord(swapFileMap, new RecordSchema(fields));

    final SchemaRecordWriter writer = new SchemaRecordWriter();
    writer.writeRecord(swapFileRecord, out);
    out.flush();
}
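For orientation, here is a minimal sketch of how a swap manager might drive this serializer when swapping a queue's FlowFiles to disk. NiFi's FileSystemSwapManager performs a similar sequence, but the swapDirectory, swapFileName, flowFiles, and queue variables below are illustrative stand-ins, not the manager's actual code:

    // Hypothetical caller: write one batch of FlowFiles out as a swap file.
    final SwapSerializer serializer = new SchemaSwapSerializer();
    final File swapFile = new File(swapDirectory, swapFileName);  // illustrative location
    try (final OutputStream fos = new FileOutputStream(swapFile);
            final OutputStream out = new BufferedOutputStream(fos)) {
        serializer.serializeFlowFiles(flowFiles, queue, swapFile.getAbsolutePath(), out);
    }
    // serializeFlowFiles flushes but does not close the stream, so the
    // try-with-resources block above is responsible for closing it.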
Use of org.apache.nifi.controller.queue.FlowFileQueue in project nifi by apache.
The class SchemaRepositoryRecordSerde, method swapOutRecord.
private RepositoryRecord swapOutRecord(final Record record) {
    final Long recordId = (Long) record.getFieldValue(RepositoryRecordSchema.RECORD_ID_FIELD);
    final String queueId = (String) record.getFieldValue(
        new SimpleRecordField(RepositoryRecordSchema.QUEUE_IDENTIFIER, FieldType.STRING, Repetition.EXACTLY_ONE));
    final String swapLocation = (String) record.getFieldValue(
        new SimpleRecordField(RepositoryRecordSchema.SWAP_LOCATION, FieldType.STRING, Repetition.EXACTLY_ONE));
    final FlowFileQueue queue = getFlowFileQueue(queueId);

    final FlowFileRecord flowFileRecord = new StandardFlowFileRecord.Builder().id(recordId).build();
    return new StandardRepositoryRecord(queue, flowFileRecord, swapLocation);
}
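This method, like the deserialization paths below, relies on getFlowFileQueue(queueId) to map a stored queue identifier back to a live FlowFileQueue. A minimal sketch of that lookup, assuming the serde is handed a map of identifiers to queues before recovery begins (NiFi's serde base class exposes a similar setQueueMap hook, but treat the exact names here as assumptions):

    private Map<String, FlowFileQueue> queueMap;  // populated once before recovery starts

    public void setQueueMap(final Map<String, FlowFileQueue> queueMap) {
        this.queueMap = queueMap;
    }

    protected FlowFileQueue getFlowFileQueue(final String queueId) {
        // May return null: the connection may have been removed since the
        // record was written. Callers discard or abort such records.
        return queueMap == null ? null : queueMap.get(queueId);
    }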
Use of org.apache.nifi.controller.queue.FlowFileQueue in project nifi by apache.
The class SchemaRepositoryRecordSerde, method createRecord.
@SuppressWarnings("unchecked")
private StandardRepositoryRecord createRecord(final Record record) {
    // Rebuild the FlowFile from the individual schema fields.
    final StandardFlowFileRecord.Builder ffBuilder = new StandardFlowFileRecord.Builder();
    ffBuilder.id((Long) record.getFieldValue(RepositoryRecordSchema.RECORD_ID));
    ffBuilder.entryDate((Long) record.getFieldValue(FlowFileSchema.ENTRY_DATE));

    final Long lastQueueDate = (Long) record.getFieldValue(FlowFileSchema.QUEUE_DATE);
    final Long queueDateIndex = (Long) record.getFieldValue(FlowFileSchema.QUEUE_DATE_INDEX);
    ffBuilder.lastQueued(lastQueueDate, queueDateIndex);

    final Long lineageStartDate = (Long) record.getFieldValue(FlowFileSchema.LINEAGE_START_DATE);
    final Long lineageStartIndex = (Long) record.getFieldValue(FlowFileSchema.LINEAGE_START_INDEX);
    ffBuilder.lineageStart(lineageStartDate, lineageStartIndex);

    populateContentClaim(ffBuilder, record);
    ffBuilder.size((Long) record.getFieldValue(FlowFileSchema.FLOWFILE_SIZE));
    ffBuilder.addAttributes((Map<String, String>) record.getFieldValue(FlowFileSchema.ATTRIBUTES));

    final FlowFileRecord flowFileRecord = ffBuilder.build();

    // Re-associate the FlowFile with its queue, if the queue still exists.
    final String queueId = (String) record.getFieldValue(RepositoryRecordSchema.QUEUE_IDENTIFIER);
    final FlowFileQueue queue = getFlowFileQueue(queueId);
    final StandardRepositoryRecord repoRecord = new StandardRepositoryRecord(queue, flowFileRecord);
    requireFlowFileQueue(repoRecord, queueId);
    return repoRecord;
}
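The repeated (Long) record.getFieldValue(...) casts can trip on fields that are absent from records written by older schema versions. A small typed accessor, sketched here purely as an illustration (the getLong helper is not part of NiFi), keeps the null handling in one place:

    // Illustrative helper: typed access to a schema Record field with a
    // fallback for fields that older-versioned records never wrote.
    private static long getLong(final Record record, final String fieldName, final long defaultValue) {
        final Object value = record.getFieldValue(fieldName);
        return value == null ? defaultValue : (Long) value;
    }

    // e.g. ffBuilder.entryDate(getLong(record, FlowFileSchema.ENTRY_DATE, 0L));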
Use of org.apache.nifi.controller.queue.FlowFileQueue in project nifi by apache.
The class WriteAheadRepositoryRecordSerde, method deserializeEdit.
@Override
public RepositoryRecord deserializeEdit(final DataInputStream in, final Map<Object, RepositoryRecord> currentRecordStates, final int version) throws IOException {
    final int action = in.read();
    final long recordId = in.readLong();

    if (action == ACTION_DELETE) {
        final StandardFlowFileRecord.Builder ffBuilder = new StandardFlowFileRecord.Builder().id(recordId);
        if (version > 4) {
            deserializeClaim(in, version, ffBuilder);
        }

        final FlowFileRecord flowFileRecord = ffBuilder.build();
        final StandardRepositoryRecord record = new StandardRepositoryRecord((FlowFileQueue) null, flowFileRecord);
        record.markForDelete();
        return record;
    }

    if (action == ACTION_SWAPPED_OUT) {
        final String queueId = in.readUTF();
        final String location = in.readUTF();
        final FlowFileQueue queue = getFlowFileQueue(queueId);

        final FlowFileRecord flowFileRecord = new StandardFlowFileRecord.Builder().id(recordId).build();
        return new StandardRepositoryRecord(queue, flowFileRecord, location);
    }

    final StandardFlowFileRecord.Builder ffBuilder = new StandardFlowFileRecord.Builder();
    final RepositoryRecord record = currentRecordStates.get(recordId);
    ffBuilder.id(recordId);
    if (record != null) {
        ffBuilder.fromFlowFile(record.getCurrent());
    }
    ffBuilder.entryDate(in.readLong());

    if (version > 1) {
        // read the lineage identifiers and lineage start date, which were added in version 2.
        if (version < 9) {
            final int numLineageIds = in.readInt();
            for (int i = 0; i < numLineageIds; i++) {
                // skip identifiers
                in.readUTF();
            }
        }

        final long lineageStartDate = in.readLong();
        final long lineageStartIndex;
        if (version > 7) {
            lineageStartIndex = in.readLong();
        } else {
            lineageStartIndex = 0L;
        }
        ffBuilder.lineageStart(lineageStartDate, lineageStartIndex);

        if (version > 5) {
            final long lastQueueDate = in.readLong();
            final long queueDateIndex;
            if (version > 7) {
                queueDateIndex = in.readLong();
            } else {
                queueDateIndex = 0L;
            }
            ffBuilder.lastQueued(lastQueueDate, queueDateIndex);
        }
    }

    ffBuilder.size(in.readLong());
    final String connectionId = readString(in);
    logger.debug("{} -> {}", new Object[] { recordId, connectionId });

    deserializeClaim(in, version, ffBuilder);

    // recover new attributes, if they changed
    final int attributesChanged = in.read();
    if (attributesChanged == -1) {
        throw new EOFException();
    } else if (attributesChanged == 1) {
        final int numAttributes = in.readInt();
        final Map<String, String> attributes = new HashMap<>();
        for (int i = 0; i < numAttributes; i++) {
            final String key = readString(in);
            final String value = readString(in);
            attributes.put(key, value);
        }
        ffBuilder.addAttributes(attributes);
    } else if (attributesChanged != 0) {
        throw new IOException("Attribute Change Qualifier not found in stream; found value: " + attributesChanged
            + " after successfully restoring " + recordsRestored + " records. The FlowFile Repository appears to be corrupt!");
    }

    final FlowFileRecord flowFile = ffBuilder.build();
    String swapLocation = null;
    if (action == ACTION_SWAPPED_IN) {
        swapLocation = in.readUTF();
    }

    final FlowFileQueue queue = getFlowFileQueue(connectionId);
    final StandardRepositoryRecord standardRepoRecord = new StandardRepositoryRecord(queue, flowFile);
    if (swapLocation != null) {
        standardRepoRecord.setSwapLocation(swapLocation);
    }

    if (connectionId.isEmpty()) {
        logger.warn("{} does not have a Queue associated with it; this record will be discarded", flowFile);
        standardRepoRecord.markForAbort();
    } else if (queue == null) {
        logger.warn("{} maps to unknown Queue {}; this record will be discarded", flowFile, connectionId);
        standardRepoRecord.markForAbort();
    }

    recordsRestored++;
    return standardRepoRecord;
}
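Reading these branches together with serializeEdit below, the on-disk layout of a version-9 UPDATE/CREATE/SWAP_IN edit can be reconstructed. A summary in comment form, with field order as implied by the reads (primitives are big-endian, per DataInputStream):

    // byte   action code                  ACTION_UPDATE / ACTION_CREATE / ACTION_SWAPPED_IN
    // long   record id
    // long   entry date (epoch ms)
    // long   lineage start date           (version > 1; the lineage id list was dropped in v9)
    // long   lineage start index          (version > 7)
    // long   last queue date (epoch ms)   (version > 5)
    // long   queue date index             (version > 7)
    // long   size in bytes
    // string connection (queue) id        (length-prefixed via readString/writeString)
    // ...    content claim                (see serializeContentClaim/deserializeClaim)
    // byte   attributes-changed flag      (0 = unchanged, 1 = attribute map follows)
    // int    attribute count, then key/value string pairs   (only when flag == 1)
    // string swap location (readUTF)      (SWAP_IN records only)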
Use of org.apache.nifi.controller.queue.FlowFileQueue in project nifi by apache.
The class WriteAheadRepositoryRecordSerde, method serializeEdit.
public void serializeEdit(final RepositoryRecord previousRecordState, final RepositoryRecord record, final DataOutputStream out, final boolean forceAttributesWritten) throws IOException {
    if (record.isMarkedForAbort()) {
        logger.warn("Repository Record {} is marked to be aborted; it will be persisted in the FlowFileRepository as a DELETE record", record);
        out.write(ACTION_DELETE);
        out.writeLong(getRecordIdentifier(record));
        serializeContentClaim(record.getCurrentClaim(), record.getCurrentClaimOffset(), out);
        return;
    }

    final UpdateType updateType = getUpdateType(record);
    if (updateType.equals(UpdateType.DELETE)) {
        out.write(ACTION_DELETE);
        out.writeLong(getRecordIdentifier(record));
        serializeContentClaim(record.getCurrentClaim(), record.getCurrentClaimOffset(), out);
        return;
    }

    // If there's a Destination Connection, that's the one that we want to associate with this record.
    // However, on restart, we will restore the FlowFile and set this connection to its "originalConnection".
    // If we then serialize the FlowFile again before it's transferred, it's important to allow this to happen,
    // so we use the originalConnection instead.
    FlowFileQueue associatedQueue = record.getDestination();
    if (associatedQueue == null) {
        associatedQueue = record.getOriginalQueue();
    }

    if (updateType.equals(UpdateType.SWAP_OUT)) {
        out.write(ACTION_SWAPPED_OUT);
        out.writeLong(getRecordIdentifier(record));
        out.writeUTF(associatedQueue.getIdentifier());
        out.writeUTF(getLocation(record));
        return;
    }

    final FlowFile flowFile = record.getCurrent();
    final ContentClaim claim = record.getCurrentClaim();

    switch (updateType) {
        case UPDATE:
            out.write(ACTION_UPDATE);
            break;
        case CREATE:
            out.write(ACTION_CREATE);
            break;
        case SWAP_IN:
            out.write(ACTION_SWAPPED_IN);
            break;
        default:
            throw new AssertionError();
    }

    out.writeLong(getRecordIdentifier(record));
    out.writeLong(flowFile.getEntryDate());
    out.writeLong(flowFile.getLineageStartDate());
    out.writeLong(flowFile.getLineageStartIndex());

    final Long queueDate = flowFile.getLastQueueDate();
    out.writeLong(queueDate == null ? System.currentTimeMillis() : queueDate);
    out.writeLong(flowFile.getQueueDateIndex());
    out.writeLong(flowFile.getSize());

    if (associatedQueue == null) {
        logger.warn("{} Repository Record {} has no Connection associated with it; it will be destroyed on restart", new Object[] { this, record });
        writeString("", out);
    } else {
        writeString(associatedQueue.getIdentifier(), out);
    }

    serializeContentClaim(claim, record.getCurrentClaimOffset(), out);

    if (forceAttributesWritten || record.isAttributesChanged() || updateType == UpdateType.CREATE || updateType == UpdateType.SWAP_IN) {
        // indicate attributes changed
        out.write(1);
        final Map<String, String> attributes = flowFile.getAttributes();
        out.writeInt(attributes.size());
        for (final Map.Entry<String, String> entry : attributes.entrySet()) {
            writeString(entry.getKey(), out);
            writeString(entry.getValue(), out);
        }
    } else {
        // indicate attributes did not change
        out.write(0);
    }

    if (updateType == UpdateType.SWAP_IN) {
        out.writeUTF(record.getSwapLocation());
    }
}
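The forceAttributesWritten flag exists because an incremental edit may omit unchanged attributes, while a full snapshot record cannot. The class's serializeRecord entry point plausibly funnels through this method with the flag forced on, roughly as sketched below (treat the exact override as an assumption):

    @Override
    public void serializeRecord(final RepositoryRecord record, final DataOutputStream out) throws IOException {
        // A snapshot record has no prior state to diff against, so always
        // write the full attribute map.
        serializeEdit(null, record, out, true);
    }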