Usage of org.apache.nifi.controller.repository.FlowFileRecord in the Apache NiFi project.
Class StandardFlowFileQueue, method drainQueue.
@Override
public long drainQueue(final Queue<FlowFileRecord> sourceQueue, final List<FlowFileRecord> destination, int maxResults, final Set<FlowFileRecord> expiredRecords) {
    // Moves FlowFiles from sourceQueue into destination until maxResults records
    // have been collected or the source is empty, routing expired records into
    // expiredRecords instead. Returns the total byte size of the records removed,
    // except that a record that triggers an early exit (expiration cap reached,
    // or a penalized record that is put back) is not counted.
    long bytesDrained = 0L;
    final long expirationMillis = expirationPeriod.get().getMillis();

    FlowFileRecord record;
    while (destination.size() < maxResults && (record = sourceQueue.poll()) != null) {
        if (isLaterThan(getExpirationDate(record, expirationMillis))) {
            expiredRecords.add(record);
            // Bound the amount of expiration work performed per invocation.
            if (expiredRecords.size() >= MAX_EXPIRED_RECORDS_PER_ITERATION) {
                break;
            }
        } else if (record.isPenalized()) {
            // Penalized FlowFiles sort to the back of the queue, so once one is
            // seen nothing after it is eligible: put it back and stop draining.
            sourceQueue.add(record);
            break;
        } else {
            destination.add(record);
        }
        bytesDrained += record.getSize();
    }

    return bytesDrained;
}
Usage of org.apache.nifi.controller.repository.FlowFileRecord in the Apache NiFi project.
Class StandardFlowFileQueue, method doPoll.
/**
 * Polls the next available FlowFile from the active queue, diverting expired
 * records into {@code expiredRecords} along the way. Returns {@code null} when
 * the queue is empty, when the next eligible FlowFile is penalized (it is put
 * back), or when the per-iteration expiration cap is reached first.
 *
 * NOTE(review): the final accounting adjustment uses {@code expiredRecords.size()},
 * which assumes the set is empty when this method is called — confirm at call sites.
 */
private FlowFileRecord doPoll(final Set<FlowFileRecord> expiredRecords, final long expirationMillis) {
    FlowFileRecord flowFile;
    boolean isExpired;

    // Pull any swapped-out FlowFiles back into the active queue before polling.
    migrateSwapToActive();

    long expiredBytes = 0L;
    do {
        flowFile = this.activeQueue.poll();

        // NOTE(review): assumes getExpirationDate/isLaterThan treat a null
        // flowFile as "not expired" so the loop terminates when the queue is
        // empty — confirm against those implementations.
        isExpired = isLaterThan(getExpirationDate(flowFile, expirationMillis));
        if (isExpired) {
            // Expired: collect it, remember its size for the accounting below,
            // and keep looping for a non-expired record.
            expiredRecords.add(flowFile);
            expiredBytes += flowFile.getSize();
            flowFile = null;
            // Bound the amount of expiration work performed per invocation.
            if (expiredRecords.size() >= MAX_EXPIRED_RECORDS_PER_ITERATION) {
                break;
            }
        } else if (flowFile != null && flowFile.isPenalized()) {
            // Penalized FlowFile: return it to the queue and give up — the
            // rest of the queue is not polled.
            this.activeQueue.add(flowFile);
            flowFile = null;
            break;
        }

        if (flowFile != null) {
            // This FlowFile is being handed to the caller; remove it from the
            // active-queue size accounting.
            incrementActiveQueueSize(-1, -flowFile.getSize());
        }
    } while (isExpired);

    // Expired records also left the active queue; adjust accounting in one step.
    if (!expiredRecords.isEmpty()) {
        incrementActiveQueueSize(-expiredRecords.size(), -expiredBytes);
    }

    return flowFile;
}
Usage of org.apache.nifi.controller.repository.FlowFileRecord in the Apache NiFi project.
Class SchemaSwapDeserializer, method deserializeFlowFiles.
@Override
@SuppressWarnings("unchecked")
public SwapContents deserializeFlowFiles(final DataInputStream in, final String swapLocation, final FlowFileQueue queue, final ResourceClaimManager claimManager) throws IOException {
    // The swap file begins with its own record schema; use it to decode the
    // single top-level record that carries both the FlowFile contents and the
    // swap summary.
    final RecordSchema swapFileSchema = RecordSchema.readFrom(in);
    final Record topLevel = SchemaRecordReader.fromSchema(swapFileSchema).readRecord(in);

    // Materialize each serialized FlowFile record.
    final List<Record> serialized = (List<Record>) topLevel.getFieldValue(SwapSchema.FLOWFILE_CONTENTS);
    final List<FlowFileRecord> restored = new ArrayList<>(serialized.size());
    for (final Record serializedFlowFile : serialized) {
        restored.add(FlowFileRecordFieldMap.getFlowFile(serializedFlowFile, claimManager));
    }

    // Rebuild the summary and hand back the complete swap contents.
    final Record summaryRecord = (Record) topLevel.getFieldValue(SwapSchema.SWAP_SUMMARY);
    return new StandardSwapContents(SwapSummaryFieldMap.getSwapSummary(summaryRecord, claimManager), restored);
}
Usage of org.apache.nifi.controller.repository.FlowFileRecord in the Apache NiFi project.
Class SimpleSwapDeserializer, method deserializeFlowFiles.
/**
 * Deserializes the body of a legacy ("simple") swap file into a SwapContents.
 * Fields are read in the historical write order, branching on
 * {@code serializationVersion} for every field that was added or removed over
 * time — this read order is the wire contract and must not be changed.
 *
 * @throws IncompleteSwapFileException if the stream ends early; the exception
 *         carries the FlowFiles that were read successfully before EOF
 * @throws IOException if a pre-version-3 record lacks the expected 'UPDATE'
 *         action code, or the stream cannot otherwise be read
 */
private static SwapContents deserializeFlowFiles(final DataInputStream in, final QueueSize queueSize, final Long maxRecordId, final int serializationVersion, final ResourceClaimManager claimManager, final String location) throws IOException {
    final List<FlowFileRecord> flowFiles = new ArrayList<>(queueSize.getObjectCount());
    final List<ResourceClaim> resourceClaims = new ArrayList<>(queueSize.getObjectCount());
    Long maxId = maxRecordId;

    for (int i = 0; i < queueSize.getObjectCount(); i++) {
        try {
            // legacy encoding had an "action" because it used to be coupled with FlowFile Repository code
            if (serializationVersion < 3) {
                final int action = in.read();
                if (action != 1) {
                    throw new IOException("Swap File is version " + serializationVersion + " but did not contain a 'UPDATE' record type");
                }
            }

            final StandardFlowFileRecord.Builder ffBuilder = new StandardFlowFileRecord.Builder();

            // Record id; track the largest id seen for the swap summary.
            final long recordId = in.readLong();
            if (maxId == null || recordId > maxId) {
                maxId = recordId;
            }
            ffBuilder.id(recordId);
            ffBuilder.entryDate(in.readLong());

            if (serializationVersion > 1) {
                // Lineage information was added in version 2
                if (serializationVersion < 10) {
                    // Lineage identifiers were dropped in version 10; read and discard.
                    final int numLineageIdentifiers = in.readInt();
                    for (int lineageIdIdx = 0; lineageIdIdx < numLineageIdentifiers; lineageIdIdx++) {
                        // skip each identifier
                        in.readUTF();
                    }
                }

                // version 9 adds in a 'lineage start index'
                final long lineageStartDate = in.readLong();
                final long lineageStartIndex;
                if (serializationVersion > 8) {
                    lineageStartIndex = in.readLong();
                } else {
                    lineageStartIndex = 0L;
                }
                ffBuilder.lineageStart(lineageStartDate, lineageStartIndex);

                if (serializationVersion > 5) {
                    // Version 9 adds in a 'queue date index'
                    final long lastQueueDate = in.readLong();
                    final long queueDateIndex;
                    if (serializationVersion > 8) {
                        queueDateIndex = in.readLong();
                    } else {
                        queueDateIndex = 0L;
                    }
                    ffBuilder.lastQueued(lastQueueDate, queueDateIndex);
                }
            }

            ffBuilder.size(in.readLong());

            if (serializationVersion < 3) {
                // connection Id — no longer stored; read and discard
                readString(in);
            }

            // Content claim, present only if the FlowFile had content.
            final boolean hasClaim = in.readBoolean();
            ResourceClaim resourceClaim = null;
            if (hasClaim) {
                final String claimId;
                if (serializationVersion < 5) {
                    // Claim ids were numeric before version 5.
                    claimId = String.valueOf(in.readLong());
                } else {
                    claimId = in.readUTF();
                }

                final String container = in.readUTF();
                final String section = in.readUTF();

                final long resourceOffset;
                final long resourceLength;
                if (serializationVersion < 6) {
                    // Offset/length within the resource claim were added in version 6.
                    resourceOffset = 0L;
                    resourceLength = -1L;
                } else {
                    resourceOffset = in.readLong();
                    resourceLength = in.readLong();
                }

                final long claimOffset = in.readLong();

                final boolean lossTolerant;
                if (serializationVersion >= 4) {
                    lossTolerant = in.readBoolean();
                } else {
                    lossTolerant = false;
                }

                resourceClaim = claimManager.getResourceClaim(container, section, claimId);
                if (resourceClaim == null) {
                    // Claim referenced by the swap file is unknown to the claim manager;
                    // fabricate a temporary claim so deserialization can continue.
                    logger.error("Swap file indicates that FlowFile was referencing Resource Claim at container={}, section={}, claimId={}, " + "but this Resource Claim cannot be found! Will create a temporary Resource Claim, but this may affect the framework's " + "ability to properly clean up this resource", container, section, claimId);
                    resourceClaim = claimManager.newResourceClaim(container, section, claimId, lossTolerant, true);
                }

                final StandardContentClaim claim = new StandardContentClaim(resourceClaim, resourceOffset);
                claim.setLength(resourceLength);
                ffBuilder.contentClaim(claim);
                ffBuilder.contentClaimOffset(claimOffset);
            }

            // Attributes: always present from version 3 on; earlier versions wrote a flag first.
            boolean attributesChanged = true;
            if (serializationVersion < 3) {
                attributesChanged = in.readBoolean();
            }
            if (attributesChanged) {
                final int numAttributes = in.readInt();
                for (int j = 0; j < numAttributes; j++) {
                    final String key = readString(in);
                    final String value = readString(in);
                    ffBuilder.addAttribute(key, value);
                }
            }

            final FlowFileRecord record = ffBuilder.build();
            if (resourceClaim != null) {
                resourceClaims.add(resourceClaim);
            }
            flowFiles.add(record);
        } catch (final EOFException eof) {
            // Truncated swap file: surface whatever was read successfully so the
            // caller can still recover the partial contents.
            final SwapSummary swapSummary = new StandardSwapSummary(queueSize, maxId, resourceClaims);
            final SwapContents partialContents = new StandardSwapContents(swapSummary, flowFiles);
            throw new IncompleteSwapFileException(location, partialContents);
        }
    }

    final SwapSummary swapSummary = new StandardSwapSummary(queueSize, maxId, resourceClaims);
    return new StandardSwapContents(swapSummary, flowFiles);
}
Usage of org.apache.nifi.controller.repository.FlowFileRecord in the Apache NiFi project.
Class StandardFlowFileQueue, method poll.
@Override
public List<FlowFileRecord> poll(final FlowFileFilter filter, final Set<FlowFileRecord> expiredRecords) {
    // Lets the supplied filter decide which FlowFiles to pull from the active
    // queue. Records the filter declines are returned to the queue; expired
    // records are diverted into expiredRecords. Runs under the write lock.
    long removedBytes = 0L;
    int removedCount = 0;

    writeLock.lock();
    try {
        migrateSwapToActive();
        final long expirationMillis = expirationPeriod.get().getMillis();

        final List<FlowFileRecord> accepted = new ArrayList<>();
        final List<FlowFileRecord> rejected = new ArrayList<>();

        while (true) {
            final FlowFileRecord candidate = this.activeQueue.poll();
            if (candidate == null) {
                break; // active queue exhausted
            }

            if (isLaterThan(getExpirationDate(candidate, expirationMillis))) {
                // Expired: divert to the caller's set; it still counts as removed
                // from the active queue for the accounting below.
                expiredRecords.add(candidate);
                removedBytes += candidate.getSize();
                removedCount++;
                // Bound the expiration work done in a single poll.
                if (expiredRecords.size() >= MAX_EXPIRED_RECORDS_PER_ITERATION) {
                    break;
                }
                continue;
            }

            if (candidate.isPenalized()) {
                // just stop searching because the rest are all penalized.
                this.activeQueue.add(candidate);
                break;
            }

            final FlowFileFilterResult verdict = filter.filter(candidate);
            if (verdict.isAccept()) {
                removedBytes += candidate.getSize();
                removedCount++;
                incrementUnacknowledgedQueueSize(1, candidate.getSize());
                accepted.add(candidate);
            } else {
                rejected.add(candidate);
            }

            if (!verdict.isContinue()) {
                break;
            }
        }

        // Everything the filter declined goes back onto the active queue.
        this.activeQueue.addAll(rejected);
        incrementActiveQueueSize(-removedCount, -removedBytes);
        return accepted;
    } finally {
        writeLock.unlock("poll(Filter, Set)");
    }
}
Aggregations