Use of org.apache.nifi.controller.repository.claim.StandardContentClaim in project nifi by apache.
Class TestStandardProcessSession, method testMissingFlowFileExceptionThrownWhenUnableToReadData:
@Test
public void testMissingFlowFileExceptionThrownWhenUnableToReadData() {
    // Build a FlowFile whose content claim points at a resource claim that has
    // no backing content, so any attempt to read it must fail.
    final FlowFileRecord flowFileRecord = new StandardFlowFileRecord.Builder()
        .addAttribute("uuid", "12345678-1234-1234-1234-123456789012")
        .entryDate(System.currentTimeMillis())
        .contentClaim(new StandardContentClaim(resourceClaimManager.newResourceClaim("x", "x", "0", true, false), 0L))
        .size(1L)
        .build();
    flowFileQueue.put(flowFileRecord);

    // Attempt to read the data.
    try {
        final FlowFile ff1 = session.get();
        session.read(ff1, new InputStreamCallback() {
            @Override
            public void process(InputStream in) throws IOException {
            }
        });
        Assert.fail("Expected MissingFlowFileException");
    } catch (final MissingFlowFileException mffe) {
        // expected
    }
}
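On JUnit 4.13 or later, the try/fail/catch idiom above can be written more compactly with Assert.assertThrows. A minimal sketch, assuming the same session and flowFileQueue setup as the test:

final FlowFile ff1 = session.get();
Assert.assertThrows(MissingFlowFileException.class,
        () -> session.read(ff1, in -> { /* never reached; the claim has no content */ }));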
Use of org.apache.nifi.controller.repository.claim.StandardContentClaim in project nifi by apache.
Class TestStandardProcessSession, method testMissingFlowFileExceptionThrownWhenUnableToReadDataStreamCallback:
@Test
public void testMissingFlowFileExceptionThrownWhenUnableToReadDataStreamCallback() {
    // Same setup as above: the content claim points at a resource claim with no
    // backing content.
    final FlowFileRecord flowFileRecord = new StandardFlowFileRecord.Builder()
        .addAttribute("uuid", "12345678-1234-1234-1234-123456789012")
        .entryDate(System.currentTimeMillis())
        .contentClaim(new StandardContentClaim(resourceClaimManager.newResourceClaim("x", "x", "0", true, false), 0L))
        .size(1L)
        .build();
    flowFileQueue.put(flowFileRecord);

    // Attempt to rewrite the data. A StreamCallback must first read the existing
    // content, so the missing claim triggers the same exception as a plain read.
    try {
        final FlowFile ff1 = session.get();
        session.write(ff1, new StreamCallback() {
            @Override
            public void process(InputStream in, OutputStream out) throws IOException {
            }
        });
        Assert.fail("Expected MissingFlowFileException");
    } catch (final MissingFlowFileException mffe) {
        // expected
    }
}
Use of org.apache.nifi.controller.repository.claim.StandardContentClaim in project nifi by apache.
Class TestStandardProcessSession, method testContentNotFoundExceptionThrownWhenUnableToReadDataStreamCallbackOffsetTooLarge:
@Test
public void testContentNotFoundExceptionThrownWhenUnableToReadDataStreamCallbackOffsetTooLarge() {
    final FlowFileRecord flowFileRecord = new StandardFlowFileRecord.Builder()
        .addAttribute("uuid", "12345678-1234-1234-1234-123456789012")
        .entryDate(System.currentTimeMillis())
        .contentClaim(new StandardContentClaim(resourceClaimManager.newResourceClaim("x", "x", "0", true, false), 0L))
        .build();
    flowFileQueue.put(flowFileRecord);

    // Write (empty) content to the first FlowFile, then transfer and commit it.
    FlowFile ff1 = session.get();
    ff1 = session.write(ff1, new OutputStreamCallback() {
        @Override
        public void process(OutputStream out) throws IOException {
        }
    });
    session.transfer(ff1);
    session.commit();

    // Enqueue a second record whose content claim offset (1000) lies beyond the
    // end of what was actually written, so its content cannot be found.
    final FlowFileRecord flowFileRecord2 = new StandardFlowFileRecord.Builder()
        .addAttribute("uuid", "12345678-1234-1234-1234-123456789012")
        .entryDate(System.currentTimeMillis())
        .contentClaim(new StandardContentClaim(resourceClaimManager.newResourceClaim("x", "x", "0", true, false), 0L))
        .contentClaimOffset(1000L)
        .size(1000L)
        .build();
    flowFileQueue.put(flowFileRecord2);

    // Attempt to read the data. The first get() drains the FlowFile transferred
    // above; the second returns the record with the out-of-range offset.
    try {
        session.get();
        final FlowFile ff2 = session.get();
        session.write(ff2, new StreamCallback() {
            @Override
            public void process(InputStream in, OutputStream out) throws IOException {
            }
        });
        Assert.fail("Expected MissingFlowFileException");
    } catch (final MissingFlowFileException mffe) {
        // expected: the session surfaces the repository's ContentNotFoundException
        // as a MissingFlowFileException
    }
}
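Although the method name references ContentNotFoundException, the test catches MissingFlowFileException: the process session translates the repository-level ContentNotFoundException into a session-level MissingFlowFileException before it reaches the caller. A simplified sketch of that translation (not the actual StandardProcessSession code, which also rolls back and penalizes the FlowFile; the helper name readOrThrow and the message text are hypothetical):

private InputStream readOrThrow(final ContentRepository contentRepository, final ContentClaim claim) throws IOException {
    try {
        return contentRepository.read(claim);
    } catch (final ContentNotFoundException cnfe) {
        // Hypothetical simplification: surface the missing content as a MissingFlowFileException.
        throw new MissingFlowFileException("Unable to find content for FlowFile", cnfe);
    }
}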
Use of org.apache.nifi.controller.repository.claim.StandardContentClaim in project nifi by apache.
Class VolatileContentRepository, method createLossTolerant:
private ContentClaim createLossTolerant() {
    final long id = idGenerator.getAndIncrement();
    final ResourceClaim resourceClaim = claimManager.newResourceClaim(CONTAINER_NAME, "section", String.valueOf(id), true, false);
    // The new content claim starts at offset 0 within its backing resource claim.
    final ContentClaim claim = new StandardContentClaim(resourceClaim, 0L);
    final ContentBlock contentBlock = new ContentBlock(claim, repoSize);
    claimManager.incrementClaimantCount(resourceClaim, true);
    claimMap.put(claim, contentBlock);
    logger.debug("Created {} and mapped to {}", claim, contentBlock);
    return claim;
}
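createLossTolerant() is private; callers reach it through the repository's public ContentRepository.create(lossTolerant) entry point and then write to the claim. A hedged usage sketch, assuming a configured VolatileContentRepository instance named contentRepository and java.nio.charset.StandardCharsets on the classpath; the payload is illustrative only:

// create(true) delegates to createLossTolerant() for loss-tolerant claims.
final ContentClaim claim = contentRepository.create(true);
try (final OutputStream out = contentRepository.write(claim)) {
    out.write("example payload".getBytes(StandardCharsets.UTF_8));
}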
Use of org.apache.nifi.controller.repository.claim.StandardContentClaim in project nifi by apache.
Class FileSystemRepository, method write:
private OutputStream write(final ContentClaim claim, final boolean append) throws IOException {
    if (claim == null) {
        throw new NullPointerException("ContentClaim cannot be null");
    }
    if (!(claim instanceof StandardContentClaim)) {
        // Just throw an Exception because it is not valid for this Repository.
        throw new IllegalArgumentException("Cannot write to " + claim + " because that Content Claim does not belong to this Content Repository");
    }
    final StandardContentClaim scc = (StandardContentClaim) claim;
    if (claim.getLength() > 0) {
        throw new IllegalArgumentException("Cannot write to " + claim + " because it has already been written to.");
    }
    final ByteCountingOutputStream claimStream = writableClaimStreams.get(scc.getResourceClaim());
    final int initialLength = append ? (int) Math.max(0, scc.getLength()) : 0;
    final ByteCountingOutputStream bcos = claimStream;
    final OutputStream out = new OutputStream() {
        private long bytesWritten = 0L;
        private boolean recycle = true;
        private boolean closed = false;

        @Override
        public String toString() {
            return "FileSystemRepository Stream [" + scc + "]";
        }

        @Override
        public synchronized void write(final int b) throws IOException {
            if (closed) {
                throw new IOException("Stream is closed");
            }
            try {
                bcos.write(b);
            } catch (final IOException ioe) {
                recycle = false;
                throw new IOException("Failed to write to " + this, ioe);
            }
            bytesWritten++;
            scc.setLength(bytesWritten + initialLength);
        }

        @Override
        public synchronized void write(final byte[] b) throws IOException {
            if (closed) {
                throw new IOException("Stream is closed");
            }
            try {
                bcos.write(b);
            } catch (final IOException ioe) {
                recycle = false;
                throw new IOException("Failed to write to " + this, ioe);
            }
            bytesWritten += b.length;
            scc.setLength(bytesWritten + initialLength);
        }

        @Override
        public synchronized void write(final byte[] b, final int off, final int len) throws IOException {
            if (closed) {
                throw new IOException("Stream is closed");
            }
            try {
                bcos.write(b, off, len);
            } catch (final IOException ioe) {
                recycle = false;
                throw new IOException("Failed to write to " + this, ioe);
            }
            bytesWritten += len;
            scc.setLength(bytesWritten + initialLength);
        }

        @Override
        public synchronized void flush() throws IOException {
            if (closed) {
                throw new IOException("Stream is closed");
            }
            bcos.flush();
        }

        @Override
        public synchronized void close() throws IOException {
            closed = true;
            if (alwaysSync) {
                ((FileOutputStream) bcos.getWrappedStream()).getFD().sync();
            }
            if (scc.getLength() < 0) {
                // If claim was not written to, set length to 0.
                scc.setLength(0L);
            }
            // If we've not yet hit the threshold for appending to a resource claim, add the claim
            // to the writableClaimQueue so that the Resource Claim can be used again when create()
            // is called. In this case, we don't have to actually close the file stream. Instead, we
            // can just add it onto the queue and continue to use it for the next content claim.
            final long resourceClaimLength = scc.getOffset() + scc.getLength();
            if (recycle && resourceClaimLength < maxAppendableClaimLength) {
                final ClaimLengthPair pair = new ClaimLengthPair(scc.getResourceClaim(), resourceClaimLength);
                // We check that writableClaimStreams contains the resource claim as a key, as a sanity check.
                // It should always be there. However, we have encountered a bug before where we archived content
                // before we should have. As a result, the Resource Claim and the associated OutputStream were
                // removed from the writableClaimStreams map, and this caused a NullPointerException. Worse, the
                // call here to writableClaimQueue.offer() meant that the ResourceClaim was then reused, which
                // resulted in an endless loop of NullPointerExceptions being thrown. As a result, we simply ensure
                // that the Resource Claim does in fact have an OutputStream associated with it before adding it
                // back to the writableClaimQueue.
                final boolean enqueued = writableClaimStreams.get(scc.getResourceClaim()) != null && writableClaimQueue.offer(pair);
                if (enqueued) {
                    LOG.debug("Claim length less than max; Adding {} back to Writable Claim Queue", this);
                } else {
                    writableClaimStreams.remove(scc.getResourceClaim());
                    resourceClaimManager.freeze(scc.getResourceClaim());
                    bcos.close();
                    LOG.debug("Claim length less than max; Closing {} because could not add back to queue", this);
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Stack trace: ", new RuntimeException("Stack Trace for closing " + this));
                    }
                }
            } else {
                // We've reached the limit for this claim. Don't add it back to our queue.
                // Instead, just remove it and move on.
                // Mark the claim as no longer being able to be written to.
                resourceClaimManager.freeze(scc.getResourceClaim());
                // Ensure that the claim is no longer on the queue.
                writableClaimQueue.remove(new ClaimLengthPair(scc.getResourceClaim(), resourceClaimLength));
                bcos.close();
                LOG.debug("Claim length >= max; Closing {}", this);
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Stack trace: ", new RuntimeException("Stack Trace for closing " + this));
                }
            }
        }
    };
    LOG.debug("Writing to {}", out);
    if (LOG.isTraceEnabled()) {
        LOG.trace("Stack trace: ", new RuntimeException("Stack Trace for writing to " + out));
    }
    return out;
}
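The recycling logic in close() exists so that many small ContentClaims can be packed into a single ResourceClaim file, avoiding one file per FlowFile on disk. A hedged illustration of the offset/length bookkeeping, with hypothetical sizes:

// Two content claims sharing one backing resource claim file. Each claim tracks
// its own starting offset and length within that file; the sizes are made up.
final StandardContentClaim first = new StandardContentClaim(resourceClaim, 0L);
first.setLength(400L);   // occupies bytes 0..399 of the file
final StandardContentClaim second = new StandardContentClaim(resourceClaim, first.getOffset() + first.getLength());
second.setLength(250L);  // occupies bytes 400..649 of the file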