Example usage of org.apache.nifi.processor.ProcessContext in the Apache NiFi project: class TestHttpFlowFileServerProtocol, method testReceiveZeroFile.
// Receiving from an empty HTTP input stream should complete successfully
// and report that zero flow files were transferred.
@Test
public void testReceiveZeroFile() throws Exception {
    final HttpFlowFileServerProtocol protocol = getDefaultHttpFlowFileServerProtocol();
    final Peer peer = getDefaultPeer("testReceiveZeroFile");

    final HttpServerCommunicationsSession session = (HttpServerCommunicationsSession) peer.getCommunicationsSession();
    session.setUserDn("unit-test");

    protocol.handshake(peer);
    assertTrue(protocol.isHandshakeSuccessful());

    final FlowFileCodec codec = protocol.negotiateCodec(peer);

    final ProcessContext context = null;
    final ProcessSession mockedSession = mock(ProcessSession.class);

    // Feed an empty byte stream as the incoming HTTP request body.
    final InputStream emptyBody = new ByteArrayInputStream(new byte[0]);
    ((HttpInput) session.getInput()).setInputStream(emptyBody);

    // Execute test using mock
    final int received = protocol.receiveFlowFiles(peer, context, mockedSession, codec);
    assertEquals(0, received);
}
Example usage of org.apache.nifi.processor.ProcessContext in the Apache NiFi project: class TestHttpFlowFileServerProtocol, method testShutdown.
// After shutdown(), both transferFlowFiles and receiveFlowFiles must be
// rejected with IllegalStateException.
@Test
public void testShutdown() throws Exception {
    final HttpFlowFileServerProtocol serverProtocol = getDefaultHttpFlowFileServerProtocol();
    final Peer peer = getDefaultPeer();

    serverProtocol.handshake(peer);
    assertTrue(serverProtocol.isHandshakeSuccessful());

    final FlowFileCodec negotiatedCodec = serverProtocol.negotiateCodec(peer);
    assertTrue(negotiatedCodec instanceof StandardFlowFileCodec);
    assertEquals(negotiatedCodec, serverProtocol.getPreNegotiatedCodec());
    assertEquals(1234, serverProtocol.getRequestExpiration());

    serverProtocol.shutdown(peer);

    // context/session may be null: the calls must fail before either is used.
    final ProcessContext context = null;
    final ProcessSession processSession = null;
    try {
        serverProtocol.transferFlowFiles(peer, context, processSession, negotiatedCodec);
        fail("transferFlowFiles should fail since it's already shutdown.");
    } catch (final IllegalStateException ignored) {
        // expected: the protocol rejects operations after shutdown
    }
    try {
        serverProtocol.receiveFlowFiles(peer, context, processSession, negotiatedCodec);
        fail("receiveFlowFiles should fail since it's already shutdown.");
    } catch (final IllegalStateException ignored) {
        // expected: the protocol rejects operations after shutdown
    }
}
Example usage of org.apache.nifi.processor.ProcessContext in the Apache NiFi project: class TestHttpFlowFileServerProtocol, method testTransferZeroFile.
// Transferring when no flow files are queued should report a count of zero.
@Test
public void testTransferZeroFile() throws Exception {
    final HttpFlowFileServerProtocol protocol = getDefaultHttpFlowFileServerProtocol();
    final Peer peer = getDefaultPeer();

    protocol.handshake(peer);
    assertTrue(protocol.isHandshakeSuccessful());

    final FlowFileCodec codec = protocol.negotiateCodec(peer);

    final ProcessContext context = null;
    final ProcessSession mockedSession = mock(ProcessSession.class);

    // Execute test using mock
    final int sent = protocol.transferFlowFiles(peer, context, mockedSession, codec);
    assertEquals(0, sent);
}
Example usage of org.apache.nifi.processor.ProcessContext in the Apache NiFi project: class ITPutS3Object, method testS3MultipartAgeoff.
// Verifies that aged-off multipart uploads are listed, and purged as the
// reference timestamp advances past the configured max age.
@Ignore
@Test
public void testS3MultipartAgeoff() throws InterruptedException, IOException {
    final PutS3Object processor = new PutS3Object();
    final TestRunner runner = TestRunners.newTestRunner(processor);
    final ProcessContext context = runner.getProcessContext();
    runner.setProperty(PutS3Object.CREDENTIALS_FILE, CREDENTIALS_FILE);
    runner.setProperty(PutS3Object.REGION, REGION);
    runner.setProperty(PutS3Object.BUCKET, BUCKET_NAME);
    // set check interval and age off to minimum values
    runner.setProperty(PutS3Object.MULTIPART_S3_AGEOFF_INTERVAL, "1 milli");
    runner.setProperty(PutS3Object.MULTIPART_S3_MAX_AGE, "1 milli");
    // create some dummy uploads (primitive int counter — avoids autoboxing)
    for (int i = 0; i < 3; i++) {
        final InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(BUCKET_NAME, "file" + i + ".txt");
        try {
            client.initiateMultipartUpload(initiateRequest);
        } catch (AmazonClientException e) {
            Assert.fail("Failed to initiate upload: " + e.getMessage());
        }
    }
    // Age off is time dependent, so test has some timing constraints. This
    // sleep() delays long enough to satisfy interval and age intervals.
    Thread.sleep(2000L);
    // System millis are used for timing, but it is incremented on each
    // call to circumvent what appears to be caching in the AWS library.
    // The increments are 1000 millis because AWS returns upload
    // initiation times in whole seconds.
    final long now = System.currentTimeMillis();
    MultipartUploadListing uploadList = processor.getS3AgeoffListAndAgeoffLocalState(context, client, now);
    Assert.assertEquals(3, uploadList.getMultipartUploads().size());
    MultipartUpload upload0 = uploadList.getMultipartUploads().get(0);
    processor.abortS3MultipartUpload(client, BUCKET_NAME, upload0);
    uploadList = processor.getS3AgeoffListAndAgeoffLocalState(context, client, now + 1000);
    Assert.assertEquals(2, uploadList.getMultipartUploads().size());
    final Map<String, String> attrs = new HashMap<>();
    attrs.put("filename", "test-upload.txt");
    runner.enqueue(getResourcePath(SAMPLE_FILE_RESOURCE_NAME), attrs);
    runner.run();
    uploadList = processor.getS3AgeoffListAndAgeoffLocalState(context, client, now + 2000);
    Assert.assertEquals(0, uploadList.getMultipartUploads().size());
}
Example usage of org.apache.nifi.processor.ProcessContext in the Apache NiFi project: class ITPutS3Object, method testEndpointOverride.
// When ENDPOINT_OVERRIDE is set, the client's resource URL must be built
// from the overridden endpoint rather than the default AWS endpoint.
@Test
public void testEndpointOverride() {
    // remove leading "/" from filename to avoid duplicate separators
    final String testKey = AbstractS3IT.SAMPLE_FILE_RESOURCE_NAME.substring(1);

    final PutS3Object processor = new TestablePutS3Object();
    final TestRunner runner = TestRunners.newTestRunner(processor);
    final ProcessContext context = runner.getProcessContext();

    runner.setProperty(PutS3Object.ENDPOINT_OVERRIDE, TEST_ENDPOINT);
    runner.setProperty(PutS3Object.BUCKET, BUCKET_NAME);
    runner.setProperty(PutS3Object.KEY, testKey);
    runner.run();

    Assert.assertEquals(BUCKET_NAME, context.getProperty(PutS3Object.BUCKET).toString());
    Assert.assertEquals(testKey, context.getProperty(PutS3Object.KEY).evaluateAttributeExpressions().toString());
    Assert.assertEquals(TEST_ENDPOINT, context.getProperty(PutS3Object.ENDPOINT_OVERRIDE).toString());

    final String resourceUrl = ((TestablePutS3Object) processor).testable_getClient().getResourceUrl(BUCKET_NAME, testKey);
    final String expectedUrl = TEST_ENDPOINT + "/" + BUCKET_NAME + "/" + testKey;
    Assert.assertEquals(expectedUrl, resourceUrl);
}
Aggregations