Use of scala.runtime.BoxedUnit in project nifi by apache.
The class PutDruidRecord, method processFlowFile.
/**
* Parses the record(s), converts each to a Map, and sends via Tranquility to the Druid Indexing Service
*
* @param context The process context
* @param session The process session
*/
@SuppressWarnings("unchecked")
private void processFlowFile(ProcessContext context, final ProcessSession session) {
final ComponentLog log = getLogger();
// Get handle on Druid Tranquility session
DruidTranquilityService tranquilityController = context.getProperty(DRUID_TRANQUILITY_SERVICE).asControllerService(DruidTranquilityService.class);
Tranquilizer<Map<String, Object>> tranquilizer = tranquilityController.getTranquilizer();
FlowFile flowFile = session.get();
if (flowFile == null) {
return;
}
// Create the outgoing flow files and output streams
FlowFile droppedFlowFile = session.create(flowFile);
final AtomicInteger droppedFlowFileCount = new AtomicInteger(0);
FlowFile failedFlowFile = session.create(flowFile);
final AtomicInteger failedFlowFileCount = new AtomicInteger(0);
FlowFile successfulFlowFile = session.create(flowFile);
final AtomicInteger successfulFlowFileCount = new AtomicInteger(0);
final AtomicInteger recordWriteErrors = new AtomicInteger(0);
int recordCount = 0;
final OutputStream droppedOutputStream = session.write(droppedFlowFile);
final RecordSetWriter droppedRecordWriter;
final OutputStream failedOutputStream = session.write(failedFlowFile);
final RecordSetWriter failedRecordWriter;
final OutputStream successfulOutputStream = session.write(successfulFlowFile);
final RecordSetWriter successfulRecordWriter;
try (final InputStream in = session.read(flowFile)) {
final RecordReaderFactory recordParserFactory = context.getProperty(RECORD_READER_FACTORY).asControllerService(RecordReaderFactory.class);
final RecordSetWriterFactory writerFactory = context.getProperty(RECORD_WRITER_FACTORY).asControllerService(RecordSetWriterFactory.class);
final Map<String, String> attributes = flowFile.getAttributes();
final RecordReader reader = recordParserFactory.createRecordReader(flowFile, in, getLogger());
final RecordSchema outSchema = writerFactory.getSchema(attributes, reader.getSchema());
droppedRecordWriter = writerFactory.createWriter(log, outSchema, droppedOutputStream);
droppedRecordWriter.beginRecordSet();
failedRecordWriter = writerFactory.createWriter(log, outSchema, failedOutputStream);
failedRecordWriter.beginRecordSet();
successfulRecordWriter = writerFactory.createWriter(log, outSchema, successfulOutputStream);
successfulRecordWriter.beginRecordSet();
Record r;
while ((r = reader.nextRecord()) != null) {
final Record record = r;
recordCount++;
// Convert each Record to HashMap and send to Druid
Map<String, Object> contentMap = (Map<String, Object>) DataTypeUtils.convertRecordFieldtoObject(r, RecordFieldType.RECORD.getRecordDataType(r.getSchema()));
log.debug("Tranquilizer Status: {}", new Object[] { tranquilizer.status().toString() });
// Send data element to Druid asynchronously
Future<BoxedUnit> future = tranquilizer.send(contentMap);
log.debug("Sent Payload to Druid: {}", new Object[] { contentMap });
// Wait for Druid to call back with status
future.addEventListener(new FutureEventListener<Object>() {
@Override
public void onFailure(Throwable cause) {
if (cause instanceof MessageDroppedException) {
// This happens when event timestamp targets a Druid Indexing task that has closed (Late Arriving Data)
log.debug("Record Dropped due to MessageDroppedException: {}, transferring record to dropped.", new Object[] { cause.getMessage() }, cause);
try {
synchronized (droppedRecordWriter) {
droppedRecordWriter.write(record);
droppedRecordWriter.flush();
droppedFlowFileCount.incrementAndGet();
}
} catch (final IOException ioe) {
log.error("Error transferring record to dropped, this may result in data loss.", new Object[] { ioe.getMessage() }, ioe);
recordWriteErrors.incrementAndGet();
}
} else {
log.error("FlowFile Processing Failed due to: {}", new Object[] { cause.getMessage() }, cause);
try {
synchronized (failedRecordWriter) {
failedRecordWriter.write(record);
failedRecordWriter.flush();
failedFlowFileCount.incrementAndGet();
}
} catch (final IOException ioe) {
log.error("Error transferring record to failure, this may result in data loss.", new Object[] { ioe.getMessage() }, ioe);
recordWriteErrors.incrementAndGet();
}
}
}
@Override
public void onSuccess(Object value) {
log.debug(" FlowFile Processing Success: {}", new Object[] { value.toString() });
try {
synchronized (successfulRecordWriter) {
successfulRecordWriter.write(record);
successfulRecordWriter.flush();
successfulFlowFileCount.incrementAndGet();
}
} catch (final IOException ioe) {
log.error("Error transferring record to success, this may result in data loss. " + "However the record was successfully processed by Druid", new Object[] { ioe.getMessage() }, ioe);
recordWriteErrors.incrementAndGet();
}
}
});
}
} catch (IOException | SchemaNotFoundException | MalformedRecordException e) {
log.error("FlowFile Processing Failed due to: {}", new Object[] { e.getMessage() }, e);
// The FlowFile will be obtained and the error logged below, when calling publishResult.getFailedFlowFiles()
flowFile = session.putAttribute(flowFile, RECORD_COUNT, Integer.toString(recordCount));
session.transfer(flowFile, REL_FAILURE);
try {
droppedOutputStream.close();
session.remove(droppedFlowFile);
} catch (IOException ioe) {
log.error("Error closing output stream for FlowFile with dropped records.", ioe);
}
try {
failedOutputStream.close();
session.remove(failedFlowFile);
} catch (IOException ioe) {
log.error("Error closing output stream for FlowFile with failed records.", ioe);
}
try {
successfulOutputStream.close();
session.remove(successfulFlowFile);
} catch (IOException ioe) {
log.error("Error closing output stream for FlowFile with successful records.", ioe);
}
session.commit();
return;
}
if (recordCount == 0) {
// Send original (empty) flow file to success, remove the rest
flowFile = session.putAttribute(flowFile, RECORD_COUNT, "0");
session.transfer(flowFile, REL_SUCCESS);
try {
droppedOutputStream.close();
session.remove(droppedFlowFile);
} catch (IOException ioe) {
log.error("Error closing output stream for FlowFile with dropped records.", ioe);
}
try {
failedOutputStream.close();
session.remove(failedFlowFile);
} catch (IOException ioe) {
log.error("Error closing output stream for FlowFile with failed records.", ioe);
}
try {
successfulOutputStream.close();
session.remove(successfulFlowFile);
} catch (IOException ioe) {
log.error("Error closing output stream for FlowFile with successful records.", ioe);
}
} else {
// Wait for all the records to finish processing
while (recordCount != (droppedFlowFileCount.get() + failedFlowFileCount.get() + successfulFlowFileCount.get() + recordWriteErrors.get())) {
Thread.yield();
}
try {
droppedRecordWriter.finishRecordSet();
droppedRecordWriter.close();
} catch (IOException ioe) {
log.error("Error closing FlowFile with dropped records: {}", new Object[] { ioe.getMessage() }, ioe);
session.rollback();
throw new ProcessException(ioe);
}
if (droppedFlowFileCount.get() > 0) {
droppedFlowFile = session.putAttribute(droppedFlowFile, RECORD_COUNT, Integer.toString(droppedFlowFileCount.get()));
session.transfer(droppedFlowFile, REL_DROPPED);
} else {
session.remove(droppedFlowFile);
}
try {
failedRecordWriter.finishRecordSet();
failedRecordWriter.close();
} catch (IOException ioe) {
log.error("Error closing FlowFile with failed records: {}", new Object[] { ioe.getMessage() }, ioe);
session.rollback();
throw new ProcessException(ioe);
}
if (failedFlowFileCount.get() > 0) {
failedFlowFile = session.putAttribute(failedFlowFile, RECORD_COUNT, Integer.toString(failedFlowFileCount.get()));
session.transfer(failedFlowFile, REL_FAILURE);
} else {
session.remove(failedFlowFile);
}
try {
successfulRecordWriter.finishRecordSet();
successfulRecordWriter.close();
} catch (IOException ioe) {
log.error("Error closing FlowFile with successful records: {}", new Object[] { ioe.getMessage() }, ioe);
session.rollback();
throw new ProcessException(ioe);
}
if (successfulFlowFileCount.get() > 0) {
successfulFlowFile = session.putAttribute(successfulFlowFile, RECORD_COUNT, Integer.toString(successfulFlowFileCount.get()));
session.transfer(successfulFlowFile, REL_SUCCESS);
session.getProvenanceReporter().send(successfulFlowFile, tranquilityController.getTransitUri());
} else {
session.remove(successfulFlowFile);
}
session.remove(flowFile);
}
session.commit();
}
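Note the completion barrier in the else branch above: the method spins on Thread.yield() until the three relationship counters plus the write-error counter account for every record, which works because each callback path increments exactly one counter. As an illustrative alternative only (not what the NiFi processor does), a java.util.concurrent.Phaser can register one party per in-flight record and block once at the end instead of spinning:

import java.util.concurrent.Phaser;

// Hypothetical completion barrier: one party per in-flight record,
// plus one party for the reader thread itself.
final Phaser inFlight = new Phaser(1); // party 0 = the reader thread
// per record, before tranquilizer.send(contentMap):
//     inFlight.register();
// in every callback outcome (success, dropped, failed, or write error):
//     inFlight.arriveAndDeregister();
// after the read loop completes:
inFlight.arriveAndAwaitAdvance(); // blocks until all registered parties arrive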
Use of scala.runtime.BoxedUnit in project distributedlog by twitter.
The class ZKSessionLock, method asyncTryLock.
@Override
public Future<LockWaiter> asyncTryLock(final long timeout, final TimeUnit unit) {
    final Promise<String> result = new Promise<String>();
    final boolean wait = DistributedLogConstants.LOCK_IMMEDIATE != timeout;
    if (wait) {
        asyncTryLock(wait, result);
    } else {
        // try to check locks first
        zk.getChildren(lockPath, null, new AsyncCallback.Children2Callback() {
            @Override
            public void processResult(final int rc, String path, Object ctx, final List<String> children, Stat stat) {
                lockStateExecutor.submit(lockPath, new SafeRunnable() {
                    @Override
                    public void safeRun() {
                        if (!lockState.inState(State.INIT)) {
                            result.setException(new LockStateChangedException(lockPath, lockId, State.INIT, lockState.getState()));
                            return;
                        }
                        if (KeeperException.Code.OK.intValue() != rc) {
                            result.setException(KeeperException.create(KeeperException.Code.get(rc)));
                            return;
                        }
                        FailpointUtils.checkFailPointNoThrow(FailpointUtils.FailPointName.FP_LockTryAcquire);
                        Collections.sort(children, MEMBER_COMPARATOR);
                        if (children.size() > 0) {
                            asyncParseClientID(zk, lockPath, children.get(0)).addEventListener(new FutureEventListener<Pair<String, Long>>() {
                                @Override
                                public void onSuccess(Pair<String, Long> owner) {
                                    if (!checkOrClaimLockOwner(owner, result)) {
                                        acquireFuture.updateIfEmpty(new Return<Boolean>(false));
                                    }
                                }

                                @Override
                                public void onFailure(final Throwable cause) {
                                    lockStateExecutor.submit(lockPath, new SafeRunnable() {
                                        @Override
                                        public void safeRun() {
                                            result.setException(cause);
                                        }
                                    });
                                }
                            });
                        } else {
                            asyncTryLock(wait, result);
                        }
                    }
                });
            }
        }, null);
    }
    final Promise<Boolean> waiterAcquireFuture = new Promise<Boolean>(new com.twitter.util.Function<Throwable, BoxedUnit>() {
        @Override
        public BoxedUnit apply(Throwable t) {
            acquireFuture.raise(t);
            return BoxedUnit.UNIT;
        }
    });
    return result.map(new AbstractFunction1<String, LockWaiter>() {
        @Override
        public LockWaiter apply(final String currentOwner) {
            final Exception acquireException = new OwnershipAcquireFailedException(lockPath, currentOwner);
            FutureUtils.within(acquireFuture, timeout, unit, acquireException, lockStateExecutor, lockPath).addEventListener(new FutureEventListener<Boolean>() {
                @Override
                public void onSuccess(Boolean acquired) {
                    completeOrFail(acquireException);
                }

                @Override
                public void onFailure(final Throwable acquireCause) {
                    completeOrFail(acquireException);
                }

                private void completeOrFail(final Throwable acquireCause) {
                    if (isLockHeld()) {
                        waiterAcquireFuture.setValue(true);
                    } else {
                        asyncUnlock().addEventListener(new FutureEventListener<BoxedUnit>() {
                            @Override
                            public void onSuccess(BoxedUnit value) {
                                waiterAcquireFuture.setException(acquireCause);
                            }

                            @Override
                            public void onFailure(Throwable cause) {
                                waiterAcquireFuture.setException(acquireCause);
                            }
                        });
                    }
                }
            });
            return new LockWaiter(lockId.getLeft(), currentOwner, waiterAcquireFuture);
        }
    });
}
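The waiterAcquireFuture above shows the interrupt-propagation idiom: the Promise constructor takes a Throwable => BoxedUnit handler that forwards any interrupt raised on the new promise to the upstream future, and the handler returns BoxedUnit.UNIT, the Java spelling of Scala's (). A minimal sketch of the same pattern, with `interruptible` and `upstream` as hypothetical stand-ins:

import com.twitter.util.Future;
import com.twitter.util.Promise;
import scala.runtime.BoxedUnit;

// Raising an interrupt on the returned promise forwards it to `upstream`.
Promise<String> interruptible(final Future<String> upstream) {
    return new Promise<String>(new com.twitter.util.Function<Throwable, BoxedUnit>() {
        @Override
        public BoxedUnit apply(Throwable t) {
            upstream.raise(t); // forward the interrupt
            return BoxedUnit.UNIT;
        }
    });
}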
Use of scala.runtime.BoxedUnit in project distributedlog by twitter.
The class ZKSessionLock, method asyncUnlock.
Future<BoxedUnit> asyncUnlock(final Throwable cause) {
final Promise<BoxedUnit> promise = new Promise<BoxedUnit>();
// Use lock executor here rather than lock action, because we want this opertaion to be applied
// whether the epoch has changed or not. The member node is EPHEMERAL_SEQUENTIAL so there's no
// risk of an ABA problem where we delete and recreate a node and then delete it again here.
lockStateExecutor.submit(lockPath, new SafeRunnable() {
@Override
public void safeRun() {
acquireFuture.updateIfEmpty(new Throw<Boolean>(cause));
unlockInternal(promise);
promise.addEventListener(new OpStatsListener<BoxedUnit>(unlockStats));
}
});
return promise;
}
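A Java method standing in for a Scala Future[Unit] returns Future<BoxedUnit>, and the only value it can ever complete with is the BoxedUnit.UNIT singleton. A minimal sketch of the convention (noResult is a hypothetical name; presumably unlockInternal completes the promise the same way):

import com.twitter.util.Future;
import com.twitter.util.Promise;
import scala.runtime.BoxedUnit;

// The Java rendering of a Scala Future[Unit]-returning method.
Future<BoxedUnit> noResult() {
    final Promise<BoxedUnit> done = new Promise<BoxedUnit>();
    // ... perform the asynchronous work, then signal completion with the
    // single BoxedUnit value (or done.setException(...) on failure):
    done.setValue(BoxedUnit.UNIT);
    return done;
}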
Use of scala.runtime.BoxedUnit in project distributedlog by twitter.
The class MonitoredFuturePool, method apply.
@Override
public <T> Future<T> apply(Function0<T> function0) {
    if (traceTaskExecution) {
        taskPendingCounter.inc();
        Stopwatch taskEnqueueStopwatch = Stopwatch.createStarted();
        Future<T> futureResult = futurePool.apply(new TimedFunction0<T>(function0));
        taskEnqueueTime.registerSuccessfulEvent(taskEnqueueStopwatch.elapsed(TimeUnit.MICROSECONDS));
        futureResult.ensure(new com.twitter.util.Function0<BoxedUnit>() {
            @Override
            public BoxedUnit apply() {
                taskPendingCounter.dec();
                return null;
            }
        });
        return futureResult;
    } else {
        return futurePool.apply(function0);
    }
}
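Note that the ensure() callback above returns null rather than BoxedUnit.UNIT; the result is discarded, so both work, but UNIT is the canonical value. A sketch of the stricter form, reusing the taskPendingCounter from the example:

import com.twitter.util.Function0;
import scala.runtime.BoxedUnit;

// Same cleanup callback, returning the unit singleton instead of null.
Function0<BoxedUnit> decrementPending = new Function0<BoxedUnit>() {
    @Override
    public BoxedUnit apply() {
        taskPendingCounter.dec(); // side effect only
        return BoxedUnit.UNIT;
    }
};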
Use of scala.runtime.BoxedUnit in project distributedlog by twitter.
The class StreamImpl, method scheduleTimeout.
void scheduleTimeout(final StreamOp op) {
    final Timeout timeout = requestTimer.newTimeout(new TimerTask() {
        @Override
        public void run(Timeout timeout) throws Exception {
            if (!timeout.isCancelled()) {
                serviceTimeout.inc();
                handleServiceTimeout("Operation " + op.getClass().getName() + " timeout");
            }
        }
    }, serviceTimeoutMs, TimeUnit.MILLISECONDS);
    op.responseHeader().ensure(new Function0<BoxedUnit>() {
        @Override
        public BoxedUnit apply() {
            timeout.cancel();
            return null;
        }
    });
}