Use of com.alibaba.jstorm.utils.Pair in project jstorm by alibaba.
The class SyncProcessEvent, method checkNewWorkers.
/**
 * Check new workers: if the elapsed time is not greater than SUPERVISOR_WORKER_START_TIMEOUT_SECS, keep waiting; otherwise report the worker start as failed.
 */
public void checkNewWorkers(Map conf) throws IOException, InterruptedException {
    Set<String> workers = new HashSet<>();
    for (Entry<String, Pair<Integer, Integer>> entry : workerIdToStartTimeAndPort.entrySet()) {
        String workerId = entry.getKey();
        int startTime = entry.getValue().getFirst();
        LocalState ls = StormConfig.worker_state(conf, workerId);
        WorkerHeartbeat whb = (WorkerHeartbeat) ls.get(Common.LS_WORKER_HEARTBEAT);
        if (whb == null) {
            if ((TimeUtils.current_time_secs() - startTime) < JStormUtils.parseInt(conf.get(Config.SUPERVISOR_WORKER_START_TIMEOUT_SECS))) {
                LOG.info(workerId + " still hasn't started");
            } else {
                LOG.error("Failed to start Worker " + workerId);
                workers.add(workerId);
            }
        } else {
            LOG.info("Successfully start worker " + workerId);
            workers.add(workerId);
        }
    }
    // clean up bookkeeping for workers that either started successfully or timed out
    for (String workerId : workers) {
        Integer port = this.workerIdToStartTimeAndPort.get(workerId).getSecond();
        this.workerIdToStartTimeAndPort.remove(workerId);
        this.portToWorkerId.remove(port);
    }
}
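For reference, the com.alibaba.jstorm.utils.Pair used throughout these snippets is a plain two-field holder. A minimal sketch, assuming only the members actually exercised above (a two-argument constructor plus getFirst/getSecond); the real class may carry additional methods not shown here:

public class Pair<F, S> {
    private F first;   // e.g. a worker start time, a MessageId, or a MetricMeta
    private S second;  // e.g. a worker port, the tuple values, or a metric id

    public Pair(F first, S second) {
        this.first = first;
        this.second = second;
    }

    public F getFirst() {
        return first;
    }

    public S getSecond() {
        return second;
    }
}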
Use of com.alibaba.jstorm.utils.Pair in project jstorm by alibaba.
The class RefreshEvent, method syncMetaFromRemote.
private void syncMetaFromRemote(String topologyId, TopologyMetricContext tmContext) {
    try {
        int memSize = tmContext.getMemMeta().size();
        //Integer zkSize = (Integer) context.getStormClusterState().get_topology_metric(topologyId);
        Set<String> added = new HashSet<>();
        List<Pair<MetricMeta, Long>> pairsToCheck = new ArrayList<>();
        ConcurrentMap<String, Long> memMeta = tmContext.getMemMeta();
        for (MetaType metaType : MetaType.values()) {
            List<MetricMeta> metaList = context.getMetricQueryClient().getMetricMeta(context.getClusterName(), topologyId, metaType);
            if (metaList != null) {
                LOG.debug("get remote metric meta, topology:{}, metaType:{}, local mem:{}, remote:{}", topologyId, metaType, memSize, metaList.size());
                for (MetricMeta meta : metaList) {
                    String fqn = meta.getFQN();
                    if (added.contains(fqn)) {
                        Long existingId = memMeta.get(fqn);
                        if (existingId != null && existingId != meta.getId()) {
                            LOG.warn("duplicate remote metric meta:{}, will double-check...", fqn);
                            pairsToCheck.add(new Pair<>(meta, existingId));
                        }
                    } else {
                        // force remote to overwrite local meta
                        LOG.debug("overwrite local from remote:{}", fqn);
                        added.add(fqn);
                        memMeta.put(fqn, meta.getId());
                    }
                }
            }
        }
        context.getMetricCache().putMeta(topologyId, memMeta);
        if (pairsToCheck.size() > 0) {
            CheckMetricEvent.pushEvent(topologyId, tmContext, pairsToCheck);
        }
    } catch (Exception ex) {
        LOG.error("failed to sync remote meta", ex);
    }
}
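Each entry queued in pairsToCheck binds the remote MetricMeta to the metric id currently held in memory for the same FQN, so both sides remain available for the deferred comparison. An illustrative consumer of those pairs (a sketch, not the actual CheckMetricEvent logic):

// illustrative only: walk the (remote meta, local id) pairs queued for a later double-check
for (Pair<MetricMeta, Long> pair : pairsToCheck) {
    MetricMeta remoteMeta = pair.getFirst();   // meta fetched via the metric query client
    long localId = pair.getSecond();           // id recorded in the in-memory meta map
    if (remoteMeta.getId() != localId) {
        LOG.info("metric {} still has conflicting ids: remote={}, local={}", remoteMeta.getFQN(), remoteMeta.getId(), localId);
    }
}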
Use of com.alibaba.jstorm.utils.Pair in project jstorm by alibaba.
The class BoltBatchCollector, method sendBatch.
public List<Integer> sendBatch(String outStreamId, String outTaskId, List<MsgInfo> batchTobeFlushed, boolean isFlush) {
    final long start = emitTimer.getTime();
    try {
        Map<Object, List<MsgInfo>> outTasks = null;
        if (outTaskId != null) {
            outTasks = sendTargets.getBatch(Integer.valueOf(outTaskId), outStreamId, batchTobeFlushed);
        } else {
            outTasks = sendTargets.getBatch(outStreamId, batchTobeFlushed);
        }
        if (outTasks == null || outTasks.size() == 0) {
            // nothing to send for this stream
        } else {
            for (Map.Entry<Object, List<MsgInfo>> entry : outTasks.entrySet()) {
                Object target = entry.getKey();
                List<Integer> tasks = (target instanceof Integer) ? JStormUtils.mk_list((Integer) target) : ((List<Integer>) target);
                List<MsgInfo> batch = entry.getValue();
                for (Integer t : tasks) {
                    // pack each message as a (MessageId, values) pair and send them all in one batch tuple
                    List<Object> batchValues = new ArrayList<Object>();
                    for (MsgInfo msg : batch) {
                        BoltMsgInfo msgInfo = (BoltMsgInfo) msg;
                        Pair<MessageId, List<Object>> pair = new Pair<MessageId, List<Object>>(getMessageId(msgInfo.anchors), msgInfo.values);
                        batchValues.add(pair);
                    }
                    TupleImplExt batchTuple = new TupleImplExt(topologyContext, batchValues, task_id, outStreamId, null);
                    batchTuple.setTargetTaskId(t);
                    batchTuple.setBatchTuple(true);
                    taskTransfer.transfer(batchTuple);
                }
                for (MsgInfo msg : batch) {
                    if (msg.callback != null) {
                        msg.callback.execute(outStreamId, tasks, msg.values);
                    }
                }
            }
        }
        // decrement the pending count of each anchor tuple; drop it once no batched messages reference it
        for (MsgInfo msg : batchTobeFlushed) {
            Collection<Tuple> anchors = ((BoltMsgInfo) msg).anchors;
            if (anchors != null && anchors.size() > 0) {
                for (Tuple a : anchors) {
                    synchronized (pendingTuples) {
                        Integer pendingCount = pendingTuples.get(a);
                        if (pendingCount != null) {
                            if (--pendingCount <= 0) {
                                pendingTuples.remove(a);
                            } else {
                                pendingTuples.put(a, pendingCount);
                            }
                        }
                    }
                }
            }
        }
        return null;
    } catch (Exception e) {
        LOG.error("bolt emit", e);
    } finally {
        emitTimer.updateTime(start);
    }
    return new ArrayList<Integer>();
}
Use of com.alibaba.jstorm.utils.Pair in project jstorm by alibaba.
The class BoltExecutors, method processTupleBatchEvent.
private void processTupleBatchEvent(Tuple tuple) {
    try {
        if ((!isSystemBolt && tuple.getSourceStreamId().equals(Common.TOPOLOGY_MASTER_CONTROL_STREAM_ID)) || tuple.getSourceStreamId().equals(Common.TOPOLOGY_MASTER_REGISTER_METRICS_RESP_STREAM_ID)) {
            if (tuple.getValues().get(0) instanceof Pair) {
                // unpack each (MessageId, values) pair of the batch back into an individual tuple
                for (Object value : tuple.getValues()) {
                    Pair<MessageId, List<Object>> val = (Pair<MessageId, List<Object>>) value;
                    TupleImplExt t = new TupleImplExt(sysTopologyCtx, val.getSecond(), val.getFirst(), ((TupleImplExt) tuple));
                    processTupleEvent(t);
                }
            }
        } else {
            bolt.execute(tuple);
        }
    } catch (Throwable e) {
        error = e;
        LOG.error("bolt execute error ", e);
        report_error.report(e);
    }
}
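The BoltBatchCollector.sendBatch method above packs messages and the two BoltExecutors methods (processTupleBatchEvent above, onEvent below) unpack them, all relying on the same contract: every element of a batch tuple's value list is a Pair whose first field is the MessageId built from the anchors and whose second field is the original output values. A standalone sketch of that round trip, with hypothetical helper names (packBatch/unpackBatch) standing in for JStorm's transfer machinery:

// hypothetical helpers illustrating the batch contract; not part of JStorm's API
static List<Object> packBatch(List<Pair<MessageId, List<Object>>> messages) {
    // pack side (cf. sendBatch): each outgoing message is one (MessageId, values) element
    return new ArrayList<Object>(messages);
}

static void unpackBatch(List<Object> batchValues) {
    // unpack side (cf. processTupleBatchEvent / onEvent): recover id and values per element
    for (Object value : batchValues) {
        @SuppressWarnings("unchecked")
        Pair<MessageId, List<Object>> val = (Pair<MessageId, List<Object>>) value;
        MessageId id = val.getFirst();
        List<Object> values = val.getSecond();
        // a real consumer would rebuild a TupleImplExt from id and values at this point
    }
}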
Use of com.alibaba.jstorm.utils.Pair in project jstorm by alibaba.
The class BoltExecutors, method onEvent.
@Override
public void onEvent(Object event, long sequence, boolean endOfBatch) throws Exception {
    if (event == null) {
        return;
    }
    long start = System.currentTimeMillis();
    try {
        if (event instanceof Tuple) {
            Tuple tuple = (Tuple) event;
            int tupleNum = 1;
            Long startTime = System.currentTimeMillis();
            long lifeCycleStart = ((TupleExt) tuple).getCreationTimeStamp();
            task_stats.tupleLifeCycle(tuple.getSourceComponent(), tuple.getSourceStreamId(), lifeCycleStart, startTime);
            if (((TupleExt) tuple).isBatchTuple()) {
                // a batch tuple's values are (MessageId, values) pairs, one per original message
                List<Object> values = ((Tuple) event).getValues();
                tupleNum = values.size();
                if (bolt instanceof IRichBatchBolt) {
                    processControlEvent();
                    processTupleBatchEvent(tuple);
                } else {
                    for (Object value : values) {
                        Pair<MessageId, List<Object>> val = (Pair<MessageId, List<Object>>) value;
                        TupleImplExt t = new TupleImplExt(sysTopologyCtx, val.getSecond(), val.getFirst(), ((TupleImplExt) event));
                        processControlEvent();
                        processTupleEvent(t);
                    }
                }
            } else {
                processTupleEvent(tuple);
            }
            task_stats.recv_tuple(tuple.getSourceComponent(), tuple.getSourceStreamId(), tupleNum);
            if (ackerNum == 0) {
                // get tuple process latency
                if (JStormMetrics.enabled) {
                    long endTime = System.currentTimeMillis();
                    task_stats.update_bolt_acked_latency(tuple.getSourceComponent(), tuple.getSourceStreamId(), startTime, endTime, tupleNum);
                }
            }
        } else if (event instanceof TimerTrigger.TimerEvent) {
            processTimerEvent((TimerTrigger.TimerEvent) event);
        } else {
            LOG.warn("Bolt executor received unknown message");
        }
    } finally {
        if (JStormMetrics.enabled) {
            exeTime = System.currentTimeMillis() - start;
        }
    }
}