Use of org.apache.storm.hive.common.HiveWriter in project storm by apache.
The class HiveState, method prepare:
public void prepare(Map conf, IMetricsContext metrics, int partitionIndex, int numPartitions) {
    try {
        // Kerberos is enabled only when both the principal and the keytab are configured.
        if (options.getKerberosPrincipal() == null && options.getKerberosKeytab() == null) {
            kerberosEnabled = false;
        } else if (options.getKerberosPrincipal() != null && options.getKerberosKeytab() != null) {
            kerberosEnabled = true;
        } else {
            throw new IllegalArgumentException("To enable Kerberos, need to set both KerberosPrincipal and KerberosKeytab");
        }
        if (kerberosEnabled) {
            try {
                ugi = HiveUtils.authenticate(options.getKerberosKeytab(), options.getKerberosPrincipal());
            } catch (HiveUtils.AuthenticationFailed ex) {
                LOG.error("Hive kerberos authentication failed " + ex.getMessage(), ex);
                throw new IllegalArgumentException(ex);
            }
        }
        // One writer per Hive endpoint, plus a single-threaded pool used to bound call time.
        allWriters = new ConcurrentHashMap<HiveEndPoint, HiveWriter>();
        String timeoutName = "hive-bolt-%d";
        this.callTimeoutPool = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder().setNameFormat(timeoutName).build());
        heartBeatTimer = new Timer();
        setupHeartBeatTimer();
    } catch (Exception e) {
        LOG.warn("unable to make connection to hive ", e);
    }
}
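For reference, prepare() reads the principal and keytab from the HiveOptions the state was built with. A minimal sketch of enabling Kerberos through that API; the metastore URI, table, column names, and credential paths are placeholders:

import org.apache.storm.hive.bolt.mapper.DelimitedRecordHiveMapper;
import org.apache.storm.hive.common.HiveOptions;
import org.apache.storm.tuple.Fields;

// Column names here are illustrative.
DelimitedRecordHiveMapper mapper = new DelimitedRecordHiveMapper()
        .withColumnFields(new Fields("id", "name"));

// prepare() requires both settings; supplying only one of the two
// triggers the IllegalArgumentException seen above.
HiveOptions options = new HiveOptions("thrift://metastore-host:9083", "default", "events", mapper)
        .withKerberosPrincipal("storm/host@EXAMPLE.COM")
        .withKerberosKeytab("/etc/security/keytabs/storm.keytab");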
Use of org.apache.storm.hive.common.HiveWriter in project storm by apache.
The class HiveBolt, method execute:
@Override
public void execute(Tuple tuple) {
    try {
        if (batchHelper.shouldHandle(tuple)) {
            // Route the tuple to the writer for its partition's Hive endpoint.
            List<String> partitionVals = options.getMapper().mapPartitions(tuple);
            HiveEndPoint endPoint = HiveUtils.makeEndPoint(partitionVals, options);
            HiveWriter writer = getOrCreateWriter(endPoint);
            writer.write(options.getMapper().mapRecord(tuple));
            batchHelper.addBatch(tuple);
        }
        if (batchHelper.shouldFlush()) {
            flushAllWriters(true);
            LOG.info("acknowledging tuples after writers flushed");
            batchHelper.ack();
        }
        if (TupleUtils.isTick(tuple)) {
            retireIdleWriters();
        }
    } catch (SerializationError se) {
        LOG.info("Serialization exception occurred, tuple {} is acknowledged but not written to Hive.", tuple);
        this.collector.reportError(se);
        collector.ack(tuple);
    } catch (Exception e) {
        batchHelper.fail(e);
        abortAndCloseWriters();
    }
}
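To see where mapPartitions and mapRecord get their data, here is a minimal sketch of wiring a HiveBolt with a DelimitedRecordHiveMapper; the field names, metastore URI, and spout id are placeholders:

import org.apache.storm.hive.bolt.HiveBolt;
import org.apache.storm.hive.bolt.mapper.DelimitedRecordHiveMapper;
import org.apache.storm.hive.common.HiveOptions;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.tuple.Fields;

// "city" and "state" feed mapPartitions(tuple); "id" and "name" feed mapRecord(tuple).
DelimitedRecordHiveMapper mapper = new DelimitedRecordHiveMapper()
        .withColumnFields(new Fields("id", "name"))
        .withPartitionFields(new Fields("city", "state"));

HiveOptions options = new HiveOptions("thrift://metastore-host:9083", "default", "events", mapper)
        .withBatchSize(100);   // batchHelper.shouldFlush() turns true once the batch reaches this size

TopologyBuilder builder = new TopologyBuilder();
builder.setBolt("hive-bolt", new HiveBolt(options), 1).shuffleGrouping("event-spout");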
Use of org.apache.storm.hive.common.HiveWriter in project storm by apache.
The class HiveBolt, method retire:
private void retire(HiveEndPoint ep) {
    try {
        HiveWriter writer = allWriters.remove(ep);
        if (writer != null) {
            LOG.info("Closing idle Writer to Hive end point : {}", ep);
            writer.flushAndClose();
        }
    } catch (IOException e) {
        LOG.warn("Failed to close writer for end point: " + ep, e);
    } catch (InterruptedException e) {
        LOG.warn("Interrupted when attempting to close writer for end point: " + ep, e);
        Thread.currentThread().interrupt();
    } catch (Exception e) {
        LOG.warn("Error while attempting to close writer for end point: " + ep, e);
    }
}
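retire(ep) is driven by retireIdleWriters(), which execute above invokes on tick tuples. A plausible sketch of that caller, assuming HiveWriter.getLastUsed() reports the time of the writer's last use:

private int retireIdleWriters() {
    int count = 0;
    long now = System.currentTimeMillis();
    for (Entry<HiveEndPoint, HiveWriter> entry : allWriters.entrySet()) {
        // Retire writers that have been idle longer than the configured timeout.
        if (now - entry.getValue().getLastUsed() > options.getIdleTimeout()) {
            retire(entry.getKey());
            count++;
        }
    }
    return count;
}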
Use of org.apache.storm.hive.common.HiveWriter in project storm by apache.
The class HiveState, method cleanup:
public void cleanup() {
    for (Entry<HiveEndPoint, HiveWriter> entry : allWriters.entrySet()) {
        try {
            // Stop heartbeats before flushing and closing each writer.
            sendHeartBeat = false;
            HiveWriter w = entry.getValue();
            LOG.info("Flushing writer to {}", w);
            w.flush(false);
            LOG.info("Closing writer to {}", w);
            w.close();
        } catch (Exception ex) {
            LOG.warn("Error while closing writer to " + entry.getKey() + ". Exception follows.", ex);
            if (ex instanceof InterruptedException) {
                Thread.currentThread().interrupt();
            }
        }
    }
    // Shut down the call-timeout pool and wait for in-flight Hive calls to finish.
    ExecutorService[] toShutdown = { callTimeoutPool };
    for (ExecutorService execService : toShutdown) {
        execService.shutdown();
        try {
            while (!execService.isTerminated()) {
                execService.awaitTermination(options.getCallTimeOut(), TimeUnit.MILLISECONDS);
            }
        } catch (InterruptedException ex) {
            LOG.warn("shutdown interrupted on " + execService, ex);
        }
    }
    heartBeatTimer.cancel();
    callTimeoutPool = null;
}
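HiveState is typically created through HiveStateFactory in a Trident topology, which is where the prepare/cleanup lifecycle above comes from. A minimal sketch of that wiring, assuming a Stream named stream carrying fields "id" and "name" and the options object from earlier:

import org.apache.storm.hive.trident.HiveStateFactory;
import org.apache.storm.hive.trident.HiveUpdater;
import org.apache.storm.trident.TridentState;
import org.apache.storm.tuple.Fields;

// The factory creates a HiveState per partition; Trident calls prepare() when
// the state is created, and cleanup() releases writers and the timeout pool.
HiveStateFactory factory = new HiveStateFactory().withOptions(options);
TridentState state = stream.partitionPersist(
        factory, new Fields("id", "name"), new HiveUpdater(), new Fields());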
Use of org.apache.storm.hive.common.HiveWriter in project storm by apache.
The class HiveState, method getOrCreateWriter:
private HiveWriter getOrCreateWriter(HiveEndPoint endPoint) throws HiveWriter.ConnectFailure, InterruptedException {
    try {
        HiveWriter writer = allWriters.get(endPoint);
        if (writer == null) {
            LOG.info("Creating Writer to Hive end point : " + endPoint);
            writer = HiveUtils.makeHiveWriter(endPoint, callTimeoutPool, ugi, options, tokenAuthEnabled);
            // Stay under maxOpenConnections: retire idle writers first, and fall
            // back to evicting the least recently used writer if none are idle.
            if (allWriters.size() > (options.getMaxOpenConnections() - 1)) {
                int retired = retireIdleWriters();
                if (retired == 0) {
                    retireEldestWriter();
                }
            }
            allWriters.put(endPoint, writer);
        }
        return writer;
    } catch (HiveWriter.ConnectFailure e) {
        LOG.error("Failed to create HiveWriter for endpoint: " + endPoint, e);
        throw e;
    }
}
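retireEldestWriter() is the fallback used above when no writer is idle. A sketch of a least-recently-used eviction, again assuming HiveWriter.getLastUsed():

private void retireEldestWriter() {
    long oldestTimeStamp = System.currentTimeMillis();
    HiveEndPoint eldest = null;
    // Find the writer with the least recent use.
    for (Entry<HiveEndPoint, HiveWriter> entry : allWriters.entrySet()) {
        if (entry.getValue().getLastUsed() < oldestTimeStamp) {
            eldest = entry.getKey();
            oldestTimeStamp = entry.getValue().getLastUsed();
        }
    }
    try {
        LOG.info("Closing eldest writer to Hive end point: {}", eldest);
        allWriters.remove(eldest).flushAndClose();
    } catch (IOException e) {
        LOG.warn("Failed to close eldest writer for end point: " + eldest, e);
    } catch (InterruptedException e) {
        LOG.warn("Interrupted when closing eldest writer for end point: " + eldest, e);
        Thread.currentThread().interrupt();
    }
}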