Example usage of org.apache.hadoop.conf.Configured in the Apache HBase project:
the dumpReplicationQueues method of the DumpReplicationQueues class.
/**
 * Dumps the state of the cluster's replication peers and queues.
 *
 * <p>Lists replicated table/CF pairs and peer configurations; then, depending on
 * {@code opts}, either polls each RegionServer ({@code --distributed}) or dumps the
 * replication znodes directly from ZooKeeper.
 *
 * @param opts parsed command-line options (distributed vs. ZK mode, HDFS usage)
 * @return 0 on success (including "nothing replicated"), -1 on I/O failure
 * @throws Exception if cluster availability checks or connection setup fail
 */
private int dumpReplicationQueues(DumpOptions opts) throws Exception {
  Configuration conf = getConf();
  HBaseAdmin.available(conf);
  ClusterConnection connection = (ClusterConnection) ConnectionFactory.createConnection(conf);
  Admin admin = connection.getAdmin();
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf,
      "DumpReplicationQueues" + System.currentTimeMillis(), new WarnOnlyAbortable(), true);
  try {
    // Our zk watcher
    LOG.info("Our Quorum: " + zkw.getQuorum());
    List<TableCFs> replicatedTableCFs = admin.listReplicatedTableCFs();
    if (replicatedTableCFs.isEmpty()) {
      LOG.info("No tables with a configured replication peer were found.");
      return 0;
    }
    LOG.info("Replicated Tables: " + replicatedTableCFs);
    List<ReplicationPeerDescription> peers = admin.listReplicationPeers();
    if (peers.isEmpty()) {
      LOG.info("Replication is enabled but no peer configuration was found.");
    }
    System.out.println("Dumping replication peers and configurations:");
    System.out.println(dumpPeersState(peers));
    if (opts.isDistributed()) {
      LOG.info("Found [--distributed], will poll each RegionServer.");
      Set<String> peerIds =
          peers.stream().map((peer) -> peer.getPeerId()).collect(Collectors.toSet());
      System.out.println(dumpQueues(connection, zkw, peerIds, opts.isHdfs()));
      System.out.println(dumpReplicationSummary());
    } else {
      // use ZK instead
      System.out.print("Dumping replication znodes via ZooKeeper:");
      System.out.println(ZKUtil.getReplicationZnodesDump(zkw));
    }
    return 0;
  } catch (IOException e) {
    // Previously this failure was silently swallowed; log it so operators can diagnose.
    LOG.error("Failed dumping replication queues", e);
    return -1;
  } finally {
    // Close in reverse order of acquisition; connection/admin were previously leaked.
    zkw.close();
    admin.close();
    connection.close();
  }
}
Example usage of org.apache.hadoop.conf.Configured in the LinkedIn Pinot project:
the run method of the ThirdEyeJob class.
/**
 * Resolves the requested phase from {@code phaseName} and executes it.
 *
 * <p>TRANSFORM, JOIN and WAIT phases are dispatched to their dedicated job classes;
 * all other phases are instantiated reflectively from {@link PhaseSpec#getKlazz()}
 * and run. If the phase launches a MapReduce {@link Job}, its final status is
 * checked and a failure raises a {@link RuntimeException}.
 *
 * @throws Exception on unknown phase names (after printing usage), missing or
 *         unparseable configuration, reflective instantiation errors, or job failure
 */
@SuppressWarnings("unchecked")
public void run() throws Exception {
  LOGGER.info("Input config:{}", inputConfig);
  PhaseSpec phaseSpec;
  try {
    phaseSpec = PhaseSpec.valueOf(phaseName.toUpperCase());
  } catch (Exception e) {
    usage();
    throw e;
  }
  // Phases with dedicated job classes are dispatched directly and return early.
  if (PhaseSpec.TRANSFORM.equals(phaseSpec)) {
    TransformPhaseJob job = new TransformPhaseJob("Transform Job", inputConfig);
    job.run();
    return;
  } else if (PhaseSpec.JOIN.equals(phaseSpec)) {
    JoinPhaseJob job = new JoinPhaseJob("Join Job", inputConfig);
    job.run();
    return;
  } else if (PhaseSpec.WAIT.equals(phaseSpec)) {
    WaitPhaseJob job = new WaitPhaseJob("Wait for inputs", inputConfig);
    job.run();
    return;
  }
  // Get root, collection, input paths
  String root = getAndCheck(ThirdEyeJobProperties.THIRDEYE_ROOT.getName(), inputConfig);
  String collection = getAndCheck(ThirdEyeJobProperties.THIRDEYE_COLLECTION.getName(), inputConfig);
  String inputPaths = getAndCheck(ThirdEyeJobProperties.INPUT_PATHS.getName(), inputConfig);
  // Get min / max time. Unlike the properties above, these were not validated before,
  // so a missing value caused an opaque NPE inside the date parser. Fail fast instead.
  String minTimeProp = inputConfig.getProperty(ThirdEyeJobProperties.THIRDEYE_TIME_MIN.getName());
  String maxTimeProp = inputConfig.getProperty(ThirdEyeJobProperties.THIRDEYE_TIME_MAX.getName());
  if (minTimeProp == null) {
    throw new IllegalArgumentException(
        "Missing required property: " + ThirdEyeJobProperties.THIRDEYE_TIME_MIN.getName());
  }
  if (maxTimeProp == null) {
    throw new IllegalArgumentException(
        "Missing required property: " + ThirdEyeJobProperties.THIRDEYE_TIME_MAX.getName());
  }
  DateTime minTime = ISODateTimeFormat.dateTimeParser().parseDateTime(minTimeProp);
  DateTime maxTime = ISODateTimeFormat.dateTimeParser().parseDateTime(maxTimeProp);
  Properties jobProperties =
      phaseSpec.getJobProperties(inputConfig, root, collection, minTime, maxTime, inputPaths);
  // Phase-derived properties are overlaid with everything from the input config.
  for (Object key : inputConfig.keySet()) {
    jobProperties.setProperty(key.toString(), inputConfig.getProperty(key.toString()));
  }
  // Instantiate the job reflectively: every phase class exposes a (String, Properties) ctor.
  Constructor<Configured> constructor =
      (Constructor<Configured>) phaseSpec.getKlazz().getConstructor(String.class, Properties.class);
  Configured instance = constructor.newInstance(phaseSpec.getName(), jobProperties);
  setMapreduceConfig(instance.getConf());
  // Run the job
  Method runMethod = instance.getClass().getMethod("run");
  Job job = (Job) runMethod.invoke(instance);
  if (job != null) {
    JobStatus status = job.getStatus();
    if (status.getState() != JobStatus.State.SUCCEEDED) {
      throw new RuntimeException(
          "Job " + job.getJobName() + " failed to execute: Ran with config:" + jobProperties);
    }
  }
}
Aggregations