
Example 1 with Configured

Use of org.apache.hadoop.conf.Configured in project hbase by apache.

From the class DumpReplicationQueues, the method dumpReplicationQueues:

private int dumpReplicationQueues(DumpOptions opts) throws Exception {
    Configuration conf = getConf();
    HBaseAdmin.available(conf);
    ClusterConnection connection = (ClusterConnection) ConnectionFactory.createConnection(conf);
    Admin admin = connection.getAdmin();
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "DumpReplicationQueues" + System.currentTimeMillis(), new WarnOnlyAbortable(), true);
    try {
        // Our zk watcher
        LOG.info("Our Quorum: " + zkw.getQuorum());
        List<TableCFs> replicatedTableCFs = admin.listReplicatedTableCFs();
        if (replicatedTableCFs.isEmpty()) {
            LOG.info("No tables with a configured replication peer were found.");
            return (0);
        } else {
            LOG.info("Replicated Tables: " + replicatedTableCFs);
        }
        List<ReplicationPeerDescription> peers = admin.listReplicationPeers();
        if (peers.isEmpty()) {
            LOG.info("Replication is enabled but no peer configuration was found.");
        }
        System.out.println("Dumping replication peers and configurations:");
        System.out.println(dumpPeersState(peers));
        if (opts.isDistributed()) {
            LOG.info("Found [--distributed], will poll each RegionServer.");
            Set<String> peerIds = peers.stream().map((peer) -> peer.getPeerId()).collect(Collectors.toSet());
            System.out.println(dumpQueues(connection, zkw, peerIds, opts.isHdfs()));
            System.out.println(dumpReplicationSummary());
        } else {
            // use ZK instead
            System.out.print("Dumping replication znodes via ZooKeeper:");
            System.out.println(ZKUtil.getReplicationZnodesDump(zkw));
        }
        return (0);
    } catch (IOException e) {
        // Swallow the exception and signal failure to the caller with a non-zero exit code.
        return (-1);
    } finally {
        zkw.close();
    }
}
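
DumpReplicationQueues picks up its Configuration through getConf() because it extends Configured and implements Tool (both appear in the imports below). The following launcher is a minimal sketch, assuming the standard Configured/Tool wiring; the class name DumpReplicationQueuesLauncher and its main method are illustrative and not part of the HBase source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.util.ToolRunner;

public class DumpReplicationQueuesLauncher {

    public static void main(String[] args) throws Exception {
        // ToolRunner calls setConf() on the Configured tool before invoking run(),
        // which is why getConf() inside dumpReplicationQueues() returns a live Configuration.
        Configuration conf = HBaseConfiguration.create();
        int exitCode = ToolRunner.run(conf, new DumpReplicationQueues(), args);
        System.exit(exitCode);
    }
}

Run with --distributed, this would take the per-RegionServer polling branch shown above; without it, the tool falls back to dumping the replication znodes via ZooKeeper.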
Also used : StringUtils(org.apache.hadoop.hbase.procedure2.util.StringUtils) ReplicationTracker(org.apache.hadoop.hbase.replication.ReplicationTracker) Arrays(java.util.Arrays) FileSystem(org.apache.hadoop.fs.FileSystem) ReplicationFactory(org.apache.hadoop.hbase.replication.ReplicationFactory) FileStatus(org.apache.hadoop.fs.FileStatus) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) ArrayList(java.util.ArrayList) WALLink(org.apache.hadoop.hbase.io.WALLink) ReplicationQueueInfo(org.apache.hadoop.hbase.replication.ReplicationQueueInfo) Configured(org.apache.hadoop.conf.Configured) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) TableCFs(org.apache.hadoop.hbase.client.replication.TableCFs) AtomicLongMap(com.google.common.util.concurrent.AtomicLongMap) LinkedList(java.util.LinkedList) KeeperException(org.apache.zookeeper.KeeperException) ZKUtil(org.apache.hadoop.hbase.zookeeper.ZKUtil) Abortable(org.apache.hadoop.hbase.Abortable) ReplicationQueuesClientArguments(org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments) ToolRunner(org.apache.hadoop.util.ToolRunner) Set(java.util.Set) ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) IOException(java.io.IOException) ReplicationPeers(org.apache.hadoop.hbase.replication.ReplicationPeers) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) Collectors(java.util.stream.Collectors) Stoppable(org.apache.hadoop.hbase.Stoppable) FileNotFoundException(java.io.FileNotFoundException) ConnectionFactory(org.apache.hadoop.hbase.client.ConnectionFactory) Tool(org.apache.hadoop.util.Tool) List(java.util.List) ReplicationQueuesClient(org.apache.hadoop.hbase.replication.ReplicationQueuesClient) Admin(org.apache.hadoop.hbase.client.Admin) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) ReplicationPeerDescription(org.apache.hadoop.hbase.replication.ReplicationPeerDescription) Log(org.apache.commons.logging.Log) Queue(java.util.Queue) LogFactory(org.apache.commons.logging.LogFactory) ReplicationQueues(org.apache.hadoop.hbase.replication.ReplicationQueues)

Example 2 with Configured

Use of org.apache.hadoop.conf.Configured in project pinot by linkedin.

From the class ThirdEyeJob, the method run:

@SuppressWarnings("unchecked")
public void run() throws Exception {
    LOGGER.info("Input config:{}", inputConfig);
    PhaseSpec phaseSpec;
    try {
        phaseSpec = PhaseSpec.valueOf(phaseName.toUpperCase());
    } catch (Exception e) {
        usage();
        throw e;
    }
    if (PhaseSpec.TRANSFORM.equals(phaseSpec)) {
        TransformPhaseJob job = new TransformPhaseJob("Transform Job", inputConfig);
        job.run();
        return;
    } else if (PhaseSpec.JOIN.equals(phaseSpec)) {
        JoinPhaseJob job = new JoinPhaseJob("Join Job", inputConfig);
        job.run();
        return;
    } else if (PhaseSpec.WAIT.equals(phaseSpec)) {
        WaitPhaseJob job = new WaitPhaseJob("Wait for inputs", inputConfig);
        job.run();
        return;
    }
    // Get root, collection, input paths
    String root = getAndCheck(ThirdEyeJobProperties.THIRDEYE_ROOT.getName(), inputConfig);
    String collection = getAndCheck(ThirdEyeJobProperties.THIRDEYE_COLLECTION.getName(), inputConfig);
    String inputPaths = getAndCheck(ThirdEyeJobProperties.INPUT_PATHS.getName(), inputConfig);
    // Get min / max time
    DateTime minTime;
    DateTime maxTime;
    String minTimeProp = inputConfig.getProperty(ThirdEyeJobProperties.THIRDEYE_TIME_MIN.getName());
    String maxTimeProp = inputConfig.getProperty(ThirdEyeJobProperties.THIRDEYE_TIME_MAX.getName());
    minTime = ISODateTimeFormat.dateTimeParser().parseDateTime(minTimeProp);
    maxTime = ISODateTimeFormat.dateTimeParser().parseDateTime(maxTimeProp);
    Properties jobProperties = phaseSpec.getJobProperties(inputConfig, root, collection, minTime, maxTime, inputPaths);
    for (Object key : inputConfig.keySet()) {
        jobProperties.setProperty(key.toString(), inputConfig.getProperty(key.toString()));
    }
    // Instantiate the job
    Constructor<Configured> constructor = (Constructor<Configured>) phaseSpec.getKlazz().getConstructor(String.class, Properties.class);
    Configured instance = constructor.newInstance(phaseSpec.getName(), jobProperties);
    setMapreduceConfig(instance.getConf());
    // Run the job
    Method runMethod = instance.getClass().getMethod("run");
    Job job = (Job) runMethod.invoke(instance);
    if (job != null) {
        JobStatus status = job.getStatus();
        if (status.getState() != JobStatus.State.SUCCEEDED) {
            throw new RuntimeException("Job " + job.getJobName() + " failed to execute: Ran with config:" + jobProperties);
        }
    }
}
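
The reflective instantiation above only works because every phase class is expected to expose a (String, Properties) constructor, extend Configured so that setMapreduceConfig(instance.getConf()) can adjust its Hadoop Configuration, and provide a public run() method returning a Job. The sketch below is a minimal illustration of that assumed contract; the class ExamplePhaseJob is hypothetical and only mirrors the shape of the real phase jobs (TransformPhaseJob, JoinPhaseJob, etc.):

import java.util.Properties;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.mapreduce.Job;

public class ExamplePhaseJob extends Configured {

    private final String name;
    private final Properties props;

    // Matches the lookup getConstructor(String.class, Properties.class) in ThirdEyeJob.run().
    public ExamplePhaseJob(String name, Properties props) {
        super(new Configuration());
        this.name = name;
        this.props = props;
    }

    // Invoked reflectively via getMethod("run"); returning the Job lets the caller inspect its JobStatus.
    public Job run() throws Exception {
        Job job = Job.getInstance(getConf(), name);
        // Mapper/reducer classes and input/output paths would be configured from props here.
        job.waitForCompletion(true);
        return job;
    }
}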
Also used : Constructor(java.lang.reflect.Constructor) Method(java.lang.reflect.Method) Properties(java.util.Properties) IOException(java.io.IOException) DateTime(org.joda.time.DateTime) Configured(org.apache.hadoop.conf.Configured) WaitPhaseJob(com.linkedin.thirdeye.hadoop.wait.WaitPhaseJob) JobStatus(org.apache.hadoop.mapreduce.JobStatus) JoinPhaseJob(com.linkedin.thirdeye.hadoop.join.JoinPhaseJob) TransformPhaseJob(com.linkedin.thirdeye.hadoop.transform.TransformPhaseJob) DerivedColumnTransformationPhaseJob(com.linkedin.thirdeye.hadoop.derivedcolumn.transformation.DerivedColumnTransformationPhaseJob) SegmentCreationPhaseJob(com.linkedin.thirdeye.hadoop.segment.creation.SegmentCreationPhaseJob) AggregationPhaseJob(com.linkedin.thirdeye.hadoop.aggregation.AggregationPhaseJob) TopKPhaseJob(com.linkedin.thirdeye.hadoop.topk.TopKPhaseJob) Job(org.apache.hadoop.mapreduce.Job) BackfillPhaseJob(com.linkedin.thirdeye.hadoop.backfill.BackfillPhaseJob)

Aggregations

IOException (java.io.IOException) 2
Configured (org.apache.hadoop.conf.Configured) 2
AtomicLongMap (com.google.common.util.concurrent.AtomicLongMap) 1
AggregationPhaseJob (com.linkedin.thirdeye.hadoop.aggregation.AggregationPhaseJob) 1
BackfillPhaseJob (com.linkedin.thirdeye.hadoop.backfill.BackfillPhaseJob) 1
DerivedColumnTransformationPhaseJob (com.linkedin.thirdeye.hadoop.derivedcolumn.transformation.DerivedColumnTransformationPhaseJob) 1
JoinPhaseJob (com.linkedin.thirdeye.hadoop.join.JoinPhaseJob) 1
SegmentCreationPhaseJob (com.linkedin.thirdeye.hadoop.segment.creation.SegmentCreationPhaseJob) 1
TopKPhaseJob (com.linkedin.thirdeye.hadoop.topk.TopKPhaseJob) 1
TransformPhaseJob (com.linkedin.thirdeye.hadoop.transform.TransformPhaseJob) 1
WaitPhaseJob (com.linkedin.thirdeye.hadoop.wait.WaitPhaseJob) 1
FileNotFoundException (java.io.FileNotFoundException) 1
Constructor (java.lang.reflect.Constructor) 1
Method (java.lang.reflect.Method) 1
ArrayList (java.util.ArrayList) 1
Arrays (java.util.Arrays) 1
LinkedList (java.util.LinkedList) 1
List (java.util.List) 1
Map (java.util.Map) 1
Properties (java.util.Properties) 1