Example 1 with ReplicationEndpoint

Use of org.apache.hadoop.hbase.replication.ReplicationEndpoint in project hbase by apache.

From the class ReplicationSource, method run:

@Override
public void run() {
    // mark we are running now
    this.sourceRunning = true;
    try {
        // start the endpoint, connect to the cluster
        Service.State state = replicationEndpoint.start().get();
        if (state != Service.State.RUNNING) {
            LOG.warn("ReplicationEndpoint was not started. Exiting");
            uninitialize();
            return;
        }
    } catch (Exception ex) {
        LOG.warn("Error starting ReplicationEndpoint, exiting", ex);
        throw new RuntimeException(ex);
    }
    // get the WALEntryFilter from ReplicationEndpoint and add it to default filters
    ArrayList<WALEntryFilter> filters = Lists.newArrayList((WALEntryFilter) new SystemTableWALEntryFilter());
    WALEntryFilter filterFromEndpoint = this.replicationEndpoint.getWALEntryfilter();
    if (filterFromEndpoint != null) {
        filters.add(filterFromEndpoint);
    }
    this.walEntryFilter = new ChainWALEntryFilter(filters);
    int sleepMultiplier = 1;
    // delay this until we are in an asynchronous thread
    while (this.isSourceActive() && this.peerClusterId == null) {
        this.peerClusterId = replicationEndpoint.getPeerUUID();
        if (this.isSourceActive() && this.peerClusterId == null) {
            if (sleepForRetries("Cannot contact the peer's zk ensemble", sleepMultiplier)) {
                sleepMultiplier++;
            }
        }
    }
    // In a rare case the ZooKeeper setting may be messed up and yield an incorrect
    // peerClusterId value, one that is the same as the source clusterId
    if (clusterId.equals(peerClusterId) && !replicationEndpoint.canReplicateToSameCluster()) {
        this.terminate("ClusterId " + clusterId + " is replicating to itself: peerClusterId " + peerClusterId + " which is not allowed by ReplicationEndpoint:" + replicationEndpoint.getClass().getName(), null, false);
        this.manager.closeQueue(this);
        return;
    }
    LOG.info("Replicating " + clusterId + " -> " + peerClusterId);
    // start workers
    for (Map.Entry<String, PriorityBlockingQueue<Path>> entry : queues.entrySet()) {
        String walGroupId = entry.getKey();
        PriorityBlockingQueue<Path> queue = entry.getValue();
        final ReplicationSourceShipperThread worker = new ReplicationSourceShipperThread(walGroupId, queue, replicationQueueInfo, this);
        ReplicationSourceShipperThread extant = workerThreads.putIfAbsent(walGroupId, worker);
        if (extant != null) {
            LOG.debug("Someone has beat us to start a worker thread for wal group " + walGroupId);
        } else {
            LOG.debug("Starting up worker for wal group " + walGroupId);
            worker.startup();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Service(com.google.common.util.concurrent.Service) ReplicationException(org.apache.hadoop.hbase.replication.ReplicationException) IOException(java.io.IOException) ReplicationEndpoint(org.apache.hadoop.hbase.replication.ReplicationEndpoint) PriorityBlockingQueue(java.util.concurrent.PriorityBlockingQueue) SystemTableWALEntryFilter(org.apache.hadoop.hbase.replication.SystemTableWALEntryFilter) ChainWALEntryFilter(org.apache.hadoop.hbase.replication.ChainWALEntryFilter) WALEntryFilter(org.apache.hadoop.hbase.replication.WALEntryFilter) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap)
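
ReplicationSource.run() above drives the endpoint only through its public hooks: start() (via Guava's Service), getPeerUUID(), canReplicateToSameCluster() and getWALEntryfilter(). For orientation only, the sketch below shows a minimal, hypothetical endpoint that would satisfy those hooks; it is not part of the HBase sources. It assumes BaseReplicationEndpoint, which in this HBase version ties ReplicationEndpoint to Guava's AbstractService and already supplies default getWALEntryfilter() and canReplicateToSameCluster() implementations; the class name, fixed peer UUID and logging-only replicate() are illustrative assumptions.

import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.replication.BaseReplicationEndpoint;
import org.apache.hadoop.hbase.wal.WAL.Entry;

public class LoggingReplicationEndpoint extends BaseReplicationEndpoint {

    private static final Log LOG = LogFactory.getLog(LoggingReplicationEndpoint.class);

    // A real endpoint derives this from the peer cluster; a fixed id keeps the sketch small.
    private static final UUID PEER_UUID = UUID.randomUUID();

    @Override
    public UUID getPeerUUID() {
        // run() loops until this returns non-null, so answer immediately.
        return PEER_UUID;
    }

    @Override
    public boolean replicate(ReplicateContext replicateContext) {
        // Log each WAL entry instead of shipping it; a real endpoint would push the batch
        // to the peer and return false (or throw) so that the caller retries on failure.
        for (Entry entry : replicateContext.getEntries()) {
            LOG.info("Would replicate " + entry.getKey());
        }
        return true;
    }

    @Override
    protected void doStart() {
        // Invoked through replicationEndpoint.start() in run(); report RUNNING right away.
        notifyStarted();
    }

    @Override
    protected void doStop() {
        notifyStopped();
    }
}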

Example 2 with ReplicationEndpoint

Use of org.apache.hadoop.hbase.replication.ReplicationEndpoint in project hbase by apache.

From the class ReplicationSourceManager, method getReplicationSource:

/**
   * Factory method to create a replication source
   * @param conf the configuration to use
   * @param fs the file system to use
   * @param manager the manager to use
   * @param replicationQueues the replication queues of this region server
   * @param replicationPeers the replication peers known to this region server
   * @param server the server object for this region server
   * @param peerId the id of the peer cluster
   * @param clusterId the id of the source cluster
   * @param peerConfig the configuration of the peer cluster
   * @param replicationPeer the peer to replicate to
   * @return the created source
   * @throws IOException if the replication endpoint cannot be instantiated
   */
protected ReplicationSourceInterface getReplicationSource(final Configuration conf, final FileSystem fs, final ReplicationSourceManager manager, final ReplicationQueues replicationQueues, final ReplicationPeers replicationPeers, final Server server, final String peerId, final UUID clusterId, final ReplicationPeerConfig peerConfig, final ReplicationPeer replicationPeer) throws IOException {
    RegionServerCoprocessorHost rsServerHost = null;
    TableDescriptors tableDescriptors = null;
    if (server instanceof HRegionServer) {
        rsServerHost = ((HRegionServer) server).getRegionServerCoprocessorHost();
        tableDescriptors = ((HRegionServer) server).getTableDescriptors();
    }
    ReplicationSourceInterface src;
    try {
        @SuppressWarnings("rawtypes") Class c = Class.forName(conf.get("replication.replicationsource.implementation", ReplicationSource.class.getCanonicalName()));
        src = (ReplicationSourceInterface) c.newInstance();
    } catch (Exception e) {
        LOG.warn("Passed replication source implementation throws errors, " + "defaulting to ReplicationSource", e);
        src = new ReplicationSource();
    }
    ReplicationEndpoint replicationEndpoint = null;
    try {
        String replicationEndpointImpl = peerConfig.getReplicationEndpointImpl();
        if (replicationEndpointImpl == null) {
            // Default to HBase inter-cluster replication endpoint
            replicationEndpointImpl = HBaseInterClusterReplicationEndpoint.class.getName();
        }
        @SuppressWarnings("rawtypes") Class c = Class.forName(replicationEndpointImpl);
        replicationEndpoint = (ReplicationEndpoint) c.newInstance();
        if (rsServerHost != null) {
            ReplicationEndpoint newReplicationEndPoint = rsServerHost.postCreateReplicationEndPoint(replicationEndpoint);
            if (newReplicationEndPoint != null) {
                // Override the newly created endpoint from the hook with configured end point
                replicationEndpoint = newReplicationEndPoint;
            }
        }
    } catch (Exception e) {
        LOG.warn("Passed replication endpoint implementation throws errors" + " while initializing ReplicationSource for peer: " + peerId, e);
        throw new IOException(e);
    }
    MetricsSource metrics = new MetricsSource(peerId);
    // init replication source
    src.init(conf, fs, manager, replicationQueues, replicationPeers, server, peerId, clusterId, replicationEndpoint, metrics);
    // init replication endpoint
    replicationEndpoint.init(new ReplicationEndpoint.Context(replicationPeer.getConfiguration(), fs, peerId, clusterId, replicationPeer, metrics, tableDescriptors, server));
    return src;
}
Also used : IOException(java.io.IOException) ReplicationException(org.apache.hadoop.hbase.replication.ReplicationException) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) RegionServerCoprocessorHost(org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost) ReplicationEndpoint(org.apache.hadoop.hbase.replication.ReplicationEndpoint) TableDescriptors(org.apache.hadoop.hbase.TableDescriptors)
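
getReplicationSource() above reads the endpoint class from ReplicationPeerConfig.getReplicationEndpointImpl() and only falls back to HBaseInterClusterReplicationEndpoint when none is configured. As a hedged illustration of how such a peer might be registered (not taken from the HBase sources), the sketch below uses ReplicationAdmin; it assumes a release whose ReplicationAdmin offers addPeer(String, ReplicationPeerConfig) (older releases take an additional table-CFs map), and the peer id, cluster key and endpoint class name are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AddCustomEndpointPeer {

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (ReplicationAdmin admin = new ReplicationAdmin(conf)) {
            ReplicationPeerConfig peerConfig = new ReplicationPeerConfig()
                // Hypothetical peer cluster key: zk quorum, client port, znode parent.
                .setClusterKey("peer-zk-1,peer-zk-2,peer-zk-3:2181:/hbase")
                // Hypothetical custom endpoint class; if this is left unset,
                // HBaseInterClusterReplicationEndpoint is used by default.
                .setReplicationEndpointImpl("com.example.LoggingReplicationEndpoint");
            // "1" is an arbitrary peer id chosen for this sketch.
            admin.addPeer("1", peerConfig);
        }
    }
}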

Aggregations

IOException (java.io.IOException) 2
ReplicationEndpoint (org.apache.hadoop.hbase.replication.ReplicationEndpoint) 2
ReplicationException (org.apache.hadoop.hbase.replication.ReplicationException) 2
Service (com.google.common.util.concurrent.Service) 1
HashMap (java.util.HashMap) 1
Map (java.util.Map) 1
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap) 1
PriorityBlockingQueue (java.util.concurrent.PriorityBlockingQueue) 1
RejectedExecutionException (java.util.concurrent.RejectedExecutionException) 1
Path (org.apache.hadoop.fs.Path) 1
TableDescriptors (org.apache.hadoop.hbase.TableDescriptors) 1
HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer) 1
RegionServerCoprocessorHost (org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost) 1
ChainWALEntryFilter (org.apache.hadoop.hbase.replication.ChainWALEntryFilter) 1
SystemTableWALEntryFilter (org.apache.hadoop.hbase.replication.SystemTableWALEntryFilter) 1
WALEntryFilter (org.apache.hadoop.hbase.replication.WALEntryFilter) 1