Search in sources:

Example 91 with VisibleForTesting

Use of com.google.common.annotations.VisibleForTesting in project hbase by apache.

The class HRegionServer, method tryRegionServerReport.

@VisibleForTesting
protected void tryRegionServerReport(long reportStartTime, long reportEndTime) throws IOException {
    RegionServerStatusService.BlockingInterface rss = rssStub;
    if (rss == null) {
        // the current server could be stopping.
        return;
    }
    ClusterStatusProtos.ServerLoad sl = buildServerLoad(reportStartTime, reportEndTime);
    try {
        RegionServerReportRequest.Builder request = RegionServerReportRequest.newBuilder();
        ServerName sn = ServerName.parseVersionedServerName(this.serverName.getVersionedBytes());
        request.setServer(ProtobufUtil.toServerName(sn));
        request.setLoad(sl);
        rss.regionServerReport(null, request.build());
    } catch (ServiceException se) {
        IOException ioe = ProtobufUtil.getRemoteException(se);
        if (ioe instanceof YouAreDeadException) {
            // This will be caught and handled as a fatal error in run()
            throw ioe;
        }
        if (rssStub == rss) {
            rssStub = null;
        }
        // Couldn't connect to the master, get location from zk and reconnect
        // Method blocks until new master is found or we are stopped
        createRegionServerStatusStub(true);
    }
}
Also used: ClusterStatusProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos), RegionServerReportRequest (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest), ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException), ServerName (org.apache.hadoop.hbase.ServerName), RegionServerStatusService (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService), YouAreDeadException (org.apache.hadoop.hbase.YouAreDeadException), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
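
A detail worth noting in the catch block above: rssStub is cleared only if it still refers to the stub that just failed (if (rssStub == rss)), so a replacement stub installed concurrently by another thread is not discarded. A minimal standalone sketch of the same compare-before-clear idiom, written with AtomicReference for illustration (the names are ours, not HBase code):

import java.util.concurrent.atomic.AtomicReference;

class StubCache<T> {

    private final AtomicReference<T> ref = new AtomicReference<>();

    T current() {
        return ref.get();
    }

    void install(T fresh) {
        ref.set(fresh);
    }

    // Null out only the instance we saw fail; a concurrently installed
    // replacement wins the race and stays cached.
    void invalidateIfStale(T failed) {
        ref.compareAndSet(failed, null);
    }
}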

Example 92 with VisibleForTesting

Use of com.google.common.annotations.VisibleForTesting in project hbase by apache.

The class HRegionServer, method createRegionServerStatusStub.

/**
   * Get the current master from ZooKeeper and open the RPC connection to it. To get a fresh
   * connection, the current rssStub must be null. Method will block until a master is available.
   * You can break from this block by requesting the server stop.
   * @param refresh If true then master address will be read from ZK, otherwise use cached data
   * @return master + port, or null if server has been stopped
   */
@VisibleForTesting
protected synchronized ServerName createRegionServerStatusStub(boolean refresh) {
    if (rssStub != null) {
        return masterAddressTracker.getMasterAddress();
    }
    ServerName sn = null;
    long previousLogTime = 0;
    RegionServerStatusService.BlockingInterface intRssStub = null;
    LockService.BlockingInterface intLockStub = null;
    boolean interrupted = false;
    try {
        while (keepLooping()) {
            sn = this.masterAddressTracker.getMasterAddress(refresh);
            if (sn == null) {
                if (!keepLooping()) {
                    // give up with no connection.
                    LOG.debug("No master found and cluster is stopped; bailing out");
                    return null;
                }
                if (System.currentTimeMillis() > (previousLogTime + 1000)) {
                    LOG.debug("No master found; retry");
                    previousLogTime = System.currentTimeMillis();
                }
                // let's try pull it from ZK directly
                refresh = true;
                if (sleep(200)) {
                    interrupted = true;
                }
                continue;
            }
            // If we are on the active master, use the shortcut
            if (this instanceof HMaster && sn.equals(getServerName())) {
                intRssStub = ((HMaster) this).getMasterRpcServices();
                intLockStub = ((HMaster) this).getMasterRpcServices();
                break;
            }
            try {
                BlockingRpcChannel channel = this.rpcClient.createBlockingRpcChannel(sn, userProvider.getCurrent(), shortOperationTimeout);
                intRssStub = RegionServerStatusService.newBlockingStub(channel);
                intLockStub = LockService.newBlockingStub(channel);
                break;
            } catch (IOException e) {
                if (System.currentTimeMillis() > (previousLogTime + 1000)) {
                    e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
                    if (e instanceof ServerNotRunningYetException) {
                        LOG.info("Master isn't available yet, retrying");
                    } else {
                        LOG.warn("Unable to connect to master. Retrying. Error was:", e);
                    }
                    previousLogTime = System.currentTimeMillis();
                }
                if (sleep(200)) {
                    interrupted = true;
                }
            }
        }
    } finally {
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
    }
    this.rssStub = intRssStub;
    this.lockStub = intLockStub;
    return sn;
}
Also used: LockService (org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService), ServerName (org.apache.hadoop.hbase.ServerName), HMaster (org.apache.hadoop.hbase.master.HMaster), RegionServerStatusService (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService), BlockingRpcChannel (org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingRpcChannel), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), RemoteException (org.apache.hadoop.ipc.RemoteException), ServerNotRunningYetException (org.apache.hadoop.hbase.ipc.ServerNotRunningYetException), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
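
The loop above swallows InterruptedException while waiting for a master, records that an interrupt happened, and re-asserts the thread's interrupt flag in the finally block so callers still observe it. A minimal sketch of that interrupt-preserving retry pattern, assuming a Supplier that returns null until a resource is ready (the names are ours, not HBase API):

import java.util.function.Supplier;

final class RetryLoops {

    private RetryLoops() {
    }

    // Retry attempt.get() until it yields a non-null result, pausing between
    // tries; note interrupts along the way and restore the flag on exit.
    static <T> T retryUntilAvailable(Supplier<T> attempt, long pauseMillis) {
        boolean interrupted = false;
        try {
            while (true) {
                T result = attempt.get();
                if (result != null) {
                    return result;
                }
                try {
                    Thread.sleep(pauseMillis);
                } catch (InterruptedException e) {
                    interrupted = true; // remember it, keep waiting
                }
            }
        } finally {
            if (interrupted) {
                Thread.currentThread().interrupt(); // make the interrupt visible to callers
            }
        }
    }
}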

Example 93 with VisibleForTesting

Use of com.google.common.annotations.VisibleForTesting in project hbase by apache.

The class HRegion, method compactStore.

/**
   * This is a helper function that compacts the given store.
   * It is used by utilities and testing.
   *
   * @throws IOException e
   */
@VisibleForTesting
void compactStore(byte[] family, ThroughputController throughputController) throws IOException {
    Store s = getStore(family);
    CompactionContext compaction = s.requestCompaction();
    if (compaction != null) {
        compact(compaction, s, throughputController, null);
    }
}
Also used: CompactionContext (org.apache.hadoop.hbase.regionserver.compactions.CompactionContext), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
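
Because compactStore is package-private, test code has to sit in the org.apache.hadoop.hbase.regionserver package to reach it. A hypothetical test helper (the family name "cf" and the choice of NoLimitThroughputController are our assumptions, not taken from the HBase test suite):

package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;

import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
import org.apache.hadoop.hbase.util.Bytes;

class CompactionTestHelper {

    // Compact one column family synchronously through the test-only hook,
    // bypassing the region server's compaction queue.
    static void forceCompaction(HRegion region) throws IOException {
        region.compactStore(Bytes.toBytes("cf"), NoLimitThroughputController.INSTANCE);
    }
}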

Example 94 with VisibleForTesting

Use of com.google.common.annotations.VisibleForTesting in project hbase by apache.

The class HRegion, method replayWALFlushStartMarker.

/** Replay the flush marker from the primary region by creating a corresponding snapshot of
   * the store memstores, only if the memstores do not have a higher seqId from an earlier wal
   * edit (because the events may be coming out of order).
   */
@VisibleForTesting
PrepareFlushResult replayWALFlushStartMarker(FlushDescriptor flush) throws IOException {
    long flushSeqId = flush.getFlushSequenceNumber();
    HashSet<Store> storesToFlush = new HashSet<>();
    for (StoreFlushDescriptor storeFlush : flush.getStoreFlushesList()) {
        byte[] family = storeFlush.getFamilyName().toByteArray();
        Store store = getStore(family);
        if (store == null) {
            LOG.warn(getRegionInfo().getEncodedName() + " : " + "Received a flush start marker from primary, but the family is not found. Ignoring" + " StoreFlushDescriptor:" + TextFormat.shortDebugString(storeFlush));
            continue;
        }
        storesToFlush.add(store);
    }
    MonitoredTask status = TaskMonitor.get().createStatus("Preparing flush " + this);
    // we will use writestate as a coarse-grain lock for all the replay events
    // (flush, compaction, region open etc)
    synchronized (writestate) {
        try {
            if (flush.getFlushSequenceNumber() < lastReplayedOpenRegionSeqId) {
                LOG.warn(getRegionInfo().getEncodedName() + " : " + "Skipping replaying flush event :" + TextFormat.shortDebugString(flush) + " because its sequence id is smaller than this region's lastReplayedOpenRegionSeqId of " + lastReplayedOpenRegionSeqId);
                return null;
            }
            if (numMutationsWithoutWAL.sum() > 0) {
                numMutationsWithoutWAL.reset();
                dataInMemoryWithoutWAL.reset();
            }
            if (!writestate.flushing) {
                // we do not have an active snapshot and corresponding this.prepareResult. This means
                // we can just snapshot our memstores and continue as normal.
                // invoke prepareFlushCache. Send null as wal since we do not want the flush events in wal
                PrepareFlushResult prepareResult = internalPrepareFlushCache(null, flushSeqId, storesToFlush, status, false);
                if (prepareResult.result == null) {
                    // save the PrepareFlushResult so that we can use it later from commit flush
                    this.writestate.flushing = true;
                    this.prepareFlushResult = prepareResult;
                    status.markComplete("Flush prepare successful");
                    if (LOG.isDebugEnabled()) {
                        LOG.debug(getRegionInfo().getEncodedName() + " : " + " Prepared flush with seqId:" + flush.getFlushSequenceNumber());
                    }
                } else {
                    // our memstore is empty, but the primary is still flushing
                    if (prepareResult.getResult().getResult() == FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY) {
                        this.writestate.flushing = true;
                        this.prepareFlushResult = prepareResult;
                        if (LOG.isDebugEnabled()) {
                            LOG.debug(getRegionInfo().getEncodedName() + " : " + " Prepared empty flush with seqId:" + flush.getFlushSequenceNumber());
                        }
                    }
                    status.abort("Flush prepare failed with " + prepareResult.result);
                // nothing much to do; the prepare flush failed for some reason.
                }
                return prepareResult;
            } else {
                // we already have an active snapshot.
                if (flush.getFlushSequenceNumber() == this.prepareFlushResult.flushOpSeqId) {
                    // They define the same flush. Log and continue.
                    LOG.warn(getRegionInfo().getEncodedName() + " : " + "Received a flush prepare marker with the same seqId: " + flush.getFlushSequenceNumber() + " before clearing the previous one with seqId: " + prepareFlushResult.flushOpSeqId + ". Ignoring");
                // ignore
                } else if (flush.getFlushSequenceNumber() < this.prepareFlushResult.flushOpSeqId) {
                    // We received a flush with a smaller seqNum than what we have prepared. We can only
                    // ignore this prepare flush request.
                    LOG.warn(getRegionInfo().getEncodedName() + " : " + "Received a flush prepare marker with a smaller seqId: " + flush.getFlushSequenceNumber() + " before clearing the previous one with seqId: " + prepareFlushResult.flushOpSeqId + ". Ignoring");
                // ignore
                } else {
                    // We received a flush with a larger seqNum than what we have prepared
                    LOG.warn(getRegionInfo().getEncodedName() + " : " + "Received a flush prepare marker with a larger seqId: " + flush.getFlushSequenceNumber() + " before clearing the previous one with seqId: " + prepareFlushResult.flushOpSeqId + ". Ignoring");
                // We do not have multiple active snapshots in the memstore or a way to merge current
                // memstore snapshot with the contents and resnapshot for now. We cannot take
                // another snapshot and drop the previous one because that will cause temporary
                // data loss in the secondary. So we ignore this for now, deferring the resolution
                // to happen when we see the corresponding flush commit marker. If we have a memstore
                // snapshot with x, and later received another prepare snapshot with y (where x < y),
                // when we see flush commit for y, we will drop snapshot for x, and can also drop all
                // the memstore edits if everything in memstore is < y. This is the usual case for
                // RS crash + recovery where we might see consecutive prepare flush wal markers.
                // Otherwise, this will cause more memory to be used in secondary replica until a
                // further prepare + commit flush is seen and replayed.
                }
            }
        } finally {
            status.cleanup();
            writestate.notifyAll();
        }
    }
    return null;
}
Also used: HashSet (java.util.HashSet), StoreFlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor), MonitoredTask (org.apache.hadoop.hbase.monitoring.MonitoredTask), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
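
The branching above boils down to a small decision rule: prepare a snapshot when none is active; otherwise log and ignore, leaving reconciliation to the flush commit marker. A distilled restatement for illustration (our own code, not HBase's):

enum ReplayAction { PREPARE_SNAPSHOT, IGNORE_DUPLICATE, IGNORE_STALE, DEFER_TO_COMMIT }

// A secondary replica holds at most one prepared memstore snapshot, so any
// second prepare marker can only be logged and ignored; the conflict is
// resolved when the matching flush commit marker is replayed.
static ReplayAction onFlushStartMarker(boolean alreadyFlushing, long markerSeqId, long preparedSeqId) {
    if (!alreadyFlushing) {
        return ReplayAction.PREPARE_SNAPSHOT; // snapshot the memstores now
    } else if (markerSeqId == preparedSeqId) {
        return ReplayAction.IGNORE_DUPLICATE; // the same flush replayed twice
    } else if (markerSeqId < preparedSeqId) {
        return ReplayAction.IGNORE_STALE; // an out-of-order, older marker
    } else {
        return ReplayAction.DEFER_TO_COMMIT; // wait for the commit marker
    }
}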

Example 95 with VisibleForTesting

Use of com.google.common.annotations.VisibleForTesting in project hbase by apache.

The class FSTableDescriptors, method createMetaTableDescriptor.

@VisibleForTesting
public static HTableDescriptor createMetaTableDescriptor(final Configuration conf) throws IOException {
    HTableDescriptor metaDescriptor = new HTableDescriptor(TableName.META_TABLE_NAME,
        new HColumnDescriptor[] {
            new HColumnDescriptor(HConstants.CATALOG_FAMILY)
                .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS))
                .setInMemory(true)
                .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
                .setBloomFilterType(BloomType.NONE)
                .setCacheDataInL1(true),
            new HColumnDescriptor(HConstants.REPLICATION_BARRIER_FAMILY)
                .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS))
                .setInMemory(true)
                .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
                .setBloomFilterType(BloomType.NONE)
                .setCacheDataInL1(true),
            new HColumnDescriptor(HConstants.REPLICATION_POSITION_FAMILY)
                .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS))
                .setInMemory(true)
                .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
                .setBloomFilterType(BloomType.NONE)
                .setCacheDataInL1(true),
            new HColumnDescriptor(HConstants.REPLICATION_META_FAMILY)
                .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS))
                .setInMemory(true)
                .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
                .setBloomFilterType(BloomType.NONE)
                .setCacheDataInL1(true),
            new HColumnDescriptor(HConstants.TABLE_FAMILY)
                .setMaxVersions(10)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
                .setBloomFilterType(BloomType.NONE)
                .setCacheDataInL1(true)
        }) {
    };
    metaDescriptor.addCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint", null, Coprocessor.PRIORITY_SYSTEM, null);
    return metaDescriptor;
}
Also used: HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
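
Every family except HConstants.TABLE_FAMILY is configured identically, so the construction invites a helper. A sketch of the shared tuning factored out (the metaFamily helper is our illustration, not part of FSTableDescriptors):

// Hypothetical helper: one hbase:meta column family with the shared settings
// used above (conf-driven max versions and block size, in-memory, local
// replication scope, no bloom filter, data cached in L1).
private static HColumnDescriptor metaFamily(final Configuration conf, final byte[] family) {
    return new HColumnDescriptor(family)
        .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS))
        .setInMemory(true)
        .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
        .setBloomFilterType(BloomType.NONE)
        .setCacheDataInL1(true);
}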

Aggregations

VisibleForTesting (com.google.common.annotations.VisibleForTesting): 760
IOException (java.io.IOException): 128
ArrayList (java.util.ArrayList): 52
Path (java.nio.file.Path): 46
Map (java.util.Map): 46
File (java.io.File): 40
HashMap (java.util.HashMap): 34
Path (org.apache.hadoop.fs.Path): 30
ImmutableList (com.google.common.collect.ImmutableList): 28
Matcher (java.util.regex.Matcher): 26
List (java.util.List): 24
SourcePath (com.facebook.buck.rules.SourcePath): 20
ImmutableMap (com.google.common.collect.ImmutableMap): 20
HashSet (java.util.HashSet): 19
FileStatus (org.apache.hadoop.fs.FileStatus): 19
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 19
DFSClient (org.apache.hadoop.hdfs.DFSClient): 18
Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 18
ImmutableSet (com.google.common.collect.ImmutableSet): 16
CigarElement (htsjdk.samtools.CigarElement): 13