Example 91 with NavigableMap

Use of java.util.NavigableMap in project hbase by apache.

From class PartitionedMobCompactor, method select:

/**
   * Selects the mob/del files to compact.
   * Iterates over the candidates to find all the del files and small mob files.
   * @param candidates All the candidates.
   * @param allFiles Whether to add all mob files into the compaction.
   * @return A compaction request.
   * @throws IOException if IO failure is encountered
   */
protected PartitionedMobCompactionRequest select(List<FileStatus> candidates, boolean allFiles) throws IOException {
    final Map<CompactionPartitionId, CompactionPartition> filesToCompact = new HashMap<>();
    final CompactionPartitionId id = new CompactionPartitionId();
    final NavigableMap<CompactionDelPartitionId, CompactionDelPartition> delFilesToCompact = new TreeMap<>();
    final CompactionDelPartitionId delId = new CompactionDelPartitionId();
    final ArrayList<CompactionDelPartition> allDelPartitions = new ArrayList<>();
    int selectedFileCount = 0;
    int irrelevantFileCount = 0;
    int totalDelFiles = 0;
    MobCompactPartitionPolicy policy = column.getMobCompactPartitionPolicy();
    Calendar calendar = Calendar.getInstance();
    Date currentDate = new Date();
    Date firstDayOfCurrentMonth = null;
    Date firstDayOfCurrentWeek = null;
    if (policy == MobCompactPartitionPolicy.MONTHLY) {
        firstDayOfCurrentMonth = MobUtils.getFirstDayOfMonth(calendar, currentDate);
        firstDayOfCurrentWeek = MobUtils.getFirstDayOfWeek(calendar, currentDate);
    } else if (policy == MobCompactPartitionPolicy.WEEKLY) {
        firstDayOfCurrentWeek = MobUtils.getFirstDayOfWeek(calendar, currentDate);
    }
    // First, check whether there are any del files at all, so the following pass can be
    // optimized: if del files exist, each partition must read its startKey and endKey
    // from its files; if there are none, that read can be skipped entirely.
    boolean withDelFiles = false;
    for (FileStatus file : candidates) {
        if (!file.isFile()) {
            continue;
        }
        // Resolve HFile links before checking whether this is a del file.
        FileStatus linkedFile = file;
        if (HFileLink.isHFileLink(file.getPath())) {
            HFileLink link = HFileLink.buildFromHFileLinkPattern(conf, file.getPath());
            linkedFile = getLinkedFileStatus(link);
            if (linkedFile == null) {
                continue;
            }
        }
        if (StoreFileInfo.isDelFile(linkedFile.getPath())) {
            withDelFiles = true;
            break;
        }
    }
    for (FileStatus file : candidates) {
        if (!file.isFile()) {
            irrelevantFileCount++;
            continue;
        }
        // group the del files and small files.
        FileStatus linkedFile = file;
        if (HFileLink.isHFileLink(file.getPath())) {
            HFileLink link = HFileLink.buildFromHFileLinkPattern(conf, file.getPath());
            linkedFile = getLinkedFileStatus(link);
            if (linkedFile == null) {
                // If the linked file cannot be found, count it as an irrelevant file
                irrelevantFileCount++;
                continue;
            }
        }
        if (withDelFiles && StoreFileInfo.isDelFile(linkedFile.getPath())) {
            // This is a del file: read its start and end row keys
            // to locate (or create) its del partition.
            Reader reader = HFile.createReader(fs, linkedFile.getPath(), CacheConfig.DISABLED, conf);
            try {
                delId.setStartKey(reader.getFirstRowKey());
                delId.setEndKey(reader.getLastRowKey());
            } finally {
                reader.close();
            }
            CompactionDelPartition delPartition = delFilesToCompact.get(delId);
            if (delPartition == null) {
                CompactionDelPartitionId newDelId = new CompactionDelPartitionId(delId.getStartKey(), delId.getEndKey());
                delPartition = new CompactionDelPartition(newDelId);
                delFilesToCompact.put(newDelId, delPartition);
            }
            delPartition.addDelFile(file);
            totalDelFiles++;
        } else {
            String fileName = linkedFile.getPath().getName();
            String date = MobFileName.getDateFromName(fileName);
            boolean skipCompaction = MobUtils.fillPartitionId(id, firstDayOfCurrentMonth, firstDayOfCurrentWeek, date, policy, calendar, mergeableSize);
            if (allFiles || (!skipCompaction && (linkedFile.getLen() < id.getThreshold()))) {
                // Add every file when allFiles is true; otherwise add only the
                // small files to the merge pool, skipping files the current
                // partition policy excludes from compaction.
                id.setStartKey(MobFileName.getStartKeyFromName(fileName));
                CompactionPartition compactionPartition = filesToCompact.get(id);
                if (compactionPartition == null) {
                    CompactionPartitionId newId = new CompactionPartitionId(id.getStartKey(), id.getDate());
                    compactionPartition = new CompactionPartition(newId);
                    compactionPartition.addFile(file);
                    filesToCompact.put(newId, compactionPartition);
                    newId.updateLatestDate(date);
                } else {
                    compactionPartition.addFile(file);
                    compactionPartition.getPartitionId().updateLatestDate(date);
                }
                if (withDelFiles) {
                    // get startKey and endKey from the file and update partition
                    // TODO: is it possible to skip read of most hfiles?
                    Reader reader = HFile.createReader(fs, linkedFile.getPath(), CacheConfig.DISABLED, conf);
                    try {
                        compactionPartition.setStartKey(reader.getFirstRowKey());
                        compactionPartition.setEndKey(reader.getLastRowKey());
                    } finally {
                        reader.close();
                    }
                }
                selectedFileCount++;
            }
        }
    }
    /*
     * Merge del partitions so the remaining del file lists cover
     * non-overlapping key ranges.
     */
    for (Map.Entry<CompactionDelPartitionId, CompactionDelPartition> entry : delFilesToCompact.entrySet()) {
        if (allDelPartitions.size() > 0) {
            // check if the current key range overlaps the previous one
            CompactionDelPartition prev = allDelPartitions.get(allDelPartitions.size() - 1);
            if (Bytes.compareTo(prev.getId().getEndKey(), entry.getKey().getStartKey()) >= 0) {
                // merge them together
                prev.getId().setEndKey(entry.getValue().getId().getEndKey());
                prev.addDelFileList(entry.getValue().listDelFiles());
            } else {
                allDelPartitions.add(entry.getValue());
            }
        } else {
            allDelPartitions.add(entry.getValue());
        }
    }
    PartitionedMobCompactionRequest request = new PartitionedMobCompactionRequest(filesToCompact.values(), allDelPartitions);
    if (candidates.size() == (totalDelFiles + selectedFileCount + irrelevantFileCount)) {
        // all the files are selected
        request.setCompactionType(CompactionType.ALL_FILES);
    }
    LOG.info("The compaction type is " + request.getCompactionType() + ", the request has " + totalDelFiles + " del files, " + selectedFileCount + " selected files, and " + irrelevantFileCount + " irrelevant files");
    return request;
}
Also used : CompactionDelPartitionId(org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionDelPartitionId) HFileLink(org.apache.hadoop.hbase.io.HFileLink) CompactionPartition(org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartition) FileStatus(org.apache.hadoop.fs.FileStatus) HashMap(java.util.HashMap) Calendar(java.util.Calendar) ArrayList(java.util.ArrayList) Reader(org.apache.hadoop.hbase.io.hfile.HFile.Reader) CompactionPartitionId(org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartitionId) TreeMap(java.util.TreeMap) Date(java.util.Date) CompactionDelPartition(org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionDelPartition) MobCompactPartitionPolicy(org.apache.hadoop.hbase.client.MobCompactPartitionPolicy) Map(java.util.Map) NavigableMap(java.util.NavigableMap) HashMap(java.util.HashMap) TreeMap(java.util.TreeMap)
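
The NavigableMap is doing real work here: because delFilesToCompact is a TreeMap ordered by CompactionDelPartitionId (start key, then end key), the merge pass at the end can collapse overlapping del partitions in one sorted sweep, comparing each entry only against the most recently merged range. The following is a minimal, self-contained sketch of that merge idea in plain Java; RangeId, DelPartition, and the String row keys are hypothetical stand-ins for the HBase types, not the real API.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;

public class DelRangeMergeSketch {

    // Hypothetical stand-in for CompactionDelPartitionId: an immutable [start, end] row-key range.
    record RangeId(String startKey, String endKey) {}

    // Hypothetical stand-in for CompactionDelPartition: a mutable range plus its del files.
    static final class DelPartition {
        String startKey, endKey;
        final List<String> delFiles = new ArrayList<>();
        DelPartition(RangeId id, String file) {
            startKey = id.startKey();
            endKey = id.endKey();
            delFiles.add(file);
        }
    }

    public static void main(String[] args) {
        // Ordered by start key, then end key: the property the merge pass relies on.
        NavigableMap<RangeId, DelPartition> delFilesToCompact = new TreeMap<>(
            Comparator.comparing(RangeId::startKey).thenComparing(RangeId::endKey));
        delFilesToCompact.put(new RangeId("a", "f"), new DelPartition(new RangeId("a", "f"), "del-1"));
        delFilesToCompact.put(new RangeId("d", "k"), new DelPartition(new RangeId("d", "k"), "del-2"));
        delFilesToCompact.put(new RangeId("m", "z"), new DelPartition(new RangeId("m", "z"), "del-3"));

        // Single pass in sorted order: an entry can only overlap the most recently merged range.
        List<DelPartition> allDelPartitions = new ArrayList<>();
        for (DelPartition cur : delFilesToCompact.values()) {
            if (!allDelPartitions.isEmpty()) {
                DelPartition prev = allDelPartitions.get(allDelPartitions.size() - 1);
                if (prev.endKey.compareTo(cur.startKey) >= 0) {
                    // Overlap: extend the previous range and absorb the file list.
                    if (prev.endKey.compareTo(cur.endKey) < 0) {
                        prev.endKey = cur.endKey;
                    }
                    prev.delFiles.addAll(cur.delFiles);
                    continue;
                }
            }
            allDelPartitions.add(cur);
        }
        allDelPartitions.forEach(p ->
            System.out.println("[" + p.startKey + "," + p.endKey + "] " + p.delFiles));
        // [a,k] [del-1, del-2]
        // [m,z] [del-3]
    }
}

One deliberate difference: the sketch keeps the larger of the two end keys when merging, whereas the HBase loop assigns the incoming end key directly, which is safe only if a later-starting range never ends earlier.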

Example 92 with NavigableMap

Use of java.util.NavigableMap in project hbase by apache.

From class WALKey, method readOlderScopes:

public void readOlderScopes(NavigableMap<byte[], Integer> scopes) {
    if (scopes != null) {
        Iterator<Map.Entry<byte[], Integer>> iterator = scopes.entrySet().iterator();
        while (iterator.hasNext()) {
            Map.Entry<byte[], Integer> scope = iterator.next();
            String key = Bytes.toString(scope.getKey());
            if (key.startsWith(PREFIX_CLUSTER_KEY)) {
                addClusterId(UUID.fromString(key.substring(PREFIX_CLUSTER_KEY.length())));
                iterator.remove();
            }
        }
        if (scopes.size() > 0) {
            this.replicationScope = scopes;
        }
    }
}
Also used : ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) HashMap(java.util.HashMap) Map(java.util.Map) NavigableMap(java.util.NavigableMap) TreeMap(java.util.TreeMap)
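
The pattern worth noting in readOlderScopes is the explicit Iterator combined with iterator.remove(): it is the supported way to delete entries from a TreeMap-backed NavigableMap while iterating, where calling scopes.remove(key) inside the loop would throw a ConcurrentModificationException. A minimal sketch of the same pattern, with a hypothetical "_c." prefix standing in for PREFIX_CLUSTER_KEY and String keys instead of byte[]:

import java.util.Iterator;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class ScopeFilterSketch {
    public static void main(String[] args) {
        final String prefix = "_c.";  // hypothetical stand-in for PREFIX_CLUSTER_KEY
        NavigableMap<String, Integer> scopes = new TreeMap<>();
        scopes.put("_c.2f4e-dummy-uuid", 0);  // cluster-id entry: consumed, then removed
        scopes.put("cf1", 1);                 // genuine replication scopes: kept
        scopes.put("cf2", 0);

        Iterator<Map.Entry<String, Integer>> iterator = scopes.entrySet().iterator();
        while (iterator.hasNext()) {
            Map.Entry<String, Integer> scope = iterator.next();
            if (scope.getKey().startsWith(prefix)) {
                String clusterId = scope.getKey().substring(prefix.length());
                System.out.println("consumed cluster id: " + clusterId);
                iterator.remove();  // safe; scopes.remove(...) here would be fail-fast
            }
        }
        System.out.println("remaining scopes: " + scopes);  // {cf1=1, cf2=0}
    }
}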

Example 93 with NavigableMap

Use of java.util.NavigableMap in project hbase by apache.

From class TestTableMapReduceBase, method map:

/**
   * Implements mapper logic for use across APIs.
   */
protected static Put map(ImmutableBytesWritable key, Result value) throws IOException {
    if (value.size() != 1) {
        throw new IOException("There should only be one input column");
    }
    Map<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> cf = value.getMap();
    if (!cf.containsKey(INPUT_FAMILY)) {
        throw new IOException("Wrong input columns. Missing: '" + Bytes.toString(INPUT_FAMILY) + "'.");
    }
    // Get the original value and reverse it
    String originalValue = Bytes.toString(value.getValue(INPUT_FAMILY, INPUT_FAMILY));
    StringBuilder newValue = new StringBuilder(originalValue);
    newValue.reverse();
    // Now set the value to be collected
    Put outval = new Put(key.get());
    outval.addColumn(OUTPUT_FAMILY, null, Bytes.toBytes(newValue.toString()));
    return outval;
}
Also used : NavigableMap(java.util.NavigableMap) IOException(java.io.IOException) Put(org.apache.hadoop.hbase.client.Put)
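
value.getMap() hands back a three-level NavigableMap, family -> qualifier -> timestamp -> value, which is what makes versioned lookups cheap. The sketch below mimics that shape in plain Java with String keys instead of byte[] (the real map is keyed with Bytes.BYTES_COMPARATOR) and explicitly sorts timestamps newest-first so that firstEntry() returns the latest version; treat the descending ordering as an assumption of this sketch rather than a statement about what Result.getMap() itself guarantees.

import java.util.Comparator;
import java.util.NavigableMap;
import java.util.TreeMap;

public class VersionedMapSketch {
    public static void main(String[] args) {
        // Simplified shape of Result.getMap(): family -> qualifier -> timestamp -> value.
        NavigableMap<String, NavigableMap<String, NavigableMap<Long, String>>> map = new TreeMap<>();

        // Timestamps sorted descending, so firstEntry() is the newest version.
        NavigableMap<Long, String> versions = new TreeMap<>(Comparator.reverseOrder());
        versions.put(1_000L, "v1");
        versions.put(2_000L, "v2");
        map.computeIfAbsent("contents", f -> new TreeMap<>()).put("html", versions);

        // Newest version of contents:html.
        String latest = map.get("contents").get("html").firstEntry().getValue();

        // Everything at or older than ts = 1500: in a descending map, tailMap()
        // walks toward older timestamps.
        NavigableMap<Long, String> atOrBefore1500 =
            map.get("contents").get("html").tailMap(1_500L, true);

        System.out.println(latest + " " + atOrBefore1500);  // v2 {1000=v1}
    }
}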

Example 94 with NavigableMap

Use of java.util.NavigableMap in project hbase by apache.

From class SimpleLoadBalancer, method balanceCluster:

/**
   * Generate a global load balancing plan according to the specified map of
   * server information to the most loaded regions of each server.
   *
   * The load balancing invariant is that all servers are within 1 region of the
   * average number of regions per server.  If the average is an integer number,
   * all servers will be balanced to the average.  Otherwise, all servers will
   * have either floor(average) or ceiling(average) regions.
   *
   * HBASE-3609 Modeled regionsToMove using Guava's MinMaxPriorityQueue so that
   *   we can fetch from both ends of the queue.
   * At the beginning, we check whether there is an empty region server just
   *   discovered by the Master. If so, we alternately choose new / old
   *   regions from the head / tail of regionsToMove, respectively. This
   *   alternation avoids clustering young regions on the newly discovered
   *   region server. Otherwise, we choose new regions from the head of
   *   regionsToMove.
   *
   * Another improvement from HBASE-3609 is that we assign regions from
   *   regionsToMove to underloaded servers in round-robin fashion.
   *   Previously one underloaded server would be filled before we move onto
   *   the next underloaded server, leading to clustering of young regions.
   *
   * Finally, we randomly shuffle underloaded servers so that they receive
   *   offloaded regions relatively evenly across calls to balanceCluster().
   *
   * The algorithm is currently implemented as such:
   *
   * <ol>
   * <li>Determine the two valid numbers of regions each server should have,
   *     <b>MIN</b>=floor(average) and <b>MAX</b>=ceiling(average).
   *
   * <li>Iterate down the most loaded servers, shedding regions from each so
   *     each server hosts exactly <b>MAX</b> regions.  Stop once you reach a
   *     server that already has &lt;= <b>MAX</b> regions.
   *     <p>
   *     Order the regions to move from most recent to least.
   *
   * <li>Iterate down the least loaded servers, assigning regions so each server
   *     has exactly <b>MIN</b> regions.  Stop once you reach a server that
   *     already has &gt;= <b>MIN</b> regions.
   *
   *     Regions being assigned to underloaded servers are those that were shed
   *     in the previous step.  It is possible that there were not enough
   *     regions shed to fill each underloaded server to <b>MIN</b>.  If so we
   *     end up with a number of regions required to do so, <b>neededRegions</b>.
   *
   *     It is also possible that we filled each underloaded server but ended
   *     up with regions that were shed from overloaded servers and still
   *     have no assignment.
   *
   *     If neither of these conditions hold (no regions needed to fill the
   *     underloaded servers, no regions leftover from overloaded servers),
   *     we are done and return.  Otherwise we handle these cases below.
   *
   * <li>If <b>neededRegions</b> is non-zero (still have underloaded servers),
   *     we iterate the most loaded servers again, shedding a single region from
   *     each (this brings them from having <b>MAX</b> regions to having
   *     <b>MIN</b> regions).
   *
   * <li>We now definitely have more regions that need assignment, either from
   *     the previous step or from the original shedding from overloaded servers.
   *     Iterate the least loaded servers filling each to <b>MIN</b>.
   *
   * <li>If we still have more regions that need assignment, again iterate the
   *     least loaded servers, this time giving each one (filling them to
   *     <b>MAX</b>) until we run out.
   *
   * <li>All servers will now either host <b>MIN</b> or <b>MAX</b> regions.
   *
   *     In addition, any server hosting &gt;= <b>MAX</b> regions is guaranteed
   *     to end up with <b>MAX</b> regions at the end of the balancing.  This
   *     ensures the minimal number of regions possible are moved.
   * </ol>
   *
   * TODO: We can reassign at most as many regions away from a particular
   *       server as it reports in its most-loaded list.
   *       Should we just keep all assignment in memory?  Any objections?
   *       Does this mean we need HeapSize on HMaster?  Or just careful monitor?
   *       (current thinking is we will hold all assignments in memory)
   *
   * @param clusterMap Map of regionservers and their load/region information to
   *                   a list of their most loaded regions
   * @return a list of regions to be moved, including source and destination,
   *         or null if cluster is already balanced
   */
@Override
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterMap) {
    List<RegionPlan> regionsToReturn = balanceMasterRegions(clusterMap);
    if (regionsToReturn != null || clusterMap == null || clusterMap.size() <= 1) {
        return regionsToReturn;
    }
    if (masterServerName != null && clusterMap.containsKey(masterServerName)) {
        if (clusterMap.size() <= 2) {
            return null;
        }
        clusterMap = new HashMap<>(clusterMap);
        clusterMap.remove(masterServerName);
    }
    long startTime = System.currentTimeMillis();
    // construct a Cluster object with clusterMap and the rest of the
    // arguments as defaults
    Cluster c = new Cluster(clusterMap, null, this.regionFinder, this.rackManager);
    if (!this.needsBalance(c) && !this.overallNeedsBalance())
        return null;
    ClusterLoadState cs = new ClusterLoadState(clusterMap);
    int numServers = cs.getNumServers();
    NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad = cs.getServersByLoad();
    int numRegions = cs.getNumRegions();
    float average = cs.getLoadAverage();
    int max = (int) Math.ceil(average);
    int min = (int) average;
    // Used for logging the balance parameters, to help check the balance result.
    StringBuilder strBalanceParam = new StringBuilder();
    strBalanceParam.append("Balance parameter: numRegions=").append(numRegions).append(", numServers=").append(numServers).append(", max=").append(max).append(", min=").append(min);
    LOG.debug(strBalanceParam.toString());
    // Balance the cluster
    // TODO: Look at data block locality or a more complex load to do this
    MinMaxPriorityQueue<RegionPlan> regionsToMove = MinMaxPriorityQueue.orderedBy(rpComparator).create();
    regionsToReturn = new ArrayList<>();
    // Walk down most loaded, pruning each to the max
    int serversOverloaded = 0;
    // flag used to fetch regions from head and tail of list, alternately
    boolean fetchFromTail = false;
    Map<ServerName, BalanceInfo> serverBalanceInfo = new TreeMap<>();
    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.descendingMap().entrySet()) {
        ServerAndLoad sal = server.getKey();
        int load = sal.getLoad();
        if (load <= max) {
            serverBalanceInfo.put(sal.getServerName(), new BalanceInfo(0, 0, server.getValue()));
            continue;
        }
        serversOverloaded++;
        List<HRegionInfo> regions = server.getValue();
        int numToOffload = Math.min(load - max, regions.size());
        // account for the out-of-band regions which were assigned to this server
        // after some other region server crashed
        Collections.sort(regions, riComparator);
        int numTaken = 0;
        for (int i = 0; i <= numToOffload; ) {
            // fetch from head
            HRegionInfo hri = regions.get(i);
            if (fetchFromTail) {
                hri = regions.get(regions.size() - 1 - i);
            }
            i++;
            // Don't rebalance special regions.
            if (shouldBeOnMaster(hri) && masterServerName.equals(sal.getServerName()))
                continue;
            regionsToMove.add(new RegionPlan(hri, sal.getServerName(), null));
            numTaken++;
            if (numTaken >= numToOffload)
                break;
        }
        serverBalanceInfo.put(sal.getServerName(), new BalanceInfo(numToOffload, (-1) * numTaken, server.getValue()));
    }
    int totalNumMoved = regionsToMove.size();
    // Walk down least loaded, filling each to the min
    // number of regions needed to bring all up to min
    int neededRegions = 0;
    fetchFromTail = false;
    Map<ServerName, Integer> underloadedServers = new HashMap<>();
    int maxToTake = numRegions - min;
    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.entrySet()) {
        // no more to take
        if (maxToTake == 0)
            break;
        int load = server.getKey().getLoad();
        if (load >= min) {
            // look for other servers which haven't reached min
            continue;
        }
        int regionsToPut = min - load;
        maxToTake -= regionsToPut;
        underloadedServers.put(server.getKey().getServerName(), regionsToPut);
    }
    // number of servers that get new regions
    int serversUnderloaded = underloadedServers.size();
    int incr = 1;
    List<ServerName> sns = Arrays.asList(underloadedServers.keySet().toArray(new ServerName[serversUnderloaded]));
    Collections.shuffle(sns, RANDOM);
    while (regionsToMove.size() > 0) {
        int cnt = 0;
        int i = incr > 0 ? 0 : underloadedServers.size() - 1;
        for (; i >= 0 && i < underloadedServers.size(); i += incr) {
            if (regionsToMove.isEmpty())
                break;
            ServerName si = sns.get(i);
            int numToTake = underloadedServers.get(si);
            if (numToTake == 0)
                continue;
            addRegionPlan(regionsToMove, fetchFromTail, si, regionsToReturn);
            underloadedServers.put(si, numToTake - 1);
            cnt++;
            BalanceInfo bi = serverBalanceInfo.get(si);
            bi.setNumRegionsAdded(bi.getNumRegionsAdded() + 1);
        }
        if (cnt == 0)
            break;
        // iterates underloadedServers in the other direction
        incr = -incr;
    }
    for (Integer i : underloadedServers.values()) {
        // If we still want to take some, increment needed
        neededRegions += i;
    }
    // If we need more to fill min, grab one from each most loaded until enough
    if (neededRegions != 0) {
        // Walk down most loaded, grabbing one from each until we get enough
        for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.descendingMap().entrySet()) {
            BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName());
            int idx = balanceInfo == null ? 0 : balanceInfo.getNextRegionForUnload();
            if (idx >= server.getValue().size())
                break;
            HRegionInfo region = server.getValue().get(idx);
            // Don't move meta regions.
            if (region.isMetaRegion())
                continue;
            regionsToMove.add(new RegionPlan(region, server.getKey().getServerName(), null));
            balanceInfo.setNumRegionsAdded(balanceInfo.getNumRegionsAdded() - 1);
            balanceInfo.setNextRegionForUnload(balanceInfo.getNextRegionForUnload() + 1);
            totalNumMoved++;
            if (--neededRegions == 0) {
                // No more regions needed, done shedding
                break;
            }
        }
    }
    // Walk down least loaded, assigning to each to fill up to min
    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.entrySet()) {
        int regionCount = server.getKey().getLoad();
        if (regionCount >= min)
            break;
        BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName());
        if (balanceInfo != null) {
            regionCount += balanceInfo.getNumRegionsAdded();
        }
        if (regionCount >= min) {
            continue;
        }
        int numToTake = min - regionCount;
        int numTaken = 0;
        while (numTaken < numToTake && 0 < regionsToMove.size()) {
            addRegionPlan(regionsToMove, fetchFromTail, server.getKey().getServerName(), regionsToReturn);
            numTaken++;
        }
    }
    if (min != max) {
        balanceOverall(regionsToReturn, serverBalanceInfo, fetchFromTail, regionsToMove, max, min);
    }
    long endTime = System.currentTimeMillis();
    if (!regionsToMove.isEmpty() || neededRegions != 0) {
        // Emit data so can diagnose how balancer went astray.
        LOG.warn("regionsToMove=" + totalNumMoved + ", numServers=" + numServers + ", serversOverloaded=" + serversOverloaded + ", serversUnderloaded=" + serversUnderloaded);
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<ServerName, List<HRegionInfo>> e : clusterMap.entrySet()) {
            if (sb.length() > 0)
                sb.append(", ");
            sb.append(e.getKey().toString());
            sb.append(" ");
            sb.append(e.getValue().size());
        }
        LOG.warn("Input " + sb.toString());
    }
    // All done!
    LOG.info("Done. Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving " + totalNumMoved + " regions off of " + serversOverloaded + " overloaded servers onto " + serversUnderloaded + " less loaded servers");
    return regionsToReturn;
}
Also used : HashMap(java.util.HashMap) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) ArrayList(java.util.ArrayList) List(java.util.List) TreeMap(java.util.TreeMap) RegionPlan(org.apache.hadoop.hbase.master.RegionPlan) ServerName(org.apache.hadoop.hbase.ServerName) HashMap(java.util.HashMap) NavigableMap(java.util.NavigableMap) TreeMap(java.util.TreeMap) Map(java.util.Map)
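
serversByLoad being a NavigableMap is what makes both walks above cheap: entrySet() visits servers from least to most loaded, and descendingMap() walks the same tree from the other end without copying anything. Below is a heavily reduced sketch of the MIN/MAX idea under that ordering; the ServerAndLoad record, the string region names, and the simplified leftover pass (the real balancer tracks per-server counts in BalanceInfo and has several more steps) are all illustrative assumptions.

import java.util.ArrayDeque;
import java.util.Comparator;
import java.util.Deque;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;

public class MinMaxBalanceSketch {
    // Hypothetical stand-in for ServerAndLoad; ordered by load, ties broken by name.
    record ServerAndLoad(String name, int load) {}

    public static void main(String[] args) {
        NavigableMap<ServerAndLoad, List<String>> serversByLoad = new TreeMap<>(
            Comparator.comparingInt(ServerAndLoad::load).thenComparing(ServerAndLoad::name));
        serversByLoad.put(new ServerAndLoad("rs1", 5), List.of("r1", "r2", "r3", "r4", "r5"));
        serversByLoad.put(new ServerAndLoad("rs2", 2), List.of("r6", "r7"));
        serversByLoad.put(new ServerAndLoad("rs3", 1), List.of("r8"));

        int numRegions = 8, numServers = 3;
        int max = (int) Math.ceil((float) numRegions / numServers);  // MAX = 3
        int min = numRegions / numServers;                           // MIN = 2

        // Shed from the most loaded first; descendingMap() walks the same tree
        // from the high end without copying it.
        Deque<String> toMove = new ArrayDeque<>();
        for (var server : serversByLoad.descendingMap().entrySet()) {
            int over = server.getKey().load() - max;
            if (over <= 0) break;  // everything after this is already <= MAX
            server.getValue().stream().limit(over).forEach(toMove::add);
        }

        // Fill the least loaded first; plain entrySet() order is ascending by load.
        for (var server : serversByLoad.entrySet()) {
            int need = min - server.getKey().load();
            while (need-- > 0 && !toMove.isEmpty()) {
                System.out.println("move " + toMove.poll() + " -> " + server.getKey().name());
            }
        }

        // Leftovers go to the least loaded servers, one each, filling toward MAX.
        for (var server : serversByLoad.entrySet()) {
            if (toMove.isEmpty()) break;
            System.out.println("move " + toMove.poll() + " -> " + server.getKey().name()
                + " (fill toward MAX)");
        }
        // Final loads: rs1 = 3 (MAX), rs2 = 2 (MIN), rs3 = 3 (MAX).
    }
}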

Example 95 with NavigableMap

Use of java.util.NavigableMap in project hbase by apache.

From class TestReplicationSourceManager, method testLogRoll:

@Test
public void testLogRoll() throws Exception {
    long baseline = 1000;
    long time = baseline;
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    KeyValue kv = new KeyValue(r1, f1, r1);
    WALEdit edit = new WALEdit();
    edit.add(kv);
    List<WALActionsListener> listeners = new ArrayList<>(1);
    listeners.add(replication);
    final WALFactory wals = new WALFactory(utility.getConfiguration(), listeners, URLEncoder.encode("regionserver:60020", "UTF8"));
    final WAL wal = wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace());
    manager.init();
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("tablename"));
    htd.addFamily(new HColumnDescriptor(f1));
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes.put(fam, 0);
    }
    // Testing normal log rolling every 20
    for (long i = 1; i < 101; i++) {
        if (i > 1 && i % 20 == 0) {
            wal.rollWriter();
        }
        LOG.info(i);
        final long txid = wal.append(hri, new WALKey(hri.getEncodedNameAsBytes(), test, System.currentTimeMillis(), mvcc, scopes), edit, true);
        wal.sync(txid);
    }
    // Simulate a rapid insert that's followed
    // by a report that's still not totally complete (missing last one)
    LOG.info(baseline + " and " + time);
    baseline += 101;
    time = baseline;
    LOG.info(baseline + " and " + time);
    for (int i = 0; i < 3; i++) {
        wal.append(hri, new WALKey(hri.getEncodedNameAsBytes(), test, System.currentTimeMillis(), mvcc, scopes), edit, true);
    }
    wal.sync();
    int logNumber = 0;
    for (Map.Entry<String, SortedSet<String>> entry : manager.getWALs().get(slaveId).entrySet()) {
        logNumber += entry.getValue().size();
    }
    assertEquals(6, logNumber);
    wal.rollWriter();
    manager.logPositionAndCleanOldLogs(manager.getSources().get(0).getCurrentPath(), "1", 0, false, false);
    wal.append(hri, new WALKey(hri.getEncodedNameAsBytes(), test, System.currentTimeMillis(), mvcc, scopes), edit, true);
    wal.sync();
    assertEquals(1, manager.getWALs().size());
// TODO Need a case with only 2 WALs and we only want to delete the first one
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) WAL(org.apache.hadoop.hbase.wal.WAL) MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayList(java.util.ArrayList) WALActionsListener(org.apache.hadoop.hbase.regionserver.wal.WALActionsListener) TreeMap(java.util.TreeMap) SortedSet(java.util.SortedSet) ReplicationEndpoint(org.apache.hadoop.hbase.replication.ReplicationEndpoint) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) WALKey(org.apache.hadoop.hbase.wal.WALKey) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) Map(java.util.Map) NavigableMap(java.util.NavigableMap) HashMap(java.util.HashMap) TreeMap(java.util.TreeMap) Test(org.junit.Test)
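
One line in this test repays attention: new TreeMap<>(Bytes.BYTES_COMPARATOR). A byte[] key neither implements Comparable nor overrides equals()/hashCode(), so a TreeMap over byte[] only works with an explicit comparator, and a HashMap would silently fail lookups made with a different array instance. A minimal plain-Java sketch of the same idea using Arrays::compareUnsigned, which should match Bytes.BYTES_COMPARATOR's unsigned lexicographic order (stated here as an assumption, not verified against HBase):

import java.util.Arrays;
import java.util.Comparator;
import java.util.NavigableMap;
import java.util.TreeMap;

public class ByteKeyedMapSketch {
    public static void main(String[] args) {
        // byte[] has identity-based equals() and no natural order, so a sorted map
        // over byte[] keys needs an explicit comparator.
        NavigableMap<byte[], Integer> scopes =
            new TreeMap<>((Comparator<byte[]>) Arrays::compareUnsigned);

        scopes.put("f1".getBytes(), 0);
        scopes.put("f2".getBytes(), 1);

        // Lookup with a *different* array holding the same bytes succeeds, because
        // the comparator, not reference equality, decides key identity.
        System.out.println(scopes.get("f1".getBytes()));  // 0
        // A HashMap<byte[], Integer> would print null here.
    }
}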

Aggregations

NavigableMap (java.util.NavigableMap): 170 uses
Map (java.util.Map): 84 uses
TreeMap (java.util.TreeMap): 61 uses
SortedMap (java.util.SortedMap): 34 uses
ArrayList (java.util.ArrayList): 33 uses
List (java.util.List): 27 uses
Iterator (java.util.Iterator): 21 uses
HashMap (java.util.HashMap): 20 uses
Cell (org.apache.hadoop.hbase.Cell): 20 uses
Result (org.apache.hadoop.hbase.client.Result): 18 uses
Set (java.util.Set): 14 uses
Get (org.apache.hadoop.hbase.client.Get): 13 uses
IOException (java.io.IOException): 12 uses
KeyValue (org.apache.hadoop.hbase.KeyValue): 10 uses
Put (org.apache.hadoop.hbase.client.Put): 10 uses
Test (org.junit.Test): 10 uses
Entry (java.util.Map.Entry): 9 uses
Update (co.cask.cdap.data2.dataset2.lib.table.Update): 7 uses
TestSuite (junit.framework.TestSuite): 7 uses
ImmutableMap (com.google.common.collect.ImmutableMap): 6 uses