
Example 6 with TreeSet

Use of java.util.TreeSet in project hbase by apache.

The class ReplicationSourceManager, method recordLog.

/**
   * Check and enqueue the given log to the correct source. If there's still no source for the
   * group to which the given log belongs, create one
   * @param logPath the log path to check and enqueue
   * @throws IOException if the log cannot be added to a replication queue
   */
private void recordLog(Path logPath) throws IOException {
    String logName = logPath.getName();
    String logPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(logName);
    // synchronize on replicationPeers to avoid adding source for the to-be-removed peer
    synchronized (replicationPeers) {
        for (String id : replicationPeers.getConnectedPeerIds()) {
            try {
                this.replicationQueues.addLog(id, logName);
            } catch (ReplicationException e) {
                throw new IOException("Cannot add log to replication queue" + " when creating a new source, queueId=" + id + ", filename=" + logName, e);
            }
        }
    }
    // update walsById map
    synchronized (walsById) {
        for (Map.Entry<String, Map<String, SortedSet<String>>> entry : this.walsById.entrySet()) {
            String peerId = entry.getKey();
            Map<String, SortedSet<String>> walsByPrefix = entry.getValue();
            boolean existingPrefix = false;
            for (Map.Entry<String, SortedSet<String>> walsEntry : walsByPrefix.entrySet()) {
                SortedSet<String> wals = walsEntry.getValue();
                if (this.sources.isEmpty()) {
                    // If there are no slaves, we don't need to keep the old WALs since
                    // we only consider the last one when a new slave comes in
                    wals.clear();
                }
                if (logPrefix.equals(walsEntry.getKey())) {
                    wals.add(logName);
                    existingPrefix = true;
                }
            }
            if (!existingPrefix) {
                // The new log belongs to a new group, add it into this peer
                LOG.debug("Start tracking logs for wal group " + logPrefix + " for peer " + peerId);
                SortedSet<String> wals = new TreeSet<>();
                wals.add(logName);
                walsByPrefix.put(logPrefix, wals);
            }
        }
    }
}
Also used : TreeSet(java.util.TreeSet) ReplicationException(org.apache.hadoop.hbase.replication.ReplicationException) IOException(java.io.IOException) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) SortedSet(java.util.SortedSet)
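
The TreeSet here is the per-prefix SortedSet inside walsById: keeping WAL names in lexicographic order lets the manager treat the first element of each group as the oldest outstanding log. A minimal stand-alone sketch of that grouping pattern (WalGroupTracker, recordWal and oldestWal are illustrative names, not HBase API):

import java.util.HashMap;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;

public class WalGroupTracker {

    // prefix -> WAL file names; TreeSet keeps each group lexicographically ordered
    private final Map<String, SortedSet<String>> walsByPrefix = new HashMap<>();

    /** Record a WAL under its prefix group, creating the group on first use. */
    public void recordWal(String prefix, String walName) {
        walsByPrefix.computeIfAbsent(prefix, p -> new TreeSet<>()).add(walName);
    }

    /** Oldest WAL still tracked for a group, or null if the group is unknown or empty. */
    public String oldestWal(String prefix) {
        SortedSet<String> wals = walsByPrefix.get(prefix);
        return (wals == null || wals.isEmpty()) ? null : wals.first();
    }
}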

Example 7 with TreeSet

Use of java.util.TreeSet in project hbase by apache.

The class AccessControlLists, method addUserPermission.

/**
   * Stores a new user permission grant in the access control lists table.
   * @param conf the configuration
   * @param userPerm the details of the permission to be granted
   * @param t acl table instance. It is closed upon method return.
   * @param mergeExistingPermissions whether to merge the new actions with any permission already stored for this user
   * @throws IOException in the case of an error accessing the metadata table
   */
static void addUserPermission(Configuration conf, UserPermission userPerm, Table t, boolean mergeExistingPermissions) throws IOException {
    Permission.Action[] actions = userPerm.getActions();
    byte[] rowKey = userPermissionRowKey(userPerm);
    Put p = new Put(rowKey);
    byte[] key = userPermissionKey(userPerm);
    if ((actions == null) || (actions.length == 0)) {
        String msg = "No actions associated with user '" + Bytes.toString(userPerm.getUser()) + "'";
        LOG.warn(msg);
        throw new IOException(msg);
    }
    Set<Permission.Action> actionSet = new TreeSet<Permission.Action>();
    if (mergeExistingPermissions) {
        List<UserPermission> perms = getUserPermissions(conf, rowKey);
        UserPermission currentPerm = null;
        for (UserPermission perm : perms) {
            if (Bytes.equals(perm.getUser(), userPerm.getUser()) && ((userPerm.isGlobal() && ACL_TABLE_NAME.equals(perm.getTableName())) || perm.tableFieldsEqual(userPerm))) {
                currentPerm = perm;
                break;
            }
        }
        if (currentPerm != null && currentPerm.getActions() != null) {
            actionSet.addAll(Arrays.asList(currentPerm.getActions()));
        }
    }
    // merge the current actions with the new ones.
    actionSet.addAll(Arrays.asList(actions));
    // serialize to byte array.
    byte[] value = new byte[actionSet.size()];
    int index = 0;
    for (Permission.Action action : actionSet) {
        value[index++] = action.code();
    }
    p.addImmutable(ACL_LIST_FAMILY, key, value);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Writing permission with rowKey " + Bytes.toString(rowKey) + " " + Bytes.toString(key) + ": " + Bytes.toStringBinary(value));
    }
    try {
        t.put(p);
    } finally {
        t.close();
    }
}
Also used : IOException(java.io.IOException) Put(org.apache.hadoop.hbase.client.Put) TreeSet(java.util.TreeSet)
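
The TreeSet<Permission.Action> above deduplicates the merged grant and gives the serialized bytes a stable order. A rough sketch of the same merge-then-encode step, using a hypothetical Action enum in place of the HBase Permission.Action type:

import java.util.Arrays;
import java.util.Set;
import java.util.TreeSet;

public class ActionMerge {

    // Hypothetical stand-in for Permission.Action; enums sort by declaration order in a TreeSet.
    enum Action {
        READ, WRITE, EXEC, ADMIN;
        byte code() { return (byte) ordinal(); }
    }

    /** Merge existing and newly granted actions, then encode one byte per distinct action. */
    static byte[] mergeAndEncode(Action[] existing, Action[] granted) {
        Set<Action> merged = new TreeSet<>();
        if (existing != null) {
            merged.addAll(Arrays.asList(existing));
        }
        merged.addAll(Arrays.asList(granted));
        byte[] value = new byte[merged.size()];
        int i = 0;
        for (Action a : merged) {
            value[i++] = a.code();
        }
        return value;
    }
}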

Example 8 with TreeSet

Use of java.util.TreeSet in project hbase by apache.

The class WALSplitter, method getSplitEditFilesSorted.

/**
   * Returns sorted set of edit files made by splitter, excluding files
   * with '.temp' suffix.
   *
   * @param fs the file system to look in
   * @param regiondir the region directory to look under
   * @return Files in passed <code>regiondir</code> as a sorted set.
   * @throws IOException
   */
public static NavigableSet<Path> getSplitEditFilesSorted(final FileSystem fs, final Path regiondir) throws IOException {
    NavigableSet<Path> filesSorted = new TreeSet<>();
    Path editsdir = getRegionDirRecoveredEditsDir(regiondir);
    if (!fs.exists(editsdir))
        return filesSorted;
    FileStatus[] files = FSUtils.listStatus(fs, editsdir, new PathFilter() {

        @Override
        public boolean accept(Path p) {
            boolean result = false;
            try {
                // Return files and only files that match the editfile names pattern.
                // There can be other files in this directory other than edit files.
                // In particular, on error, we'll move aside the bad edit file giving
                // it a timestamp suffix. See moveAsideBadEditsFile.
                Matcher m = EDITFILES_NAME_PATTERN.matcher(p.getName());
                result = fs.isFile(p) && m.matches();
                // Skip the file if its name ends with RECOVERED_LOG_TMPFILE_SUFFIX,
                // because it means the split-WAL thread is still writing this file.
                if (p.getName().endsWith(RECOVERED_LOG_TMPFILE_SUFFIX)) {
                    result = false;
                }
                // Skip SeqId Files
                if (isSequenceIdFile(p)) {
                    result = false;
                }
            } catch (IOException e) {
                LOG.warn("Failed isFile check on " + p);
            }
            return result;
        }
    });
    if (files == null) {
        return filesSorted;
    }
    for (FileStatus status : files) {
        filesSorted.add(status.getPath());
    }
    return filesSorted;
}
Also used : Path(org.apache.hadoop.fs.Path) PathFilter(org.apache.hadoop.fs.PathFilter) FileStatus(org.apache.hadoop.fs.FileStatus) Matcher(java.util.regex.Matcher) TreeSet(java.util.TreeSet) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException)
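
Here the TreeSet is only used through its NavigableSet view, so callers get the recovered-edits files back in sorted path order. A simplified sketch of the same filter-and-sort pattern, assuming plain java.nio.file instead of Hadoop's FileSystem API and without the edit-file name pattern:

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.NavigableSet;
import java.util.TreeSet;

public class SortedEdits {

    /** Collect regular files under dir, skipping ".temp" files, in sorted path order. */
    static NavigableSet<Path> listSorted(Path dir) throws IOException {
        NavigableSet<Path> sorted = new TreeSet<>();
        if (!Files.isDirectory(dir)) {
            return sorted;
        }
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
            for (Path p : stream) {
                if (Files.isRegularFile(p) && !p.getFileName().toString().endsWith(".temp")) {
                    sorted.add(p);
                }
            }
        }
        return sorted;
    }
}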

Example 9 with TreeSet

Use of java.util.TreeSet in project hadoop by apache.

The class TestProportionalCapacityPreemptionPolicy, method mockLeafQueue.

@SuppressWarnings("rawtypes")
LeafQueue mockLeafQueue(ParentQueue p, Resource tot, int i, Resource[] abs, Resource[] used, Resource[] pending, Resource[] reserved, int[] apps, Resource[] gran) {
    LeafQueue lq = mock(LeafQueue.class);
    ResourceCalculator rc = mCS.getResourceCalculator();
    List<ApplicationAttemptId> appAttemptIdList = new ArrayList<ApplicationAttemptId>();
    when(lq.getTotalPendingResourcesConsideringUserLimit(isA(Resource.class), isA(String.class), eq(false))).thenReturn(pending[i]);
    when(lq.getTotalPendingResourcesConsideringUserLimit(isA(Resource.class), isA(String.class), eq(true))).thenReturn(Resources.componentwiseMax(Resources.subtract(pending[i], reserved[i] == null ? Resources.none() : reserved[i]), Resources.none()));
    // need to set pending resource in resource usage as well
    ResourceUsage ru = new ResourceUsage();
    ru.setPending(pending[i]);
    ru.setUsed(used[i]);
    ru.setReserved(reserved[i]);
    when(lq.getQueueResourceUsage()).thenReturn(ru);
    // consider moving this to wherever CapacityScheduler's comparator is accessible
    final NavigableSet<FiCaSchedulerApp> qApps = new TreeSet<FiCaSchedulerApp>(new Comparator<FiCaSchedulerApp>() {

        @Override
        public int compare(FiCaSchedulerApp a1, FiCaSchedulerApp a2) {
            return a1.getApplicationAttemptId().compareTo(a2.getApplicationAttemptId());
        }
    });
    // applications are added in global L->R order in queues
    if (apps[i] != 0) {
        Resource aUsed = Resources.divideAndCeil(rc, used[i], apps[i]);
        Resource aPending = Resources.divideAndCeil(rc, pending[i], apps[i]);
        Resource aReserve = Resources.divideAndCeil(rc, reserved[i], apps[i]);
        for (int a = 0; a < apps[i]; ++a) {
            FiCaSchedulerApp mockFiCaApp = mockApp(i, appAlloc, aUsed, aPending, aReserve, gran[i]);
            qApps.add(mockFiCaApp);
            ++appAlloc;
            appAttemptIdList.add(mockFiCaApp.getApplicationAttemptId());
        }
        when(mCS.getAppsInQueue("queue" + (char) ('A' + i - 1))).thenReturn(appAttemptIdList);
    }
    when(lq.getApplications()).thenReturn(qApps);
    @SuppressWarnings("unchecked") OrderingPolicy<FiCaSchedulerApp> so = mock(OrderingPolicy.class);
    when(so.getPreemptionIterator()).thenAnswer(new Answer() {

        public Object answer(InvocationOnMock invocation) {
            return qApps.descendingIterator();
        }
    });
    when(lq.getOrderingPolicy()).thenReturn(so);
    if (setAMResourcePercent != 0.0f) {
        when(lq.getMaxAMResourcePerQueuePercent()).thenReturn(setAMResourcePercent);
    }
    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    when(lq.getReadLock()).thenReturn(lock.readLock());
    when(lq.getPriority()).thenReturn(Priority.newInstance(0));
    p.getChildQueues().add(lq);
    return lq;
}
Also used : ResourceUsage(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage) ArrayList(java.util.ArrayList) Resource(org.apache.hadoop.yarn.api.records.Resource) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) Matchers.anyString(org.mockito.Matchers.anyString) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock) LeafQueue(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue) DefaultResourceCalculator(org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator) DominantResourceCalculator(org.apache.hadoop.yarn.util.resource.DominantResourceCalculator) ResourceCalculator(org.apache.hadoop.yarn.util.resource.ResourceCalculator) Answer(org.mockito.stubbing.Answer) TreeSet(java.util.TreeSet) InvocationOnMock(org.mockito.invocation.InvocationOnMock) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp)
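
The interesting part for TreeSet here is the explicit Comparator: the test orders FiCaSchedulerApp instances by application attempt id, and descendingIterator() then yields the reverse order used for preemption. A minimal sketch of that comparator-plus-descending-iteration idiom (App is a hypothetical stand-in for FiCaSchedulerApp):

import java.util.Comparator;
import java.util.Iterator;
import java.util.NavigableSet;
import java.util.TreeSet;

public class PreemptionOrder {

    // Hypothetical stand-in for FiCaSchedulerApp, ordered by attempt id.
    static class App {
        final int attemptId;
        App(int attemptId) { this.attemptId = attemptId; }
    }

    public static void main(String[] args) {
        NavigableSet<App> apps = new TreeSet<>(Comparator.comparingInt((App a) -> a.attemptId));
        apps.add(new App(3));
        apps.add(new App(1));
        apps.add(new App(2));
        // Ascending iteration gives allocation order; descending gives preemption order.
        Iterator<App> victims = apps.descendingIterator();
        while (victims.hasNext()) {
            System.out.println(victims.next().attemptId); // prints 3, 2, 1
        }
    }
}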

Example 10 with TreeSet

Use of java.util.TreeSet in project hbase by apache.

The class TestStoreFile, method bloomWriteRead.

private void bloomWriteRead(StoreFileWriter writer, FileSystem fs) throws Exception {
    float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0);
    Path f = writer.getPath();
    long now = System.currentTimeMillis();
    for (int i = 0; i < 2000; i += 2) {
        String row = String.format(localFormatter, i);
        KeyValue kv = new KeyValue(row.getBytes(), "family".getBytes(), "col".getBytes(), now, "value".getBytes());
        writer.append(kv);
    }
    writer.close();
    StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf);
    reader.loadFileInfo();
    reader.loadBloomfilter();
    StoreFileScanner scanner = getStoreFileScanner(reader, false, false);
    // check the false positive rate
    int falsePos = 0;
    int falseNeg = 0;
    for (int i = 0; i < 2000; i++) {
        String row = String.format(localFormatter, i);
        TreeSet<byte[]> columns = new TreeSet<>(Bytes.BYTES_COMPARATOR);
        columns.add("family:col".getBytes());
        Scan scan = new Scan(row.getBytes(), row.getBytes());
        scan.addColumn("family".getBytes(), "family:col".getBytes());
        Store store = mock(Store.class);
        HColumnDescriptor hcd = mock(HColumnDescriptor.class);
        when(hcd.getName()).thenReturn(Bytes.toBytes("family"));
        when(store.getFamily()).thenReturn(hcd);
        boolean exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
        if (i % 2 == 0) {
            if (!exists)
                falseNeg++;
        } else {
            if (exists)
                falsePos++;
        }
    }
    // evict because we are about to delete the file
    reader.close(true);
    fs.delete(f, true);
    assertEquals("False negatives: " + falseNeg, 0, falseNeg);
    int maxFalsePos = (int) (2 * 2000 * err);
    assertTrue("Too many false positives: " + falsePos + " (err=" + err + ", expected no more than " + maxFalsePos + ")", falsePos <= maxFalsePos);
}
Also used : Path(org.apache.hadoop.fs.Path) KeyValue(org.apache.hadoop.hbase.KeyValue) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) TreeSet(java.util.TreeSet) Scan(org.apache.hadoop.hbase.client.Scan)
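
byte[] does not implement Comparable, which is why the test builds its TreeSet with Bytes.BYTES_COMPARATOR; without a comparator, adding byte arrays fails with a ClassCastException. A small sketch of the same idea using a hand-rolled unsigned lexicographic comparator instead of the HBase one:

import java.util.Comparator;
import java.util.TreeSet;

public class ByteArraySet {

    // Mirrors the unsigned lexicographic order of Bytes.BYTES_COMPARATOR without the HBase dependency.
    static final Comparator<byte[]> LEX = (a, b) -> {
        int n = Math.min(a.length, b.length);
        for (int i = 0; i < n; i++) {
            int cmp = (a[i] & 0xff) - (b[i] & 0xff);
            if (cmp != 0) {
                return cmp;
            }
        }
        return a.length - b.length;
    };

    public static void main(String[] args) {
        TreeSet<byte[]> columns = new TreeSet<>(LEX);
        columns.add("family:col".getBytes());
        columns.add("family:col".getBytes());
        // The comparator also defines equality, so equal contents collapse to one element.
        System.out.println(columns.size()); // prints 1
    }
}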

Aggregations

TreeSet (java.util.TreeSet): 3795
ArrayList (java.util.ArrayList): 835
Test (org.junit.Test): 544
HashMap (java.util.HashMap): 502
HashSet (java.util.HashSet): 430
Set (java.util.Set): 424
Map (java.util.Map): 405
IOException (java.io.IOException): 378
File (java.io.File): 341
List (java.util.List): 323
TreeMap (java.util.TreeMap): 229
Iterator (java.util.Iterator): 189
SortedSet (java.util.SortedSet): 186
LinkedList (java.util.LinkedList): 110
LinkedHashSet (java.util.LinkedHashSet): 106
Date (java.util.Date): 94
Collection (java.util.Collection): 92
Comparator (java.util.Comparator): 85
Test (org.testng.annotations.Test): 81
Text (org.apache.hadoop.io.Text): 79