Use of java.util.Set in project hbase by apache.
The class ReplicationZKNodeCleaner, method getUnDeletedQueues.
/**
 * @return a map from each replicator to the queue ids it still holds for removed peers
 * @throws IOException if the replication queues cannot be read from ZooKeeper
 */
public Map<String, List<String>> getUnDeletedQueues() throws IOException {
  Map<String, List<String>> undeletedQueues = new HashMap<>();
  Set<String> peerIds = new HashSet<>(this.replicationPeers.getAllPeerIds());
  try {
    List<String> replicators = this.queuesClient.getListOfReplicators();
    for (String replicator : replicators) {
      List<String> queueIds = this.queuesClient.getAllQueues(replicator);
      for (String queueId : queueIds) {
        ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
        if (!peerIds.contains(queueInfo.getPeerId())) {
          undeletedQueues.computeIfAbsent(replicator, (key) -> new ArrayList<>()).add(queueId);
          if (LOG.isDebugEnabled()) {
            LOG.debug("Undeleted replication queue for removed peer found: "
                + String.format("[removedPeerId=%s, replicator=%s, queueId=%s]",
                    queueInfo.getPeerId(), replicator, queueId));
          }
        }
      }
    }
  } catch (KeeperException ke) {
    throw new IOException("Failed to get the replication queues of all replicators", ke);
  }
  return undeletedQueues;
}
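The Set work here is the snapshot-then-test idiom: the live peer ids are copied into a HashSet once, so the contains check inside the doubly nested loop is O(1) and never re-reads the ZooKeeper-backed collection. A minimal, self-contained sketch of the same idiom; OrphanQueueFinder, findOrphanQueues, and peerIdOfQueue are hypothetical stand-ins, not HBase API:

import java.util.*;
import java.util.function.Function;

final class OrphanQueueFinder {
  // Copy the live peer ids into a HashSet once, then bucket every
  // queue whose peer id is no longer live under its replicator.
  static Map<String, List<String>> findOrphanQueues(
      Collection<String> livePeerIds,
      Map<String, List<String>> queuesByReplicator,
      Function<String, String> peerIdOfQueue) {
    Set<String> peerIds = new HashSet<>(livePeerIds);
    Map<String, List<String>> orphans = new HashMap<>();
    for (Map.Entry<String, List<String>> e : queuesByReplicator.entrySet()) {
      for (String queueId : e.getValue()) {
        if (!peerIds.contains(peerIdOfQueue.apply(queueId))) {
          orphans.computeIfAbsent(e.getKey(), k -> new ArrayList<>()).add(queueId);
        }
      }
    }
    return orphans;
  }
}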
Use of java.util.Set in project hbase by apache.
The class RegionStates, method getTableRSRegionMap.
private Map<TableName, Map<ServerName, List<HRegionInfo>>> getTableRSRegionMap(Boolean bytable) {
  Map<TableName, Map<ServerName, List<HRegionInfo>>> result = new HashMap<>();
  for (Map.Entry<ServerName, Set<HRegionInfo>> e : serverHoldings.entrySet()) {
    for (HRegionInfo hri : e.getValue()) {
      if (hri.isMetaRegion()) {
        continue;
      }
      TableName tablename = bytable ? hri.getTable()
          : TableName.valueOf(HConstants.ENSEMBLE_TABLE_NAME);
      Map<ServerName, List<HRegionInfo>> svrToRegions = result.get(tablename);
      if (svrToRegions == null) {
        svrToRegions = new HashMap<>(serverHoldings.size());
        result.put(tablename, svrToRegions);
      }
      List<HRegionInfo> regions = svrToRegions.get(e.getKey());
      if (regions == null) {
        regions = new ArrayList<>();
        svrToRegions.put(e.getKey(), regions);
      }
      regions.add(hri);
    }
  }
  return result;
}
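This method builds the nested map with the pre-Java-8 get-then-put-if-absent pattern on both levels. On Java 8+, Map.computeIfAbsent expresses the same two-level grouping more compactly; a standalone sketch with simplified types (String keys in place of TableName/ServerName/HRegionInfo, which are assumptions for brevity):

import java.util.*;
import java.util.function.Function;

final class TwoLevelGrouping {
  // Same shape as getTableRSRegionMap, with computeIfAbsent replacing
  // the explicit null checks on both map levels.
  static Map<String, Map<String, List<String>>> group(
      Map<String, Set<String>> regionsByServer,
      Function<String, String> tableOfRegion) {
    Map<String, Map<String, List<String>>> result = new HashMap<>();
    for (Map.Entry<String, Set<String>> e : regionsByServer.entrySet()) {
      for (String region : e.getValue()) {
        result.computeIfAbsent(tableOfRegion.apply(region), t -> new HashMap<>())
            .computeIfAbsent(e.getKey(), s -> new ArrayList<>())
            .add(region);
      }
    }
    return result;
  }
}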
Use of java.util.Set in project hadoop by apache.
The class FileSystemTimelineReaderImpl, method getEntities.
private Set<TimelineEntity> getEntities(File dir, String entityType,
    TimelineEntityFilters filters, TimelineDataToRetrieve dataToRetrieve) throws IOException {
  // First sort the selected entities based on created/start time.
  Map<Long, Set<TimelineEntity>> sortedEntities = new TreeMap<>(new Comparator<Long>() {

    @Override
    public int compare(Long l1, Long l2) {
      return l2.compareTo(l1);
    }
  });
  for (File entityFile : dir.listFiles()) {
    if (!entityFile.getName().contains(TIMELINE_SERVICE_STORAGE_EXTENSION)) {
      continue;
    }
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(new FileInputStream(entityFile), Charset.forName("UTF-8")))) {
      TimelineEntity entity = readEntityFromFile(reader);
      if (!entity.getType().equals(entityType)) {
        continue;
      }
      if (!isTimeInRange(entity.getCreatedTime(), filters.getCreatedTimeBegin(),
          filters.getCreatedTimeEnd())) {
        continue;
      }
      if (filters.getRelatesTo() != null && !filters.getRelatesTo().getFilterList().isEmpty()
          && !TimelineStorageUtils.matchRelatesTo(entity, filters.getRelatesTo())) {
        continue;
      }
      if (filters.getIsRelatedTo() != null && !filters.getIsRelatedTo().getFilterList().isEmpty()
          && !TimelineStorageUtils.matchIsRelatedTo(entity, filters.getIsRelatedTo())) {
        continue;
      }
      if (filters.getInfoFilters() != null && !filters.getInfoFilters().getFilterList().isEmpty()
          && !TimelineStorageUtils.matchInfoFilters(entity, filters.getInfoFilters())) {
        continue;
      }
      if (filters.getConfigFilters() != null
          && !filters.getConfigFilters().getFilterList().isEmpty()
          && !TimelineStorageUtils.matchConfigFilters(entity, filters.getConfigFilters())) {
        continue;
      }
      if (filters.getMetricFilters() != null
          && !filters.getMetricFilters().getFilterList().isEmpty()
          && !TimelineStorageUtils.matchMetricFilters(entity, filters.getMetricFilters())) {
        continue;
      }
      if (filters.getEventFilters() != null && !filters.getEventFilters().getFilterList().isEmpty()
          && !TimelineStorageUtils.matchEventFilters(entity, filters.getEventFilters())) {
        continue;
      }
      TimelineEntity entityToBeReturned =
          createEntityToBeReturned(entity, dataToRetrieve.getFieldsToRetrieve());
      Set<TimelineEntity> entitiesCreatedAtSameTime =
          sortedEntities.get(entityToBeReturned.getCreatedTime());
      if (entitiesCreatedAtSameTime == null) {
        entitiesCreatedAtSameTime = new HashSet<TimelineEntity>();
      }
      entitiesCreatedAtSameTime.add(entityToBeReturned);
      sortedEntities.put(entityToBeReturned.getCreatedTime(), entitiesCreatedAtSameTime);
    }
  }
  Set<TimelineEntity> entities = new HashSet<TimelineEntity>();
  long entitiesAdded = 0;
  for (Set<TimelineEntity> entitySet : sortedEntities.values()) {
    for (TimelineEntity entity : entitySet) {
      entities.add(entity);
      ++entitiesAdded;
      if (entitiesAdded >= filters.getLimit()) {
        return entities;
      }
    }
  }
  return entities;
}
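The reversed Comparator makes the TreeMap iterate newest-created first, while the Set values bucket entities that share a created time; the limit is then applied across buckets in that order. A compact, self-contained sketch of just that sort-bucket-limit pattern, with a generic item type standing in for TimelineEntity:

import java.util.*;

final class NewestFirstLimit {
  // Bucket items by timestamp in a descending TreeMap, then collect
  // up to 'limit' items, newest bucket first.
  static <T> Set<T> newestFirst(Map<T, Long> createdTime, long limit) {
    NavigableMap<Long, Set<T>> buckets = new TreeMap<>(Comparator.reverseOrder());
    for (Map.Entry<T, Long> e : createdTime.entrySet()) {
      buckets.computeIfAbsent(e.getValue(), t -> new HashSet<>()).add(e.getKey());
    }
    Set<T> out = new LinkedHashSet<>();
    for (Set<T> bucket : buckets.values()) {
      for (T item : bucket) {
        out.add(item);
        if (out.size() >= limit) {
          return out;
        }
      }
    }
    return out;
  }
}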
Use of java.util.Set in project hadoop by apache.
The class FileSystemTimelineReaderImpl, method mergeEntities.
private static void mergeEntities(TimelineEntity entity1, TimelineEntity entity2) {
  // Ideally the created time won't change, except when a client reports it inconsistently.
  if (entity2.getCreatedTime() != null && entity2.getCreatedTime() > 0) {
    entity1.setCreatedTime(entity2.getCreatedTime());
  }
  for (Entry<String, String> configEntry : entity2.getConfigs().entrySet()) {
    entity1.addConfig(configEntry.getKey(), configEntry.getValue());
  }
  for (Entry<String, Object> infoEntry : entity2.getInfo().entrySet()) {
    entity1.addInfo(infoEntry.getKey(), infoEntry.getValue());
  }
  for (Entry<String, Set<String>> isRelatedToEntry : entity2.getIsRelatedToEntities().entrySet()) {
    String type = isRelatedToEntry.getKey();
    for (String entityId : isRelatedToEntry.getValue()) {
      entity1.addIsRelatedToEntity(type, entityId);
    }
  }
  for (Entry<String, Set<String>> relatesToEntry : entity2.getRelatesToEntities().entrySet()) {
    String type = relatesToEntry.getKey();
    for (String entityId : relatesToEntry.getValue()) {
      entity1.addRelatesToEntity(type, entityId);
    }
  }
  for (TimelineEvent event : entity2.getEvents()) {
    entity1.addEvent(event);
  }
  for (TimelineMetric metric2 : entity2.getMetrics()) {
    boolean found = false;
    for (TimelineMetric metric1 : entity1.getMetrics()) {
      if (metric1.getId().equals(metric2.getId())) {
        metric1.addValues(metric2.getValues());
        found = true;
        break;
      }
    }
    if (!found) {
      entity1.addMetric(metric2);
    }
  }
}
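The two loops over getIsRelatedToEntities() and getRelatesToEntities() are a plain union of Set-valued maps. Map.merge expresses that union directly; a standalone sketch under the assumption that both maps use String keys and Set<String> values, as above:

import java.util.*;

final class SetMapUnion {
  // Fold every (type -> ids) entry of 'src' into 'dst', unioning the
  // Set values when a type is present in both maps. The value is copied
  // so 'dst' never aliases a Set owned by 'src'.
  static void mergeInto(Map<String, Set<String>> dst, Map<String, Set<String>> src) {
    for (Map.Entry<String, Set<String>> e : src.entrySet()) {
      dst.merge(e.getKey(), new HashSet<>(e.getValue()), (a, b) -> {
        a.addAll(b);
        return a;
      });
    }
  }
}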
Use of java.util.Set in project hadoop by apache.
The class TestTimelineReaderWebServices, method testGetEntitiesWithLimit.
@Test
public void testGetEntitiesWithLimit() throws Exception {
  Client client = createClient();
  try {
    URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/"
        + "timeline/clusters/cluster1/apps/app1/entities/app?limit=2");
    ClientResponse resp = getResponse(client, uri);
    Set<TimelineEntity> entities = resp.getEntity(new GenericType<Set<TimelineEntity>>() {
    });
    assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, resp.getType().toString());
    assertNotNull(entities);
    assertEquals(2, entities.size());
    // Entities returned are based on most recent created time.
    assertTrue("Entities with id_1 and id_4 should have been present "
        + "in response based on entity created time.",
        entities.contains(newEntity("app", "id_1"))
            && entities.contains(newEntity("app", "id_4")));
    uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/"
        + "clusters/cluster1/apps/app1/entities/app?limit=3");
    resp = getResponse(client, uri);
    entities = resp.getEntity(new GenericType<Set<TimelineEntity>>() {
    });
    assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, resp.getType().toString());
    assertNotNull(entities);
    // Even though 2 of the 4 entities share the same created time, one
    // entity is left out due to the limit.
    assertEquals(3, entities.size());
  } finally {
    client.destroy();
  }
}
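The contains assertions only hold because equality for TimelineEntity is value-based, so a freshly built newEntity("app", "id_1") matches an instance deserialized from the JSON response, and the GenericType<Set<TimelineEntity>> deserialization dedupes on the same equality. A minimal sketch of that property, using a hypothetical record in place of TimelineEntity:

import java.util.*;

final class SetEqualityDemo {
  // Value type whose equality is (type, id); the record mimics how the
  // test's newEntity(...) values can match deserialized entities.
  record Entity(String type, String id) {
  }

  public static void main(String[] args) {
    Set<Entity> entities = new HashSet<>(List.of(
        new Entity("app", "id_1"), new Entity("app", "id_4")));
    // A freshly constructed value matches the one already in the set.
    System.out.println(entities.contains(new Entity("app", "id_1"))); // true
  }
}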