Use of org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve in project hadoop by apache.
The class TestFileSystemTimelineReaderImpl, method testAppFlowMappingCsv.
/** This test checks whether we can handle commas in the app flow mapping csv. */
@Test
public void testAppFlowMappingCsv() throws Exception {
  // Test getting an entity by cluster and app where the flow entry in the
  // app flow mapping csv has commas.
  TimelineEntity result = reader.getEntity(
      new TimelineReaderContext("cluster1", null, null, null, "app2", "app",
          "id_5"),
      new TimelineDataToRetrieve(null, null, null, null));
  Assert.assertEquals(
      new TimelineEntity.Identifier("app", "id_5").toString(),
      result.getIdentifier().toString());
  Assert.assertEquals((Long) 1425016502050L, result.getCreatedTime());
}
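For background, FileSystemTimelineReaderImpl resolves the user, flow name and flow run id for an app from a CSV mapping file, so a flow name that itself contains commas has to be quoted to survive parsing. A minimal sketch of that parsing concern using commons-csv; the line format and field order shown here are illustrative assumptions, not the reader's actual file layout:

import java.io.StringReader;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVRecord;

// Hypothetical mapping line: the quoted flow name contains a comma.
String line = "app2,user1,\"flow_name,flow_name2\",1002345678919";
CSVRecord record =
    CSVFormat.DEFAULT.parse(new StringReader(line)).iterator().next();
// The quoted field comes back as a single value, comma included.
assert "flow_name,flow_name2".equals(record.get(2));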
Use of org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve in project hadoop by apache.
The class TestFileSystemTimelineReaderImpl, method testGetEntityDefaultView.
@Test
public void testGetEntityDefaultView() throws Exception {
  // If no fields are specified, entity is returned with default view i.e.
  // only the id, type and created time.
  TimelineEntity result = reader.getEntity(
      new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
          "app", "id_1"),
      new TimelineDataToRetrieve(null, null, null, null));
  Assert.assertEquals(
      new TimelineEntity.Identifier("app", "id_1").toString(),
      result.getIdentifier().toString());
  Assert.assertEquals((Long) 1425016502000L, result.getCreatedTime());
  Assert.assertEquals(0, result.getConfigs().size());
  Assert.assertEquals(0, result.getMetrics().size());
}
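The four nulls passed to TimelineDataToRetrieve are confsToRetrieve, metricsToRetrieve, fieldsToRetrieve and metricsLimit. A sketch (not part of the original test) of how the same lookup could request the full view instead, assuming imports of java.util.EnumSet and TimelineReader.Field:

// Request everything instead of the default (id, type, created time) view.
TimelineEntity full = reader.getEntity(
    new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
        "app", "id_1"),
    new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
// With Field.ALL requested, configs should now be populated
// (given the data written during test setup).
Assert.assertTrue(full.getConfigs().size() > 0);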
Use of org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve in project hadoop by apache.
The class TestFileSystemTimelineReaderImpl, method testGetEntitiesByRelations.
@Test
public void testGetEntitiesByRelations() throws Exception {
  // Get entities based on relatesTo.
  TimelineFilterList relatesTo = new TimelineFilterList(Operator.OR);
  Set<Object> relatesToIds =
      new HashSet<Object>(Arrays.asList((Object) "flow1"));
  relatesTo.addFilter(new TimelineKeyValuesFilter(
      TimelineCompareOp.EQUAL, "flow", relatesToIds));
  Set<TimelineEntity> result = reader.getEntities(
      new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
          "app", null),
      new TimelineEntityFilters(null, null, null, relatesTo, null, null,
          null, null, null),
      new TimelineDataToRetrieve());
  Assert.assertEquals(1, result.size());
  // Only one entity with ID id_1 should be returned.
  for (TimelineEntity entity : result) {
    if (!entity.getId().equals("id_1")) {
      Assert.fail("Incorrect filtering based on relatesTo");
    }
  }
  // Get entities based on isRelatedTo.
  TimelineFilterList isRelatedTo = new TimelineFilterList(Operator.OR);
  Set<Object> isRelatedToIds =
      new HashSet<Object>(Arrays.asList((Object) "tid1_2"));
  isRelatedTo.addFilter(new TimelineKeyValuesFilter(
      TimelineCompareOp.EQUAL, "type1", isRelatedToIds));
  result = reader.getEntities(
      new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
          "app", null),
      new TimelineEntityFilters(null, null, null, null, isRelatedTo, null,
          null, null, null),
      new TimelineDataToRetrieve());
  Assert.assertEquals(2, result.size());
  // Two entities with IDs id_1 and id_3 should be returned.
  for (TimelineEntity entity : result) {
    if (!entity.getId().equals("id_1") && !entity.getId().equals("id_3")) {
      Assert.fail("Incorrect filtering based on isRelatedTo");
    }
  }
}
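For these filters to match, the test data must have been written with the corresponding relations. A hedged sketch of how such an entity could be populated, using the values the filters above look for:

TimelineEntity entity = new TimelineEntity();
entity.setType("app");
entity.setId("id_1");
entity.setCreatedTime(1425016502000L);
// relatesTo: id_1 relates to a "flow" entity whose id is "flow1".
entity.addRelatesToEntity("flow", "flow1");
// isRelatedTo: a "type1" entity with id "tid1_2" is related to id_1.
entity.addIsRelatedToEntity("type1", "tid1_2");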
Use of org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve in project hadoop by apache.
The class TestFileSystemTimelineReaderImpl, method testGetEntityByClusterAndApp.
@Test
public void testGetEntityByClusterAndApp() throws Exception {
  // Cluster and AppId should be enough to get an entity.
  TimelineEntity result = reader.getEntity(
      new TimelineReaderContext("cluster1", null, null, null, "app1", "app",
          "id_1"),
      new TimelineDataToRetrieve(null, null, null, null));
  Assert.assertEquals(
      new TimelineEntity.Identifier("app", "id_1").toString(),
      result.getIdentifier().toString());
  Assert.assertEquals((Long) 1425016502000L, result.getCreatedTime());
  Assert.assertEquals(0, result.getConfigs().size());
  Assert.assertEquals(0, result.getMetrics().size());
}
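Besides fieldsToRetrieve, the first two TimelineDataToRetrieve arguments narrow configs and metrics by name. A sketch, assuming TimelinePrefixFilter from the same reader.filter package and a hypothetical "config_" key prefix:

// Fetch only configs whose keys start with the hypothetical prefix "config_".
TimelineFilterList confsToRetrieve = new TimelineFilterList(Operator.OR);
confsToRetrieve.addFilter(
    new TimelinePrefixFilter(TimelineCompareOp.EQUAL, "config_"));
TimelineEntity filtered = reader.getEntity(
    new TimelineReaderContext("cluster1", null, null, null, "app1", "app",
        "id_1"),
    new TimelineDataToRetrieve(confsToRetrieve, null, null, null));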
Use of org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve in project hadoop by apache.
The class FlowRunEntityReader, method constructFilterListBasedOnFields.
@Override
protected FilterList constructFilterListBasedOnFields() throws IOException {
  FilterList list = new FilterList(Operator.MUST_PASS_ONE);
  // By default fetch everything in the INFO column family.
  FamilyFilter infoColumnFamily = new FamilyFilter(CompareOp.EQUAL,
      new BinaryComparator(FlowRunColumnFamily.INFO.getBytes()));
  TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
  // Metrics are always returned if we are reading a single entity.
  if (!isSingleEntityRead()
      && !hasField(dataToRetrieve.getFieldsToRetrieve(), Field.METRICS)) {
    FilterList infoColFamilyList = new FilterList(Operator.MUST_PASS_ONE);
    infoColFamilyList.addFilter(infoColumnFamily);
    infoColFamilyList.addFilter(new QualifierFilter(CompareOp.NOT_EQUAL,
        new BinaryPrefixComparator(
            FlowRunColumnPrefix.METRIC.getColumnPrefixBytes(""))));
    list.addFilter(infoColFamilyList);
  } else {
    // Check if metricsToRetrieve is specified and, if it is, create a
    // filter list for the info column family by adding the flow run
    // table's fixed columns and a list for the metrics to retrieve.
    // Please note that fieldsToRetrieve will have METRICS added to it
    // if metricsToRetrieve is specified (in augmentParams()).
    TimelineFilterList metricsToRetrieve =
        dataToRetrieve.getMetricsToRetrieve();
    if (metricsToRetrieve != null
        && !metricsToRetrieve.getFilterList().isEmpty()) {
      FilterList infoColFamilyList = new FilterList();
      infoColFamilyList.addFilter(infoColumnFamily);
      FilterList columnsList = updateFixedColumns();
      columnsList.addFilter(TimelineFilterUtils.createHBaseFilterList(
          FlowRunColumnPrefix.METRIC, metricsToRetrieve));
      infoColFamilyList.addFilter(columnsList);
      list.addFilter(infoColFamilyList);
    }
  }
  return list;
}
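The FilterList operators determine how the sub-filters combine: MUST_PASS_ALL keeps a cell only if every sub-filter keeps it, while MUST_PASS_ONE keeps a cell if any sub-filter does. A self-contained sketch of the family-plus-qualifier composition pattern used above, with placeholder bytes standing in for FlowRunColumnFamily.INFO and the metric column prefix:

import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FamilyFilter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.util.Bytes;

// Placeholder bytes; the real code derives these from the schema classes.
byte[] infoFamily = Bytes.toBytes("i");
byte[] metricPrefix = Bytes.toBytes("m!");

// Intersection: a cell must be in the info family AND its qualifier must
// not start with the metric prefix, i.e. metric columns are excluded.
FilterList excludeMetrics = new FilterList(FilterList.Operator.MUST_PASS_ALL);
excludeMetrics.addFilter(
    new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(infoFamily)));
excludeMetrics.addFilter(new QualifierFilter(
    CompareOp.NOT_EQUAL, new BinaryPrefixComparator(metricPrefix)));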