Use of org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey in project hadoop by apache.
The class HBaseTimelineWriterImpl, method write.
/**
 * Stores the entire information in TimelineEntities to the timeline store.
 */
@Override
public TimelineWriteResponse write(String clusterId, String userId,
    String flowName, String flowVersion, long flowRunId, String appId,
    TimelineEntities data) throws IOException {
  TimelineWriteResponse putStatus = new TimelineWriteResponse();
  // defensive coding to avoid NPE during row key construction
  if ((flowName == null) || (appId == null) || (clusterId == null)
      || (userId == null)) {
    LOG.warn("Found null for one of: flowName=" + flowName + " appId=" + appId
        + " userId=" + userId + " clusterId=" + clusterId
        + " . Not proceeding with writing to hbase");
    return putStatus;
  }
  for (TimelineEntity te : data.getEntities()) {
    // a set can have at most 1 null
    if (te == null) {
      continue;
    }
    // if the entity is the application, the destination is the application
    // table
    boolean isApplication = isApplicationEntity(te);
    byte[] rowKey;
    if (isApplication) {
      ApplicationRowKey applicationRowKey =
          new ApplicationRowKey(clusterId, userId, flowName, flowRunId, appId);
      rowKey = applicationRowKey.getRowKey();
    } else {
      EntityRowKey entityRowKey = new EntityRowKey(clusterId, userId, flowName,
          flowRunId, appId, te.getType(), te.getId());
      rowKey = entityRowKey.getRowKey();
    }
    storeInfo(rowKey, te, flowVersion, isApplication);
    storeEvents(rowKey, te.getEvents(), isApplication);
    storeConfig(rowKey, te.getConfigs(), isApplication);
    storeMetrics(rowKey, te.getMetrics(), isApplication);
    storeRelations(rowKey, te, isApplication);
    if (isApplication) {
      TimelineEvent event = getApplicationEvent(te,
          ApplicationMetricsConstants.CREATED_EVENT_TYPE);
      FlowRunRowKey flowRunRowKey =
          new FlowRunRowKey(clusterId, userId, flowName, flowRunId);
      if (event != null) {
        AppToFlowRowKey appToFlowRowKey = new AppToFlowRowKey(clusterId, appId);
        onApplicationCreated(flowRunRowKey, appToFlowRowKey, appId, userId,
            flowVersion, te, event.getTimestamp());
      }
      // if it's an application entity, store metrics
      storeFlowMetricsAppRunning(flowRunRowKey, appId, te);
      // if the application has finished, store its finish time and write the
      // final values of all metrics
      event = getApplicationEvent(te,
          ApplicationMetricsConstants.FINISHED_EVENT_TYPE);
      if (event != null) {
        onApplicationFinished(flowRunRowKey, flowVersion, appId, te,
            event.getTimestamp());
      }
    }
  }
  return putStatus;
}
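For reference, a minimal sketch of the row key round trip the writer relies on; the literal values are invented, but the constructor argument order (clusterId, userId, flowName, flowRunId) and the getRowKey()/parseRowKey() pair come straight from the snippets on this page.

  // hypothetical values; real ones come from the writer's arguments
  FlowRunRowKey key =
      new FlowRunRowKey("test-cluster", "alice", "distributed-grep", 1002937L);
  byte[] bytes = key.getRowKey();          // encoded form used as the HBase row key
  FlowRunRowKey decoded = FlowRunRowKey.parseRowKey(bytes);
  assert decoded.getFlowRunId().longValue() == 1002937L; // components round-trip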
Use of org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey in project hadoop by apache.
The class FlowRunEntityReader, method getResult.
@Override
protected Result getResult(Configuration hbaseConf, Connection conn,
    FilterList filterList) throws IOException {
  TimelineReaderContext context = getContext();
  FlowRunRowKey flowRunRowKey = new FlowRunRowKey(context.getClusterId(),
      context.getUserId(), context.getFlowName(), context.getFlowRunId());
  byte[] rowKey = flowRunRowKey.getRowKey();
  Get get = new Get(rowKey);
  get.setMaxVersions(Integer.MAX_VALUE);
  if (filterList != null && !filterList.getFilters().isEmpty()) {
    get.setFilter(filterList);
  }
  return getTable().getResult(hbaseConf, conn, get);
}
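For context, a rough sketch of the same point lookup with the plain HBase client; the table name here is an assumption (the default flow run table is believed to be prod.timelineservice.flowrun, and deployments can override it), whereas the reader above goes through its typed getTable() wrapper:

  // table name is an assumed default, not taken from the snippet above
  Table table =
      conn.getTable(TableName.valueOf("prod.timelineservice.flowrun"));
  Get get = new Get(
      new FlowRunRowKey("cluster", "user", "flow", 42L).getRowKey());
  get.setMaxVersions(Integer.MAX_VALUE); // flow run cells keep many versions
  Result result = table.get(get);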
Use of org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey in project hadoop by apache.
The class FlowRunEntityReader, method parseEntity.
@Override
protected TimelineEntity parseEntity(Result result) throws IOException {
  TimelineReaderContext context = getContext();
  FlowRunEntity flowRun = new FlowRunEntity();
  flowRun.setUser(context.getUserId());
  flowRun.setName(context.getFlowName());
  if (isSingleEntityRead()) {
    flowRun.setRunId(context.getFlowRunId());
  } else {
    FlowRunRowKey rowKey = FlowRunRowKey.parseRowKey(result.getRow());
    flowRun.setRunId(rowKey.getFlowRunId());
  }
  // read the start time
  Long startTime = (Long) FlowRunColumn.MIN_START_TIME.readResult(result);
  if (startTime != null) {
    flowRun.setStartTime(startTime.longValue());
  }
  // read the end time if available
  Long endTime = (Long) FlowRunColumn.MAX_END_TIME.readResult(result);
  if (endTime != null) {
    flowRun.setMaxEndTime(endTime.longValue());
  }
  // read the flow version
  String version = (String) FlowRunColumn.FLOW_VERSION.readResult(result);
  if (version != null) {
    flowRun.setVersion(version);
  }
  // read metrics for a single-entity read, or when METRICS is among the
  // requested fieldsToRetrieve
  if (isSingleEntityRead()
      || hasField(getDataToRetrieve().getFieldsToRetrieve(), Field.METRICS)) {
    readMetrics(flowRun, result, FlowRunColumnPrefix.METRIC);
  }
  // set the id
  flowRun.setId(flowRun.getId());
  return flowRun;
}
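Note that the closing flowRun.setId(flowRun.getId()) is not the no-op it appears to be: in the Hadoop source, FlowRunEntity overrides getId() to lazily derive an id of the form user@flowName/runId from the fields populated above when none has been set explicitly, and setId() then stores that derived value on the entity.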
Use of org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey in project hadoop by apache.
The class FlowRunEntityReader, method getResults.
@Override
protected ResultScanner getResults(Configuration hbaseConf, Connection conn,
    FilterList filterList) throws IOException {
  Scan scan = new Scan();
  TimelineReaderContext context = getContext();
  RowKeyPrefix<FlowRunRowKey> flowRunRowKeyPrefix = new FlowRunRowKeyPrefix(
      context.getClusterId(), context.getUserId(), context.getFlowName());
  scan.setRowPrefixFilter(flowRunRowKeyPrefix.getRowKeyPrefix());
  FilterList newList = new FilterList();
  newList.addFilter(new PageFilter(getFilters().getLimit()));
  if (filterList != null && !filterList.getFilters().isEmpty()) {
    newList.addFilter(filterList);
  }
  scan.setFilter(newList);
  scan.setMaxVersions(Integer.MAX_VALUE);
  return getTable().getResultScanner(hbaseConf, conn, scan);
}
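One caveat worth flagging about the PageFilter above: HBase applies a PageFilter's limit per region server, not globally, so a scan spanning several regions can return more than getFilters().getLimit() rows. A minimal sketch of the client-side guard a caller would pair with this scanner (names follow the snippet; the parsing step is elided):

  // PageFilter is only a per-region-server bound, so re-check the limit here
  long limit = getFilters().getLimit();
  long count = 0;
  for (Result result : scanner) {
    if (++count > limit) {
      break;
    }
    // ... parse result into an entity ...
  }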
Use of org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey in project hadoop by apache.
The class TestRowKeys, method testFlowRunRowKey.
@Test
public void testFlowRunRowKey() {
  byte[] byteRowKey =
      new FlowRunRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID).getRowKey();
  FlowRunRowKey rowKey = FlowRunRowKey.parseRowKey(byteRowKey);
  assertEquals(CLUSTER, rowKey.getClusterId());
  assertEquals(USER, rowKey.getUserId());
  assertEquals(FLOW_NAME, rowKey.getFlowName());
  assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
  byte[] byteRowKeyPrefix =
      new FlowRunRowKey(CLUSTER, USER, FLOW_NAME, null).getRowKey();
  byte[][] splits = Separator.QUALIFIERS.split(byteRowKeyPrefix,
      new int[] { Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
          Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE });
  assertEquals(4, splits.length);
  assertEquals(0, splits[3].length);
  assertEquals(FLOW_NAME,
      Separator.QUALIFIERS.decode(Bytes.toString(splits[2])));
  verifyRowPrefixBytes(byteRowKeyPrefix);
}
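The second half of the test shows why a null run id matters: encoding a FlowRunRowKey without a run id leaves the fourth key segment empty, which is exactly the flow-level prefix that FlowRunRowKeyPrefix produces for the scan in getResults above. A minimal sketch, reusing the test's constants:

  // scans every run of the flow; same prefix construction as in getResults
  byte[] prefix =
      new FlowRunRowKeyPrefix(CLUSTER, USER, FLOW_NAME).getRowKeyPrefix();
  Scan scan = new Scan();
  scan.setRowPrefixFilter(prefix);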