Search in sources :

Example 1 with EntityRowKey

use of org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey in project hadoop by apache.

From the class TestHBaseTimelineStorageEntities, method isRowKeyCorrect:

/**
 * Decodes the given serialized row key and verifies that every component
 * matches the expected cluster/user/flow/run/app values and the type and id
 * of the supplied entity.
 *
 * @return {@code true} when all components match; otherwise a JUnit
 *         assertion error is raised before the return is reached
 */
private boolean isRowKeyCorrect(byte[] rowKey, String cluster, String user, String flow, Long runid, String appName, TimelineEntity te) {
    // Parse the raw bytes back into their typed components.
    EntityRowKey parsed = EntityRowKey.parseRowKey(rowKey);
    // Check each field in the same order the writer encoded it.
    assertEquals(user, parsed.getUserId());
    assertEquals(cluster, parsed.getClusterId());
    assertEquals(flow, parsed.getFlowName());
    assertEquals(runid, parsed.getFlowRunId());
    assertEquals(appName, parsed.getAppId());
    assertEquals(te.getType(), parsed.getEntityType());
    assertEquals(te.getId(), parsed.getEntityId());
    return true;
}
Also used : EntityRowKey(org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey)

Example 2 with EntityRowKey

use of org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey in project hadoop by apache.

From the class HBaseTimelineWriterImpl, method write:

/**
   * Stores the entire information in TimelineEntities to the timeline store.
   * Application entities are written to the application table (plus flow-run
   * bookkeeping); all other entities are written to the entity table.
   *
   * @param clusterId cluster the entities belong to; must be non-null
   * @param userId user that owns the flow; must be non-null
   * @param flowName name of the flow; must be non-null
   * @param flowVersion version string stored alongside the flow data
   * @param flowRunId run id used in the row key
   * @param appId application id; must be non-null
   * @param data the entities to persist; null elements are skipped
   * @return the (currently always empty) write response
   * @throws IOException if the underlying HBase writes fail
   */
@Override
public TimelineWriteResponse write(String clusterId, String userId, String flowName, String flowVersion, long flowRunId, String appId, TimelineEntities data) throws IOException {
    TimelineWriteResponse putStatus = new TimelineWriteResponse();
    // defensive coding to avoid NPE during row key construction;
    // a null component would make the composite row key unparseable
    if ((flowName == null) || (appId == null) || (clusterId == null) || (userId == null)) {
        LOG.warn("Found null for one of: flowName=" + flowName + " appId=" + appId + " userId=" + userId + " clusterId=" + clusterId + " . Not proceeding with writing to hbase");
        return putStatus;
    }
    for (TimelineEntity te : data.getEntities()) {
        // a set can have at most 1 null
        if (te == null) {
            continue;
        }
        // if the entity is the application, the destination is the application
        // table
        boolean isApplication = isApplicationEntity(te);
        byte[] rowKey;
        if (isApplication) {
            // application rows are keyed without entity type/id
            ApplicationRowKey applicationRowKey = new ApplicationRowKey(clusterId, userId, flowName, flowRunId, appId);
            rowKey = applicationRowKey.getRowKey();
        } else {
            // generic entities additionally carry their type and id in the key
            EntityRowKey entityRowKey = new EntityRowKey(clusterId, userId, flowName, flowRunId, appId, te.getType(), te.getId());
            rowKey = entityRowKey.getRowKey();
        }
        // persist each facet of the entity under the chosen row key
        storeInfo(rowKey, te, flowVersion, isApplication);
        storeEvents(rowKey, te.getEvents(), isApplication);
        storeConfig(rowKey, te.getConfigs(), isApplication);
        storeMetrics(rowKey, te.getMetrics(), isApplication);
        storeRelations(rowKey, te, isApplication);
        if (isApplication) {
            // application lifecycle events drive extra flow-level bookkeeping
            TimelineEvent event = getApplicationEvent(te, ApplicationMetricsConstants.CREATED_EVENT_TYPE);
            FlowRunRowKey flowRunRowKey = new FlowRunRowKey(clusterId, userId, flowName, flowRunId);
            if (event != null) {
                // record the app -> flow mapping and the run start time
                AppToFlowRowKey appToFlowRowKey = new AppToFlowRowKey(clusterId, appId);
                onApplicationCreated(flowRunRowKey, appToFlowRowKey, appId, userId, flowVersion, te, event.getTimestamp());
            }
            // if it's an application entity, store metrics
            storeFlowMetricsAppRunning(flowRunRowKey, appId, te);
            // if application has finished, store its finish time and write final
            // values of all metrics
            event = getApplicationEvent(te, ApplicationMetricsConstants.FINISHED_EVENT_TYPE);
            if (event != null) {
                onApplicationFinished(flowRunRowKey, flowVersion, appId, te, event.getTimestamp());
            }
        }
    }
    return putStatus;
}
Also used : TimelineEvent(org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent) EntityRowKey(org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey) TimelineWriteResponse(org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse) ApplicationRowKey(org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey) FlowRunRowKey(org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey) TimelineEntity(org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity) AppToFlowRowKey(org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey)

Example 3 with EntityRowKey

use of org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey in project hadoop by apache.

From the class TestRowKeys, method testEntityRowKey:

@Test
public void testEntityRowKey() {
    // Deliberately include '!' characters, which collide with the separator
    // byte, to exercise escaping in encode/decode.
    String entityId = "!ent!ity!!id!";
    String entityType = "entity!Type";
    // Round-trip a full row key: encode then parse, and verify every field.
    byte[] byteRowKey = new EntityRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID, APPLICATION_ID, entityType, entityId).getRowKey();
    EntityRowKey rowKey = EntityRowKey.parseRowKey(byteRowKey);
    assertEquals(CLUSTER, rowKey.getClusterId());
    assertEquals(USER, rowKey.getUserId());
    assertEquals(FLOW_NAME, rowKey.getFlowName());
    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
    assertEquals(APPLICATION_ID, rowKey.getAppId());
    assertEquals(entityType, rowKey.getEntityType());
    assertEquals(entityId, rowKey.getEntityId());
    // Prefix that includes the entity type: splitting it should yield 7
    // segments (cluster, user, flow, run-id, app-id, type, empty trailer).
    byte[] byteRowKeyPrefix = new EntityRowKeyPrefix(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID, APPLICATION_ID, entityType).getRowKeyPrefix();
    byte[][] splits = Separator.QUALIFIERS.split(byteRowKeyPrefix, new int[] { Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG, AppIdKeyConverter.getKeySize(), Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE });
    assertEquals(7, splits.length);
    // the prefix ends with a separator, so the last segment is empty
    assertEquals(0, splits[6].length);
    assertEquals(APPLICATION_ID, new AppIdKeyConverter().decode(splits[4]));
    assertEquals(entityType, Separator.QUALIFIERS.decode(Bytes.toString(splits[5])));
    verifyRowPrefixBytes(byteRowKeyPrefix);
    // Shorter prefix without the entity type: 6 segments expected.
    byteRowKeyPrefix = new EntityRowKeyPrefix(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID, APPLICATION_ID).getRowKeyPrefix();
    splits = Separator.QUALIFIERS.split(byteRowKeyPrefix, new int[] { Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG, AppIdKeyConverter.getKeySize(), Separator.VARIABLE_SIZE });
    assertEquals(6, splits.length);
    // trailing separator again yields an empty final segment
    assertEquals(0, splits[5].length);
    AppIdKeyConverter appIdKeyConverter = new AppIdKeyConverter();
    assertEquals(APPLICATION_ID, appIdKeyConverter.decode(splits[4]));
    verifyRowPrefixBytes(byteRowKeyPrefix);
}
Also used : EntityRowKey(org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey) EntityRowKeyPrefix(org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix) Test(org.junit.Test)

Example 4 with EntityRowKey

use of org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey in project hadoop by apache.

From the class GenericEntityReader, method getResults:

/**
 * Scans the entity table for all rows whose key starts with the
 * cluster/user/flow/run/app/type prefix derived from the current reader
 * context.
 */
@Override
protected ResultScanner getResults(Configuration hbaseConf, Connection conn, FilterList filterList) throws IOException {
    TimelineReaderContext ctx = getContext();
    // Build the row-key prefix that pins the scan to a single app and
    // a single entity type.
    RowKeyPrefix<EntityRowKey> prefix = new EntityRowKeyPrefix(ctx.getClusterId(), ctx.getUserId(), ctx.getFlowName(), ctx.getFlowRunId(), ctx.getAppId(), ctx.getEntityType());
    Scan scan = new Scan();
    scan.setRowPrefixFilter(prefix.getRowKeyPrefix());
    // Cap the number of cell versions to the requested metrics limit.
    scan.setMaxVersions(getDataToRetrieve().getMetricsLimit());
    boolean hasFilters = filterList != null && !filterList.getFilters().isEmpty();
    if (hasFilters) {
        scan.setFilter(filterList);
    }
    return getTable().getResultScanner(hbaseConf, conn, scan);
}
Also used : EntityRowKey(org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey) TimelineReaderContext(org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext) Scan(org.apache.hadoop.hbase.client.Scan) EntityRowKeyPrefix(org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix)

Example 5 with EntityRowKey

use of org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey in project hadoop by apache.

From the class GenericEntityReader, method getResult:

/**
 * Fetches the single entity row identified by the current reader context
 * (cluster, user, flow, run, app, entity type and entity id).
 */
@Override
protected Result getResult(Configuration hbaseConf, Connection conn, FilterList filterList) throws IOException {
    TimelineReaderContext ctx = getContext();
    // Construct the exact row key for the requested entity.
    EntityRowKey entityRowKey = new EntityRowKey(ctx.getClusterId(), ctx.getUserId(), ctx.getFlowName(), ctx.getFlowRunId(), ctx.getAppId(), ctx.getEntityType(), ctx.getEntityId());
    Get get = new Get(entityRowKey.getRowKey());
    // Cap the number of cell versions to the requested metrics limit.
    get.setMaxVersions(getDataToRetrieve().getMetricsLimit());
    boolean hasFilters = filterList != null && !filterList.getFilters().isEmpty();
    if (hasFilters) {
        get.setFilter(filterList);
    }
    return getTable().getResult(hbaseConf, conn, get);
}
Also used : EntityRowKey(org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey) TimelineReaderContext(org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext) Get(org.apache.hadoop.hbase.client.Get)

Aggregations

EntityRowKey (org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey)5 TimelineReaderContext (org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext)2 EntityRowKeyPrefix (org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix)2 Get (org.apache.hadoop.hbase.client.Get)1 Scan (org.apache.hadoop.hbase.client.Scan)1 TimelineEntity (org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity)1 TimelineEvent (org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent)1 TimelineWriteResponse (org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse)1 ApplicationRowKey (org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey)1 AppToFlowRowKey (org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey)1 FlowRunRowKey (org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey)1 Test (org.junit.Test)1