Search in sources :

Example 1 with StreamSizeScheduleState

Uses of co.cask.cdap.internal.app.runtime.schedule.StreamSizeScheduleState in project cdap by caskdata.

From the class DatasetBasedStreamSizeScheduleStore, method list.

/**
 * Lists every stream-size schedule persisted in the store together with its
 * run-state (base/last-run sizes and timestamps, active flag, properties).
 *
 * @return a list of all the schedules and their states present in the store
 * @throws InterruptedException if the transaction is interrupted
 * @throws TransactionFailureException if the transactional scan fails
 */
public synchronized List<StreamSizeScheduleState> list() throws InterruptedException, TransactionFailureException {
    final List<StreamSizeScheduleState> scheduleStates = Lists.newArrayList();
    factory.createExecutor(ImmutableList.of((TransactionAware) table)).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // Scan all rows under the schedule key prefix inside the transaction;
            // the scanner is closed automatically by try-with-resources.
            try (Scanner scanner = getScannerWithPrefix(table, KEY_PREFIX)) {
                for (Row entry = scanner.next(); entry != null; entry = scanner.next()) {
                    byte[] scheduleJson = entry.get(SCHEDULE_COL);
                    byte[] baseSizeRaw = entry.get(BASE_SIZE_COL);
                    byte[] baseTsRaw = entry.get(BASE_TS_COL);
                    byte[] lastRunSizeRaw = entry.get(LAST_RUN_SIZE_COL);
                    byte[] lastRunTsRaw = entry.get(LAST_RUN_TS_COL);
                    byte[] activeRaw = entry.get(ACTIVE_COL);
                    byte[] propertiesJson = entry.get(PROPERTIES_COL);
                    // Partial rows are skipped (logged at debug with rate limiting).
                    if (isInvalidRow(entry)) {
                        LIMITED_LOG.debug("Stream Sized Schedule entry with Row key {} does not have all columns.", Bytes.toString(entry.getRow()));
                        continue;
                    }
                    String[] keyParts = Bytes.toString(entry.getRow()).split(":");
                    ProgramId programId;
                    switch (keyParts.length) {
                        case 7:
                            // Versioned row key:
                            // streamSizeSchedule:namespace:application:version:type:program:schedule
                            programId = new ApplicationId(keyParts[1], keyParts[2], keyParts[3]).program(ProgramType.valueOf(keyParts[4]), keyParts[5]);
                            break;
                        case 6:
                            // Legacy (unversioned) row key without the version segment.
                            programId = new ApplicationId(keyParts[1], keyParts[2]).program(ProgramType.valueOf(keyParts[3]), keyParts[4]);
                            break;
                        default:
                            // Unrecognized key layout; ignore the row.
                            continue;
                    }
                    SchedulableProgramType schedulableType = programId.getType().getSchedulableType();
                    StreamSizeSchedule schedule = GSON.fromJson(Bytes.toString(scheduleJson), StreamSizeSchedule.class);
                    long baseSize = Bytes.toLong(baseSizeRaw);
                    long baseTs = Bytes.toLong(baseTsRaw);
                    long lastRunSize = Bytes.toLong(lastRunSizeRaw);
                    long lastRunTs = Bytes.toLong(lastRunTsRaw);
                    boolean active = Bytes.toBoolean(activeRaw);
                    // Properties column is optional; fall back to an empty mutable map.
                    Map<String, String> properties;
                    if (propertiesJson == null) {
                        properties = Maps.newHashMap();
                    } else {
                        properties = GSON.fromJson(Bytes.toString(propertiesJson), STRING_MAP_TYPE);
                    }
                    StreamSizeScheduleState scheduleState = new StreamSizeScheduleState(programId, schedulableType, schedule, properties, baseSize, baseTs, lastRunSize, lastRunTs, active);
                    scheduleStates.add(scheduleState);
                    LOG.debug("StreamSizeSchedule found in store: {}", scheduleState);
                }
            }
        }
    });
    return scheduleStates;
}
Also used : Scanner(co.cask.cdap.api.dataset.table.Scanner) TransactionExecutor(org.apache.tephra.TransactionExecutor) ProgramId(co.cask.cdap.proto.id.ProgramId) TransactionFailureException(org.apache.tephra.TransactionFailureException) TransactionNotInProgressException(org.apache.tephra.TransactionNotInProgressException) TransactionConflictException(org.apache.tephra.TransactionConflictException) DatasetManagementException(co.cask.cdap.api.dataset.DatasetManagementException) IOException(java.io.IOException) StreamSizeScheduleState(co.cask.cdap.internal.app.runtime.schedule.StreamSizeScheduleState) SchedulableProgramType(co.cask.cdap.api.schedule.SchedulableProgramType) Row(co.cask.cdap.api.dataset.table.Row) ApplicationId(co.cask.cdap.proto.id.ApplicationId) StreamSizeSchedule(co.cask.cdap.internal.schedule.StreamSizeSchedule) Map(java.util.Map)

Aggregations

DatasetManagementException (co.cask.cdap.api.dataset.DatasetManagementException)1 Row (co.cask.cdap.api.dataset.table.Row)1 Scanner (co.cask.cdap.api.dataset.table.Scanner)1 SchedulableProgramType (co.cask.cdap.api.schedule.SchedulableProgramType)1 StreamSizeScheduleState (co.cask.cdap.internal.app.runtime.schedule.StreamSizeScheduleState)1 StreamSizeSchedule (co.cask.cdap.internal.schedule.StreamSizeSchedule)1 ApplicationId (co.cask.cdap.proto.id.ApplicationId)1 ProgramId (co.cask.cdap.proto.id.ProgramId)1 IOException (java.io.IOException)1 Map (java.util.Map)1 TransactionConflictException (org.apache.tephra.TransactionConflictException)1 TransactionExecutor (org.apache.tephra.TransactionExecutor)1 TransactionFailureException (org.apache.tephra.TransactionFailureException)1 TransactionNotInProgressException (org.apache.tephra.TransactionNotInProgressException)1