Use of co.cask.cdap.internal.schedule.StreamSizeSchedule in project cdap by caskdata: the class Schedulers, method toProgramSchedule.
public static ProgramSchedule toProgramSchedule(ApplicationId appId, ScheduleSpecification spec) {
  Schedule schedule = spec.getSchedule();
  ProgramType programType = ProgramType.valueOfSchedulableType(spec.getProgram().getProgramType());
  ProgramId programId = appId.program(programType, spec.getProgram().getProgramName());
  Trigger trigger;
  if (schedule instanceof TimeSchedule) {
    TimeSchedule timeSchedule = (TimeSchedule) schedule;
    trigger = new TimeTrigger(timeSchedule.getCronEntry());
  } else {
    StreamSizeSchedule streamSchedule = (StreamSizeSchedule) schedule;
    StreamId streamId = programId.getNamespaceId().stream(streamSchedule.getStreamName());
    trigger = new StreamSizeTrigger(streamId, streamSchedule.getDataTriggerMB());
  }
  Integer maxConcurrentRuns = schedule.getRunConstraints().getMaxConcurrentRuns();
  List<Constraint> constraints = maxConcurrentRuns == null
    ? ImmutableList.<Constraint>of()
    : ImmutableList.<Constraint>of(new ConcurrencyConstraint(maxConcurrentRuns));
  return new ProgramSchedule(schedule.getName(), schedule.getDescription(), programId, spec.getProperties(),
                             trigger, constraints);
}
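As a usage sketch, converting a deprecated stream-size specification into a ProgramSchedule might look like the following. The application, stream, and program names are hypothetical; the StreamSizeSchedule and ScheduleSpecification constructors are assumed to match the usages shown elsewhere on this page, and ScheduleProgramInfo is assumed to be the type returned by spec.getProgram().

// Hypothetical application and schedule; constructor shapes inferred from the
// other snippets on this page.
ApplicationId appId = new ApplicationId("default", "PurchaseApp");
// Fire once 10 MB of new data have been written to the "purchases" stream.
StreamSizeSchedule schedule =
  new StreamSizeSchedule("tenMbSchedule", "Runs on every 10 MB of data", "purchases", 10, RunConstraints.NONE);
ScheduleSpecification spec = new ScheduleSpecification(
  schedule,
  new ScheduleProgramInfo(SchedulableProgramType.WORKFLOW, "PurchaseWorkflow"),
  ImmutableMap.<String, String>of());
ProgramSchedule programSchedule = Schedulers.toProgramSchedule(appId, spec);
// programSchedule now carries a StreamSizeTrigger for stream "purchases" in namespace "default".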
Use of co.cask.cdap.internal.schedule.StreamSizeSchedule in project cdap by caskdata: the class DatasetBasedStreamSizeScheduleStore, method list.
/**
 * @return a list of all the schedules and their states present in the store
 */
public synchronized List<StreamSizeScheduleState> list() throws InterruptedException, TransactionFailureException {
  final List<StreamSizeScheduleState> scheduleStates = Lists.newArrayList();
  factory.createExecutor(ImmutableList.of((TransactionAware) table)).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      try (Scanner scan = getScannerWithPrefix(table, KEY_PREFIX)) {
        Row row;
        while ((row = scan.next()) != null) {
          byte[] scheduleBytes = row.get(SCHEDULE_COL);
          byte[] baseSizeBytes = row.get(BASE_SIZE_COL);
          byte[] baseTsBytes = row.get(BASE_TS_COL);
          byte[] lastRunSizeBytes = row.get(LAST_RUN_SIZE_COL);
          byte[] lastRunTsBytes = row.get(LAST_RUN_TS_COL);
          byte[] activeBytes = row.get(ACTIVE_COL);
          byte[] propertyBytes = row.get(PROPERTIES_COL);
          if (isInvalidRow(row)) {
            LIMITED_LOG.debug("StreamSizeSchedule entry with row key {} does not have all columns.",
                              Bytes.toString(row.getRow()));
            continue;
          }
          String rowKey = Bytes.toString(row.getRow());
          String[] splits = rowKey.split(":");
          ProgramId program;
          if (splits.length == 7) {
            // New row key format, including the application version:
            // streamSizeSchedule:namespace:application:version:type:program:schedule
            program = new ApplicationId(splits[1], splits[2], splits[3])
              .program(ProgramType.valueOf(splits[4]), splits[5]);
          } else if (splits.length == 6) {
            // Old row key format, without the application version:
            // streamSizeSchedule:namespace:application:type:program:schedule
            program = new ApplicationId(splits[1], splits[2]).program(ProgramType.valueOf(splits[3]), splits[4]);
          } else {
            // Unrecognized row key layout; skip the entry.
            continue;
          }
          SchedulableProgramType programType = program.getType().getSchedulableType();
          StreamSizeSchedule schedule = GSON.fromJson(Bytes.toString(scheduleBytes), StreamSizeSchedule.class);
          long baseSize = Bytes.toLong(baseSizeBytes);
          long baseTs = Bytes.toLong(baseTsBytes);
          long lastRunSize = Bytes.toLong(lastRunSizeBytes);
          long lastRunTs = Bytes.toLong(lastRunTsBytes);
          boolean active = Bytes.toBoolean(activeBytes);
          Map<String, String> properties = Maps.newHashMap();
          if (propertyBytes != null) {
            properties = GSON.fromJson(Bytes.toString(propertyBytes), STRING_MAP_TYPE);
          }
          StreamSizeScheduleState scheduleState =
            new StreamSizeScheduleState(program, programType, schedule, properties, baseSize, baseTs,
                                        lastRunSize, lastRunTs, active);
          scheduleStates.add(scheduleState);
          LOG.debug("StreamSizeSchedule found in store: {}", scheduleState);
        }
      }
    }
  });
  return scheduleStates;
}
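The row-key parsing is the subtle part of this method. The following self-contained sketch (plain Java, with made-up key values) shows how the two accepted layouts split; the field positions come directly from the branches above.

public static void main(String[] args) {
  // 7 splits: [prefix, namespace, application, version, program type, program, schedule name]
  String newKey = "streamSizeSchedule:default:PurchaseApp:1.0.0:WORKFLOW:PurchaseWorkflow:tenMbSchedule";
  // 6 splits: [prefix, namespace, application, program type, program, schedule name]
  String oldKey = "streamSizeSchedule:default:PurchaseApp:WORKFLOW:PurchaseWorkflow:tenMbSchedule";
  for (String key : new String[] { newKey, oldKey }) {
    String[] splits = key.split(":");
    boolean versioned = splits.length == 7;
    String namespace = splits[1];
    String application = splits[2];
    // The version field is only present in the 7-part (new) layout.
    String version = versioned ? splits[3] : "(default)";
    String programType = versioned ? splits[4] : splits[3];
    String programName = versioned ? splits[5] : splits[4];
    System.out.printf("namespace=%s app=%s version=%s type=%s program=%s%n",
                      namespace, application, version, programType, programName);
  }
}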
Use of co.cask.cdap.internal.schedule.StreamSizeSchedule in project cdap by caskdata: the class ScheduleDetail, method toScheduleSpec.
/**
 * Return an equivalent schedule specification, or null if there is no equivalent one.
 */
@Deprecated
@Nullable
public ScheduleSpecification toScheduleSpec() {
  RunConstraints constraints = RunConstraints.NONE;
  if (getConstraints() != null) {
    for (Constraint runConstraint : getConstraints()) {
      if (runConstraint instanceof ProtoConstraint.ConcurrencyConstraint) {
        constraints = new RunConstraints(((ProtoConstraint.ConcurrencyConstraint) runConstraint).getMaxConcurrency());
        break;
      }
    }
  }
  Schedule schedule;
  if (getTrigger() instanceof ProtoTrigger.TimeTrigger) {
    ProtoTrigger.TimeTrigger trigger = (ProtoTrigger.TimeTrigger) getTrigger();
    schedule = new TimeSchedule(getName(), getDescription(), trigger.getCronExpression(), constraints);
  } else if (getTrigger() instanceof ProtoTrigger.StreamSizeTrigger) {
    ProtoTrigger.StreamSizeTrigger trigger = (ProtoTrigger.StreamSizeTrigger) getTrigger();
    schedule = new StreamSizeSchedule(getName(), getDescription(), trigger.getStreamId().getStream(),
                                      trigger.getTriggerMB(), constraints);
  } else {
    return null;
  }
  return new ScheduleSpecification(schedule, getProgram(), getProperties());
}
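A brief sketch of calling this deprecated conversion; the detail variable is assumed to come from elsewhere (for example, a deserialized schedule listing), and only methods shown on this page are used.

ScheduleSpecification spec = detail.toScheduleSpec();
if (spec == null) {
  // The trigger is neither time- nor stream-size-based, so the deprecated
  // ScheduleSpecification API cannot represent it.
} else if (spec.getSchedule() instanceof StreamSizeSchedule) {
  StreamSizeSchedule streamSchedule = (StreamSizeSchedule) spec.getSchedule();
  System.out.println("Fires after " + streamSchedule.getDataTriggerMB()
                     + " MB on stream " + streamSchedule.getStreamName());
}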