Use of co.cask.cdap.api.data.schema.UnsupportedTypeException in project cdap by caskdata.
In the class DelimitedStringsRecordFormat, the method validateSchema:
@Override
protected void validateSchema(Schema desiredSchema) throws UnsupportedTypeException {
  // A valid schema is a record of simple types. In other words, no maps, arrays, records, unions, or enums
  // are allowed. The one exception is the very last field, which is allowed to be an array of simple types.
  // These types may be nullable, which is a union of a null and a non-null type.
  Iterator<Schema.Field> fields = desiredSchema.getFields().iterator();
  // check that each field is a simple field, except for the very last field, which can be an array of simple types.
  while (fields.hasNext()) {
    Schema.Field field = fields.next();
    Schema schema = field.getSchema();
    // each field must be a simple type or a nullable simple type...
    boolean isSimple = schema.getType().isSimpleType();
    boolean isNullableSimple = schema.isNullableSimple();
    if (!isSimple && !isNullableSimple) {
      // ...unless it is the very last field and a string array, in which case it is valid.
      if (fields.hasNext() || !isStringArray(schema)) {
        throw new UnsupportedTypeException("Field " + field.getName() + " is of invalid type.");
      }
    }
  }
}
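For context, the sketch below is not part of the CDAP sources; it shows schemas that would pass and fail this validation, assuming CDAP's standard Schema factory methods (Schema.recordOf, Schema.Field.of, Schema.of, Schema.nullableOf, Schema.arrayOf):

// Hypothetical illustration of schemas accepted and rejected by validateSchema.
// Passes: simple and nullable-simple fields, plus a trailing string array.
Schema valid = Schema.recordOf("rec",
  Schema.Field.of("id", Schema.of(Schema.Type.LONG)),
  Schema.Field.of("name", Schema.nullableOf(Schema.of(Schema.Type.STRING))),
  Schema.Field.of("rest", Schema.arrayOf(Schema.of(Schema.Type.STRING))));
// Fails: the string array is not the last field, so validateSchema
// would throw UnsupportedTypeException for the "rest" field.
Schema invalid = Schema.recordOf("rec",
  Schema.Field.of("rest", Schema.arrayOf(Schema.of(Schema.Type.STRING))),
  Schema.Field.of("id", Schema.of(Schema.Type.LONG)));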
Use of co.cask.cdap.api.data.schema.UnsupportedTypeException in project cdap by caskdata.
In the class AppWithStreamSizeSchedule, the method configure:
@Override
public void configure() {
  try {
    setName("AppWithStreamSizeSchedule");
    setDescription("Sample application");
    ObjectStores.createObjectStore(getConfigurer(), "input", String.class);
    ObjectStores.createObjectStore(getConfigurer(), "output", String.class);
    addWorkflow(new SampleWorkflow());
    addStream(new Stream("stream"));
    scheduleWorkflow(Schedules.builder("SampleSchedule1").createDataSchedule(Schedules.Source.STREAM, "stream", 1),
                     "SampleWorkflow", SCHEDULE_PROPS);
    scheduleWorkflow(Schedules.builder("SampleSchedule2").createDataSchedule(Schedules.Source.STREAM, "stream", 2),
                     "SampleWorkflow", SCHEDULE_PROPS);
  } catch (UnsupportedTypeException e) {
    throw Throwables.propagate(e);
  }
}
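SCHEDULE_PROPS is a constant defined elsewhere in the class and not shown in this snippet. The stand-in below is purely hypothetical; it only illustrates that schedule properties are plain string key/value pairs made available to each triggered workflow run:

// Hypothetical stand-in for the SCHEDULE_PROPS constant referenced above.
private static final Map<String, String> SCHEDULE_PROPS = ImmutableMap.of(
  "oneKey", "oneValue",
  "anotherKey", "anotherValue");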
Use of co.cask.cdap.api.data.schema.UnsupportedTypeException in project cdap by caskdata.
In the class AppWithWorkflow, the method configure:
@Override
public void configure() {
  try {
    setName(NAME);
    setDescription("Sample application");
    addStream(new Stream("stream"));
    ObjectStores.createObjectStore(getConfigurer(), "input", String.class);
    ObjectStores.createObjectStore(getConfigurer(), "output", String.class);
    addMapReduce(new WordCountMapReduce());
    addWorkflow(new SampleWorkflow());
  } catch (UnsupportedTypeException e) {
    throw Throwables.propagate(e);
  }
}
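The catch block exists because createObjectStore declares the checked UnsupportedTypeException, which is thrown when the stored type cannot be (de)serialized. A hedged sketch, assuming the stored type must be a concrete class, of a call that would actually trigger it:

// Hypothetical: an interface cannot be reflected into a schema, so this call
// would throw UnsupportedTypeException. String.class, as used above, is a
// concrete class, which makes the catch block effectively unreachable.
ObjectStores.createObjectStore(getConfigurer(), "bad", Runnable.class);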
Use of co.cask.cdap.api.data.schema.UnsupportedTypeException in project cdap by caskdata.
In the class SparkKMeansApp, the method configure:
@Override
public void configure() {
  setName("SparkKMeans");
  setDescription("Spark KMeans app");
  // Ingest data into the Application via a Stream
  addStream(new Stream("pointsStream"));
  // Process points data in real-time using a Flow
  addFlow(new PointsFlow());
  // Run a Spark program on the acquired data
  addSpark(new SparkKMeansSpecification());
  // Retrieve the processed data using a Service
  addService(new CentersService());
  // Store input and processed data in ObjectStore Datasets
  try {
    ObjectStores.createObjectStore(getConfigurer(), "points", Point.class,
                                   DatasetProperties.builder().setDescription("Store points data").build());
    ObjectStores.createObjectStore(getConfigurer(), "centers", String.class,
                                   DatasetProperties.builder().setDescription("Store centers data").build());
  } catch (UnsupportedTypeException e) {
    // This cannot happen: createObjectStore only throws if the parameter type
    // cannot be (de)serialized, and both Point and String are actual classes.
    throw new RuntimeException(e);
  }
}
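The Point class is referenced here but not shown. A minimal, purely illustrative sketch of a concrete class that the reflection-based schema generation could handle:

// Hypothetical stand-in for the Point class stored in the "points" ObjectStore;
// any concrete class whose fields are themselves serializable would do.
public class Point {
  double x;
  double y;
  double z;
}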
Use of co.cask.cdap.api.data.schema.UnsupportedTypeException in project cdap by caskdata.
In the class SparkPageRankApp, the method configure:
@Override
public void configure() {
  setName("SparkPageRank");
  setDescription("Spark page rank application.");
  // Ingest data into the Application via a Stream
  addStream(new Stream(BACKLINK_URL_STREAM));
  // Run a Spark program on the acquired data
  addSpark(new PageRankSpark());
  // Run a MapReduce program on data emitted by the Spark program
  addMapReduce(new RanksCounter());
  // Run Spark followed by the MapReduce in a Workflow
  addWorkflow(new PageRankWorkflow());
  // Service to retrieve the processed data
  addService(SERVICE_HANDLERS, new SparkPageRankServiceHandler());
  // Store input and processed data in ObjectStore Datasets
  try {
    ObjectStores.createObjectStore(getConfigurer(), "ranks", Integer.class,
                                   DatasetProperties.builder().setDescription("Ranks Dataset").build());
    ObjectStores.createObjectStore(getConfigurer(), "rankscount", Integer.class,
                                   DatasetProperties.builder().setDescription("Ranks Count Dataset").build());
  } catch (UnsupportedTypeException e) {
    // This cannot happen: createObjectStore only throws if the parameter type
    // cannot be (de)serialized, and Integer is an actual class.
    throw new RuntimeException(e);
  }
}
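For completeness, a hedged sketch of how a service handler might read one of these ObjectStore datasets back; the endpoint, field name, and key scheme are illustrative and not taken from SparkPageRankServiceHandler:

// Hypothetical handler method; assumes the standard CDAP HttpServiceHandler
// annotations and the byte[]-keyed ObjectStore read API.
@UseDataSet("ranks")
private ObjectStore<Integer> ranks;

@GET
@Path("rank/{url}")
public void getRank(HttpServiceRequest request, HttpServiceResponder responder,
                    @PathParam("url") String url) {
  Integer rank = ranks.read(Bytes.toBytes(url));
  if (rank == null) {
    responder.sendError(404, "No rank known for " + url);
  } else {
    responder.sendString(String.valueOf(rank));
  }
}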