Use of org.apache.spark.ml.PipelineModel in the jpmml-sparkml project (by jpmml): class ConverterUtil, method getTransformers.
/**
 * Flattens a fitted pipeline into the ordered list of its leaf-level transformers.
 *
 * <p>Composite transformers ({@code PipelineModel}, {@code CrossValidatorModel},
 * {@code TrainValidationSplitModel}) are expanded in place until only leaf
 * transformers remain, preserving the original stage order.
 *
 * @param pipelineModel The fitted pipeline to flatten.
 * @return The leaf transformers, in pipeline stage order.
 */
private static Iterable<Transformer> getTransformers(PipelineModel pipelineModel) {
    List<Transformer> transformers = new ArrayList<>();
    transformers.add(pipelineModel);

    // Repeatedly scan the list, replacing each composite transformer with its
    // children, until a full pass makes no changes (fixed point).
    boolean modified;
    do {
        modified = false;
        ListIterator<Transformer> transformerIt = transformers.listIterator();
        while (transformerIt.hasNext()) {
            Transformer transformer = transformerIt.next();
            List<Transformer> childTransformers = getChildTransformers(transformer);
            if (childTransformers != null) {
                // Replace the composite transformer with its children, in place
                transformerIt.remove();
                for (Transformer childTransformer : childTransformers) {
                    transformerIt.add(childTransformer);
                }
                modified = true;
            }
        }
    } while (modified);

    return transformers;
}

/**
 * Returns the child transformers of a composite transformer,
 * or {@code null} if the argument is a leaf transformer.
 *
 * @param transformer The transformer to expand.
 * @return The child transformers, or {@code null} for a leaf.
 */
private static List<Transformer> getChildTransformers(Transformer transformer) {
    if (transformer instanceof PipelineModel) {
        PipelineModel pipelineModel = (PipelineModel) transformer;
        return Arrays.asList(pipelineModel.stages());
    } else if (transformer instanceof CrossValidatorModel) {
        CrossValidatorModel crossValidatorModel = (CrossValidatorModel) transformer;
        // Only the winning model of the cross-validation is converted
        return Collections.<Transformer>singletonList(crossValidatorModel.bestModel());
    } else if (transformer instanceof TrainValidationSplitModel) {
        TrainValidationSplitModel trainValidationSplitModel = (TrainValidationSplitModel) transformer;
        return Collections.<Transformer>singletonList(trainValidationSplitModel.bestModel());
    }
    return null;
}
Use of org.apache.spark.ml.PipelineModel in the jpmml-sparkml project (by jpmml): class ConverterUtil, method toPMML.
/**
 * Converts a fitted pipeline to a PMML document.
 *
 * <p>Each pipeline stage is mapped to a converter: feature converters register
 * derived fields with the encoder, model converters contribute PMML models.
 * A single model becomes the root model directly; multiple models are chained
 * into a {@code MiningModel} whose mining schema collects the target fields of
 * all member models.
 *
 * @param schema The schema of the input dataset.
 * @param pipelineModel The fitted pipeline.
 * @return The PMML document.
 * @throws IllegalArgumentException If a stage has an unsupported converter type,
 *   or if the pipeline contains no models at all.
 */
public static PMML toPMML(StructType schema, PipelineModel pipelineModel) {
    checkVersion();

    SparkMLEncoder encoder = new SparkMLEncoder(schema);

    List<org.dmg.pmml.Model> models = new ArrayList<>();

    for (Transformer transformer : getTransformers(pipelineModel)) {
        TransformerConverter<?> converter = ConverterUtil.createConverter(transformer);

        if (converter instanceof FeatureConverter) {
            ((FeatureConverter<?>) converter).registerFeatures(encoder);
        } else if (converter instanceof ModelConverter) {
            models.add(((ModelConverter<?>) converter).registerModel(encoder));
        } else {
            throw new IllegalArgumentException("Expected a " + FeatureConverter.class.getName() + " or " + ModelConverter.class.getName() + " instance, got " + converter);
        }
    }

    if (models.isEmpty()) {
        throw new IllegalArgumentException("Expected a pipeline with one or more models, got a pipeline with zero models");
    }

    org.dmg.pmml.Model rootModel;

    if (models.size() == 1) {
        rootModel = Iterables.getOnlyElement(models);
    } else {
        // Collect the target fields of every member model into the chain's mining schema
        List<MiningField> targetMiningFields = new ArrayList<>();

        for (org.dmg.pmml.Model model : models) {
            MiningSchema miningSchema = model.getMiningSchema();

            for (MiningField miningField : miningSchema.getMiningFields()) {
                MiningField.UsageType usageType = miningField.getUsageType();

                boolean isTarget = MiningField.UsageType.PREDICTED.equals(usageType) || MiningField.UsageType.TARGET.equals(usageType);
                if (isTarget) {
                    targetMiningFields.add(miningField);
                }
            }
        }

        rootModel = MiningModelUtil.createModelChain(models, new Schema(null, Collections.<Feature>emptyList()))
            .setMiningSchema(new MiningSchema(targetMiningFields));
    }

    return encoder.encodePMML(rootModel);
}
Use of org.apache.spark.ml.PipelineModel in the jpmml-sparkml project (by jpmml): class ConverterTest, method createBatch.
/**
 * Creates an integration test batch that loads a serialized pipeline and its
 * dataset schema from classpath resources, and converts them to PMML on demand.
 *
 * @param name The algorithm name (resource prefix).
 * @param dataset The dataset name (resource suffix).
 * @param predicate A field filter, or {@code null}; the prediction columns are
 *   always excluded, because they are not part of the PMML contract.
 * @return The batch.
 */
@Override
protected ArchiveBatch createBatch(String name, String dataset, Predicate<FieldName> predicate) {
    // The "prediction" columns are Apache Spark ML implementation details
    Predicate<FieldName> excludePredictionFields = excludeFields(FieldName.create("prediction"), FieldName.create("pmml(prediction)"));
    if (predicate == null) {
        predicate = excludePredictionFields;
    } else {
        predicate = Predicates.and(predicate, excludePredictionFields);
    }

    ArchiveBatch result = new IntegrationTestBatch(name, dataset, predicate) {

        @Override
        public IntegrationTest getIntegrationTest() {
            return ConverterTest.this;
        }

        @Override
        public PMML getPMML() throws Exception {
            // Load the dataset schema from a JSON resource
            StructType schema;
            try (InputStream is = open("/schema/" + getDataset() + ".json")) {
                String json = CharStreams.toString(new InputStreamReader(is, "UTF-8"));
                schema = (StructType) DataType.fromJson(json);
            }

            // Copy the zipped pipeline resource to a temporary file, unpack it,
            // and load it via Spark's MLReader
            PipelineModel pipelineModel;
            try (InputStream is = open("/pipeline/" + getName() + getDataset() + ".zip")) {
                File tmpZipFile = File.createTempFile(getName() + getDataset(), ".zip");
                // Best-effort cleanup; these temporary files were previously leaked
                tmpZipFile.deleteOnExit();
                try (OutputStream os = new FileOutputStream(tmpZipFile)) {
                    ByteStreams.copy(is, os);
                }

                // Turn a unique temporary file path into a temporary directory
                File tmpDir = File.createTempFile(getName() + getDataset(), "");
                if (!tmpDir.delete()) {
                    throw new IOException("Failed to delete temporary file " + tmpDir.getAbsolutePath());
                }
                tmpDir.deleteOnExit();

                ZipUtil.uncompress(tmpZipFile, tmpDir);

                MLReader<PipelineModel> mlReader = new PipelineModel.PipelineModelReader();
                mlReader.session(ConverterTest.sparkSession);

                pipelineModel = mlReader.load(tmpDir.getAbsolutePath());
            }

            PMML pmml = ConverterUtil.toPMML(schema, pipelineModel);
            ensureValidity(pmml);
            return pmml;
        }
    };

    return result;
}
Aggregations