Use of com.linkedin.data.schema.RecordDataSchema in project rest.li by linkedin.
The class SchemaSampleDataGenerator, method buildData.
private static Object buildData(ParentSchemas parentSchemas, DataSchema schema, String fieldName, DataGenerationOptions spec) {
  spec = preventRecursionIntoAlreadyTraversedSchemas(parentSchemas, spec, schema);
  parentSchemas.incrementReferences(schema);
  final DataSchema derefSchema = schema.getDereferencedDataSchema();
  final SampleDataCallback callback = spec.getCallback();
  Object data = null;
  switch (derefSchema.getType()) {
    case BOOLEAN:
      data = callback.getBoolean(fieldName);
      break;
    case INT:
      data = callback.getInteger(fieldName);
      break;
    case LONG:
      data = callback.getLong(fieldName);
      break;
    case FLOAT:
      data = callback.getFloat(fieldName);
      break;
    case DOUBLE:
      data = callback.getDouble(fieldName);
      break;
    case BYTES:
      data = callback.getBytes(fieldName);
      break;
    case STRING:
      data = callback.getString(fieldName);
      break;
    case NULL:
      data = Data.NULL;
      break;
    case FIXED:
      data = callback.getFixed(fieldName, (FixedDataSchema) derefSchema);
      break;
    case ENUM:
      data = callback.getEnum(fieldName, (EnumDataSchema) derefSchema);
      break;
    case ARRAY:
      final DataList dataList = new DataList(spec.getArraySize());
      for (int i = 0; i < spec.getArraySize(); i++) {
        final Object item = buildData(parentSchemas, ((ArrayDataSchema) derefSchema).getItems(), fieldName, spec);
        dataList.add(item);
      }
      data = dataList;
      break;
    case RECORD:
      data = buildRecordData(parentSchemas, (RecordDataSchema) derefSchema, spec);
      break;
    case MAP:
      final DataMap dataMap = new DataMap();
      for (int i = 0; i < spec.getArraySize(); i++) {
        final Object item = buildData(parentSchemas, ((MapDataSchema) derefSchema).getValues(), fieldName, spec);
        dataMap.put("mapField_" + _random.nextInt(), item);
      }
      data = dataMap;
      break;
    case UNION:
      final UnionDataSchema unionSchema = (UnionDataSchema) derefSchema;
      // Pick a random member among those not already being traversed, to avoid infinite recursion.
      final List<UnionDataSchema.Member> members = removeAlreadyTraversedSchemasFromUnionMemberList(parentSchemas, unionSchema.getMembers());
      final int unionIndex = _random.nextInt(members.size());
      final UnionDataSchema.Member unionMember = members.get(unionIndex);
      data = buildData(parentSchemas, unionMember.getType(), fieldName, spec);
      if (data != null) {
        // Wrap the value in the single-entry map that represents a union in the data layer.
        final DataMap unionMap = new DataMap();
        unionMap.put(unionMember.getUnionMemberKey(), data);
        data = unionMap;
      }
      break;
    case TYPEREF:
      data = buildData(parentSchemas, derefSchema, fieldName, spec);
      break;
  }
  parentSchemas.decrementReferences(schema);
  return data;
}
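For context, a minimal sketch of driving this generator end to end. It assumes a public static buildRecordData(RecordDataSchema, DataGenerationOptions) entry point wrapping the private buildData above, and a no-arg DataGenerationOptions constructor; the Greeting schema is purely illustrative, and the import for SchemaSampleDataGenerator is elided since its package varies by rest.li version.

import com.linkedin.data.DataMap;
import com.linkedin.data.schema.RecordDataSchema;
import com.linkedin.data.template.DataTemplateUtil;

public class SampleDataDemo {
  public static void main(String[] args) {
    // Illustrative schema; any record schema works here.
    final String pdsc = "{ \"type\" : \"record\", \"name\" : \"Greeting\", "
        + "\"fields\" : [ { \"name\" : \"message\", \"type\" : \"string\" } ] }";
    final RecordDataSchema schema = (RecordDataSchema) DataTemplateUtil.parseSchema(pdsc);
    // buildRecordData recurses through buildData once per field.
    final DataMap sample = SchemaSampleDataGenerator.buildRecordData(
        schema, new SchemaSampleDataGenerator.DataGenerationOptions());
    System.out.println(sample); // e.g. {message=<generated string>}
  }
}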
Use of com.linkedin.data.schema.RecordDataSchema in project rest.li by linkedin.
The class SchemaSampleDataGenerator, method buildDataMap.
private DataMap buildDataMap(ParentSchemas parentSchemas, String pegasusDataSchemaName, DataGenerationOptions spec) {
  final DataSchema schema = _schemaParser.lookupName(pegasusDataSchemaName);
  // Fail fast before touching the schema: the lookup returns null for unknown names,
  // so this check must precede preventRecursion... and incrementReferences.
  if (schema == null) {
    throw new IllegalArgumentException(String.format("Could not find pegasus data schema '%s'", pegasusDataSchemaName));
  }
  spec = preventRecursionIntoAlreadyTraversedSchemas(parentSchemas, spec, schema);
  parentSchemas.incrementReferences(schema);
  assert (schema instanceof RecordDataSchema);
  final DataMap data = buildRecordData(parentSchemas, (RecordDataSchema) schema, spec);
  parentSchemas.decrementReferences(schema);
  return data;
}
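A hypothetical caller inside the same class, showing the failure mode that IllegalArgumentException guards. The schema names are made up, and a no-arg ParentSchemas constructor is assumed:

// A resolvable, record-typed name yields a populated DataMap.
final DataMap greeting = buildDataMap(new ParentSchemas(),
    "com.example.Greeting", new DataGenerationOptions());

// An unknown name throws rather than returning null.
try {
  buildDataMap(new ParentSchemas(), "com.example.NoSuchSchema", new DataGenerationOptions());
} catch (IllegalArgumentException e) {
  // "Could not find pegasus data schema 'com.example.NoSuchSchema'"
}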
Use of com.linkedin.data.schema.RecordDataSchema in project rest.li by linkedin.
The class PdlSchemaParser, method parseRecord.
private RecordDataSchema parseRecord(NamedTypeDeclarationContext context, RecordDeclarationContext record) throws ParseException {
  Name name = toName(record.name);
  RecordDataSchema schema = new RecordDataSchema(name, RecordDataSchema.RecordType.RECORD);
  getResolver().addPendingSchema(schema.getFullName());
  try {
    setDocAndProperties(context, schema);
    bindNameToSchema(name, schema.getAliases(), schema);
    FieldsAndIncludes fieldsAndIncludes = parseIncludes(schema, record.beforeIncludes);
    boolean hasBeforeIncludes = fieldsAndIncludes.includes.size() > 0;
    fieldsAndIncludes.fields.addAll(parseFields(schema, record.recordDecl));
    FieldsAndIncludes afterIncludes = parseIncludes(schema, record.afterIncludes);
    boolean hasAfterIncludes = afterIncludes.includes.size() > 0;
    if (hasBeforeIncludes && hasAfterIncludes) {
      startErrorMessage(record).append("Record may have includes before or after fields, but not both: ").append(record).append(NEWLINE);
    }
    fieldsAndIncludes.addAll(afterIncludes);
    schema.setFields(fieldsAndIncludes.fields, errorMessageBuilder());
    schema.setInclude(fieldsAndIncludes.includes);
    schema.setIncludesDeclaredInline(fieldsAndIncludes.includesDeclaredInline);
    // Includes declared after the fields block imply the fields come first.
    schema.setFieldsBeforeIncludes(hasAfterIncludes);
    validateDefaults(schema);
  } finally {
    getResolver().removePendingSchema(schema.getFullName());
  }
  return schema;
}
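A sketch of the two include placements this method accepts. It assumes PdlSchemaParser's parse(Reader) entry point, that the first parse binds Base into the shared resolver so the second parse can reference it, and illustrative schema names throughout:

import java.io.StringReader;
import com.linkedin.data.schema.DataSchemaResolver;
import com.linkedin.data.schema.grammar.PdlSchemaParser;
import com.linkedin.data.schema.resolver.DefaultDataSchemaResolver;

public class IncludePlacementDemo {
  public static void main(String[] args) {
    DataSchemaResolver resolver = new DefaultDataSchemaResolver();

    // Includes before the fields block (Base declared inline): accepted.
    PdlSchemaParser before = new PdlSchemaParser(resolver);
    before.parse(new StringReader(
        "record WithBefore includes record Base { id: long } { message: string }"));

    // Includes after the fields block: also accepted.
    PdlSchemaParser after = new PdlSchemaParser(resolver);
    after.parse(new StringReader(
        "record WithAfter { message: string } includes Base"));

    // Includes on BOTH sides of the fields block would trigger the
    // "may have includes before or after fields, but not both" error above.
  }
}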
Use of com.linkedin.data.schema.RecordDataSchema in project rest.li by linkedin.
The class SchemaAnnotationProcessor, method findDataSchemaByPath.
private static DataSchema findDataSchemaByPath(DataSchema dataSchema, String pathSpec) {
  List<String> paths = new ArrayList<>(Arrays.asList(pathSpec.split(Character.toString(PathSpec.SEPARATOR))));
  paths.remove("");
  DataSchema currentSchema = dataSchema;
  for (String pathSegment : paths) {
    String errorMsg = String.format("Could not find path segment \"%s\" in PathSpec \"%s\"", pathSegment, pathSpec);
    if (currentSchema != null) {
      currentSchema = currentSchema.getDereferencedDataSchema();
      switch (currentSchema.getType()) {
        case RECORD:
          RecordDataSchema recordDataSchema = (RecordDataSchema) currentSchema;
          RecordDataSchema.Field field = recordDataSchema.getField(pathSegment);
          if (field == null) {
            throw new IllegalArgumentException(errorMsg);
          }
          currentSchema = field.getType();
          break;
        case UNION:
          UnionDataSchema unionDataSchema = (UnionDataSchema) currentSchema;
          DataSchema unionSchema = unionDataSchema.getTypeByMemberKey(pathSegment);
          if (unionSchema == null) {
            throw new IllegalArgumentException(errorMsg);
          }
          currentSchema = unionSchema;
          break;
        case MAP:
          if (pathSegment.equals(PathSpec.WILDCARD)) {
            currentSchema = ((MapDataSchema) currentSchema).getValues();
          } else if (pathSegment.equals(DataSchemaConstants.MAP_KEY_REF)) {
            currentSchema = ((MapDataSchema) currentSchema).getKey();
          } else {
            throw new IllegalArgumentException(errorMsg);
          }
          break;
        case ARRAY:
          if (pathSegment.equals(PathSpec.WILDCARD)) {
            currentSchema = ((ArrayDataSchema) currentSchema).getItems();
          } else {
            throw new IllegalArgumentException(errorMsg);
          }
          break;
        default:
          // Primitive types have no children; an extra segment here is an illegal state.
          break;
      }
    }
  }
  // The remaining schema could still be a typeref; dereference it one last time.
  currentSchema = currentSchema.getDereferencedDataSchema();
  return currentSchema;
}
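The same traversal can be reproduced directly with the public getters this method relies on. A short sketch, using an illustrative Profile schema (the "*" and "$key" comments mirror PathSpec.WILDCARD and DataSchemaConstants.MAP_KEY_REF):

import com.linkedin.data.schema.DataSchema;
import com.linkedin.data.schema.MapDataSchema;
import com.linkedin.data.schema.RecordDataSchema;
import com.linkedin.data.template.DataTemplateUtil;

public class PathWalkDemo {
  public static void main(String[] args) {
    // Illustrative schema: a record with one map-typed field.
    final String pdsc = "{ \"type\" : \"record\", \"name\" : \"Profile\", \"fields\" : [ "
        + "{ \"name\" : \"addresses\", \"type\" : { \"type\" : \"map\", \"values\" : \"string\" } } ] }";
    final RecordDataSchema profile = (RecordDataSchema) DataTemplateUtil.parseSchema(pdsc);

    DataSchema addresses = profile.getField("addresses").getType(); // segment "addresses"
    DataSchema values = ((MapDataSchema) addresses).getValues();    // segment "*"
    DataSchema keys = ((MapDataSchema) addresses).getKey();         // segment "$key"
    System.out.println(values.getType() + " / " + keys.getType());  // STRING / STRING
  }
}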
Use of com.linkedin.data.schema.RecordDataSchema in project rest.li by linkedin.
The class PathSpecBasedSchemaAnnotationVisitor, method createOrReUseSchemaAndAttachToParent.
/**
 * This method tries to process the current dataSchema being visited inside the context and creates a skeleton copy of it.
 * If the current dataSchema has already been processed, the cached copy of the skeleton schema is fetched instead.
 *
 * @param context {@link TraverserContext} context that contains the current data schema.
 * @param hasOverridesNotResolved whether there are unresolved overrides that will be resolved into the new schema
 * @return the new schema
 * @throws CloneNotSupportedException if the schema cannot be cloned
 */
private DataSchema createOrReUseSchemaAndAttachToParent(TraverserContext context, boolean hasOverridesNotResolved) throws CloneNotSupportedException {
  DataSchema currentDataSchema = context.getCurrentSchema();
  CurrentSchemaEntryMode currentSchemaEntryMode = context.getCurrentSchemaEntryMode();
  // newSchema is created as a skeleton schema, or fetched from the cache if currentDataSchema has already been processed.
  DataSchema newSchema = null;
  if (hasOverridesNotResolved) {
    // If there are unresolved overrides, always build a fresh skeleton schema.
    newSchema = CopySchemaUtil.buildSkeletonSchema(currentDataSchema);
  } else {
    if (_seenDataSchemaMapping.containsKey(currentDataSchema)) {
      newSchema = _seenDataSchemaMapping.get(currentDataSchema);
    } else {
      newSchema = CopySchemaUtil.buildSkeletonSchema(currentDataSchema);
      _seenDataSchemaMapping.put(currentDataSchema, newSchema);
    }
  }
  // Attach based on the visitor context's schema; new fields or union members may need to be created.
  PathSpecTraverseVisitorContext oldVisitorContext = (PathSpecTraverseVisitorContext) context.getVisitorContext();
  DataSchema outputParentSchema = oldVisitorContext.getOutputParentSchema();
  if (outputParentSchema == null) {
    _schemaConstructed = newSchema;
    return newSchema;
  }
  switch (currentSchemaEntryMode) {
    case FIELD:
      assert (outputParentSchema.getType() == DataSchema.Type.RECORD);
      addField(context.getEnclosingField(), newSchema, (RecordDataSchema) outputParentSchema);
      break;
    case MAP_KEY:
      assert (outputParentSchema.getType() == DataSchema.Type.MAP);
      MapDataSchema mapDataSchema = (MapDataSchema) outputParentSchema;
      mapDataSchema.setKey((StringDataSchema) newSchema);
      break;
    case MAP_VALUE:
      assert (outputParentSchema.getType() == DataSchema.Type.MAP);
      mapDataSchema = (MapDataSchema) outputParentSchema;
      mapDataSchema.setValues(newSchema);
      break;
    case ARRAY_VALUE:
      assert (outputParentSchema.getType() == DataSchema.Type.ARRAY);
      ArrayDataSchema arrayDataSchema = (ArrayDataSchema) outputParentSchema;
      arrayDataSchema.setItems(newSchema);
      break;
    case UNION_MEMBER:
      assert (outputParentSchema.getType() == DataSchema.Type.UNION);
      addUnionMember(context.getEnclosingUnionMember(), newSchema, (UnionDataSchema) outputParentSchema);
      break;
    case TYPEREF_REF:
      TyperefDataSchema typerefDataSchema = (TyperefDataSchema) outputParentSchema;
      typerefDataSchema.setReferencedType(newSchema);
      break;
    default:
      break;
  }
  return newSchema;
}
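The reuse rule at the top of this method boils down to a small memoization pattern. A minimal sketch, assuming CopySchemaUtil.buildSkeletonSchema as called above and an identity-keyed cache like _seenDataSchemaMapping (the import for CopySchemaUtil is elided since its package may vary):

import java.util.IdentityHashMap;
import java.util.Map;
import com.linkedin.data.schema.DataSchema;

class SkeletonCopyCache {
  private final Map<DataSchema, DataSchema> _seen = new IdentityHashMap<>();

  DataSchema copyOf(DataSchema current, boolean hasOverridesNotResolved) throws CloneNotSupportedException {
    if (hasOverridesNotResolved) {
      // Unresolved overrides will be written into this copy later, so it must stay private: never cache it.
      return CopySchemaUtil.buildSkeletonSchema(current);
    }
    // Otherwise the rebuilt tree can share one skeleton per distinct input schema.
    DataSchema cached = _seen.get(current);
    if (cached == null) {
      cached = CopySchemaUtil.buildSkeletonSchema(current);
      _seen.put(current, cached);
    }
    return cached;
  }
}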