Usage of net.morimekta.providence.reflect.parser.internal.ThriftTokenizer in the providence project by morimekta:
the ThriftProgramParser.parseMessage method.
/**
 * Parses a struct, union or exception declaration from the token stream.
 * The leading variant keyword ("struct", "union" or "exception") has already
 * been consumed by the caller and is passed in as {@code variant}; the next
 * tokens are expected to be the message name, '{', the field list, and '}'.
 *
 * @param tokenizer        Tokenizer positioned just after the variant keyword.
 * @param variant          The declaration keyword: "struct", "union" or "exception".
 * @param comment          Documentation collected before the declaration, or null.
 * @param includedPrograms Program names included by this file, passed through to
 *                         type resolution for qualified type references.
 * @return The fully built message type.
 * @throws IOException On tokenizer read failures or parse errors.
 */
private MessageType parseMessage(ThriftTokenizer tokenizer, String variant, String comment, Set<String> includedPrograms) throws IOException {
MessageType._Builder struct = MessageType.builder();
if (comment != null) {
struct.setDocumentation(comment);
comment = null;
}
boolean union = variant.equals("union");
// "struct" is the default variant, so it is left unset on the builder.
if (!variant.equals("struct")) {
struct.setVariant(MessageVariant.valueForName(variant.toUpperCase(Locale.US)));
}
Token nameToken = tokenizer.expectIdentifier("message name identifier");
String name = nameToken.asString();
if (!allowedNameIdentifier(name)) {
throw tokenizer.failure(nameToken, "Message with reserved name: " + name);
}
struct.setName(name);
// Fields declared without an explicit id get auto ids counting down
// from -1 (see the else-branch below).
int nextAutoFieldKey = -1;
tokenizer.expectSymbol("message start", Token.kMessageStart);
// Already-seen field names, "get"-prefixed camelCase name variants, and
// field ids — used to reject duplicates with a parse failure instead of
// silently overwriting fields.
Set<String> fieldNames = new HashSet<>();
Set<String> fieldNameVariants = new HashSet<>();
Set<Integer> fieldIds = new HashSet<>();
// One iteration per field definition (or per comment block) until '}'.
while (true) {
Token token = tokenizer.expect("field def or message end");
if (token.isSymbol(Token.kMessageEnd)) {
break;
} else if (token.strEquals(kLineCommentStart)) {
// Line comments may accumulate across lines into one doc string
// that is attached to the next field.
comment = parseDocLine(tokenizer, comment);
continue;
} else if (token.strEquals(kBlockCommentStart)) {
comment = tokenizer.parseDocBlock();
continue;
}
FieldType._Builder field = FieldType.builder();
field.setDocumentation(comment);
comment = null;
// Optional explicit field id: "<int> : ...".
if (token.isInteger()) {
int fId = (int) token.parseInteger();
if (fId < 1) {
throw tokenizer.failure(token, "Negative or 0 field id " + fId + " not allowed.");
}
if (fieldIds.contains(fId)) {
throw tokenizer.failure(token, "Field id " + fId + " already exists in " + struct.build().getName());
}
fieldIds.add(fId);
field.setId(fId);
tokenizer.expectSymbol("field id sep", Token.kKeyValueSep);
token = tokenizer.expect("field requirement or type", t -> t.isIdentifier() || t.isQualifiedIdentifier());
} else {
// No explicit id: forbidden in strict mode, otherwise assign the
// next negative auto id.
if (requireFieldId) {
throw tokenizer.failure(token, "Missing field ID in strict declaration");
}
field.setId(nextAutoFieldKey--);
}
// Optional requirement qualifier ("required" / "optional") before the type.
if (token.strEquals(kRequired)) {
if (union) {
throw tokenizer.failure(token, "Found required field in union");
}
field.setRequirement(FieldRequirement.REQUIRED);
token = tokenizer.expect("field type", t -> t.isIdentifier() || t.isQualifiedIdentifier());
} else if (token.strEquals(kOptional)) {
if (!union) {
// All union fields are optional regardless.
field.setRequirement(FieldRequirement.OPTIONAL);
}
token = tokenizer.expect("field type", t -> t.isIdentifier() || t.isQualifiedIdentifier());
}
// Get type.... This is mandatory.
field.setType(parseType(tokenizer, token, includedPrograms));
nameToken = tokenizer.expectIdentifier("field name");
String fName = nameToken.asString();
if (!allowedNameIdentifier(fName)) {
throw tokenizer.failure(nameToken, "Field with reserved name: " + fName);
}
if (fieldNames.contains(fName)) {
throw tokenizer.failure(nameToken, "Field %s already exists in %s", fName, struct.build().getName());
}
// Reject a name whose camelCase("get", name) form collides with that of
// an earlier field — presumably to avoid generated-accessor clashes
// (NOTE(review): confirm against the code generator).
if (fieldNameVariants.contains(Strings.camelCase("get", fName))) {
throw tokenizer.failure(nameToken, "Field %s has field with conflicting name in %s", fName, struct.build().getName());
}
fieldNames.add(fName);
fieldNameVariants.add(Strings.camelCase("get", fName));
field.setName(fName);
token = tokenizer.peek("default sep, annotation, field def or message end");
// Default value
if (token.isSymbol(Token.kFieldValueSep)) {
tokenizer.next();
Token defaultValue = tokenizer.parseValue();
field.setDefaultValue(defaultValue.asString());
// The default value's source position is recorded on the field,
// presumably for later error reporting when the value is validated.
field.setStartLineNo(defaultValue.getLineNo());
field.setStartLinePos(defaultValue.getLinePos());
token = tokenizer.peek("field annotation, def or message end");
}
// Annotation
if (token.isSymbol(Token.kParamsStart)) {
tokenizer.next();
field.setAnnotations(parseAnnotations(tokenizer, "field"));
token = tokenizer.peek("field def or message end");
}
struct.addToFields(field.build());
// Optional field separator (',' or ';') is consumed and ignored.
if (token.isSymbol(Token.kLineSep1) || token.isSymbol(Token.kLineSep2)) {
tokenizer.next();
}
}
// Optional message-level annotations may follow the closing '}'.
if (tokenizer.hasNext()) {
Token token = tokenizer.peek("optional annotations");
if (token.isSymbol(Token.kParamsStart)) {
tokenizer.next();
struct.setAnnotations(parseAnnotations(tokenizer, "message"));
}
}
return struct.build();
}
Usage of net.morimekta.providence.reflect.parser.internal.ThriftTokenizer in the providence project by morimekta:
the ThriftProgramParser.parseInternal method.
/**
 * Parses a complete thrift program (one .thrift file) into a
 * {@link ProgramType}: the optional header section (namespaces, includes)
 * followed by type declarations (typedef, enum, struct/union/exception,
 * service, const).
 *
 * @param in          Input stream with the thrift file content.
 * @param file        The file being parsed; its name determines the program
 *                    name, and its location anchors relative includes.
 * @param includeDirs Extra directories searched when resolving includes.
 * @return The parsed program.
 * @throws IOException On read failures or parse errors.
 */
private ProgramType parseInternal(InputStream in, File file, Collection<File> includeDirs) throws IOException {
ProgramType._Builder program = ProgramType.builder();
// The program name comes from the file name, not from file content.
String programName = ReflectionUtils.programNameFromPath(file.getName());
if (!VALID_PROGRAM_NAME.matcher(programName).matches()) {
throw new ParseException("Program name \"%s\" derived from filename \"%s\" is not valid.", Strings.escape(programName), Strings.escape(file.getName()));
}
program.setProgramName(programName);
List<String> include_files = new ArrayList<>();
Set<String> includedPrograms = new HashSet<>();
// LinkedHashMap keeps namespace declarations in file order.
Map<String, String> namespaces = new LinkedHashMap<>();
List<Declaration> declarations = new ArrayList<>();
ThriftTokenizer tokenizer = new ThriftTokenizer(in);
// has_header: some header or declaration has been seen — a doc comment
// before the first such statement becomes the program documentation.
boolean has_header = false;
// hasDeclaration: a type declaration has been seen, after which header
// statements (namespace / include) are no longer allowed.
boolean hasDeclaration = false;
String doc_string = null;
Token token;
// One iteration per top-level statement (or comment) until end of input.
while ((token = tokenizer.next()) != null) {
if (token.strEquals(kLineCommentStart)) {
doc_string = parseDocLine(tokenizer, doc_string);
continue;
} else if (token.strEquals(kBlockCommentStart)) {
doc_string = tokenizer.parseDocBlock();
continue;
}
String keyword = token.asString();
if (!Model_Constants.kThriftKeywords.contains(keyword)) {
throw tokenizer.failure(token, "Unexpected token \'%s\'", token.asString());
}
switch(keyword) {
case kNamespace:
if (hasDeclaration) {
throw tokenizer.failure(token, "Unexpected token 'namespace', expected type declaration");
}
// A doc comment before the first header line documents the program.
if (doc_string != null && !has_header) {
program.setDocumentation(doc_string);
}
doc_string = null;
has_header = true;
parseNamespace(tokenizer, namespaces);
break;
case kInclude:
if (hasDeclaration) {
throw tokenizer.failure(token, "Unexpected token 'include', expected type declaration");
}
if (doc_string != null && !has_header) {
program.setDocumentation(doc_string);
}
doc_string = null;
has_header = true;
parseIncludes(tokenizer, include_files, file, includedPrograms, includeDirs);
break;
case kTypedef:
has_header = true;
hasDeclaration = true;
parseTypedef(tokenizer, doc_string, declarations, includedPrograms);
doc_string = null;
break;
case kEnum:
has_header = true;
hasDeclaration = true;
EnumType et = parseEnum(tokenizer, doc_string);
declarations.add(Declaration.withDeclEnum(et));
doc_string = null;
break;
case kStruct:
case kUnion:
case kException:
has_header = true;
hasDeclaration = true;
// The keyword itself selects the message variant inside parseMessage.
MessageType st = parseMessage(tokenizer, token.asString(), doc_string, includedPrograms);
declarations.add(Declaration.withDeclStruct(st));
doc_string = null;
break;
case kService:
has_header = true;
hasDeclaration = true;
ServiceType srv = parseService(tokenizer, doc_string, includedPrograms);
declarations.add(Declaration.withDeclService(srv));
doc_string = null;
break;
case kConst:
has_header = true;
hasDeclaration = true;
ConstType cnst = parseConst(tokenizer, doc_string, includedPrograms);
declarations.add(Declaration.withDeclConst(cnst));
doc_string = null;
break;
default:
// Keyword is in kThriftKeywords but has no top-level handling here.
throw tokenizer.failure(token, "Unexpected token \'%s\'", Strings.escape(token.asString()));
}
}
// Only set the optional collections on the builder when non-empty.
if (namespaces.size() > 0) {
program.setNamespaces(namespaces);
}
if (include_files.size() > 0) {
program.setIncludes(include_files);
}
if (declarations.size() > 0) {
program.setDecl(declarations);
}
return program.build();
}
Aggregations