Use of net.morimekta.providence.PMessage in the providence project by morimekta.
The class HasFieldValueThat, method describeMismatch.
@Override
public void describeMismatch(Object o, Description mismatchDescription) {
    // Null and non-message instances are rejected up front.
    if (o == null) {
        mismatchDescription.appendText("got null message");
        return;
    }
    if (!(o instanceof PMessage)) {
        mismatchDescription.appendText("instance is not a message");
        return;
    }

    // Walk the field path, keeping the dotted path traversed so far for
    // error messages. 'current' is the value reached at each step.
    Object current = o;
    StringBuilder traversed = new StringBuilder();
    for (String fieldName : path) {
        if (traversed.length() > 0) {
            traversed.append('.');
        }
        traversed.append(fieldName);

        // Intermediate values must themselves be messages to descend into.
        if (!(current instanceof PMessage)) {
            mismatchDescription.appendText("field " + traversed + " is not a message");
            return;
        }
        PMessage actual = (PMessage) current;
        PField field = actual.descriptor().findFieldByName(fieldName);
        if (field == null) {
            mismatchDescription.appendText("field path " + traversed + " is not valid");
            return;
        }
        if (!actual.has(field.getId())) {
            mismatchDescription.appendText("field " + traversed + " is missing");
            return;
        }
        current = actual.get(field.getId());
    }

    // The whole path resolved; delegate the value mismatch description.
    mismatchDescription.appendText("field " + traversed + " ");
    valueMatcher.describeMismatch(current, mismatchDescription);
}
Use of net.morimekta.providence.PMessage in the providence project by morimekta.
The class GeneratorWatcher, method dumpGeneratedMessages.
/**
 * Dump all generated messages.
 *
 * <p>Writes each generated message through the configured writer, or to
 * {@code System.err} using the output serializer when no writer is set,
 * with a separator after every message.
 *
 * @throws IOException If writing the messages failed.
 */
@SuppressWarnings("unchecked")
public void dumpGeneratedMessages() throws IOException {
    // Fall back to stderr when no explicit writer has been configured.
    MessageWriter target = this.writer != null
            ? this.writer
            : new IOMessageWriter(System.err, outputSerializer);
    for (PMessage message : generated) {
        target.write(message);
        target.separator();
    }
}
Use of net.morimekta.providence.PMessage in the providence project by morimekta.
The class ProvidenceConfigParser, method parseConfigRecursively.
/**
 * Parse a providence config file, recursively resolving its includes.
 *
 * <p>A config file consists of up to three ordered stages: {@code include}
 * statements, a {@code def} block of definitions, and finally exactly one
 * message declaration (a qualified type identifier). Tokens after the
 * message, or an include appearing after defines/message, are errors.
 *
 * @param file   The config file to parse.
 * @param parent Parent message the parsed message may inherit from (may be null).
 * @param stack  Include stack; length 1 appears to mean the top-level file — TODO confirm.
 * @param <M>    The message type.
 * @param <F>    The message field type.
 * @return Pair of the parsed message and the set of all included file paths,
 *         or null when the declared type is unknown in non-strict mode.
 * @throws IOException If reading or parsing the file failed.
 */
@SuppressWarnings("unchecked")
<M extends PMessage<M, F>, F extends PField> Pair<M, Set<String>> parseConfigRecursively(@Nonnull Path file, M parent, String[] stack) throws IOException {
    Tokenizer tokenizer;
    try (BufferedInputStream in = new BufferedInputStream(new FileInputStream(file.toFile()))) {
        // Non-enclosed content, meaning we should read the whole file immediately.
        tokenizer = new Tokenizer(new Utf8StreamReader(in), Tokenizer.DEFAULT_BUFFER_SIZE, true);
    }

    ProvidenceConfigContext context = new ProvidenceConfigContext();

    // Track every file this parse touches, starting with this one.
    Set<String> includedFilePaths = new TreeSet<>();
    includedFilePaths.add(canonicalFileLocation(file).toString());

    // lastStage enforces the include -> def -> message ordering.
    Stage lastStage = Stage.INCLUDES;
    M result = null;

    Token token = tokenizer.peek();
    while (token != null) {
        tokenizer.next();

        if (lastStage == Stage.MESSAGE) {
            // Only one message is allowed, and it must be the last thing in the file.
            throw new TokenizerException(token, "Unexpected token '" + token.asString() + "', expected end of file.").setLine(tokenizer.getLine());
        } else if (INCLUDE.equals(token.asString())) {
            // if include && stage == INCLUDES --> INCLUDES
            if (lastStage != Stage.INCLUDES) {
                throw new TokenizerException(token, "Include added after defines or message. Only one def block allowed.").setLine(tokenizer.getLine());
            }
            token = tokenizer.expectLiteral("file to be included");
            String includedFilePath = token.decodeLiteral(strict);
            PMessage included;
            Path includedFile;
            try {
                includedFile = resolveFile(file, includedFilePath);
                // Recursively parse the included file; tmp is null when it
                // could not be parsed in non-strict mode.
                Pair<PMessage, Set<String>> tmp = checkAndParseInternal(includedFile, null, stack);
                if (tmp != null) {
                    includedFilePaths.add(includedFile.toString());
                    includedFilePaths.addAll(tmp.second);
                    included = tmp.first;
                } else {
                    included = null;
                }
            } catch (FileNotFoundException e) {
                throw new TokenizerException(token, "Included file \"%s\" not found.", includedFilePath).setLine(tokenizer.getLine());
            }

            // Includes are always aliased: include "<file>" as <alias>.
            token = tokenizer.expectIdentifier("the token 'as'");
            if (!AS.equals(token.asString())) {
                throw new TokenizerException(token, "Expected token 'as' after included file \"%s\".", includedFilePath).setLine(tokenizer.getLine());
            }
            token = tokenizer.expectIdentifier("Include alias");
            String alias = token.asString();
            if (RESERVED_WORDS.contains(alias)) {
                throw new TokenizerException(token, "Alias \"%s\" is a reserved word.", alias).setLine(tokenizer.getLine());
            }
            if (context.containsReference(alias)) {
                throw new TokenizerException(token, "Alias \"%s\" is already used.", alias).setLine(tokenizer.getLine());
            }
            context.setInclude(alias, included);
        } else if (DEF.equals(token.asString())) {
            // if params && stage == DEF --> DEF
            lastStage = Stage.DEFINES;
            parseDefinitions(context, tokenizer);
        } else if (token.isQualifiedIdentifier()) {
            // if a.b (type identifier) --> MESSAGE
            lastStage = Stage.MESSAGE;
            PMessageDescriptor<M, F> descriptor;
            try {
                descriptor = (PMessageDescriptor) registry.getDeclaredType(token.asString());
            } catch (IllegalArgumentException e) {
                // Unknown types in the top-level file are always fatal,
                // even in non-strict mode.
                if (strict || stack.length == 1) {
                    throw new TokenizerException(token, "Unknown declared type: %s", token.asString()).setLine(tokenizer.getLine());
                }
                return null;
            }
            result = parseConfigMessage(tokenizer, context, descriptor.builder(), parent, file);
        } else {
            throw new TokenizerException(token, "Unexpected token '" + token.asString() + "'. Expected include, defines or message type").setLine(tokenizer.getLine());
        }

        token = tokenizer.peek();
    }

    if (result == null) {
        throw new TokenizerException("No message in config: " + file.getFileName().toString());
    }

    return Pair.create(result, includedFilePaths);
}
Use of net.morimekta.providence.PMessage in the providence project by morimekta.
The class ProvidenceConfigParser, method resolveAny.
/**
 * Resolve a referenced value, optionally descending into a contained message
 * when the key is dotted, e.g. {@code ref.field.subfield}.
 *
 * @param context   The config context holding named references.
 * @param token     The token containing the (possibly dotted) reference key.
 * @param tokenizer The tokenizer, used for error positioning.
 * @return The resolved value; may be null.
 * @throws TokenizerException If the reference does not resolve to a message
 *                            (when a sub-key is present), or the in-message
 *                            lookup fails.
 */
private static Object resolveAny(ProvidenceConfigContext context, Token token, Tokenizer tokenizer) throws TokenizerException {
    String key = token.asString();

    // Split "name.sub.key" into the reference name and the path inside it.
    String name = key;
    String subKey = null;
    if (key.contains(IDENTIFIER_SEP)) {
        int idx = key.indexOf(IDENTIFIER_SEP);
        name = key.substring(0, idx);
        subKey = key.substring(idx + 1);
    }

    Object value = context.getReference(name, token, tokenizer);
    if (subKey != null) {
        if (!(value instanceof PMessage)) {
            // Fix: attach the line position like every other TokenizerException
            // thrown by this parser, so the error points at the reference.
            throw new TokenizerException(token, "Reference name " + key + " not declared").setLine(tokenizer.getLine());
        }
        try {
            return ProvidenceConfigUtil.getInMessage((PMessage) value, subKey, null);
        } catch (ProvidenceConfigException e) {
            throw new TokenizerException(token, e.getMessage()).setLine(tokenizer.getLine()).initCause(e);
        }
    }

    return value;
}
Use of net.morimekta.providence.PMessage in the providence project by morimekta.
The class ProvidenceConfigParser, method parseMessage.
/**
 * Parse the body of a message value from the tokenizer into the given builder.
 *
 * <p>Expects the tokenizer to be positioned just inside the message start
 * symbol, and consumes tokens up to and including the matching message end.
 * Handles three field shapes: nested messages (which may inherit from a
 * reference or extend the builder's current value in-line), maps (which may
 * be extended or replaced), and simple values. A value of {@code UNDEFINED}
 * clears the field. Fields may be captured as named references via the
 * {@code &} (DEFINE_REFERENCE) symbol, which is rejected in strict mode.
 *
 * @param tokenizer The tokenizer to read from.
 * @param context   The config context holding named references.
 * @param builder   The builder to fill; its current content is the base for
 *                  in-line extension.
 * @param <M>       The message type.
 * @param <F>       The message field type.
 * @return The built message.
 * @throws IOException If tokenizing or parsing failed.
 */
@SuppressWarnings("unchecked")
<M extends PMessage<M, F>, F extends PField> M parseMessage(@Nonnull Tokenizer tokenizer, @Nonnull ProvidenceConfigContext context, @Nonnull PMessageBuilder<M, F> builder) throws IOException {
    PMessageDescriptor<M, F> descriptor = builder.descriptor();

    Token token = tokenizer.expect("object end or field");
    while (!token.isSymbol(Token.kMessageEnd)) {
        if (!token.isIdentifier()) {
            throw new TokenizerException(token, "Invalid field name: " + token.asString()).setLine(tokenizer.getLine());
        }

        F field = descriptor.findFieldByName(token.asString());
        if (field == null) {
            // Unknown field: fatal in strict mode, skipped otherwise.
            if (strict) {
                throw new TokenizerException("No such field " + token.asString() + " in " + descriptor.getQualifiedName()).setLine(tokenizer.getLine());
            } else {
                token = tokenizer.expect("field value sep, message start or reference start");
                if (token.isSymbol(DEFINE_REFERENCE)) {
                    context.setReference(context.initReference(tokenizer.expectIdentifier("reference name"), tokenizer), null);
                    // Ignore reference.
                    token = tokenizer.expect("field value sep or message start");
                }

                if (token.isSymbol(Token.kFieldValueSep)) {
                    token = tokenizer.expect("value declaration");
                } else if (!token.isSymbol(Token.kMessageStart)) {
                    throw new TokenizerException(token, "Expected field-value separator or inherited message").setLine(tokenizer.getLine());
                }

                // Non-strict will just consume unknown fields, this way
                // we can be forward-compatible when reading config.
                consumeValue(context, tokenizer, token);
                token = nextNotLineSep(tokenizer, "field or message end");
                continue;
            }
        }

        if (field.getType() == PType.MESSAGE) {
            // go recursive with optional
            String reference = null;
            char symbol = tokenizer.expectSymbol("Message assigner or start", Token.kFieldValueSep, Token.kMessageStart, DEFINE_REFERENCE);
            if (symbol == DEFINE_REFERENCE) {
                Token ref = tokenizer.expectIdentifier("reference name");
                if (strict) {
                    throw tokenizer.failure(ref, "Reusable objects are not allowed in strict mode.");
                }
                reference = context.initReference(ref, tokenizer);
                symbol = tokenizer.expectSymbol("Message assigner or start after " + reference, Token.kFieldValueSep, Token.kMessageStart);
            }

            PMessageBuilder bld;
            if (symbol == Token.kFieldValueSep) {
                // '=' replaces the field value entirely.
                token = tokenizer.expect("reference or message start");
                if (UNDEFINED.equals(token.asString())) {
                    // unset.
                    builder.clear(field.getId());
                    context.setReference(reference, null);

                    // special casing this, as we don't want to duplicate the parse line below.
                    token = nextNotLineSep(tokenizer, "field or message end");
                    continue;
                }

                // overwrite with new.
                bld = ((PMessageDescriptor) field.getDescriptor()).builder();
                if (token.isReferenceIdentifier()) {
                    // Inherit from reference.
                    try {
                        PMessage ref = resolve(context, token, tokenizer, field.getDescriptor());
                        if (ref != null) {
                            bld.merge(ref);
                        } else {
                            // Unresolved reference: fatal if followed by a
                            // message body, or in strict mode; otherwise ignored.
                            if (tokenizer.peek().isSymbol(Token.kMessageStart)) {
                                throw new TokenizerException(token, "Inherit from unknown reference %s", token.asString()).setLine(tokenizer.getLine());
                            } else if (strict) {
                                throw new TokenizerException(token, "Unknown reference %s", token.asString()).setLine(tokenizer.getLine());
                            }
                        }
                    } catch (ProvidenceConfigException e) {
                        throw new TokenizerException(token, "Unknown inherited reference '%s'", token.asString()).setLine(tokenizer.getLine());
                    }

                    token = tokenizer.expect("after message reference");
                    // we assume a new field or end of current message.
                    if (!token.isSymbol(Token.kMessageStart)) {
                        builder.set(field.getId(), context.setReference(reference, bld.build()));
                        continue;
                    }
                } else if (!token.isSymbol(Token.kMessageStart)) {
                    throw new TokenizerException(token, "Unexpected token " + token.asString() + ", expected message start").setLine(tokenizer.getLine());
                }
            } else {
                // extend in-line.
                bld = builder.mutator(field.getId());
            }

            builder.set(field.getId(), context.setReference(reference, parseMessage(tokenizer, context, bld)));
        } else if (field.getType() == PType.MAP) {
            // maps can be extended the same way as
            token = tokenizer.expect("field sep or value start");
            Map baseValue = new LinkedHashMap<>();
            String reference = null;
            if (token.isSymbol(DEFINE_REFERENCE)) {
                Token ref = tokenizer.expectIdentifier("reference name");
                if (strict) {
                    throw tokenizer.failure(ref, "Reusable objects are not allowed in strict mode.");
                }
                reference = context.initReference(ref, tokenizer);
                token = tokenizer.expect("field sep or value start");
            }

            if (token.isSymbol(Token.kFieldValueSep)) {
                token = tokenizer.expect("field id or start");
                if (UNDEFINED.equals(token.asString())) {
                    // UNDEFINED clears the map field entirely.
                    builder.clear(field.getId());
                    context.setReference(reference, null);

                    token = tokenizer.expect("message end or field");
                    continue;
                } else if (token.isReferenceIdentifier()) {
                    // Start from a referenced map value, optionally extended
                    // by a following map body.
                    try {
                        baseValue = resolve(context, token, tokenizer, field.getDescriptor());
                    } catch (ProvidenceConfigException e) {
                        throw new TokenizerException(token, e.getMessage()).setLine(tokenizer.getLine());
                    }

                    token = tokenizer.expect("map start or next field");
                    if (!token.isSymbol(Token.kMessageStart)) {
                        // No body follows: the reference value is the field value.
                        builder.set(field.getId(), context.setReference(reference, baseValue));
                        continue;
                    } else if (baseValue == null) {
                        baseValue = new LinkedHashMap<>();
                    }
                }
            } else {
                // No '=': extend the builder's current map content.
                baseValue.putAll(builder.build().get(field.getId()));
            }

            if (!token.isSymbol(Token.kMessageStart)) {
                throw new TokenizerException(token, "Expected map start, but got '%s'", token.asString()).setLine(tokenizer.getLine());
            }
            Map map = parseMapValue(tokenizer, context, (PMap) field.getDescriptor(), baseValue);
            builder.set(field.getId(), context.setReference(reference, map));
        } else {
            String reference = null;
            // Simple fields *must* have the '=' separation, may have '&' reference.
            if (tokenizer.expectSymbol("field value sep", Token.kFieldValueSep, DEFINE_REFERENCE) == DEFINE_REFERENCE) {
                Token ref = tokenizer.expectIdentifier("reference name");
                if (strict) {
                    throw tokenizer.failure(ref, "Reusable objects are not allowed in strict mode.");
                }
                reference = context.initReference(ref, tokenizer);
                tokenizer.expectSymbol("field value sep", Token.kFieldValueSep);
            }
            token = tokenizer.expect("field value");
            if (UNDEFINED.equals(token.asString())) {
                builder.clear(field.getId());
                context.setReference(reference, null);
            } else {
                Object value = parseFieldValue(token, tokenizer, context, field.getDescriptor(), strict);
                builder.set(field.getId(), context.setReference(reference, value));
            }
        }

        token = nextNotLineSep(tokenizer, "field or message end");
    }

    return builder.build();
}
Aggregations