Search in sources:

Example 6 with TokenizerException

use of net.morimekta.providence.serializer.pretty.TokenizerException in project providence by morimekta.

From the class ProvidenceConfigParser, the method parseDefinitions:

/**
 * Parse a 'def' section: either a single named definition, or a braced
 * group of them. Each definition registers a reference in the context.
 *
 * @param context   Config context the references are registered into.
 * @param tokenizer Tokenizer positioned just after the 'def' keyword.
 * @throws IOException On read errors or invalid definition syntax.
 */
@SuppressWarnings("unchecked")
void parseDefinitions(ProvidenceConfigContext context, Tokenizer tokenizer) throws IOException {
    Token first = tokenizer.expect("defines group start or identifier");
    if (first.isIdentifier()) {
        // Single definition: def name = value
        String refName = context.initReference(first, tokenizer);
        tokenizer.expectSymbol("def value sep", Token.kFieldValueSep);
        context.setReference(refName, parseDefinitionValue(context, tokenizer));
        return;
    }
    if (!first.isSymbol(Token.kMessageStart)) {
        throw new TokenizerException(first, "Unexpected token after def: '%s'", first.asString()).setLine(tokenizer.getLine());
    }
    // Braced group: def { name = value ... }
    for (Token current = tokenizer.expect("define or end");
         !current.isSymbol(Token.kMessageEnd);
         current = tokenizer.expect("next define or end")) {
        if (!current.isIdentifier()) {
            throw new TokenizerException(current, "Token '%s' is not valid reference name.", current.asString()).setLine(tokenizer.getLine());
        }
        String refName = context.initReference(current, tokenizer);
        tokenizer.expectSymbol("def value sep", Token.kFieldValueSep);
        context.setReference(refName, parseDefinitionValue(context, tokenizer));
    }
}
Also used : Token(net.morimekta.providence.serializer.pretty.Token) TokenizerException(net.morimekta.providence.serializer.pretty.TokenizerException)

Example 7 with TokenizerException

use of net.morimekta.providence.serializer.pretty.TokenizerException in project providence by morimekta.

From the class ProvidenceConfigParser, the method parseMessage:

/**
 * Parse the body of a message (everything up to and including the closing
 * '}') into the given builder.
 * <p>
 * Handles three field shapes: nested messages (recursing into this method),
 * maps (which may overlay entries on an inherited or referenced base value),
 * and simple values. Any field may carry a reference definition
 * (DEFINE_REFERENCE symbol + name), which is rejected in strict mode.
 * Unknown field names are an error in strict mode, and are consumed and
 * ignored otherwise for forward compatibility.
 *
 * @param tokenizer Tokenizer positioned just after the message start.
 * @param context   Config context holding named references.
 * @param builder   Builder to fill; may already hold inherited values.
 * @return The built message.
 * @throws IOException On tokenizer read errors or invalid syntax.
 */
@SuppressWarnings("unchecked")
<M extends PMessage<M, F>, F extends PField> M parseMessage(@Nonnull Tokenizer tokenizer, @Nonnull ProvidenceConfigContext context, @Nonnull PMessageBuilder<M, F> builder) throws IOException {
    PMessageDescriptor<M, F> descriptor = builder.descriptor();
    Token token = tokenizer.expect("object end or field");
    while (!token.isSymbol(Token.kMessageEnd)) {
        if (!token.isIdentifier()) {
            throw new TokenizerException(token, "Invalid field name: " + token.asString()).setLine(tokenizer.getLine());
        }
        F field = descriptor.findFieldByName(token.asString());
        if (field == null) {
            // Unknown field name.
            if (strict) {
                throw new TokenizerException("No such field " + token.asString() + " in " + descriptor.getQualifiedName()).setLine(tokenizer.getLine());
            } else {
                token = tokenizer.expect("field value sep, message start or reference start");
                if (token.isSymbol(DEFINE_REFERENCE)) {
                    // Register the reference name, but with a null value:
                    // the value it would point at is being skipped.
                    context.setReference(context.initReference(tokenizer.expectIdentifier("reference name"), tokenizer), null);
                    // Ignore reference.
                    token = tokenizer.expect("field value sep or message start");
                }
                if (token.isSymbol(Token.kFieldValueSep)) {
                    token = tokenizer.expect("value declaration");
                } else if (!token.isSymbol(Token.kMessageStart)) {
                    throw new TokenizerException(token, "Expected field-value separator or inherited message").setLine(tokenizer.getLine());
                }
                // Non-strict will just consume unknown fields, this way
                // we can be forward-compatible when reading config.
                consumeValue(context, tokenizer, token);
                token = nextNotLineSep(tokenizer, "field or message end");
                continue;
            }
        }
        if (field.getType() == PType.MESSAGE) {
            // go recursive with optional reference definition first.
            String reference = null;
            char symbol = tokenizer.expectSymbol("Message assigner or start", Token.kFieldValueSep, Token.kMessageStart, DEFINE_REFERENCE);
            if (symbol == DEFINE_REFERENCE) {
                Token ref = tokenizer.expectIdentifier("reference name");
                if (strict) {
                    throw tokenizer.failure(ref, "Reusable objects are not allowed in strict mode.");
                }
                reference = context.initReference(ref, tokenizer);
                symbol = tokenizer.expectSymbol("Message assigner or start after " + reference, Token.kFieldValueSep, Token.kMessageStart);
            }
            PMessageBuilder bld;
            if (symbol == Token.kFieldValueSep) {
                // '=' form: replace the field value entirely.
                token = tokenizer.expect("reference or message start");
                if (UNDEFINED.equals(token.asString())) {
                    // unset.
                    builder.clear(field.getId());
                    context.setReference(reference, null);
                    // special casing this, as we don't want to duplicate the parse line below.
                    token = nextNotLineSep(tokenizer, "field or message end");
                    continue;
                }
                // overwrite with new.
                bld = ((PMessageDescriptor) field.getDescriptor()).builder();
                if (token.isReferenceIdentifier()) {
                    // Inherit from reference.
                    try {
                        PMessage ref = resolve(context, token, tokenizer, field.getDescriptor());
                        if (ref != null) {
                            bld.merge(ref);
                        } else {
                            // Unresolvable reference: only an error when a
                            // message body follows it, or in strict mode.
                            if (tokenizer.peek().isSymbol(Token.kMessageStart)) {
                                throw new TokenizerException(token, "Inherit from unknown reference %s", token.asString()).setLine(tokenizer.getLine());
                            } else if (strict) {
                                throw new TokenizerException(token, "Unknown reference %s", token.asString()).setLine(tokenizer.getLine());
                            }
                        }
                    } catch (ProvidenceConfigException e) {
                        throw new TokenizerException(token, "Unknown inherited reference '%s'", token.asString()).setLine(tokenizer.getLine());
                    }
                    token = tokenizer.expect("after message reference");
                    // we assume a new field or end of current message.
                    if (!token.isSymbol(Token.kMessageStart)) {
                        builder.set(field.getId(), context.setReference(reference, bld.build()));
                        continue;
                    }
                } else if (!token.isSymbol(Token.kMessageStart)) {
                    throw new TokenizerException(token, "Unexpected token " + token.asString() + ", expected message start").setLine(tokenizer.getLine());
                }
            } else {
                // extend in-line: keep existing field content and merge into it.
                bld = builder.mutator(field.getId());
            }
            builder.set(field.getId(), context.setReference(reference, parseMessage(tokenizer, context, bld)));
        } else if (field.getType() == PType.MAP) {
            // maps can be extended the same way as messages: start from a base
            // value (current builder content or a reference) and overlay entries.
            token = tokenizer.expect("field sep or value start");
            Map baseValue = new LinkedHashMap<>();
            String reference = null;
            if (token.isSymbol(DEFINE_REFERENCE)) {
                Token ref = tokenizer.expectIdentifier("reference name");
                if (strict) {
                    throw tokenizer.failure(ref, "Reusable objects are not allowed in strict mode.");
                }
                reference = context.initReference(ref, tokenizer);
                token = tokenizer.expect("field sep or value start");
            }
            if (token.isSymbol(Token.kFieldValueSep)) {
                token = tokenizer.expect("field id or start");
                if (UNDEFINED.equals(token.asString())) {
                    // '=' undefined unsets the whole map field.
                    builder.clear(field.getId());
                    context.setReference(reference, null);
                    token = tokenizer.expect("message end or field");
                    continue;
                } else if (token.isReferenceIdentifier()) {
                    try {
                        baseValue = resolve(context, token, tokenizer, field.getDescriptor());
                    } catch (ProvidenceConfigException e) {
                        throw new TokenizerException(token, e.getMessage()).setLine(tokenizer.getLine());
                    }
                    token = tokenizer.expect("map start or next field");
                    if (!token.isSymbol(Token.kMessageStart)) {
                        // Reference only, no overlay body: store it directly.
                        builder.set(field.getId(), context.setReference(reference, baseValue));
                        continue;
                    } else if (baseValue == null) {
                        baseValue = new LinkedHashMap<>();
                    }
                }
            } else {
                // No '=': extend the map currently held by the builder.
                baseValue.putAll(builder.build().get(field.getId()));
            }
            if (!token.isSymbol(Token.kMessageStart)) {
                throw new TokenizerException(token, "Expected map start, but got '%s'", token.asString()).setLine(tokenizer.getLine());
            }
            Map map = parseMapValue(tokenizer, context, (PMap) field.getDescriptor(), baseValue);
            builder.set(field.getId(), context.setReference(reference, map));
        } else {
            String reference = null;
            // Simple fields *must* have the '=' separation, may have '&' reference.
            if (tokenizer.expectSymbol("field value sep", Token.kFieldValueSep, DEFINE_REFERENCE) == DEFINE_REFERENCE) {
                Token ref = tokenizer.expectIdentifier("reference name");
                if (strict) {
                    throw tokenizer.failure(ref, "Reusable objects are not allowed in strict mode.");
                }
                reference = context.initReference(ref, tokenizer);
                tokenizer.expectSymbol("field value sep", Token.kFieldValueSep);
            }
            token = tokenizer.expect("field value");
            if (UNDEFINED.equals(token.asString())) {
                builder.clear(field.getId());
                context.setReference(reference, null);
            } else {
                Object value = parseFieldValue(token, tokenizer, context, field.getDescriptor(), strict);
                builder.set(field.getId(), context.setReference(reference, value));
            }
        }
        token = nextNotLineSep(tokenizer, "field or message end");
    }
    return builder.build();
}
Also used : DEF(net.morimekta.providence.config.impl.ProvidenceConfigUtil.DEF) PMap(net.morimekta.providence.descriptor.PMap) Token(net.morimekta.providence.serializer.pretty.Token) TokenizerException(net.morimekta.providence.serializer.pretty.TokenizerException) ProvidenceConfigException(net.morimekta.providence.config.ProvidenceConfigException) LinkedHashMap(java.util.LinkedHashMap) PMessageBuilder(net.morimekta.providence.PMessageBuilder) PMessage(net.morimekta.providence.PMessage) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) PMap(net.morimekta.providence.descriptor.PMap)

Example 8 with TokenizerException

use of net.morimekta.providence.serializer.pretty.TokenizerException in project providence by morimekta.

From the class ProvidenceConfigParser, the method parseFieldValue:

/**
 * Parse a single field value of the type given by the descriptor.
 * <p>
 * For every type, a reference identifier token resolves to a previously
 * defined value; otherwise the token is parsed as a literal, binary blob,
 * enum, message, map, set or list. Container items are parsed recursively.
 *
 * @param next             The first (already consumed) token of the value.
 * @param tokenizer        Tokenizer supplying the remaining tokens.
 * @param context          Config context holding named references.
 * @param descriptor       Descriptor for the expected value type.
 * @param requireEnumValue When true, an unknown enum value is an error even
 *                         outside strict mode; otherwise it yields null.
 * @return The parsed value (may be null for a lenient unknown enum value).
 * @throws IOException On tokenizer errors, unresolvable references, or a
 *                     token that is not valid for the expected type.
 */
@SuppressWarnings("unchecked")
Object parseFieldValue(Token next, Tokenizer tokenizer, ProvidenceConfigContext context, PDescriptor descriptor, boolean requireEnumValue) throws IOException {
    try {
        switch(descriptor.getType()) {
            case BOOL:
                if (TRUE.equals(next.asString())) {
                    return true;
                } else if (FALSE.equals(next.asString())) {
                    return false;
                } else if (next.isReferenceIdentifier()) {
                    return resolve(context, next, tokenizer, descriptor);
                }
                break;
            case BYTE:
                if (next.isReferenceIdentifier()) {
                    return resolve(context, next, tokenizer, descriptor);
                } else if (next.isInteger()) {
                    // Narrowing cast; values outside byte range wrap silently.
                    return (byte) next.parseInteger();
                }
                break;
            case I16:
                if (next.isReferenceIdentifier()) {
                    return resolve(context, next, tokenizer, descriptor);
                } else if (next.isInteger()) {
                    return (short) next.parseInteger();
                }
                break;
            case I32:
                if (next.isReferenceIdentifier()) {
                    return resolve(context, next, tokenizer, descriptor);
                } else if (next.isInteger()) {
                    return (int) next.parseInteger();
                }
                break;
            case I64:
                if (next.isReferenceIdentifier()) {
                    return resolve(context, next, tokenizer, descriptor);
                } else if (next.isInteger()) {
                    return next.parseInteger();
                }
                break;
            case DOUBLE:
                if (next.isReferenceIdentifier()) {
                    return resolve(context, next, tokenizer, descriptor);
                } else if (next.isInteger() || next.isReal()) {
                    return next.parseDouble();
                }
                break;
            case STRING:
                if (next.isReferenceIdentifier()) {
                    return resolve(context, next, tokenizer, descriptor);
                } else if (next.isStringLiteral()) {
                    return next.decodeLiteral(strict);
                }
                break;
            case BINARY:
                // Binary is written as b64(...) or hex(...) function-style tokens.
                if (Token.B64.equals(next.asString())) {
                    tokenizer.expectSymbol("binary data enclosing start", Token.kParamsStart);
                    return Binary.fromBase64(tokenizer.readBinary(Token.kParamsEnd));
                } else if (Token.HEX.equals(next.asString())) {
                    tokenizer.expectSymbol("binary data enclosing start", Token.kParamsStart);
                    return Binary.fromHexString(tokenizer.readBinary(Token.kParamsEnd));
                } else if (next.isReferenceIdentifier()) {
                    return resolve(context, next, tokenizer, descriptor);
                }
                break;
            case ENUM:
                {
                    PEnumDescriptor ed = (PEnumDescriptor) descriptor;
                    PEnumValue value;
                    String name = next.asString();
                    if (next.isInteger()) {
                        value = ed.findById((int) next.parseInteger());
                    } else if (next.isIdentifier()) {
                        // A plain identifier may be an enum value name, or a
                        // reference that shadows it; value name wins.
                        value = ed.findByName(name);
                        if (value == null && context.containsReference(name)) {
                            value = resolve(context, next, tokenizer, ed);
                        }
                    } else if (next.isReferenceIdentifier()) {
                        value = resolve(context, next, tokenizer, descriptor);
                    } else {
                        break;
                    }
                    if (value == null && (strict || requireEnumValue)) {
                        // Look for a case-insensitive match purely to improve
                        // the error message with a "did you mean" hint.
                        PEnumValue option = null;
                        if (next.isIdentifier()) {
                            for (PEnumValue o : ed.getValues()) {
                                if (o.getName().equalsIgnoreCase(name)) {
                                    option = o;
                                    break;
                                }
                            }
                        }
                        if (option != null) {
                            throw new TokenizerException(next, "No such enum value '%s' for %s, did you mean '%s'?", name, ed.getQualifiedName(), option.getName()).setLine(tokenizer.getLine());
                        }
                        throw new TokenizerException(next, "No such enum value '%s' for %s.", name, ed.getQualifiedName()).setLine(tokenizer.getLine());
                    }
                    return value;
                }
            case MESSAGE:
                if (next.isReferenceIdentifier()) {
                    return resolve(context, next, tokenizer, descriptor);
                } else if (next.isSymbol(Token.kMessageStart)) {
                    return parseMessage(tokenizer, context, ((PMessageDescriptor) descriptor).builder());
                }
                break;
            case MAP:
                {
                    if (next.isReferenceIdentifier()) {
                        Map resolved;
                        try {
                            // Make sure the reference is to a map.
                            resolved = resolve(context, next, tokenizer, descriptor);
                        } catch (ClassCastException e) {
                            throw new TokenizerException(next, "Reference %s is not a map field ", next.asString()).setLine(tokenizer.getLine());
                        }
                        return resolved;
                    } else if (next.isSymbol(Token.kMessageStart)) {
                        return parseMapValue(tokenizer, context, (PMap) descriptor, new LinkedHashMap());
                    }
                    break;
                }
            case SET:
                {
                    if (next.isReferenceIdentifier()) {
                        return resolve(context, next, tokenizer, descriptor);
                    } else if (next.isSymbol(Token.kListStart)) {
                        @SuppressWarnings("unchecked") PSet<Object> ct = (PSet) descriptor;
                        Set<Object> value = new LinkedHashSet<>();
                        next = tokenizer.expect("set value or end");
                        while (!next.isSymbol(Token.kListEnd)) {
                            // NOTE: items are parsed with 'strict' as the enum
                            // requirement, not the incoming requireEnumValue.
                            Object item = parseFieldValue(next, tokenizer, context, ct.itemDescriptor(), strict);
                            if (item != null) {
                                value.add(item);
                            }
                            // sets require separator, and allows separator after last.
                            if (tokenizer.expectSymbol("set separator or end", Token.kLineSep1, Token.kListEnd) == Token.kListEnd) {
                                break;
                            }
                            next = tokenizer.expect("set value or end");
                        }
                        return ct.builder().addAll(value).build();
                    }
                    break;
                }
            case LIST:
                {
                    if (next.isReferenceIdentifier()) {
                        return resolve(context, next, tokenizer, descriptor);
                    } else if (next.isSymbol(Token.kListStart)) {
                        @SuppressWarnings("unchecked") PList<Object> ct = (PList) descriptor;
                        PList.Builder<Object> builder = ct.builder();
                        next = tokenizer.expect("list value or end");
                        while (!next.isSymbol(Token.kListEnd)) {
                            Object item = parseFieldValue(next, tokenizer, context, ct.itemDescriptor(), strict);
                            if (item != null) {
                                builder.add(item);
                            }
                            // lists require separator, and allows separator after last.
                            if (tokenizer.expectSymbol("list separator or end", Token.kLineSep1, Token.kListEnd) == Token.kListEnd) {
                                break;
                            }
                            next = tokenizer.expect("list value or end");
                        }
                        return builder.build();
                    }
                    break;
                }
            default:
                {
                    throw new TokenizerException(next, descriptor.getType() + " not supported!").setLine(tokenizer.getLine());
                }
        }
    } catch (ProvidenceConfigException e) {
        // Re-wrap so the failure carries the token position and line.
        throw new TokenizerException(next, e.getMessage()).setLine(tokenizer.getLine());
    }
    // Falling out of the switch means the token did not match the type.
    throw new TokenizerException(next, "Unhandled value \"%s\" for type %s", next.asString(), descriptor.getType()).setLine(tokenizer.getLine());
}
Also used : LinkedHashSet(java.util.LinkedHashSet) PList(net.morimekta.providence.descriptor.PList) PEnumValue(net.morimekta.providence.PEnumValue) TokenizerException(net.morimekta.providence.serializer.pretty.TokenizerException) PEnumDescriptor(net.morimekta.providence.descriptor.PEnumDescriptor) ProvidenceConfigException(net.morimekta.providence.config.ProvidenceConfigException) LinkedHashMap(java.util.LinkedHashMap) PSet(net.morimekta.providence.descriptor.PSet) PMessageDescriptor(net.morimekta.providence.descriptor.PMessageDescriptor) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) PMap(net.morimekta.providence.descriptor.PMap)

Example 9 with TokenizerException

use of net.morimekta.providence.serializer.pretty.TokenizerException in project providence by morimekta.

From the class ProvidenceConfigUtil, the method consumeValue:

/**
 * Consume and discard one complete value — message, map, list/set, binary
 * blob, reference, or scalar token — without building anything. Used when
 * skipping unknown fields so configs stay forward-compatible.
 *
 * @param context   Config context; reference names encountered while
 *                  skipping are still registered (with a null value).
 * @param tokenizer Tokenizer supplying the value's remaining tokens.
 * @param token     The first (already consumed) token of the value.
 * @throws IOException On tokenizer read errors or invalid syntax.
 */
static void consumeValue(@Nonnull ProvidenceConfigContext context, @Nonnull Tokenizer tokenizer, @Nonnull Token token) throws IOException {
    boolean isMessage = false;
    if (UNDEFINED.equals(token.asString())) {
        // ignore undefined.
        return;
    } else if (token.asString().equals(Token.B64)) {
        tokenizer.expectSymbol("b64 body start", Token.kParamsStart);
        tokenizer.readBinary(Token.kParamsEnd);
    } else if (token.asString().equals(Token.HEX)) {
        tokenizer.expectSymbol("hex body start", Token.kParamsStart);
        tokenizer.readBinary(Token.kParamsEnd);
    } else if (token.isReferenceIdentifier()) {
        if (!tokenizer.peek("message start").isSymbol(Token.kMessageStart)) {
            // just a reference.
            return;
        }
        // reference + message: a '{' after the reference means a message body
        // follows, so keep consuming.
        isMessage = true;
        token = tokenizer.expect("start of message");
    }
    if (token.isSymbol(Token.kMessageStart)) {
        // message or map — both start with '{'; disambiguate below.
        token = tokenizer.expect("map or message first entry");
        if (token.isSymbol(Token.kMessageEnd)) {
            return;
        }
        // Peek at the separator after the first key: ':' style implies a map,
        // while '=', '{' or a reference definition implies a message.
        Token firstSep = tokenizer.peek("First separator");
        if (!isMessage && !firstSep.isSymbol(Token.kFieldValueSep) && !firstSep.isSymbol(Token.kMessageStart) && !firstSep.isSymbol(DEFINE_REFERENCE)) {
            // assume map.
            while (!token.isSymbol(Token.kMessageEnd)) {
                // NOTE(review): this condition rejects only qualified (dotted)
                // reference identifiers as map keys, while plain identifiers
                // and literals pass — confirm that is the intended rule.
                if (!token.isIdentifier() && token.isReferenceIdentifier()) {
                    throw new TokenizerException(token, "Invalid map key: " + token.asString()).setLine(tokenizer.getLine());
                }
                consumeValue(context, tokenizer, token);
                tokenizer.expectSymbol("key value sep.", Token.kKeyValueSep);
                consumeValue(context, tokenizer, tokenizer.expect("map value"));
                // maps do *not* require separator, but allows ',' separator, and separator after last.
                token = nextNotLineSep(tokenizer, "map key, sep or end");
            }
        } else {
            // assume message.
            while (!token.isSymbol(Token.kMessageEnd)) {
                if (!token.isIdentifier()) {
                    throw new TokenizerException(token, "Invalid field name: " + token.asString()).setLine(tokenizer.getLine());
                }
                token = tokenizer.expect("field value sep");
                if (token.isSymbol(DEFINE_REFERENCE)) {
                    // Register the skipped reference name with a null value.
                    token = tokenizer.expectIdentifier("reference name");
                    context.setReference(context.initReference(token, tokenizer), null);
                    token = tokenizer.expect("field value sep");
                }
                if (token.isSymbol(Token.kMessageStart)) {
                    // direct inheritance of message field.
                    consumeValue(context, tokenizer, token);
                } else if (token.isSymbol(Token.kFieldValueSep)) {
                    consumeValue(context, tokenizer, tokenizer.expect("field value"));
                } else {
                    throw new TokenizerException(token, "Unknown field value sep: " + token.asString()).setLine(tokenizer.getLine());
                }
                token = nextNotLineSep(tokenizer, "message field or end");
            }
        }
    } else if (token.isSymbol(Token.kListStart)) {
        token = tokenizer.expect("list value or end");
        while (!token.isSymbol(Token.kListEnd)) {
            consumeValue(context, tokenizer, token);
            // lists and sets require list separator (,), and allows trailing separator.
            if (tokenizer.expectSymbol("list separator or end", Token.kLineSep1, Token.kListEnd) == Token.kListEnd) {
                break;
            }
            token = tokenizer.expect("list value or end");
        }
    }
}
Also used : Token(net.morimekta.providence.serializer.pretty.Token) TokenizerException(net.morimekta.providence.serializer.pretty.TokenizerException)

Example 10 with TokenizerException

use of net.morimekta.providence.serializer.pretty.TokenizerException in project providence by morimekta.

From the class ResourceConfigSupplier, the method loadInternal:

/**
 * Load and deserialize a config message from a classpath resource,
 * choosing the serializer from the file-name suffix.
 *
 * @param resourceName Resource path, e.g. "config/app.providence".
 * @param descriptor   Descriptor of the expected message type.
 * @return The deserialized message.
 * @throws ProvidenceConfigException If the resource has no recognizable
 *         suffix, cannot be found, or fails to deserialize.
 */
private static <Message extends PMessage<Message, Field>, Field extends PField> Message loadInternal(String resourceName, PMessageDescriptor<Message, Field> descriptor) throws ProvidenceConfigException {
    // Strip the directory part first, so a '.' inside a directory name
    // (e.g. "conf.d/app") is never mistaken for the file-type suffix.
    int lastSlash = resourceName.lastIndexOf("/");
    String fileName = resourceName;
    if (lastSlash >= 0) {
        fileName = resourceName.substring(lastSlash + 1);
    }
    int lastDot = fileName.lastIndexOf(".");
    if (lastDot < 1) {
        // No dot, or the name starts with one (hidden file, no real suffix).
        throw new ProvidenceConfigException("No file ending, or no resource file name: " + resourceName);
    }
    // Lowercase with a fixed locale so suffix matching is not affected
    // by the default locale (e.g. Turkish dotless-i).
    String suffix = fileName.substring(lastDot).toLowerCase(Locale.US);
    Serializer serializer;
    switch(suffix) {
        case ".jsn":
        case ".json":
            serializer = new JsonSerializer();
            break;
        case ".cfg":
        case ".cnf":
        case ".config":
        case ".pvd":
        case ".providence":
            serializer = new PrettySerializer().config();
            break;
        // TODO: Add YAML serializer to the file options. Could be a wrapper around SnakeYAML.
        default:
            throw new ProvidenceConfigException(String.format(Locale.US, "Unrecognized resource config type: %s (%s)", suffix, resourceName));
    }
    // Try the system class loader first, then fall back to this class'
    // own loader (covers differing resource-resolution semantics).
    ClassLoader classLoader = ClassLoader.getSystemClassLoader();
    InputStream in = classLoader.getResourceAsStream(resourceName);
    if (in == null) {
        in = ResourceConfigSupplier.class.getResourceAsStream(resourceName);
        if (in == null) {
            throw new ProvidenceConfigException("No such config resource: " + resourceName);
        }
    }
    try {
        try (InputStream bin = new BufferedInputStream(in)) {
            return serializer.deserialize(bin, descriptor);
        } catch (TokenizerException te) {
            throw new ProvidenceConfigException(te);
        } catch (JsonSerializerException se) {
            throw new ProvidenceConfigException(se);
        } catch (IOException e) {
            throw new ProvidenceConfigException(e, "Unknown serializer exception: " + e.getMessage());
        }
    } catch (ProvidenceConfigException pce) {
        // Tag every failure with the bare file name for better messages.
        pce.setFile(fileName);
        throw pce;
    }
}
Also used : JsonSerializerException(net.morimekta.providence.serializer.JsonSerializerException) PrettySerializer(net.morimekta.providence.serializer.PrettySerializer) BufferedInputStream(java.io.BufferedInputStream) InputStream(java.io.InputStream) TokenizerException(net.morimekta.providence.serializer.pretty.TokenizerException) JsonSerializer(net.morimekta.providence.serializer.JsonSerializer) IOException(java.io.IOException) BufferedInputStream(java.io.BufferedInputStream) Serializer(net.morimekta.providence.serializer.Serializer) JsonSerializer(net.morimekta.providence.serializer.JsonSerializer) PrettySerializer(net.morimekta.providence.serializer.PrettySerializer)

Aggregations

TokenizerException (net.morimekta.providence.serializer.pretty.TokenizerException)17 Token (net.morimekta.providence.serializer.pretty.Token)7 PMessage (net.morimekta.providence.PMessage)6 IOException (java.io.IOException)5 ProvidenceConfigException (net.morimekta.providence.config.ProvidenceConfigException)4 PMessageDescriptor (net.morimekta.providence.descriptor.PMessageDescriptor)3 BufferedInputStream (java.io.BufferedInputStream)2 ByteArrayInputStream (java.io.ByteArrayInputStream)2 ByteArrayOutputStream (java.io.ByteArrayOutputStream)2 File (java.io.File)2 FileInputStream (java.io.FileInputStream)2 LinkedHashMap (java.util.LinkedHashMap)2 Map (java.util.Map)2 PEnumValue (net.morimekta.providence.PEnumValue)2 PMessageBuilder (net.morimekta.providence.PMessageBuilder)2 PEnumDescriptor (net.morimekta.providence.descriptor.PEnumDescriptor)2 PMap (net.morimekta.providence.descriptor.PMap)2 Tokenizer (net.morimekta.providence.serializer.pretty.Tokenizer)2 FileNotFoundException (java.io.FileNotFoundException)1 InputStream (java.io.InputStream)1