Use of org.antlr.v4.runtime.misc.Interval in project antlr4 by antlr.
The class ATNSerializer, method serialize.
/** Serialize state descriptors, edge descriptors, and decision→state map
* into list of ints:
*
* grammar-type, (ANTLRParser.LEXER, ...)
* max token type,
* num states,
* state-0-type ruleIndex, state-1-type ruleIndex, ... state-i-type ruleIndex optional-arg ...
* num rules,
* rule-1-start-state rule-1-args, rule-2-start-state rule-2-args, ...
* (in a lexer, each rule's start state is followed by its token type; parser rules serialize only the start state)
* num modes,
* mode-0-start-state, mode-1-start-state, ... (parser has 0 modes)
* num unicode-bmp-sets
* bmp-set-0-interval-count intervals, bmp-set-1-interval-count intervals, ...
* num unicode-smp-sets
* smp-set-0-interval-count intervals, smp-set-1-interval-count intervals, ...
* num total edges,
* src, trg, edge-type, edge arg1, edge arg2, edge arg3 (all three args always present; unused args are 0), ...
* num decisions,
* decision-0-start-state, decision-1-start-state, ...
*
* Convenient to pack into unsigned shorts so the result can be embedded as a Java string.
*/
public IntegerList serialize() {
IntegerList data = new IntegerList();
data.add(ATNDeserializer.SERIALIZED_VERSION);
serializeUUID(data, ATNDeserializer.SERIALIZED_UUID);
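// the UUID lets the deserializer verify that this serialized format matches the feature set it supports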
// convert grammar type to ATN const to avoid dependence on ANTLRParser
data.add(atn.grammarType.ordinal());
data.add(atn.maxTokenType);
int nedges = 0;
// Note that we use a LinkedHashMap as a set to
// maintain insertion order while deduplicating
// entries with the same key.
Map<IntervalSet, Boolean> sets = new LinkedHashMap<>();
// dump states, count edges and collect sets while doing so
IntegerList nonGreedyStates = new IntegerList();
IntegerList precedenceStates = new IntegerList();
data.add(atn.states.size());
for (ATNState s : atn.states) {
if (s == null) {
// might be optimized away
data.add(ATNState.INVALID_TYPE);
continue;
}
int stateType = s.getStateType();
if (s instanceof DecisionState && ((DecisionState) s).nonGreedy) {
nonGreedyStates.add(s.stateNumber);
}
if (s instanceof RuleStartState && ((RuleStartState) s).isLeftRecursiveRule) {
precedenceStates.add(s.stateNumber);
}
data.add(stateType);
if (s.ruleIndex == -1) {
data.add(Character.MAX_VALUE);
} else {
data.add(s.ruleIndex);
}
if (s.getStateType() == ATNState.LOOP_END) {
data.add(((LoopEndState) s).loopBackState.stateNumber);
} else if (s instanceof BlockStartState) {
data.add(((BlockStartState) s).endState.stateNumber);
}
if (s.getStateType() != ATNState.RULE_STOP) {
// edges out of rule stop states can be trivially derived by the deserializer,
// so only count edges for the remaining state types
nedges += s.getNumberOfTransitions();
}
for (int i = 0; i < s.getNumberOfTransitions(); i++) {
Transition t = s.transition(i);
int edgeType = Transition.serializationTypes.get(t.getClass());
if (edgeType == Transition.SET || edgeType == Transition.NOT_SET) {
SetTransition st = (SetTransition) t;
sets.put(st.set, true);
}
}
}
// non-greedy states
data.add(nonGreedyStates.size());
for (int i = 0; i < nonGreedyStates.size(); i++) {
data.add(nonGreedyStates.get(i));
}
// precedence states
data.add(precedenceStates.size());
for (int i = 0; i < precedenceStates.size(); i++) {
data.add(precedenceStates.get(i));
}
int nrules = atn.ruleToStartState.length;
data.add(nrules);
for (int r = 0; r < nrules; r++) {
ATNState ruleStartState = atn.ruleToStartState[r];
data.add(ruleStartState.stateNumber);
if (atn.grammarType == ATNType.LEXER) {
if (atn.ruleToTokenType[r] == Token.EOF) {
data.add(Character.MAX_VALUE);
} else {
data.add(atn.ruleToTokenType[r]);
}
}
}
int nmodes = atn.modeToStartState.size();
data.add(nmodes);
if (nmodes > 0) {
for (ATNState modeStartState : atn.modeToStartState) {
data.add(modeStartState.stateNumber);
}
}
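// Partition the collected interval sets by code point width: sets whose max element
// fits in the Basic Multilingual Plane use 16-bit serialization; the rest are
// supplementary-plane sets and are serialized as 32-bit ints.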
List<IntervalSet> bmpSets = new ArrayList<>();
List<IntervalSet> smpSets = new ArrayList<>();
for (IntervalSet set : sets.keySet()) {
if (set.getMaxElement() <= Character.MAX_VALUE) {
bmpSets.add(set);
} else {
smpSets.add(set);
}
}
serializeSets(data, bmpSets, new CodePointSerializer() {
@Override
public void serializeCodePoint(IntegerList data, int cp) {
data.add(cp);
}
});
serializeSets(data, smpSets, new CodePointSerializer() {
@Override
public void serializeCodePoint(IntegerList data, int cp) {
serializeInt(data, cp);
}
});
Map<IntervalSet, Integer> setIndices = new HashMap<>();
int setIndex = 0;
for (IntervalSet bmpSet : bmpSets) {
setIndices.put(bmpSet, setIndex++);
}
for (IntervalSet smpSet : smpSets) {
setIndices.put(smpSet, setIndex++);
}
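// Each remaining edge is written as a fixed six-value tuple:
// src, trg, edge-type, arg1, arg2, arg3 (unused args stay 0).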
data.add(nedges);
for (ATNState s : atn.states) {
if (s == null) {
// might be optimized away
continue;
}
if (s.getStateType() == ATNState.RULE_STOP) {
continue;
}
for (int i = 0; i < s.getNumberOfTransitions(); i++) {
Transition t = s.transition(i);
if (atn.states.get(t.target.stateNumber) == null) {
throw new IllegalStateException("Cannot serialize a transition to a removed state.");
}
int src = s.stateNumber;
int trg = t.target.stateNumber;
int edgeType = Transition.serializationTypes.get(t.getClass());
int arg1 = 0;
int arg2 = 0;
int arg3 = 0;
switch(edgeType) {
case Transition.RULE:
trg = ((RuleTransition) t).followState.stateNumber;
arg1 = ((RuleTransition) t).target.stateNumber;
arg2 = ((RuleTransition) t).ruleIndex;
arg3 = ((RuleTransition) t).precedence;
break;
case Transition.PRECEDENCE:
PrecedencePredicateTransition ppt = (PrecedencePredicateTransition) t;
arg1 = ppt.precedence;
break;
case Transition.PREDICATE:
PredicateTransition pt = (PredicateTransition) t;
arg1 = pt.ruleIndex;
arg2 = pt.predIndex;
arg3 = pt.isCtxDependent ? 1 : 0;
break;
case Transition.RANGE:
arg1 = ((RangeTransition) t).from;
arg2 = ((RangeTransition) t).to;
if (arg1 == Token.EOF) {
arg1 = 0;
arg3 = 1;
}
break;
case Transition.ATOM:
arg1 = ((AtomTransition) t).label;
if (arg1 == Token.EOF) {
arg1 = 0;
arg3 = 1;
}
break;
case Transition.ACTION:
ActionTransition at = (ActionTransition) t;
arg1 = at.ruleIndex;
arg2 = at.actionIndex;
if (arg2 == -1) {
arg2 = 0xFFFF;
}
arg3 = at.isCtxDependent ? 1 : 0;
break;
case Transition.SET:
arg1 = setIndices.get(((SetTransition) t).set);
break;
case Transition.NOT_SET:
arg1 = setIndices.get(((SetTransition) t).set);
break;
case Transition.WILDCARD:
break;
}
data.add(src);
data.add(trg);
data.add(edgeType);
data.add(arg1);
data.add(arg2);
data.add(arg3);
}
}
int ndecisions = atn.decisionToState.size();
data.add(ndecisions);
for (DecisionState decStartState : atn.decisionToState) {
data.add(decStartState.stateNumber);
}
// lexer actions
if (atn.grammarType == ATNType.LEXER) {
data.add(atn.lexerActions.length);
for (LexerAction action : atn.lexerActions) {
data.add(action.getActionType().ordinal());
switch(action.getActionType()) {
case CHANNEL:
int channel = ((LexerChannelAction) action).getChannel();
data.add(channel != -1 ? channel : 0xFFFF);
data.add(0);
break;
case CUSTOM:
int ruleIndex = ((LexerCustomAction) action).getRuleIndex();
int actionIndex = ((LexerCustomAction) action).getActionIndex();
data.add(ruleIndex != -1 ? ruleIndex : 0xFFFF);
data.add(actionIndex != -1 ? actionIndex : 0xFFFF);
break;
case MODE:
int mode = ((LexerModeAction) action).getMode();
data.add(mode != -1 ? mode : 0xFFFF);
data.add(0);
break;
case MORE:
data.add(0);
data.add(0);
break;
case POP_MODE:
data.add(0);
data.add(0);
break;
case PUSH_MODE:
mode = ((LexerPushModeAction) action).getMode();
data.add(mode != -1 ? mode : 0xFFFF);
data.add(0);
break;
case SKIP:
data.add(0);
data.add(0);
break;
case TYPE:
int type = ((LexerTypeAction) action).getType();
data.add(type != -1 ? type : 0xFFFF);
data.add(0);
break;
default:
String message = String.format(Locale.getDefault(), "The specified lexer action type %s is not valid.", action.getActionType());
throw new IllegalArgumentException(message);
}
}
}
// don't adjust the first value since that's the version number
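// shift each remaining value by +2 (mod 0x10000) so it fits in an unsigned 16-bit
// char; the deserializer is expected to apply the inverse shift when reading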
for (int i = 1; i < data.size(); i++) {
if (data.get(i) < Character.MIN_VALUE || data.get(i) > Character.MAX_VALUE) {
throw new UnsupportedOperationException("Serialized ATN data element " + data.get(i) + " element " + i + " out of range " + (int) Character.MIN_VALUE + ".." + (int) Character.MAX_VALUE);
}
int value = (data.get(i) + 2) & 0xFFFF;
data.set(i, value);
}
return data;
}
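For orientation, here is a small round-trip sketch that is not part of the project sources: it shows one way the IntegerList produced by serialize() can be packed into char data and handed back to the deserializer. It assumes an existing ATN taken from a generated lexer (the name SomeLexer is illustrative) and the helpers Utils.toCharArray and ATNDeserializer.deserialize as provided by this version of the runtime.

import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.ATNDeserializer;
import org.antlr.v4.runtime.atn.ATNSerializer;
import org.antlr.v4.runtime.misc.IntegerList;
import org.antlr.v4.runtime.misc.Utils;

public class AtnRoundTrip {
    public static void main(String[] args) {
        // SomeLexer is a placeholder for any generated lexer; every Recognizer exposes its ATN
        ATN atn = new SomeLexer(null).getATN();
        IntegerList data = new ATNSerializer(atn).serialize();        // the method shown above
        char[] chars = Utils.toCharArray(data);                        // pack the shifted 16-bit values into chars
        ATN restored = new ATNDeserializer().deserialize(chars);       // undo the +2 shift and rebuild the ATN
        System.out.println(atn.states.size() + " states serialized, "
                + restored.states.size() + " states restored");
    }
}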
Use of org.antlr.v4.runtime.misc.Interval in project antlr4 by antlr.
The class ParserATNSimulator, method reportAmbiguity.
/** If context-sensitive parsing, we know it's an ambiguity, not a conflict. */
protected void reportAmbiguity(DFA dfa, // the DFA state from execATN() that had SLL conflicts
DFAState D, int startIndex, int stopIndex, boolean exact, BitSet ambigAlts, // configs that LL not SLL considered conflicting
ATNConfigSet configs) {
if (debug || retry_debug) {
Interval interval = Interval.of(startIndex, stopIndex);
System.out.println("reportAmbiguity " + ambigAlts + ":" + configs + ", input=" + parser.getTokenStream().getText(interval));
}
if (parser != null)
parser.getErrorListenerDispatch().reportAmbiguity(parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs);
}
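As a usage note that is not part of the project sources: reportAmbiguity ultimately reaches whatever ANTLRErrorListener instances are registered on the parser, so a typical way to surface these reports is to install the stock DiagnosticErrorListener and switch prediction to exact ambiguity detection. A minimal sketch, assuming generated classes named DemoLexer/DemoParser with an entry rule prog:

import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.atn.PredictionMode;

public class AmbiguityReportDemo {
    public static void main(String[] args) {
        // DemoLexer, DemoParser and prog are illustrative names for generated artifacts
        DemoLexer lexer = new DemoLexer(CharStreams.fromString("input to test"));
        DemoParser parser = new DemoParser(new CommonTokenStream(lexer));
        parser.removeErrorListeners();
        parser.addErrorListener(new DiagnosticErrorListener());  // receives reportAmbiguity callbacks
        parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
        parser.prog();  // ambiguities detected during prediction are reported to the listener
    }
}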
Use of org.antlr.v4.runtime.misc.Interval in project antlr4 by antlr.
The class ParserATNSimulator, method reportAttemptingFullContext.
protected void reportAttemptingFullContext(DFA dfa, BitSet conflictingAlts, ATNConfigSet configs, int startIndex, int stopIndex) {
if (debug || retry_debug) {
Interval interval = Interval.of(startIndex, stopIndex);
System.out.println("reportAttemptingFullContext decision=" + dfa.decision + ":" + configs + ", input=" + parser.getTokenStream().getText(interval));
}
if (parser != null)
parser.getErrorListenerDispatch().reportAttemptingFullContext(parser, dfa, startIndex, stopIndex, conflictingAlts, configs);
}
Use of org.antlr.v4.runtime.misc.Interval in project antlr4 by antlr.
The class Grammar, method getStateToGrammarRegionMap.
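/** Build a map from each ATN state number to the token-index region (an Interval)
 *  of the grammar AST node that produced that state. */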
public static Map<Integer, Interval> getStateToGrammarRegionMap(GrammarRootAST ast, IntervalSet grammarTokenTypes) {
Map<Integer, Interval> stateToGrammarRegionMap = new HashMap<Integer, Interval>();
if (ast == null)
return stateToGrammarRegionMap;
List<GrammarAST> nodes = ast.getNodesWithType(grammarTokenTypes);
for (GrammarAST n : nodes) {
if (n.atnState != null) {
Interval tokenRegion = Interval.of(n.getTokenStartIndex(), n.getTokenStopIndex());
org.antlr.runtime.tree.Tree ruleNode = null;
// RULEs, BLOCKs of transformed recursive rules point to original token interval
switch(n.getType()) {
case ANTLRParser.RULE:
ruleNode = n;
break;
case ANTLRParser.BLOCK:
case ANTLRParser.CLOSURE:
ruleNode = n.getAncestor(ANTLRParser.RULE);
break;
}
if (ruleNode instanceof RuleAST) {
String ruleName = ((RuleAST) ruleNode).getRuleName();
Rule r = ast.g.getRule(ruleName);
if (r instanceof LeftRecursiveRule) {
RuleAST originalAST = ((LeftRecursiveRule) r).getOriginalAST();
tokenRegion = Interval.of(originalAST.getTokenStartIndex(), originalAST.getTokenStopIndex());
}
}
stateToGrammarRegionMap.put(n.atnState.stateNumber, tokenRegion);
}
}
return stateToGrammarRegionMap;
}
Use of org.antlr.v4.runtime.misc.Interval in project antlr4 by antlr.
The class GrammarParserInterpreter, method getLookaheadParseTrees.
/** Return a list of parse trees, one for each alternative in a decision
* given the same input.
*
* Very similar to {@link #getAllPossibleParseTrees} except
* that it re-parses the input for every alternative in a decision,
* not just the ambiguous ones (there is no alts parameter here).
* This method also tries to reduce the size of the parse trees
* by stripping away children of the tree that are completely out of range
* of startIndex..stopIndex. Also, because errors are expected, we
* use a specialized error handler that more or less bails out
* but that also consumes the first erroneous token at least. This
* ensures that an error node will be in the parse tree for display.
*
* NOTES:
* We must parse the entire input now with decision overrides; we cannot
* parse a subset because it could be that a decision above our decision
* of interest needs to read way past lookaheadInfo.stopIndex. It seems
* like there is no escaping the use of a full and complete token stream
* if we are resetting to token index 0 and re-parsing from the start
* symbol. It's not easy to restart parsing somewhere in the middle like
* a continuation because our call stack does not match the tree stack,
* because of left recursive rule rewriting. grrrr!
*
* @since 4.5.1
*/
public static List<ParserRuleContext> getLookaheadParseTrees(Grammar g, ParserInterpreter originalParser, TokenStream tokens, int startRuleIndex, int decision, int startIndex, int stopIndex) {
List<ParserRuleContext> trees = new ArrayList<ParserRuleContext>();
// Create a new parser interpreter to parse the ambiguous subphrase
ParserInterpreter parser = deriveTempParserInterpreter(g, originalParser, tokens);
DecisionState decisionState = originalParser.getATN().decisionToState.get(decision);
for (int alt = 1; alt <= decisionState.getTransitions().length; alt++) {
// re-parse entire input for all ambiguous alternatives
// (don't have to do first as it's been parsed, but do again for simplicity
// using this temp parser.)
GrammarParserInterpreter.BailButConsumeErrorStrategy errorHandler = new GrammarParserInterpreter.BailButConsumeErrorStrategy();
parser.setErrorHandler(errorHandler);
parser.reset();
parser.addDecisionOverride(decision, startIndex, alt);
ParserRuleContext tt = parser.parse(startRuleIndex);
int stopTreeAt = stopIndex;
if (errorHandler.firstErrorTokenIndex >= 0) {
// cut off rest at first error
stopTreeAt = errorHandler.firstErrorTokenIndex;
}
Interval overallRange = tt.getSourceInterval();
if (stopTreeAt > overallRange.b) {
// If we try to look beyond range of tree, stopTreeAt must be EOF
// for which there is no EOF ref in grammar. That means tree
// will not have node for stopTreeAt; limit to overallRange.b
stopTreeAt = overallRange.b;
}
ParserRuleContext subtree = Trees.getRootOfSubtreeEnclosingRegion(tt, startIndex, stopTreeAt);
// Use higher of overridden decision tree or tree enclosing all tokens
if (Trees.isAncestorOf(parser.getOverrideDecisionRoot(), subtree)) {
subtree = parser.getOverrideDecisionRoot();
}
Trees.stripChildrenOutOfRange(subtree, parser.getOverrideDecisionRoot(), startIndex, stopTreeAt);
trees.add(subtree);
}
return trees;
}
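To make the call sequence concrete, here is a hedged sketch that is not part of the project sources. The grammar text, input, rule name, decision number, and token range are illustrative assumptions; it also assumes that Grammar can be built from grammar text and that createLexerInterpreter/createParserInterpreter behave as in the tool's interpreter support.

import java.util.List;
import org.antlr.v4.runtime.*;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.GrammarParserInterpreter;

public class LookaheadTreesDemo {
    public static void main(String[] args) throws Exception {
        // A tiny grammar with one decision in rule s (alternatives A B | A C).
        Grammar g = new Grammar(
            "grammar Demo;\n" +
            "s : A B | A C ;\n" +
            "A : 'a' ; B : 'b' ; C : 'c' ;\n" +
            "WS : [ \\t\\r\\n]+ -> skip ;\n");
        CharStream input = CharStreams.fromString("ab");
        LexerInterpreter lexer = g.createLexerInterpreter(input);
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        ParserInterpreter parser = g.createParserInterpreter(tokens);
        int startRule = g.getRule("s").index;
        parser.parse(startRule);  // initial parse so the token stream is filled

        // Assume decision 0 (the s alternation) over tokens 0..1; one tree per alternative.
        List<ParserRuleContext> trees = GrammarParserInterpreter.getLookaheadParseTrees(
                g, parser, tokens, startRule, 0, 0, 1);
        for (ParserRuleContext t : trees) {
            System.out.println(t.toStringTree(parser));
        }
    }
}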