use of org.antlr.runtime.CommonToken in project hive by apache.
the class WindowingSpec method applyConstantPartition.
private void applyConstantPartition(WindowSpec wdwSpec) {
  PartitionSpec partSpec = wdwSpec.getPartition();
  if (partSpec == null) {
    partSpec = new PartitionSpec();
    PartitionExpression partExpr = new PartitionExpression();
    partExpr.setExpression(new ASTNode(new CommonToken(HiveParser.Number, "0")));
    partSpec.addExpression(partExpr);
    wdwSpec.setPartition(partSpec);
  }
}
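The central pattern here is wrapping a CommonToken in an ASTNode to fabricate a constant literal node when no PARTITION BY is given. Below is a minimal, hypothetical standalone sketch of that construction; it assumes Hive's ASTNode(Token) constructor and the ANTLR 3 runtime are on the classpath, and the class and variable names are illustrative, not from the Hive source.

// Hypothetical sketch: build the constant "0" partition node the way the
// method above does, then inspect it.
import org.antlr.runtime.CommonToken;
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.HiveParser;

public class ConstantPartitionSketch {
  public static void main(String[] args) {
    // A Number token with text "0" becomes the single partition expression,
    // so every input row lands in the same partition.
    ASTNode constant = new ASTNode(new CommonToken(HiveParser.Number, "0"));
    System.out.println(constant.getType() == HiveParser.Number); // true
    System.out.println(constant.getText());                      // 0
  }
}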
use of org.antlr.runtime.CommonToken in project hive by apache.
the class CalcitePlanner method rewriteASTForMultiInsert.
private ASTNode rewriteASTForMultiInsert(ASTNode query, ASTNode nodeOfInterest) {
  // 1. gather references from original query
  // This is a map from aliases to references.
  // We keep all references as we will need to modify them after creating
  // the subquery
  final Multimap<String, Object> aliasNodes = ArrayListMultimap.create();
  // To know if we need to bail out
  final AtomicBoolean notSupported = new AtomicBoolean(false);
  TreeVisitorAction action = new TreeVisitorAction() {
    @Override
    public Object pre(Object t) {
      if (!notSupported.get()) {
        if (ParseDriver.adaptor.getType(t) == HiveParser.TOK_ALLCOLREF) {
          // TODO: this is a limitation of the AST rewriting approach that we will
          // not be able to overcome till proper integration of full multi-insert
          // queries with Calcite is implemented.
          // The current rewriting gathers references from insert clauses and then
          // updates them with the new subquery references. However, if insert
          // clauses use * or tab.*, we cannot resolve the columns that we are
          // referring to. Thus, we just bail out and those queries will not be
          // currently optimized by Calcite.
          // An example of such a query is:
          // FROM T_A a LEFT JOIN T_B b ON a.id = b.id
          // INSERT OVERWRITE TABLE join_result_1
          // SELECT a.*, b.*
          // INSERT OVERWRITE TABLE join_result_3
          // SELECT a.*, b.*;
          notSupported.set(true);
        } else if (ParseDriver.adaptor.getType(t) == HiveParser.DOT) {
          Object c = ParseDriver.adaptor.getChild(t, 0);
          if (c != null && ParseDriver.adaptor.getType(c) == HiveParser.TOK_TABLE_OR_COL) {
            aliasNodes.put(((ASTNode) t).toStringTree(), t);
          }
        } else if (ParseDriver.adaptor.getType(t) == HiveParser.TOK_TABLE_OR_COL) {
          Object p = ParseDriver.adaptor.getParent(t);
          if (p == null || ParseDriver.adaptor.getType(p) != HiveParser.DOT) {
            aliasNodes.put(((ASTNode) t).toStringTree(), t);
          }
        }
      }
      return t;
    }

    @Override
    public Object post(Object t) {
      return t;
    }
  };
  TreeVisitor tv = new TreeVisitor(ParseDriver.adaptor);
  // We iterate through the children: if a child is an INSERT clause, we traverse
  // its subtree to gather the references
  for (int i = 0; i < query.getChildCount(); i++) {
    ASTNode child = (ASTNode) query.getChild(i);
    if (ParseDriver.adaptor.getType(child) != HiveParser.TOK_INSERT) {
      // If it is not an INSERT, we do not need to do anything
      continue;
    }
    tv.visit(child, action);
  }
  if (notSupported.get()) {
    // Bail out
    return null;
  }
  // 2. rewrite into query
  //      TOK_QUERY
  //         TOK_FROM
  //            join
  //         TOK_INSERT
  //            TOK_DESTINATION
  //               TOK_DIR
  //                  TOK_TMP_FILE
  //            TOK_SELECT
  //               refs
  ASTNode from = new ASTNode(new CommonToken(HiveParser.TOK_FROM, "TOK_FROM"));
  from.addChild((ASTNode) ParseDriver.adaptor.dupTree(nodeOfInterest));
  ASTNode destination = new ASTNode(new CommonToken(HiveParser.TOK_DESTINATION, "TOK_DESTINATION"));
  ASTNode dir = new ASTNode(new CommonToken(HiveParser.TOK_DIR, "TOK_DIR"));
  ASTNode tmpFile = new ASTNode(new CommonToken(HiveParser.TOK_TMP_FILE, "TOK_TMP_FILE"));
  dir.addChild(tmpFile);
  destination.addChild(dir);
  ASTNode select = new ASTNode(new CommonToken(HiveParser.TOK_SELECT, "TOK_SELECT"));
  int num = 0;
  for (Collection<Object> selectIdentifier : aliasNodes.asMap().values()) {
    Iterator<Object> it = selectIdentifier.iterator();
    ASTNode node = (ASTNode) it.next();
    // Add select expression
    ASTNode selectExpr = new ASTNode(new CommonToken(HiveParser.TOK_SELEXPR, "TOK_SELEXPR"));
    // Identifier
    selectExpr.addChild((ASTNode) ParseDriver.adaptor.dupTree(node));
    String colAlias = "col" + num;
    // Alias
    selectExpr.addChild(new ASTNode(new CommonToken(HiveParser.Identifier, colAlias)));
    select.addChild(selectExpr);
    // Rewrite all INSERT references (all the node values for this key)
    ASTNode colExpr = new ASTNode(new CommonToken(HiveParser.TOK_TABLE_OR_COL, "TOK_TABLE_OR_COL"));
    colExpr.addChild(new ASTNode(new CommonToken(HiveParser.Identifier, colAlias)));
    replaceASTChild(node, colExpr);
    while (it.hasNext()) {
      // Loop to rewrite rest of INSERT references
      node = (ASTNode) it.next();
      colExpr = new ASTNode(new CommonToken(HiveParser.TOK_TABLE_OR_COL, "TOK_TABLE_OR_COL"));
      colExpr.addChild(new ASTNode(new CommonToken(HiveParser.Identifier, colAlias)));
      replaceASTChild(node, colExpr);
    }
    num++;
  }
  ASTNode insert = new ASTNode(new CommonToken(HiveParser.TOK_INSERT, "TOK_INSERT"));
  insert.addChild(destination);
  insert.addChild(select);
  ASTNode newQuery = new ASTNode(new CommonToken(HiveParser.TOK_QUERY, "TOK_QUERY"));
  newQuery.addChild(from);
  newQuery.addChild(insert);
  // 3. create subquery
  ASTNode subq = new ASTNode(new CommonToken(HiveParser.TOK_SUBQUERY, "TOK_SUBQUERY"));
  subq.addChild(newQuery);
  subq.addChild(new ASTNode(new CommonToken(HiveParser.Identifier, "subq")));
  replaceASTChild(nodeOfInterest, subq);
  // 4. return subquery
  return subq;
}
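The loop above builds one TOK_SELEXPR per distinct reference and pairs it with a generated alias col0, col1, and so on, which the INSERT clauses are then rewritten to use. Below is a hedged, standalone sketch of that per-column shape; the column name "id" and the printed tree string are illustrative assumptions, not taken from the Hive source, and only the node construction is shown, not the reference rewrite.

// Hypothetical sketch of the select-expression-plus-alias node built inside
// the loop above.
import org.antlr.runtime.CommonToken;
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.HiveParser;

public class SelectExprSketch {
  public static void main(String[] args) {
    ASTNode selectExpr = new ASTNode(new CommonToken(HiveParser.TOK_SELEXPR, "TOK_SELEXPR"));
    // The gathered reference, e.g. a column "id" (illustrative name).
    ASTNode colRef = new ASTNode(new CommonToken(HiveParser.TOK_TABLE_OR_COL, "TOK_TABLE_OR_COL"));
    colRef.addChild(new ASTNode(new CommonToken(HiveParser.Identifier, "id")));
    selectExpr.addChild(colRef);
    // The generated alias that the INSERT clauses are rewritten to reference.
    selectExpr.addChild(new ASTNode(new CommonToken(HiveParser.Identifier, "col0")));
    // Expected to print something like: (TOK_SELEXPR (TOK_TABLE_OR_COL id) col0)
    System.out.println(selectExpr.toStringTree());
  }
}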
use of org.antlr.runtime.CommonToken in project hive by apache.
the class PTFTranslator method applyConstantPartition.
/*
 * If this is the first PTF in the chain and there is no partition specified,
 * then assume the user wants to include the entire input in 1 partition.
 */
private static void applyConstantPartition(PartitionedTableFunctionSpec spec) {
  if (spec.getPartition() != null) {
    return;
  }
  PTFInputSpec iSpec = spec.getInput();
  if (iSpec instanceof PTFInputSpec) {
    PartitionSpec partSpec = new PartitionSpec();
    PartitionExpression partExpr = new PartitionExpression();
    partExpr.setExpression(new ASTNode(new CommonToken(HiveParser.Number, "0")));
    partSpec.addExpression(partExpr);
    spec.setPartition(partSpec);
  }
}
use of org.antlr.runtime.CommonToken in project coffeescript-eclipse by adamschmideg.
the class FirstCommentIncludingXtextTokenStream method toString.
@Override
public String toString() {
  if (p == -1) {
    fillBuffer();
  }
  if (tokenSource instanceof Lexer) {
    if (tokens.isEmpty())
      return "";
    Token lastToken = (Token) tokens.get(tokens.size() - 1);
    if (lastToken instanceof CommonToken) {
      CommonToken commonStop = (CommonToken) lastToken;
      CharStream charStream = ((Lexer) tokenSource).getCharStream();
      String result = charStream.substring(0, commonStop.getStopIndex());
      return result;
    }
  }
  return super.toString();
}
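The interesting part of this override is combining CommonToken.getStopIndex() with CharStream.substring to recover the raw input text, including the leading comment, up to the last token. A small, hypothetical sketch using only the ANTLR 3 runtime is below; the input string, token type, and offsets are made up, and no real lexer is involved.

// Hypothetical sketch of the stop-index/substring interplay used above;
// the token is constructed by hand instead of being produced by a lexer.
import org.antlr.runtime.ANTLRStringStream;
import org.antlr.runtime.CharStream;
import org.antlr.runtime.CommonToken;
import org.antlr.runtime.Token;

public class StopIndexSketch {
  public static void main(String[] args) {
    CharStream chars = new ANTLRStringStream("# leading comment\nx = 1");
    // Pretend the last token covers the trailing "1" at offsets 22..22;
    // the token type (1) is arbitrary for this illustration.
    CommonToken lastToken = new CommonToken(chars, 1, Token.DEFAULT_CHANNEL, 22, 22);
    // Everything from the start of the input through the last token's stop
    // index, which is how the stream keeps the first comment in its toString().
    System.out.println(chars.substring(0, lastToken.getStopIndex()));
  }
}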
use of org.antlr.runtime.CommonToken in project coffeescript-eclipse by adamschmideg.
the class Lexer method nextToken.
/**
 * Get next token. If an exception is thrown by the underlying lexer,
 * keep calling it, and append an invalid token at the very end.
 */
@Override
public Token nextToken() {
  Token token = null;
  CoffeeSymbol symbol = null;
  try {
    symbol = aptanaScanner.nextToken();
    if (symbol == null || symbol.getId() < 0) {
      logger.warn("Unexpected symbol " + symbol, new Exception());
      token = CommonToken.INVALID_TOKEN;
    } else if (symbol.getId() == Terminals.EOF) {
      token = CommonToken.EOF_TOKEN;
    } else {
      token = new BeaverToken(symbol);
      if (((CommonToken) token).getStopIndex() >= input.size()) {
        assert false : "Token stop index overflows " + symbol + " in:\n<<<" + content + ">>>";
      }
    }
  } catch (Exception e) {
    // Xtext wants the token to be a CommonToken with INVALID_TOKEN_TYPE on HIDDEN_CHANNEL
    String text = e.getLocalizedMessage();
    if (text == null)
      text = "simply " + e.getClass().getSimpleName();
    CommonToken ct = new CommonToken(Token.INVALID_TOKEN_TYPE, text);
    ct.setChannel(Token.HIDDEN_CHANNEL);
    if (prevToken != null) {
      int start = prevToken.getStopIndex() + 1;
      // TODO: get more informative errors with length of token
      int stop = start + 1;
      ct.setStartIndex(start);
      ct.setStopIndex(stop);
    }
    token = ct;
  }
  token.setTokenIndex(tokenIndex);
  if (symbol != null && symbol.hidden)
    token.setChannel(Token.HIDDEN_CHANNEL);
  tokenIndex++;
  if (token instanceof CommonToken) {
    if (prevToken != null && token.getType() > 0) {
      if (((CommonToken) token).getStartIndex() < prevToken.getStartIndex()) {
        assert false : "Position does not follow, prevToken: " + prevToken + ", token: " + token;
      }
    }
  }
  logger.debug("token: " + token);
  return token;
}
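The catch block above shows the error-recovery convention this lexer follows: a CommonToken of INVALID_TOKEN_TYPE placed on the hidden channel, positioned just after the previous token so the parser skips it while positions stay monotonic. A minimal, hypothetical sketch of building such a token with the plain ANTLR 3 runtime follows; the message and offsets are invented for illustration.

// Hypothetical sketch of the hidden error token built in the catch block above.
import org.antlr.runtime.CommonToken;
import org.antlr.runtime.Token;

public class ErrorTokenSketch {
  public static void main(String[] args) {
    CommonToken ct = new CommonToken(Token.INVALID_TOKEN_TYPE, "unterminated string");
    ct.setChannel(Token.HIDDEN_CHANNEL); // hide it from the parser
    ct.setStartIndex(42);                // imagined: one past the previous token's stop index
    ct.setStopIndex(43);
    System.out.println(ct);              // prints the token's debug representation
  }
}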