Use of java.io.StreamTokenizer in the project spanner-jdbc (by olavloite):
class DDLStatement, method getTokens.
/**
 * Splits the leading identifier tokens off a DDL statement.
 *
 * <p>Reads words (including underscores and embedded quote characters) and
 * backtick-quoted identifiers from the start of {@code sql}, stopping at the
 * first other token, at end of input, or once {@code maxTokens} tokens have
 * been collected.
 *
 * @param sql the DDL statement to tokenize
 * @param maxTokens upper bound on the number of tokens to return
 * @return the leading tokens of the statement, at most {@code maxTokens}
 * @throws SQLException if the statement cannot be tokenized
 */
private static List<String> getTokens(String sql, int maxTokens) throws SQLException {
    List<String> tokens = new ArrayList<>(maxTokens);
    StreamTokenizer parser = new StreamTokenizer(new StringReader(sql));
    parser.eolIsSignificant(false);
    // Underscores and double/single quotes are part of identifiers here;
    // backticks delimit quoted identifiers.
    parser.wordChars('_', '_');
    parser.wordChars('"', '"');
    parser.wordChars('\'', '\'');
    parser.quoteChar('`');
    try {
        while (tokens.size() < maxTokens) {
            int type = parser.nextToken();
            // Stop at end of input or at the first non-word, non-backtick token.
            if (type == StreamTokenizer.TT_EOF
                || (type != StreamTokenizer.TT_WORD && type != '`')) {
                break;
            }
            tokens.add(parser.sval);
        }
    } catch (IOException e) {
        throw new CloudSpannerSQLException(
            "Could not parse DDL statement '" + sql + "'. Error: " + e.getMessage(),
            Code.INVALID_ARGUMENT, e);
    }
    return tokens;
}
Use of java.io.StreamTokenizer in the project knime-core (by knime):
class Expression, method compile.
/**
 * Tries to compile the given expression as entered in the dialog with the
 * current spec.
 *
 * <p>Tokenizes the raw expression, rewriting column references
 * ({@code $name$}), special identifiers ({@code $$ROWINDEX$$} etc.) and flow
 * variables ({@code $${Tname}$$}) into generated java field names, and
 * collects the mapping from input fields to expression fields.
 *
 * @param settings Contains node model settings, e.g. expression
 * @param spec the spec
 * @return the java expression
 * @throws CompilationFailedException if that fails
 * @throws InvalidSettingsException if settings are missing
 */
public static Expression compile(final JavaScriptingSettings settings, final DataTableSpec spec) throws CompilationFailedException, InvalidSettingsException {
    String expression = settings.getExpression();
    int ver = settings.getExpressionVersion();
    // Maps each referenced input (column, flow variable, table constant) to
    // the generated field that will carry its value in the compiled class.
    Map<InputField, ExpressionField> nameValueMap = new HashMap<InputField, ExpressionField>();
    // The expression text with all $...$ references replaced by field names.
    StringBuffer correctedExp = new StringBuffer();
    StreamTokenizer t = new StreamTokenizer(new StringReader(expression));
    t.resetSyntax();
    // Nearly every character is part of a word; structure comes from the
    // quote characters and comment handling configured below.
    t.wordChars(0, 0xFF);
    // '/' must be ordinary for the slash-slash / slash-star comment parsing.
    t.ordinaryChar('/');
    t.eolIsSignificant(false);
    t.slashSlashComments(true);
    t.slashStarComments(true);
    // String and char literals are kept verbatim; '$' quoting isolates the
    // $column$ / $$special$$ references from surrounding code.
    t.quoteChar('\'');
    t.quoteChar('"');
    t.quoteChar('$');
    int tokType;
    // Counter used to generate unique field names for flow variables.
    int variableIndex = 0;
    // True while scanning between a "$$" pair, i.e. the next word token is a
    // special identifier (ROWINDEX, ROWID, ROWCOUNT, or a {Tname} variable).
    boolean isNextTokenSpecial = false;
    try {
        while ((tokType = t.nextToken()) != StreamTokenizer.TT_EOF) {
            final String expFieldName;
            final Class<?> expFieldClass;
            final FieldType inputFieldType;
            final String inputFieldName;
            switch(tokType) {
            case StreamTokenizer.TT_WORD:
                String s = t.sval;
                if (isNextTokenSpecial) {
                    // Row-number/row-key names differ between expression
                    // versions; both spellings map to the canonical constant.
                    if (ROWNUMBER.equals(s) && ver == VERSION_1X || ROWINDEX.equals(s) && ver != VERSION_1X) {
                        expFieldName = ROWINDEX;
                        expFieldClass = Integer.class;
                        inputFieldName = ROWINDEX;
                        inputFieldType = FieldType.TableConstant;
                    } else if (ROWKEY.equals(s) && ver == VERSION_1X || ROWID.equals(s) && ver != VERSION_1X) {
                        expFieldName = ROWID;
                        expFieldClass = String.class;
                        inputFieldName = ROWID;
                        inputFieldType = FieldType.TableConstant;
                    } else if (ROWCOUNT.equals(s)) {
                        expFieldName = ROWCOUNT;
                        expFieldClass = Integer.class;
                        inputFieldName = ROWCOUNT;
                        inputFieldType = FieldType.TableConstant;
                    } else if (s.startsWith("{") && s.endsWith("}")) {
                        // Flow variable reference of the form {Tname}, where
                        // T is a one-letter type code (I, D or S).
                        String var = s.substring(1, s.length() - 1);
                        if (var.length() == 0) {
                            throw new InvalidSettingsException("Empty variable string at line " + t.lineno());
                        }
                        switch(var.charAt(0)) {
                        case 'I':
                            expFieldClass = Integer.class;
                            break;
                        case 'D':
                            expFieldClass = Double.class;
                            break;
                        case 'S':
                            expFieldClass = String.class;
                            break;
                        default:
                            throw new InvalidSettingsException("Invalid type identifier for variable " + "in line " + t.lineno() + ": " + var.charAt(0));
                        }
                        var = var.substring(1);
                        if (var.length() == 0) {
                            throw new InvalidSettingsException("Empty variable identifier in line " + t.lineno());
                        }
                        inputFieldName = var;
                        inputFieldType = FieldType.Variable;
                        // bug fix 2128 (handle multiple occurrences of var):
                        // reuse the field generated for the first occurrence
                        // instead of minting a new one.
                        InputField tempField = new InputField(inputFieldName, inputFieldType);
                        ExpressionField oldExpressionField = nameValueMap.get(tempField);
                        if (oldExpressionField != null) {
                            expFieldName = oldExpressionField.getExpressionFieldName();
                        } else {
                            expFieldName = "variable_" + (variableIndex++);
                        }
                    } else {
                        throw new InvalidSettingsException("Invalid special identifier: " + s + " (at line " + t.lineno() + ")");
                    }
                    InputField inputField = new InputField(inputFieldName, inputFieldType);
                    ExpressionField expField = new ExpressionField(expFieldName, expFieldClass);
                    nameValueMap.put(inputField, expField);
                    correctedExp.append(getJavaFieldName(expFieldName));
                } else {
                    // Ordinary expression text: copied through unchanged.
                    correctedExp.append(s);
                }
                break;
            case '/':
                // A lone slash (division operator); comments were consumed by
                // the tokenizer's comment handling.
                correctedExp.append((char) tokType);
                break;
            case '\'':
            case '"':
                if (isNextTokenSpecial) {
                    throw new InvalidSettingsException("Invalid special identifier: " + t.sval + " (at line " + t.lineno() + ")");
                }
                // Re-emit the literal with its delimiters and re-escape the
                // characters the tokenizer un-escaped while reading it.
                correctedExp.append((char) tokType);
                s = t.sval.replace(Character.toString('\\'), "\\\\");
                s = s.replace(Character.toString('\n'), "\\n");
                s = s.replace(Character.toString('\r'), "\\r");
                // escape quote characters
                s = s.replace(Character.toString((char) tokType), "\\" + (char) tokType);
                correctedExp.append(s);
                correctedExp.append((char) tokType);
                break;
            case '$':
                if ("".equals(t.sval)) {
                    // An empty $-quoted token is the "$$" delimiter: it opens
                    // or closes a special-identifier section.
                    isNextTokenSpecial = !isNextTokenSpecial;
                } else {
                    // Non-empty $-quoted token is a $column$ reference.
                    s = t.sval;
                    int colIndex = spec.findColumnIndex(s);
                    if (colIndex < 0) {
                        throw new InvalidSettingsException("No such column: " + s + " (at line " + t.lineno() + ")");
                    }
                    inputFieldName = s;
                    inputFieldType = FieldType.Column;
                    expFieldName = createColField(colIndex);
                    DataType colType = spec.getColumnSpec(colIndex).getType();
                    correctedExp.append(getJavaFieldName(expFieldName));
                    boolean isArray = colType.isCollectionType();
                    if (isArray) {
                        colType = colType.getCollectionElementType();
                    }
                    JavaSnippetType<?, ?, ?> jst = JavaSnippetType.findType(colType);
                    // e.g. Integer.class or Integer[].class
                    expFieldClass = jst.getJavaClass(isArray);
                    InputField inputField = new InputField(inputFieldName, inputFieldType);
                    ExpressionField expField = new ExpressionField(expFieldName, expFieldClass);
                    nameValueMap.put(inputField, expField);
                }
                break;
            default:
                throw new IllegalStateException("Unexpected type in tokenizer: " + tokType + " (at line " + t.lineno() + ")");
            }
        }
    } catch (IOException e) {
        throw new InvalidSettingsException("Unable to tokenize expression string", e);
    }
    String body = correctedExp.toString();
    return new Expression(body, nameValueMap, settings);
}
Use of java.io.StreamTokenizer in the project wso2-axis2-transports (by wso2):
class TCPWorker, method handleCharacterRecordDelimiterStringStream.
/**
 * Handles a character-delimited string stream: splits the socket input on the
 * given delimiter character and dispatches each record as a separate message
 * envelope.
 *
 * @param msgContext the message context
 * @param input socket input stream
 * @param delimiter character value that delimits incoming messages
 * @throws AxisFault if building or dispatching an envelope fails
 */
private void handleCharacterRecordDelimiterStringStream(MessageContext msgContext, InputStream input, int delimiter) throws AxisFault {
    if (log.isDebugEnabled()) {
        log.debug("Handle message with character delimiter type string stream");
    }
    // NOTE(review): InputStreamReader and String.getBytes() below use the
    // platform default charset; confirm whether an explicit charset (e.g.
    // UTF-8) is required for cross-platform consistency before changing.
    StreamTokenizer tokenizer = new StreamTokenizer(new InputStreamReader(input));
    tokenizer.resetSyntax();
    // Every character except the delimiter belongs to a record "word".
    tokenizer.wordChars('\u0000', (char) (delimiter - 1));
    tokenizer.wordChars((char) (delimiter + 1), '\u00ff');
    // Newlines and the delimiter itself separate records; with
    // eolIsSignificant(true) the tokenizer reports line ends as TT_EOL.
    tokenizer.whitespaceChars('\n', '\n');
    tokenizer.whitespaceChars(delimiter, delimiter);
    tokenizer.eolIsSignificant(true);
    // Stores the value returned by nextToken()
    int type;
    try {
        type = tokenizer.nextToken();
        while (type != StreamTokenizer.TT_EOF) {
            if (type == StreamTokenizer.TT_WORD) {
                handleEnvelope(msgContext, tokenizer.sval.getBytes());
            } else if (type != StreamTokenizer.TT_EOL) {
                // Only word and end-of-line tokens are expected. The original
                // 'assert false' here is a no-op unless assertions are enabled
                // (and would then also trip on every TT_EOL); log instead so
                // unexpected tokens are visible in production.
                log.warn("Unexpected token type " + type + " in delimited string stream; token ignored");
            }
            type = tokenizer.nextToken();
        }
    } catch (IOException e) {
        sendFault(msgContext, e);
    }
}
Aggregations