Example usage of java.io.StreamTokenizer from the jop project (jop-devel): class Jopa, method pass1.
/**
 * Parse the assembler file and build the symbol table (first pass).
 * During this pass the assembler code, the symbol table and the variable
 * table are built: fills {@code symMap}, {@code varList}, {@code jinstrMap}
 * and {@code instructions} as side effects.
 * On an I/O error the message is printed and the VM exits.
 */
public void pass1() {
    StreamTokenizer in = getSt();
    int pc = 0;
    try {
        while (in.nextToken() != StreamTokenizer.TT_EOF) {
            in.pushBack();
            Line l = getLine(in);
            if (l.jinstr == -1) {
                // Plain microcode line: record any label at the current pc.
                if (l.label != null) {
                    if (symMap.containsKey(l.label)) {
                        error(in, "symbol " + l.label + " already defined");
                    } else {
                        symMap.put(l.label, Integer.valueOf(pc));
                    }
                }
                if (l.special == '=') {
                    // Constant definition: symbol = value
                    if (l.symVal == null) {
                        error(in, "missing symbol for '='");
                    } else if (symMap.containsKey(l.symVal)) {
                        error(in, "symbol " + l.symVal + " already defined");
                    } else {
                        symMap.put(l.symVal, Integer.valueOf(l.intVal));
                    }
                } else if (l.special == '?') {
                    // Variable declaration: allocate the next memory slot.
                    if (symMap.containsKey(l.symVal)) {
                        error(in, "symbol " + l.symVal + " already defined");
                    } else {
                        symMap.put(l.symVal, Integer.valueOf(memcnt++));
                        varList.add(l.symVal);
                    }
                }
            } else {
                // Java bytecode marker: map the bytecode to its microcode address.
                jinstrMap.put(l.jinstr, pc);
            }
            // Only lines carrying a real instruction consume a pc slot.
            if (l.instr != null) {
                ++pc;
                instructions.add(l);
            }
        }
    } catch (IOException e) {
        System.out.println(e.getMessage());
        System.exit(-1);
    }
}
Example usage of java.io.StreamTokenizer from the jop project (jop-devel): class Jopa, method getSt.
/**
 * Open the assembler source file and create a tokenizer configured for the
 * microcode assembler syntax: underscores and colons count as word
 * characters, end-of-line is significant, both comment styles are skipped,
 * and all words are lower-cased.
 * On an I/O error the message is printed and the VM exits.
 */
private StreamTokenizer getSt() {
    StreamTokenizer tok = null;
    try {
        FileReader reader = new FileReader(srcDir + fname);
        tok = new StreamTokenizer(reader);
        tok.wordChars('_', '_');
        tok.wordChars(':', ':');
        tok.eolIsSignificant(true);
        tok.slashStarComments(true);
        tok.slashSlashComments(true);
        tok.lowerCaseMode(true);
    } catch (IOException e) {
        System.out.println(e.getMessage());
        System.exit(-1);
    }
    return tok;
}
Example usage of java.io.StreamTokenizer from the openhab1-addons project (openhab): class ExecuteCommandJob, method parseCommand.
/**
 * Parses a <code>command</code>. Utilizes the {@link StreamTokenizer} which
 * takes care of quoted Strings as well.
 *
 * @param command the command to parse
 * @return the tokenized command which can be processed by the
 *         <code>ConsoleInterpreter</code>
 *
 * @see org.openhab.io.console.ConsoleInterpreter
 */
protected String[] parseCommand(String command) {
    logger.trace("going to parse command '{}'", command);
    // A command starting with '>' is passed through verbatim as a two-element array.
    if (command.startsWith(">")) {
        return new String[] { ">", command.substring(1).trim() };
    }
    StreamTokenizer tokenizer = new StreamTokenizer(new StringReader(command));
    // treat all characters as ordinary, including digits, so we never
    // have to deal with doubles
    tokenizer.resetSyntax();
    tokenizer.wordChars(0x23, 0xFF);
    tokenizer.whitespaceChars(0x00, 0x20);
    tokenizer.quoteChar('"');
    List<String> tokens = new ArrayList<String>();
    try {
        int tokenType = 0;
        while (tokenType != StreamTokenizer.TT_EOF && tokenType != StreamTokenizer.TT_EOL) {
            tokenType = tokenizer.nextToken();
            switch (tokenType) {
                case StreamTokenizer.TT_WORD:
                case '"':
                    /* plain or quoted String */
                    String token = tokenizer.sval;
                    tokens.add(token);
                    logger.trace("read value {} from the given command", token);
                    break;
                default:
                    // TT_EOF/TT_EOL terminators (and ordinary characters) yield
                    // no token; previously an empty string was appended here,
                    // leaving a spurious trailing "" in the returned array.
                    break;
            }
        }
    } catch (IOException ioe) {
        // StringReader should never throw here; log rather than swallow silently.
        logger.warn("unexpected I/O error while tokenizing command '{}'", command, ioe);
    }
    return tokens.toArray(new String[0]);
}
Example usage of java.io.StreamTokenizer from the android_frameworks_base project (ResurrectionRemix): class TypedProperties, method parse.
/**
 * Parses the data in the reader.
 *
 * Grammar (one entry per iteration of the main loop):
 *   {@code <type> <name> = <value> ;}   — define a typed property
 *   {@code unset ( <name> ) ;}          — remove a property
 *
 * @param r The {@code Reader} containing input data to parse
 * @param map The {@code Map} to insert parameter values into
 * @throws ParseException if the input data is malformed
 * @throws IOException if there is a problem reading from the {@code Reader}
 */
static void parse(Reader r, Map<String, Object> map) throws ParseException, IOException {
final StreamTokenizer st = initTokenizer(r);
/* A property name must be a valid fully-qualified class + package name.
 * We don't support Unicode, though.
 */
final String identifierPattern = "[a-zA-Z_$][0-9a-zA-Z_$]*";
// Dotted sequence of identifiers, e.g. "com.example.Class.field".
final Pattern propertyNamePattern = Pattern.compile("(" + identifierPattern + "\\.)*" + identifierPattern);
while (true) {
int token;
// Read the next token, which is either the type or EOF.
token = st.nextToken();
if (token == StreamTokenizer.TT_EOF) {
break;
}
if (token != StreamTokenizer.TT_WORD) {
throw new ParseException(st, "type name");
}
// interpretType maps the word to a type constant; TYPE_ERROR means unknown.
final int type = interpretType(st.sval);
if (type == TYPE_ERROR) {
throw new ParseException(st, "valid type name");
}
// Clear sval so a stale word can't leak into later error reporting.
st.sval = null;
if (type == TYPE_UNSET) {
// Expect '('.
token = st.nextToken();
if (token != '(') {
throw new ParseException(st, "'('");
}
}
// Read the property name.
token = st.nextToken();
if (token != StreamTokenizer.TT_WORD) {
throw new ParseException(st, "property name");
}
final String propertyName = st.sval;
if (!propertyNamePattern.matcher(propertyName).matches()) {
throw new ParseException(st, "valid property name");
}
st.sval = null;
if (type == TYPE_UNSET) {
// Expect ')'.
token = st.nextToken();
if (token != ')') {
throw new ParseException(st, "')'");
}
// unset(...) removes any previous definition of the property.
map.remove(propertyName);
} else {
// Expect '='.
token = st.nextToken();
if (token != '=') {
throw new ParseException(st, "'='");
}
// Read a value of the appropriate type, and insert into the map.
final Object value = parseValue(st, type);
final Object oldValue = map.remove(propertyName);
if (oldValue != null) {
// the same property is defined with a different type.
if (value.getClass() != oldValue.getClass()) {
throw new ParseException(st, "(property previously declared as a different type)");
}
}
map.put(propertyName, value);
}
// Expect ';'.
token = st.nextToken();
if (token != ';') {
throw new ParseException(st, "';'");
}
}
}
Example usage of java.io.StreamTokenizer from the voltdb project (VoltDB): class GeographyValue, method loopsFromWkt.
/**
 * A helper method to parse WKT and produce a list of polygon loops.
 * Anything more complicated than this and we probably want a dedicated parser.
 *
 * Note that we assume that the vertices of the first loop are in counter-clockwise
 * order, and that subsequent loops are in clockwise order. This is the OGC format's
 * definition. When we send these to the EE we need to put them all into counter-clockwise
 * order. So, we need to reverse the order of all but the first loop.
 *
 * Expected input shape: POLYGON ( ( lng lat , ... ) [ , ( lng lat , ... ) ] ... )
 * Throws IllegalArgumentException on any structural violation.
 */
private static List<List<XYZPoint>> loopsFromWkt(String wkt) throws IllegalArgumentException {
final String msgPrefix = "Improperly formatted WKT for polygon: ";
StreamTokenizer tokenizer = new StreamTokenizer(new StringReader(wkt));
// lowerCaseMode lets us compare the keyword case-insensitively ("POLYGON" -> "polygon").
tokenizer.lowerCaseMode(true);
tokenizer.eolIsSignificant(false);
// currentLoop is non-null exactly while we are inside an inner '(' ... ')' pair.
List<XYZPoint> currentLoop = null;
List<List<XYZPoint>> loops = new ArrayList<List<XYZPoint>>();
boolean is_shell = true;
try {
int token = tokenizer.nextToken();
if (token != StreamTokenizer.TT_WORD || !tokenizer.sval.equals("polygon")) {
throw new IllegalArgumentException(msgPrefix + "expected WKT to start with POLYGON");
}
token = tokenizer.nextToken();
if (token != '(') {
throw new IllegalArgumentException(msgPrefix + "expected left parenthesis after POLYGON");
}
boolean polygonOpen = true;
while (polygonOpen) {
token = tokenizer.nextToken();
switch(token) {
case '(':
// Start of a loop; nested '(' while a loop is open is malformed.
if (currentLoop != null) {
throw new IllegalArgumentException(msgPrefix + "missing closing parenthesis");
}
currentLoop = new ArrayList<XYZPoint>();
break;
case StreamTokenizer.TT_NUMBER:
// A longitude/latitude pair; must appear inside an open loop.
if (currentLoop == null) {
throw new IllegalArgumentException(msgPrefix + "missing opening parenthesis");
}
double lng = tokenizer.nval;
token = tokenizer.nextToken();
if (token != StreamTokenizer.TT_NUMBER) {
throw new IllegalArgumentException(msgPrefix + "missing latitude in long lat pair");
}
double lat = tokenizer.nval;
currentLoop.add(XYZPoint.fromGeographyPointValue(new GeographyPointValue(lng, lat)));
token = tokenizer.nextToken();
if (token != ',') {
// Not a comma: only a loop-closing ')' is legal here; push it
// back so the ')' case below handles it.
if (token != ')') {
throw new IllegalArgumentException(msgPrefix + "missing comma between long lat pairs");
}
tokenizer.pushBack();
}
break;
case ')':
// NOTE(review): if ')' arrives with currentLoop == null (e.g. "POLYGON ( )"),
// this falls through to diagnoseLoop(null, ...) and then currentLoop.remove(...)
// — presumably diagnoseLoop rejects null, but confirm it cannot NPE here.
// perform basic validation of loop
diagnoseLoop(currentLoop, msgPrefix);
// Following the OGC standard, the first loop should be CCW, and subsequent loops
// should be CW. But we will be building the S2 polygon here,
// and S2 wants everything to be CCW. So, we need to
// reverse all but the first loop.
//
// Note also that we don't want to touch the vertex at index 0, and we want
// to remove the vertex at index currentLoop.size() - 1. We want to hold the first
// vertex invariant. The vertex at currentLoop.size() - 1 should be a duplicate
// of the vertex at index 0, and should be removed before pushing it into the
// list of loops.
//
// We are also allowed to swap these out, because they have been
// created and are owned by us.
//
currentLoop.remove(currentLoop.size() - 1);
if (!is_shell) {
// In-place reversal of elements 1..size-1 (index 0 stays fixed).
for (int fidx = 1, lidx = currentLoop.size() - 1; fidx < lidx; ++fidx, --lidx) {
Collections.swap(currentLoop, fidx, lidx);
}
}
is_shell = false;
loops.add(currentLoop);
currentLoop = null;
// After a loop: ')' ends the polygon, ',' starts the next loop.
token = tokenizer.nextToken();
if (token == ')') {
polygonOpen = false;
} else if (token != ',') {
throw new IllegalArgumentException(msgPrefix + "unrecognized token in WKT: " + Character.toString((char) token));
}
break;
case StreamTokenizer.TT_EOF:
throw new IllegalArgumentException(msgPrefix + "premature end of input");
default:
throw new IllegalArgumentException(msgPrefix + "unrecognized token in WKT: " + Character.toString((char) token));
}
}
// Nothing may follow the closing ')' of the polygon.
token = tokenizer.nextToken();
if (token != StreamTokenizer.TT_EOF) {
throw new IllegalArgumentException(msgPrefix + "unrecognized input after WKT");
}
} catch (IOException e) {
throw new IllegalArgumentException(msgPrefix + "error tokenizing string");
}
return loops;
}
Aggregations