Use of org.codehaus.jackson.JsonFactory in project gwt-test-utils by gwt-test-utils.
The class JSONParserPatcher, method evaluate:
@PatchMethod
static JSONValue evaluate(String json, boolean strict) {
    JsonParser jp = null;
    try {
        JsonFactory f = new JsonFactory();
        if (!strict) {
            f.configure(JsonParser.Feature.ALLOW_COMMENTS, true);
            f.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true);
            f.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
            f.configure(JsonParser.Feature.ALLOW_BACKSLASH_ESCAPING_ANY_CHARACTER, true);
            f.configure(JsonParser.Feature.ALLOW_UNQUOTED_CONTROL_CHARS, true);
            f.configure(JsonParser.Feature.ALLOW_NUMERIC_LEADING_ZEROS, true);
        }
        jp = f.createJsonParser(json);
        // will return JsonToken.START_OBJECT (verify?)
        jp.nextToken();
        return extractJSONObject(json, jp);
    } catch (Exception e) {
        if (e instanceof GwtTestException) {
            throw (GwtTestException) e;
        }
        throw new GwtTestJSONException("Error while parsing JSON string '" + json + "'", e);
    } finally {
        if (jp != null) {
            try {
                // ensure resources get cleaned up timely and properly
                jp.close();
            } catch (IOException e) {
                // should never happen
            }
        }
    }
}
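For illustration, a minimal standalone sketch of the same lenient-parsing idea. The class name LenientParseDemo and the sample input are hypothetical, not part of gwt-test-utils; the feature flags mirror those the patch enables above.

import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.JsonToken;

// Hypothetical demo class, not from gwt-test-utils.
public class LenientParseDemo {
    public static void main(String[] args) throws Exception {
        JsonFactory f = new JsonFactory();
        // The same non-strict features the patch enables above.
        f.configure(JsonParser.Feature.ALLOW_COMMENTS, true);
        f.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true);
        f.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
        // Input that strict JSON would reject: a comment, an unquoted
        // field name, and single-quoted strings.
        String json = "/* lenient */ {name: 'value'}";
        JsonParser jp = f.createJsonParser(json);
        try {
            while (jp.nextToken() != null) {
                System.out.println(jp.getCurrentToken() + " " + jp.getText());
            }
        } finally {
            jp.close();
        }
    }
}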
Use of org.codehaus.jackson.JsonFactory in project tdi-studio-se by Talend.
The class ExchangeUtils, method parseJsonObject:
public static List parseJsonObject(String jsonContent, Class clazz) throws Exception {
    List objList = new ArrayList();
    // For 4.1.0 there is no JSON param on the server, so jsonContent is "wrong parameters for version".
    if (!jsonContent.startsWith("[")) {
        return objList;
    }
    // Need a factory for creating the parser to use.
    JsonFactory jf = new JsonFactory();
    JsonNode node = new ObjectMapper().reader().readTree(jf.createJsonParser(new StringReader(jsonContent)));
    List<Object> list = new ObjectMapper().readValue(node.traverse(), List.class);
    for (Object source : list) {
        Object obj = clazz.newInstance();
        BeanUtils.copyProperties(obj, source);
        objList.add(obj);
    }
    return objList;
}
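For comparison, a self-contained sketch that binds a JSON array straight to a List with a single ObjectMapper call, which appears equivalent to the tree-then-traverse detour above. The class JsonArrayBindDemo and the inline sample input are hypothetical.

import java.util.List;
import java.util.Map;
import org.codehaus.jackson.map.ObjectMapper;

// Hypothetical demo class with inline sample input.
public class JsonArrayBindDemo {
    public static void main(String[] args) throws Exception {
        String jsonContent = "[{\"name\":\"a\"},{\"name\":\"b\"}]";
        ObjectMapper mapper = new ObjectMapper();
        // Each JSON object in the array deserializes as a Map by default.
        List<?> list = mapper.readValue(jsonContent, List.class);
        for (Object source : list) {
            Map<?, ?> map = (Map<?, ?>) source;
            System.out.println(map.get("name"));
        }
    }
}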
Use of org.codehaus.jackson.JsonFactory in project eiger by wlloyd.
The class LeveledManifest, method serialize:
public synchronized void serialize() {
    File manifestFile = cfs.directories.getOrCreateLeveledManifest();
    File oldFile = new File(manifestFile.getPath().replace(EXTENSION, "-old.json"));
    File tmpFile = new File(manifestFile.getPath().replace(EXTENSION, "-tmp.json"));
    JsonFactory f = new JsonFactory();
    try {
        JsonGenerator g = f.createJsonGenerator(tmpFile, JsonEncoding.UTF8);
        g.useDefaultPrettyPrinter();
        g.writeStartObject();
        g.writeArrayFieldStart("generations");
        for (int level = 0; level < generations.length; level++) {
            g.writeStartObject();
            g.writeNumberField("generation", level);
            g.writeArrayFieldStart("members");
            for (SSTableReader ssTableReader : generations[level]) {
                g.writeNumber(ssTableReader.descriptor.generation);
            }
            g.writeEndArray(); // members
            g.writeEndObject(); // generation
        }
        g.writeEndArray(); // field "generations"
        g.writeEndObject(); // global object
        g.close();
        if (oldFile.exists() && manifestFile.exists())
            FileUtils.deleteWithConfirm(oldFile);
        if (manifestFile.exists())
            FileUtils.renameWithConfirm(manifestFile, oldFile);
        assert tmpFile.exists();
        FileUtils.renameWithConfirm(tmpFile, manifestFile);
        logger.debug("Saved manifest {}", manifestFile);
    } catch (IOException e) {
        throw new IOError(e);
    }
}
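The nesting above (an object holding a "generations" array of objects, each with a "members" array) is easy to get wrong by hand, so here is a runnable sketch of the same generator pattern written to a StringWriter instead of a manifest file. The class ManifestShapeDemo and the stand-in numbers are hypothetical.

import java.io.StringWriter;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerator;

// Hypothetical demo of the generator pattern used above.
public class ManifestShapeDemo {
    public static void main(String[] args) throws Exception {
        StringWriter out = new StringWriter();
        JsonGenerator g = new JsonFactory().createJsonGenerator(out);
        g.useDefaultPrettyPrinter();
        g.writeStartObject();
        g.writeArrayFieldStart("generations");
        for (int level = 0; level < 2; level++) {
            g.writeStartObject();
            g.writeNumberField("generation", level);
            g.writeArrayFieldStart("members");
            g.writeNumber(level * 10); // stand-in for sstable generation numbers
            g.writeEndArray(); // members
            g.writeEndObject(); // generation
        }
        g.writeEndArray();
        g.writeEndObject();
        g.close();
        System.out.println(out);
    }
}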
Use of org.codehaus.jackson.JsonFactory in project hbase by apache.
The class TestTableScan, method testStreamingJSON:
@Test
public void testStreamingJSON() throws Exception {
    // Test scanning particular columns with a limit.
    StringBuilder builder = new StringBuilder();
    builder.append("/*");
    builder.append("?");
    builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
    builder.append("&");
    builder.append(Constants.SCAN_LIMIT + "=20");
    Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON);
    assertEquals(200, response.getCode());
    assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
    ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE);
    CellSetModel model = mapper.readValue(response.getStream(), CellSetModel.class);
    int count = TestScannerResource.countCellSet(model);
    assertEquals(20, count);
    checkRowsNotNull(model);
    // Test scanning with no limit.
    builder = new StringBuilder();
    builder.append("/*");
    builder.append("?");
    builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_2);
    response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON);
    assertEquals(200, response.getCode());
    assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
    model = mapper.readValue(response.getStream(), CellSetModel.class);
    count = TestScannerResource.countCellSet(model);
    assertEquals(expectedRows2, count);
    checkRowsNotNull(model);
    // Test with start row and end row.
    builder = new StringBuilder();
    builder.append("/*");
    builder.append("?");
    builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
    builder.append("&");
    builder.append(Constants.SCAN_START_ROW + "=aaa");
    builder.append("&");
    builder.append(Constants.SCAN_END_ROW + "=aay");
    response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON);
    assertEquals(200, response.getCode());
    count = 0;
    JsonFactory jfactory = new JsonFactory(mapper);
    JsonParser jParser = jfactory.createJsonParser(response.getStream());
    boolean found = false;
    while (jParser.nextToken() != JsonToken.END_OBJECT) {
        if (jParser.getCurrentToken() == JsonToken.START_OBJECT && found) {
            RowModel row = jParser.readValueAs(RowModel.class);
            assertNotNull(row.getKey());
            for (int i = 0; i < row.getCells().size(); i++) {
                if (count == 0) {
                    assertEquals("aaa", Bytes.toString(row.getKey()));
                }
                if (count == 23) {
                    assertEquals("aax", Bytes.toString(row.getKey()));
                }
                count++;
            }
            jParser.skipChildren();
        } else {
            found = jParser.getCurrentToken() == JsonToken.START_ARRAY;
        }
    }
    assertEquals(24, count);
}
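The key move in this test is constructing the JsonFactory with the ObjectMapper as its codec, which is what makes jParser.readValueAs(...) work on the streaming parser. A minimal sketch of that pattern, assuming a hypothetical StreamingBindDemo class, an Item bean standing in for RowModel, and inline sample input:

import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.JsonToken;
import org.codehaus.jackson.map.ObjectMapper;

// Hypothetical demo: a codec-backed JsonFactory lets the streaming
// parser bind sub-trees with readValueAs, as the test above does.
public class StreamingBindDemo {
    public static class Item { // stand-in for RowModel
        public String key;
    }

    public static void main(String[] args) throws Exception {
        String json = "{\"Row\":[{\"key\":\"aaa\"},{\"key\":\"aab\"}]}";
        ObjectMapper mapper = new ObjectMapper();
        // Passing the mapper as codec enables readValueAs on the parser.
        JsonParser jp = new JsonFactory(mapper).createJsonParser(json);
        boolean inArray = false;
        while (jp.nextToken() != JsonToken.END_OBJECT) {
            if (jp.getCurrentToken() == JsonToken.START_OBJECT && inArray) {
                // Consumes the whole object; the parser is left on its END_OBJECT.
                Item item = jp.readValueAs(Item.class);
                System.out.println(item.key);
            } else {
                inArray = jp.getCurrentToken() == JsonToken.START_ARRAY;
            }
        }
        jp.close();
    }
}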
Use of org.codehaus.jackson.JsonFactory in project hive by apache.
The class EximUtil, method createExportDump:
public static void createExportDump(FileSystem fs, Path metadataPath, org.apache.hadoop.hive.ql.metadata.Table tableHandle, Iterable<org.apache.hadoop.hive.ql.metadata.Partition> partitions, ReplicationSpec replicationSpec) throws SemanticException, IOException {
    if (replicationSpec == null) {
        // instantiate default values if not specified
        replicationSpec = new ReplicationSpec();
    }
    if (tableHandle == null) {
        replicationSpec.setNoop(true);
    }
    OutputStream out = fs.create(metadataPath);
    JsonGenerator jgen = (new JsonFactory()).createJsonGenerator(out);
    jgen.writeStartObject();
    jgen.writeStringField("version", METADATA_FORMAT_VERSION);
    if (METADATA_FORMAT_FORWARD_COMPATIBLE_VERSION != null) {
        jgen.writeStringField("fcversion", METADATA_FORMAT_FORWARD_COMPATIBLE_VERSION);
    }
    if (replicationSpec.isInReplicationScope()) {
        for (ReplicationSpec.KEY key : ReplicationSpec.KEY.values()) {
            String value = replicationSpec.get(key);
            if (value != null) {
                jgen.writeStringField(key.toString(), value);
            }
        }
        if (tableHandle != null) {
            Table ttable = tableHandle.getTTable();
            ttable.putToParameters(ReplicationSpec.KEY.CURR_STATE_ID.toString(), replicationSpec.getCurrentReplicationState());
            if ((ttable.getParameters().containsKey("EXTERNAL")) && (ttable.getParameters().get("EXTERNAL").equalsIgnoreCase("TRUE"))) {
                // Replication destination will not be external - override if set
                ttable.putToParameters("EXTERNAL", "FALSE");
            }
            if (ttable.isSetTableType() && ttable.getTableType().equalsIgnoreCase(TableType.EXTERNAL_TABLE.toString())) {
                // Replication destination will not be external - override if set
                ttable.setTableType(TableType.MANAGED_TABLE.toString());
            }
        }
    } else {
        // ReplicationSpec.KEY scopeKey = ReplicationSpec.KEY.REPL_SCOPE;
        // write(out, ",\"" + scopeKey.toString() + "\":\"" + replicationSpec.get(scopeKey) + "\"");
        // TODO: if we want to be explicit about this dump not being a replication dump, we can
        // uncomment this else section, but it is currently unneeded. Doing so would require a lot
        // of golden-file regeneration.
    }
    if ((tableHandle != null) && (!replicationSpec.isNoop())) {
        TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
        try {
            jgen.writeStringField("table", serializer.toString(tableHandle.getTTable(), "UTF-8"));
            jgen.writeFieldName("partitions");
            jgen.writeStartArray();
            if (partitions != null) {
                for (org.apache.hadoop.hive.ql.metadata.Partition partition : partitions) {
                    Partition tptn = partition.getTPartition();
                    if (replicationSpec.isInReplicationScope()) {
                        tptn.putToParameters(ReplicationSpec.KEY.CURR_STATE_ID.toString(), replicationSpec.getCurrentReplicationState());
                        if ((tptn.getParameters().containsKey("EXTERNAL")) && (tptn.getParameters().get("EXTERNAL").equalsIgnoreCase("TRUE"))) {
                            // Replication destination will not be external
                            tptn.putToParameters("EXTERNAL", "FALSE");
                        }
                    }
                    jgen.writeString(serializer.toString(tptn, "UTF-8"));
                    jgen.flush();
                }
            }
            jgen.writeEndArray();
        } catch (TException e) {
            throw new SemanticException(ErrorMsg.ERROR_SERIALIZE_METASTORE.getMsg(), e);
        }
    }
    jgen.writeEndObject();
    // The JsonGenerator owns the OutputStream, so close() also closes it.
    jgen.close();
}
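The resulting dump layout is a version header plus table and partition payloads embedded as pre-serialized JSON strings. A runnable sketch of that shape, writing to an in-memory stream; the class ExportDumpShapeDemo, the version string, and the embedded payload strings are all hypothetical stand-ins.

import java.io.ByteArrayOutputStream;
import org.codehaus.jackson.JsonEncoding;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerator;

// Hypothetical demo of the dump layout produced above.
public class ExportDumpShapeDemo {
    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        JsonGenerator jgen = new JsonFactory().createJsonGenerator(out, JsonEncoding.UTF8);
        jgen.writeStartObject();
        jgen.writeStringField("version", "0.1"); // stand-in version string
        jgen.writeStringField("table", "{\"tableName\":\"t1\"}"); // stand-in for Thrift-serialized table
        jgen.writeFieldName("partitions");
        jgen.writeStartArray();
        jgen.writeString("{\"values\":[\"p1\"]}"); // each partition as a JSON string
        jgen.writeEndArray();
        jgen.writeEndObject();
        jgen.close(); // also closes the underlying stream
        System.out.println(out.toString("UTF-8"));
    }
}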