
Example 1 with JSONException

Use of org.json.JSONException in project hive by apache.

The class EximUtil, method readMetaData.

public static ReadMetaData readMetaData(FileSystem fs, Path metadataPath) throws IOException, SemanticException {
    FSDataInputStream mdstream = null;
    try {
        mdstream = fs.open(metadataPath);
        // Read the metadata file fully into an in-memory buffer.
        byte[] buffer = new byte[1024];
        ByteArrayOutputStream sb = new ByteArrayOutputStream();
        int read = mdstream.read(buffer);
        while (read != -1) {
            sb.write(buffer, 0, read);
            read = mdstream.read(buffer);
        }
        String md = new String(sb.toByteArray(), "UTF-8");
        // Parse the JSON container and verify that its format version is compatible.
        JSONObject jsonContainer = new JSONObject(md);
        String version = jsonContainer.getString("version");
        String fcversion = getJSONStringEntry(jsonContainer, "fcversion");
        checkCompatibility(version, fcversion);
        String dbDesc = getJSONStringEntry(jsonContainer, "db");
        String tableDesc = getJSONStringEntry(jsonContainer, "table");
        // The database, table and partition objects are stored as Thrift-JSON strings inside the container.
        TDeserializer deserializer = new TDeserializer(new TJSONProtocol.Factory());
        Database db = null;
        if (dbDesc != null) {
            db = new Database();
            deserializer.deserialize(db, dbDesc, "UTF-8");
        }
        Table table = null;
        List<Partition> partitionsList = null;
        if (tableDesc != null) {
            table = new Table();
            deserializer.deserialize(table, tableDesc, "UTF-8");
            // TODO : jackson-streaming-iterable-redo this
            JSONArray jsonPartitions = new JSONArray(jsonContainer.getString("partitions"));
            partitionsList = new ArrayList<Partition>(jsonPartitions.length());
            for (int i = 0; i < jsonPartitions.length(); ++i) {
                String partDesc = jsonPartitions.getString(i);
                Partition partition = new Partition();
                deserializer.deserialize(partition, partDesc, "UTF-8");
                partitionsList.add(partition);
            }
        }
        return new ReadMetaData(db, table, partitionsList, readReplicationSpec(jsonContainer));
    } catch (JSONException e) {
        throw new SemanticException(ErrorMsg.ERROR_SERIALIZE_METADATA.getMsg(), e);
    } catch (TException e) {
        throw new SemanticException(ErrorMsg.ERROR_SERIALIZE_METADATA.getMsg(), e);
    } finally {
        if (mdstream != null) {
            mdstream.close();
        }
    }
}
Also used: TException(org.apache.thrift.TException) Partition(org.apache.hadoop.hive.metastore.api.Partition) TDeserializer(org.apache.thrift.TDeserializer) Table(org.apache.hadoop.hive.metastore.api.Table) JSONArray(org.json.JSONArray) JSONException(org.json.JSONException) ByteArrayOutputStream(java.io.ByteArrayOutputStream) TJSONProtocol(org.apache.thrift.protocol.TJSONProtocol) JSONObject(org.json.JSONObject) Database(org.apache.hadoop.hive.metastore.api.Database) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream)
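
A minimal, self-contained sketch of the pattern this example illustrates (parse with org.json, then translate JSONException into a domain-specific exception), assuming only org.json on the classpath; MetadataException is a hypothetical stand-in for Hive's SemanticException:

import org.json.JSONException;
import org.json.JSONObject;

public class MetadataParseSketch {

    // Hypothetical stand-in for Hive's SemanticException.
    static class MetadataException extends Exception {
        MetadataException(String message, Throwable cause) {
            super(message, cause);
        }
    }

    static String readVersion(String md) throws MetadataException {
        try {
            // new JSONObject(...) throws JSONException on malformed input;
            // getString(...) throws JSONException when the key is absent.
            JSONObject jsonContainer = new JSONObject(md);
            return jsonContainer.getString("version");
        } catch (JSONException e) {
            // Wrap the low-level parse failure, keeping it as the cause,
            // just as readMetaData wraps JSONException in SemanticException.
            throw new MetadataException("could not parse export metadata", e);
        }
    }

    public static void main(String[] args) throws Exception {
        System.out.println(readVersion("{\"version\":\"0.2\"}"));
    }
}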

Example 2 with JSONException

Use of org.json.JSONException in project pinot by linkedin.

The class PQLParserUtils, method decorateWithMapReduce.

public static void decorateWithMapReduce(JSONObject jsonObj, java.util.List<Pair<String, String>> aggreagationFunctions, JSONObject groupBy, String functionName, JSONObject parameters) {
    try {
        if (aggreagationFunctions == null) {
            aggreagationFunctions = new ArrayList<Pair<String, String>>();
        }
        // With aggregations present, default an empty select list to "*".
        if (aggreagationFunctions.size() > 0) {
            JSONObject meta = jsonObj.optJSONObject("meta");
            if (meta != null) {
                JSONArray selectList = meta.optJSONArray("select_list");
                if (selectList == null || selectList.length() == 0) {
                    meta.put("select_list", new JSONUtil.FastJSONArray().put("*"));
                }
            }
        }
        JSONArray array = new JSONUtil.FastJSONArray();
        if (groupBy == null) {
            // Without a group-by, each aggregation function becomes its own mapReduce entry.
            for (Pair<String, String> pair : aggreagationFunctions) {
                JSONObject props = new JSONUtil.FastJSONObject();
                props.put("column", pair.getSecond());
                props.put("mapReduce", pair.getFirst());
                array.put(props);
            }
        } else {
            JSONArray columns = groupBy.optJSONArray("columns");
            if (aggreagationFunctions.size() > 0) {
                groupBy.put("columns", (JSONArray) null);
            }
            int countSum = 0;
            int top = groupBy.optInt("top");
            for (Pair<String, String> pair : aggreagationFunctions) {
                /*if (columns.length() == 1 && "sum".equalsIgnoreCase(pair.getFirst()) && countSum == 0) {
            countSum++;

            JSONObject facetSpec = new FastJSONObject().put("expand", false)
                .put("minhit", 0)
                .put("max", top).put("properties", new  FastJSONObject().put("dimension", columns.get(0)).put("metric", pair.getSecond()));
            if (jsonObj.opt("facets") == null) {
              jsonObj.put("facets", new FastJSONObject());
            }
            jsonObj.getJSONObject("facets").put(SenseiFacetHandlerBuilder.SUM_GROUP_BY_FACET_NAME, facetSpec);
          } else*/
                // A count over a single group-by column is expressed as a facet rather than a mapReduce entry.
                if (columns.length() == 1 && "count".equalsIgnoreCase(pair.getFirst())) {
                    JSONObject facetSpec = new FastJSONObject().put("expand", false).put("minhit", 0).put("max", top);
                    if (jsonObj.opt("facets") == null) {
                        jsonObj.put("facets", new FastJSONObject());
                    }
                    jsonObj.getJSONObject("facets").put(columns.getString(0), facetSpec);
                } else {
                    JSONObject props = new JSONUtil.FastJSONObject();
                    props.put("function", pair.getFirst());
                    props.put("metric", pair.getSecond());
                    props.put("columns", columns);
                    props.put("mapReduce", "sensei.groupBy");
                    props.put("top", top);
                    array.put(props);
                }
            }
        }
        if (functionName != null) {
            if (parameters == null) {
                parameters = new JSONUtil.FastJSONObject();
            }
            parameters.put("mapReduce", functionName);
            array.put(parameters);
        }
        JSONObject mapReduce = new JSONUtil.FastJSONObject();
        if (array.length() == 0) {
            return;
        }
        // A single entry is used directly; multiple entries are wrapped in sensei.composite.
        if (array.length() == 1) {
            JSONObject props = array.getJSONObject(0);
            mapReduce.put("function", props.get("mapReduce"));
            mapReduce.put("parameters", props);
        } else {
            mapReduce.put("function", "sensei.composite");
            JSONObject props = new JSONUtil.FastJSONObject();
            props.put("array", array);
            mapReduce.put("parameters", props);
        }
        jsonObj.put("mapReduce", mapReduce);
    // we need to remove group by since it's in Map reduce
    //jsonObj.remove("groupBy");
    } catch (JSONException e) {
        LOGGER.error("Caught exception", e);
        Utils.rethrowException(e);
        throw new AssertionError("Should not reach this");
    }
}
Also used: JSONArray(org.json.JSONArray) JSONException(org.json.JSONException) JSONObject(org.json.JSONObject) FastJSONObject(com.linkedin.pinot.pql.parsers.utils.JSONUtil.FastJSONObject)
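
The method above leans on org.json's opt* accessors (optJSONObject, optJSONArray, optInt), which return null or a default for missing keys, while the get* accessors throw JSONException; that difference is why the whole body still sits in one try/catch. A short sketch of the contrast, assuming only org.json:

import org.json.JSONException;
import org.json.JSONObject;

public class OptVersusGetSketch {
    public static void main(String[] args) throws JSONException {
        JSONObject jsonObj = new JSONObject("{\"meta\":{}}");

        // opt* accessors tolerate missing keys: null for objects and arrays,
        // 0 for optInt. This is why decorateWithMapReduce can probe optional
        // keys like "meta", "select_list" and "top" without extra try/catch.
        JSONObject meta = jsonObj.optJSONObject("meta");       // present -> {}
        System.out.println(meta.optJSONArray("select_list"));  // absent  -> null
        System.out.println(jsonObj.optInt("top"));             // absent  -> 0

        // get* accessors throw JSONException for missing keys.
        try {
            jsonObj.getJSONObject("facets");
        } catch (JSONException e) {
            System.out.println("missing key: " + e.getMessage());
        }
    }
}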

Example 3 with JSONException

Use of org.json.JSONException in project pinot by linkedin.

The class PinotTableRestletResource, method updateTableConfig.

/*
   * NOTE: There is inconsistency in these APIs. GET returns OFFLINE + REALTIME configuration
   * in a single response but POST and this PUT request only operate on either offline or realtime
   * table configuration. If we make this API take both realtime and offline table configuration
   * then the update is not guaranteed to be transactional for both table types. This is more of a PATCH request
   * than PUT.
   */
@HttpVerb("put")
@Summary("Update table configuration. Request body is offline or realtime table configuration")
@Tags({ "Table" })
@Paths({ "/tables/{tableName}" })
public Representation updateTableConfig(@Parameter(name = "tableName", in = "path", description = "Table name (without type)") String tableName, Representation entity) {
    AbstractTableConfig config = null;
    try {
        config = AbstractTableConfig.init(entity.getText());
    } catch (JSONException e) {
        // Return immediately so that a null config is never dereferenced below.
        return errorResponseRepresentation(Status.CLIENT_ERROR_BAD_REQUEST, "Invalid json in table configuration");
    } catch (IOException e) {
        LOGGER.error("Failed to read request body while updating configuration for table: {}", tableName, e);
        return errorResponseRepresentation(Status.SERVER_ERROR_INTERNAL, "Failed to read request");
    }
    try {
        String tableTypeStr = config.getTableType();
        TableType tableType = TableType.valueOf(tableTypeStr.toUpperCase());
        String configTableName = config.getTableName();
        if (!configTableName.equals(tableName)) {
            return errorResponseRepresentation(Status.CLIENT_ERROR_BAD_REQUEST, "Request table name does not match table name in the body");
        }
        String tableNameWithType = null;
        if (config.getTableType().equalsIgnoreCase(TableType.OFFLINE.name())) {
            tableNameWithType = TableNameBuilder.OFFLINE_TABLE_NAME_BUILDER.forTable(tableName);
        } else if (config.getTableType().equalsIgnoreCase(TableType.REALTIME.name())) {
            tableNameWithType = TableNameBuilder.REALTIME_TABLE_NAME_BUILDER.forTable(tableName);
        }
        _pinotHelixResourceManager.setTableConfig(config, tableNameWithType, tableType);
        return responseRepresentation(Status.SUCCESS_OK, "{\"status\" : \"Success\"}");
    } catch (IOException e) {
        LOGGER.error("Failed to update table configuration for table: {}", tableName, e);
        return errorResponseRepresentation(Status.SERVER_ERROR_INTERNAL, "Internal error while updating table configuration");
    }
}
Also used: TableType(com.linkedin.pinot.common.utils.CommonConstants.Helix.TableType) JSONException(org.json.JSONException) AbstractTableConfig(com.linkedin.pinot.common.config.AbstractTableConfig) IOException(java.io.IOException) Summary(com.linkedin.pinot.common.restlet.swagger.Summary) HttpVerb(com.linkedin.pinot.common.restlet.swagger.HttpVerb) Paths(com.linkedin.pinot.common.restlet.swagger.Paths) Tags(com.linkedin.pinot.common.restlet.swagger.Tags)
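
The part relevant to JSONException is the first try/catch: a parse failure in the request body is mapped to a 400 client error rather than a 500. A stripped-down sketch of that mapping, using a plain JSONObject in place of AbstractTableConfig.init and plain strings in place of Restlet's Representation and Status types (both substitutions are assumptions made for illustration):

import org.json.JSONException;
import org.json.JSONObject;

public class TableConfigUpdateSketch {

    static String updateTableConfig(String tableName, String body) {
        JSONObject config;
        try {
            config = new JSONObject(body);
        } catch (JSONException e) {
            // Malformed JSON is the client's fault: answer 400, not 500.
            return "400 Bad Request: invalid json in table configuration";
        }
        // Mirror the name consistency check from the snippet above.
        if (!tableName.equals(config.optString("tableName"))) {
            return "400 Bad Request: request table name does not match table name in the body";
        }
        return "200 OK";
    }

    public static void main(String[] args) {
        System.out.println(updateTableConfig("myTable", "{not json"));
        System.out.println(updateTableConfig("myTable", "{\"tableName\":\"myTable\"}"));
    }
}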

Example 4 with JSONException

Use of org.json.JSONException in project pinot by linkedin.

The class PinotRealtimeSegmentManager, method assignRealtimeSegmentsToServerInstancesIfNecessary.

private synchronized void assignRealtimeSegmentsToServerInstancesIfNecessary() throws JSONException, IOException {
    // Fetch current ideal state snapshot
    Map<String, IdealState> idealStateMap = new HashMap<String, IdealState>();
    for (String resource : _pinotHelixResourceManager.getAllRealtimeTables()) {
        final String tableName = TableNameBuilder.extractRawTableName(resource);
        AbstractTableConfig tableConfig = _pinotHelixResourceManager.getTableConfig(tableName, TableType.REALTIME);
        KafkaStreamMetadata metadata = new KafkaStreamMetadata(tableConfig.getIndexingConfig().getStreamConfigs());
        if (metadata.hasHighLevelKafkaConsumerType()) {
            idealStateMap.put(resource, _pinotHelixResourceManager.getHelixAdmin().getResourceIdealState(_pinotHelixResourceManager.getHelixClusterName(), resource));
        } else {
            LOGGER.debug("Not considering table {} for realtime segment assignment");
        }
    }
    List<Pair<String, String>> listOfSegmentsToAddToInstances = new ArrayList<Pair<String, String>>();
    for (String resource : idealStateMap.keySet()) {
        try {
            IdealState state = idealStateMap.get(resource);
            // Are there any partitions?
            if (state.getPartitionSet().size() == 0) {
                // No, this is a brand new ideal state, so we will add one new segment to every partition and replica
                List<String> instancesInResource = new ArrayList<String>();
                try {
                    instancesInResource.addAll(_pinotHelixResourceManager.getServerInstancesForTable(resource, TableType.REALTIME));
                } catch (Exception e) {
                    LOGGER.error("Caught exception while fetching instances for resource {}", resource, e);
                    _controllerMetrics.addMeteredGlobalValue(ControllerMeter.CONTROLLER_REALTIME_TABLE_SEGMENT_ASSIGNMENT_ERROR, 1L);
                }
                // Assign a new segment to all server instances
                for (String instanceId : instancesInResource) {
                    InstanceZKMetadata instanceZKMetadata = _pinotHelixResourceManager.getInstanceZKMetadata(instanceId);
                    if (instanceZKMetadata == null) {
                        LOGGER.warn("Instance {} has no associated instance metadata in ZK, ignoring for segment assignment.", instanceId);
                        _controllerMetrics.addMeteredGlobalValue(ControllerMeter.CONTROLLER_REALTIME_TABLE_SEGMENT_ASSIGNMENT_ERROR, 1L);
                        continue;
                    }
                    String groupId = instanceZKMetadata.getGroupId(resource);
                    String partitionId = instanceZKMetadata.getPartition(resource);
                    if (groupId != null && !groupId.isEmpty() && partitionId != null && !partitionId.isEmpty()) {
                        listOfSegmentsToAddToInstances.add(new Pair<String, String>(new HLCSegmentName(groupId, partitionId, String.valueOf(System.currentTimeMillis())).getSegmentName(), instanceId));
                    } else {
                        LOGGER.warn("Instance {} has invalid groupId ({}) and/or partitionId ({}) for resource {}, ignoring for segment assignment.", instanceId, groupId, partitionId, resource);
                        _controllerMetrics.addMeteredGlobalValue(ControllerMeter.CONTROLLER_REALTIME_TABLE_SEGMENT_ASSIGNMENT_ERROR, 1L);
                    }
                }
            } else {
                // Add all server instances to the list of instances for which to assign a realtime segment
                Set<String> instancesToAssignRealtimeSegment = new HashSet<String>();
                try {
                    instancesToAssignRealtimeSegment.addAll(_pinotHelixResourceManager.getServerInstancesForTable(resource, TableType.REALTIME));
                } catch (Exception e) {
                    LOGGER.error("Caught exception while fetching instances for resource {}", resource, e);
                    _controllerMetrics.addMeteredGlobalValue(ControllerMeter.CONTROLLER_REALTIME_TABLE_SEGMENT_ASSIGNMENT_ERROR, 1L);
                }
                // Remove server instances that are currently processing a segment
                for (String partition : state.getPartitionSet()) {
                    // Helix partition is the segment name
                    if (SegmentName.isHighLevelConsumerSegmentName(partition)) {
                        HLCSegmentName segName = new HLCSegmentName(partition);
                        RealtimeSegmentZKMetadata realtimeSegmentZKMetadata = ZKMetadataProvider.getRealtimeSegmentZKMetadata(_pinotHelixResourceManager.getPropertyStore(), segName.getTableName(), partition);
                        if (realtimeSegmentZKMetadata == null) {
                            // Segment was deleted by retention manager.
                            continue;
                        }
                        if (realtimeSegmentZKMetadata.getStatus() == Status.IN_PROGRESS) {
                            instancesToAssignRealtimeSegment.removeAll(state.getInstanceSet(partition));
                        }
                    }
                }
                // Assign a new segment to the server instances not currently processing this segment
                for (String instanceId : instancesToAssignRealtimeSegment) {
                    InstanceZKMetadata instanceZKMetadata = _pinotHelixResourceManager.getInstanceZKMetadata(instanceId);
                    String groupId = instanceZKMetadata.getGroupId(resource);
                    String partitionId = instanceZKMetadata.getPartition(resource);
                    listOfSegmentsToAddToInstances.add(new Pair<String, String>(new HLCSegmentName(groupId, partitionId, String.valueOf(System.currentTimeMillis())).getSegmentName(), instanceId));
                }
            }
        } catch (Exception e) {
            LOGGER.warn("Caught exception while processing resource {}, skipping.", resource, e);
            _controllerMetrics.addMeteredGlobalValue(ControllerMeter.CONTROLLER_REALTIME_TABLE_SEGMENT_ASSIGNMENT_ERROR, 1L);
        }
    }
    LOGGER.info("Computed list of new segments to add : " + Arrays.toString(listOfSegmentsToAddToInstances.toArray()));
    // Add the new segments to the server instances
    for (final Pair<String, String> segmentIdAndInstanceId : listOfSegmentsToAddToInstances) {
        final String segmentId = segmentIdAndInstanceId.getFirst();
        final String instanceName = segmentIdAndInstanceId.getSecond();
        try {
            final HLCSegmentName segName = new HLCSegmentName(segmentId);
            String resourceName = segName.getTableName();
            // Does the ideal state already contain this segment?
            if (!idealStateMap.get(resourceName).getPartitionSet().contains(segmentId)) {
                // No, add it
                // Create the realtime segment metadata
                RealtimeSegmentZKMetadata realtimeSegmentMetadataToAdd = new RealtimeSegmentZKMetadata();
                realtimeSegmentMetadataToAdd.setTableName(TableNameBuilder.extractRawTableName(resourceName));
                realtimeSegmentMetadataToAdd.setSegmentType(SegmentType.REALTIME);
                realtimeSegmentMetadataToAdd.setStatus(Status.IN_PROGRESS);
                realtimeSegmentMetadataToAdd.setSegmentName(segmentId);
                // Add the new metadata to the property store
                ZKMetadataProvider.setRealtimeSegmentZKMetadata(_pinotHelixResourceManager.getPropertyStore(), realtimeSegmentMetadataToAdd);
                // Update the ideal state to add the new realtime segment
                HelixHelper.updateIdealState(_pinotHelixResourceManager.getHelixZkManager(), resourceName, new Function<IdealState, IdealState>() {

                    @Override
                    public IdealState apply(IdealState idealState) {
                        return PinotTableIdealStateBuilder.addNewRealtimeSegmentToIdealState(segmentId, idealState, instanceName);
                    }
                }, RetryPolicies.exponentialBackoffRetryPolicy(5, 500L, 2.0f));
            }
        } catch (Exception e) {
            LOGGER.warn("Caught exception while processing segment {} for instance {}, skipping.", segmentId, instanceName, e);
            _controllerMetrics.addMeteredGlobalValue(ControllerMeter.CONTROLLER_REALTIME_TABLE_SEGMENT_ASSIGNMENT_ERROR, 1L);
        }
    }
}
Also used: HLCSegmentName(com.linkedin.pinot.common.utils.HLCSegmentName) KafkaStreamMetadata(com.linkedin.pinot.common.metadata.stream.KafkaStreamMetadata) HashMap(java.util.HashMap) InstanceZKMetadata(com.linkedin.pinot.common.metadata.instance.InstanceZKMetadata) ArrayList(java.util.ArrayList) IdealState(org.apache.helix.model.IdealState) JSONException(org.json.JSONException) IOException(java.io.IOException) RealtimeSegmentZKMetadata(com.linkedin.pinot.common.metadata.segment.RealtimeSegmentZKMetadata) AbstractTableConfig(com.linkedin.pinot.common.config.AbstractTableConfig) Pair(com.linkedin.pinot.core.query.utils.Pair) HashSet(java.util.HashSet)
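
Unlike the previous examples, this method never catches JSONException itself: it declares throws JSONException and lets the caller deal with parse failures, which presumably originate in the JSON-handling helpers it invokes. A minimal sketch of that propagation style; extractTableName is a hypothetical helper, not part of the Pinot code above:

import org.json.JSONException;
import org.json.JSONObject;

public class PropagationSketch {

    // Hypothetical helper: declares JSONException instead of handling it,
    // in the same style as assignRealtimeSegmentsToServerInstancesIfNecessary.
    static String extractTableName(String json) throws JSONException {
        return new JSONObject(json).getString("tableName");
    }

    public static void main(String[] args) {
        try {
            extractTableName("not json");
        } catch (JSONException e) {
            // The caller decides how to react to the parse failure.
            System.out.println("caller handles it: " + e.getMessage());
        }
    }
}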

Example 5 with JSONException

Use of org.json.JSONException in project pinot by linkedin.

The class ControllerTest, method getBrokerReturnJson.

private JSONObject getBrokerReturnJson(URLConnection conn) throws Exception {
    final BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream(), "UTF-8"));
    final StringBuilder sb = new StringBuilder();
    String line = null;
    while ((line = reader.readLine()) != null) {
        sb.append(line);
    }
    final String res = sb.toString();
    try {
        final JSONObject ret = new JSONObject(res);
        return ret;
    } catch (JSONException e) {
        LOGGER.warn("Exception  to parse response \"{}\" as JSON", res);
        return null;
    }
}
Also used: InputStreamReader(java.io.InputStreamReader) JSONObject(org.json.JSONObject) BufferedReader(java.io.BufferedReader) JSONException(org.json.JSONException)
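
Here JSONException is handled with a best-effort fallback: log the unparseable payload and return null, which suits test code polling an HTTP endpoint. A minimal sketch of the same pattern, with parseOrNull as a hypothetical name and System.err standing in for the logger:

import org.json.JSONException;
import org.json.JSONObject;

public class LenientParseSketch {

    // Hypothetical helper mirroring getBrokerReturnJson's catch block:
    // a parse failure is logged (here, printed) and mapped to null
    // instead of being propagated to the caller.
    static JSONObject parseOrNull(String res) {
        try {
            return new JSONObject(res);
        } catch (JSONException e) {
            System.err.println("Failed to parse response \"" + res + "\" as JSON");
            return null;
        }
    }

    public static void main(String[] args) {
        System.out.println(parseOrNull("{\"status\":\"ok\"}")); // parsed JSONObject
        System.out.println(parseOrNull("<html>error</html>")); // null
    }
}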

Aggregations

JSONException (org.json.JSONException): 4451
JSONObject (org.json.JSONObject): 3392
JSONArray (org.json.JSONArray): 1461
IOException (java.io.IOException): 827
ArrayList (java.util.ArrayList): 573
HashMap (java.util.HashMap): 324
Test (org.junit.Test): 192
URL (java.net.URL): 187
Response (javax.ws.rs.core.Response): 179
Builder (javax.ws.rs.client.Invocation.Builder): 177
Test (org.testng.annotations.Test): 176
ResteasyClientBuilder (org.jboss.resteasy.client.jaxrs.ResteasyClientBuilder): 175
BaseTest (org.gluu.oxauth.BaseTest): 170
File (java.io.File): 169
Parameters (org.testng.annotations.Parameters): 167
InputStream (java.io.InputStream): 147
Intent (android.content.Intent): 138
Bundle (android.os.Bundle): 114
HttpURLConnection (java.net.HttpURLConnection): 114
UnsupportedEncodingException (java.io.UnsupportedEncodingException): 112