Use of org.apache.solr.common.util.SimpleOrderedMap in project lucene-solr by apache.
The class ClusterStatus, method getClusterStatus.
@SuppressWarnings("unchecked")
public void getClusterStatus(NamedList results) throws KeeperException, InterruptedException {
  // read aliases
  Aliases aliases = zkStateReader.getAliases();
  Map<String, List<String>> collectionVsAliases = new HashMap<>();
  Map<String, String> aliasVsCollections = aliases.getCollectionAliasMap();
  if (aliasVsCollections != null) {
    for (Map.Entry<String, String> entry : aliasVsCollections.entrySet()) {
      List<String> colls = StrUtils.splitSmart(entry.getValue(), ',');
      String alias = entry.getKey();
      for (String coll : colls) {
        if (collection == null || collection.equals(coll)) {
          List<String> list = collectionVsAliases.get(coll);
          if (list == null) {
            list = new ArrayList<>();
            collectionVsAliases.put(coll, list);
          }
          list.add(alias);
        }
      }
    }
  }
  Map roles = null;
  if (zkStateReader.getZkClient().exists(ZkStateReader.ROLES, true)) {
    roles = (Map) Utils.fromJSON(zkStateReader.getZkClient().getData(ZkStateReader.ROLES, null, null, true));
  }
  ClusterState clusterState = zkStateReader.getClusterState();
  // convert cluster state into a map of writable types
  byte[] bytes = Utils.toJSON(clusterState);
  Map<String, Object> stateMap = (Map<String, Object>) Utils.fromJSON(bytes);
  String routeKey = message.getStr(ShardParams._ROUTE_);
  String shard = message.getStr(ZkStateReader.SHARD_ID_PROP);
  Map<String, DocCollection> collectionsMap = null;
  if (collection == null) {
    collectionsMap = clusterState.getCollectionsMap();
  } else {
    collectionsMap = Collections.singletonMap(collection, clusterState.getCollectionOrNull(collection));
  }
  NamedList<Object> collectionProps = new SimpleOrderedMap<>();
  for (Map.Entry<String, DocCollection> entry : collectionsMap.entrySet()) {
    Map<String, Object> collectionStatus;
    String name = entry.getKey();
    DocCollection clusterStateCollection = entry.getValue();
    if (clusterStateCollection == null) {
      if (collection != null) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + name + " not found");
      } else {
        // collection might have got deleted at the same time
        continue;
      }
    }
    Set<String> requestedShards = new HashSet<>();
    if (routeKey != null) {
      DocRouter router = clusterStateCollection.getRouter();
      Collection<Slice> slices = router.getSearchSlices(routeKey, null, clusterStateCollection);
      for (Slice slice : slices) {
        requestedShards.add(slice.getName());
      }
    }
    if (shard != null) {
      requestedShards.add(shard);
    }
    if (clusterStateCollection.getStateFormat() > 1) {
      bytes = Utils.toJSON(clusterStateCollection);
      Map<String, Object> docCollection = (Map<String, Object>) Utils.fromJSON(bytes);
      collectionStatus = getCollectionStatus(docCollection, name, requestedShards);
    } else {
      collectionStatus = getCollectionStatus((Map<String, Object>) stateMap.get(name), name, requestedShards);
    }
    collectionStatus.put("znodeVersion", clusterStateCollection.getZNodeVersion());
    if (collectionVsAliases.containsKey(name) && !collectionVsAliases.get(name).isEmpty()) {
      collectionStatus.put("aliases", collectionVsAliases.get(name));
    }
    String configName = zkStateReader.readConfigName(name);
    collectionStatus.put("configName", configName);
    collectionProps.add(name, collectionStatus);
  }
  List<String> liveNodes = zkStateReader.getZkClient().getChildren(ZkStateReader.LIVE_NODES_ZKNODE, null, true);
  // now we need to walk the collectionProps tree to cross-check replica state with live nodes
  crossCheckReplicaStateWithLiveNodes(liveNodes, collectionProps);
  NamedList<Object> clusterStatus = new SimpleOrderedMap<>();
  clusterStatus.add("collections", collectionProps);
  // read cluster properties
  Map clusterProps = zkStateReader.getClusterProperties();
  if (clusterProps != null && !clusterProps.isEmpty()) {
    clusterStatus.add("properties", clusterProps);
  }
  // add the alias map too
  if (aliasVsCollections != null && !aliasVsCollections.isEmpty()) {
    clusterStatus.add("aliases", aliasVsCollections);
  }
  // add the roles map
  if (roles != null) {
    clusterStatus.add("roles", roles);
  }
  // add live_nodes
  clusterStatus.add("live_nodes", liveNodes);
  results.add("cluster", clusterStatus);
}
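For reference, here is a minimal, self-contained sketch of the response shape this method assembles (the collection name, configset, and node below are hypothetical, not read from a real cluster):

import java.util.Arrays;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;

public class ClusterStatusShapeDemo {
  public static void main(String[] args) {
    SimpleOrderedMap<Object> collectionStatus = new SimpleOrderedMap<>();
    collectionStatus.add("znodeVersion", 4); // hypothetical znode version
    collectionStatus.add("configName", "myConfig"); // hypothetical configset
    SimpleOrderedMap<Object> collections = new SimpleOrderedMap<>();
    collections.add("myCollection", collectionStatus); // hypothetical collection
    NamedList<Object> clusterStatus = new SimpleOrderedMap<>();
    clusterStatus.add("collections", collections);
    clusterStatus.add("live_nodes", Arrays.asList("node1:8983_solr")); // hypothetical live node
    NamedList<Object> results = new SimpleOrderedMap<>();
    results.add("cluster", clusterStatus);
    // SimpleOrderedMap preserves insertion order, so "collections" always precedes "live_nodes".
    System.out.println(results);
  }
}

Because SimpleOrderedMap keeps entries in insertion order, the CLUSTERSTATUS response is emitted with a stable key order rather than the arbitrary iteration order of a hash map.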
Use of org.apache.solr.common.util.SimpleOrderedMap in project lucene-solr by apache.
The class LoggingHandler, method handleRequestBody.
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
  // Don't do anything if the framework is unknown
  if (watcher == null) {
    rsp.add("error", "Logging Not Initialized");
    return;
  }
  rsp.add("watcher", watcher.getName());
  SolrParams params = req.getParams();
  if (params.get("threshold") != null) {
    watcher.setThreshold(params.get("threshold"));
  }
  // Write something at each level
  if (params.get("test") != null) {
    log.trace("trace message");
    log.debug("debug message");
    log.info("info (with exception)", new RuntimeException("test"));
    log.warn("warn (with exception)", new RuntimeException("test"));
    log.error("error (with exception)", new RuntimeException("test"));
  }
  String[] set = params.getParams("set");
  if (set != null) {
    for (String pair : set) {
      String[] split = pair.split(":");
      if (split.length != 2) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Invalid format, expected category:level, got " + pair);
      }
      String category = split[0];
      String level = split[1];
      watcher.setLogLevel(category, level);
    }
  }
  String since = req.getParams().get("since");
  if (since != null) {
    long time = -1;
    try {
      time = Long.parseLong(since);
    } catch (Exception ex) {
      throw new SolrException(ErrorCode.BAD_REQUEST, "invalid timestamp: " + since);
    }
    AtomicBoolean found = new AtomicBoolean(false);
    SolrDocumentList docs = watcher.getHistory(time, found);
    if (docs == null) {
      rsp.add("error", "History not enabled");
      return;
    } else {
      SimpleOrderedMap<Object> info = new SimpleOrderedMap<>();
      if (time > 0) {
        info.add("since", time);
        info.add("found", found.get());
      } else {
        // show for the first request
        info.add("levels", watcher.getAllLevels());
      }
      info.add("last", watcher.getLastEvent());
      info.add("buffer", watcher.getHistorySize());
      info.add("threshold", watcher.getThreshold());
      rsp.add("info", info);
      rsp.add("history", docs);
    }
  } else {
    rsp.add("levels", watcher.getAllLevels());
    List<LoggerInfo> loggers = new ArrayList<>(watcher.getAllLoggers());
    Collections.sort(loggers);
    List<SimpleOrderedMap<?>> info = new ArrayList<>();
    for (LoggerInfo wrap : loggers) {
      info.add(wrap.getInfo());
    }
    rsp.add("loggers", info);
  }
  rsp.setHttpCaching(false);
}
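The "set" parameter handling above expects each value in category:level form. A minimal standalone sketch of just that parsing step (the category below is hypothetical):

import org.apache.solr.common.SolrException;

public class LogLevelPairDemo {
  // Mirrors the handler's parsing of a single "set" pair.
  static String[] parsePair(String pair) {
    String[] split = pair.split(":");
    if (split.length != 2) {
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
          "Invalid format, expected category:level, got " + pair);
    }
    return split; // [category, level]
  }

  public static void main(String[] args) {
    String[] cl = parsePair("org.apache.solr.core:WARN");
    System.out.println(cl[0] + " -> " + cl[1]); // org.apache.solr.core -> WARN
  }
}

Note that a category containing a colon would split into more than two parts and be rejected; the handler accepts exactly one colon per pair.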
Use of org.apache.solr.common.util.SimpleOrderedMap in project lucene-solr by apache.
The class LukeRequestHandler, method getIndexedFieldsInfo.
private static SimpleOrderedMap<Object> getIndexedFieldsInfo(SolrQueryRequest req) throws Exception {
  SolrIndexSearcher searcher = req.getSearcher();
  SolrParams params = req.getParams();
  Set<String> fields = null;
  String fl = params.get(CommonParams.FL);
  if (fl != null) {
    fields = new TreeSet<>(Arrays.asList(fl.split("[,\\s]+")));
  }
  LeafReader reader = searcher.getSlowAtomicReader();
  IndexSchema schema = searcher.getSchema();
  // Don't be tempted to put this in the loop below; the whole point here is to alphabetize the fields!
  Set<String> fieldNames = new TreeSet<>();
  for (FieldInfo fieldInfo : reader.getFieldInfos()) {
    fieldNames.add(fieldInfo.name);
  }
  // Walk the term enum and keep a priority queue for each map in our set
  SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<>();
  for (String fieldName : fieldNames) {
    if (fields != null && !fields.contains(fieldName) && !fields.contains("*")) {
      // we're not interested in this field
      continue;
    }
    SimpleOrderedMap<Object> fieldMap = new SimpleOrderedMap<>();
    SchemaField sfield = schema.getFieldOrNull(fieldName);
    FieldType ftype = (sfield == null) ? null : sfield.getType();
    fieldMap.add("type", (ftype == null) ? null : ftype.getTypeName());
    fieldMap.add("schema", getFieldFlags(sfield));
    if (sfield != null && schema.isDynamicField(sfield.getName()) && schema.getDynamicPattern(sfield.getName()) != null) {
      fieldMap.add("dynamicBase", schema.getDynamicPattern(sfield.getName()));
    }
    Terms terms = reader.fields().terms(fieldName);
    if (terms == null) {
      // Not indexed, so we need to report what we can (it made it through the fl param if specified)
      finfo.add(fieldName, fieldMap);
      continue;
    }
    if (sfield != null && sfield.indexed()) {
      if (params.getBool(INCLUDE_INDEX_FIELD_FLAGS, true)) {
        Document doc = getFirstLiveDoc(terms, reader);
        if (doc != null) {
          // Found a document with this field
          try {
            IndexableField fld = doc.getField(fieldName);
            if (fld != null) {
              fieldMap.add("index", getFieldFlags(fld));
            } else {
              // it is a non-stored field...
              fieldMap.add("index", "(unstored field)");
            }
          } catch (Exception ex) {
            log.warn("error reading field: " + fieldName);
          }
        }
      }
      fieldMap.add("docs", terms.getDocCount());
    }
    if (fields != null && (fields.contains(fieldName) || fields.contains("*"))) {
      getDetailedFieldInfo(req, fieldName, fieldMap);
    }
    // Add the field
    finfo.add(fieldName, fieldMap);
  }
  return finfo;
}
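The fl handling at the top of the method splits on commas and whitespace and stores the result in a TreeSet, so the requested field names come back sorted and deduplicated. A small standalone sketch of just that step:

import java.util.Arrays;
import java.util.Set;
import java.util.TreeSet;

public class FlParamDemo {
  public static void main(String[] args) {
    String fl = "title,id  name\tprice,id"; // hypothetical fl value
    Set<String> fields = new TreeSet<>(Arrays.asList(fl.split("[,\\s]+")));
    System.out.println(fields); // [id, name, price, title] -- sorted and deduplicated
  }
}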
Use of org.apache.solr.common.util.SimpleOrderedMap in project lucene-solr by apache.
The class LukeRequestHandler, method getIndexInfo.
// This method just gets the top-most level of information. This was conflated with getting detailed info
// for *all* the fields, called from CoreAdminHandler etc.
public static SimpleOrderedMap<Object> getIndexInfo(DirectoryReader reader) throws IOException {
  Directory dir = reader.directory();
  SimpleOrderedMap<Object> indexInfo = new SimpleOrderedMap<>();
  indexInfo.add("numDocs", reader.numDocs());
  indexInfo.add("maxDoc", reader.maxDoc());
  indexInfo.add("deletedDocs", reader.maxDoc() - reader.numDocs());
  indexInfo.add("indexHeapUsageBytes", getIndexHeapUsed(reader));
  // TODO? Is this different than IndexReader.getCurrentVersion(dir)?
indexInfo.add("version", reader.getVersion());
indexInfo.add("segmentCount", reader.leaves().size());
indexInfo.add("current", closeSafe(reader::isCurrent));
indexInfo.add("hasDeletions", reader.hasDeletions());
indexInfo.add("directory", dir);
IndexCommit indexCommit = reader.getIndexCommit();
String segmentsFileName = indexCommit.getSegmentsFileName();
indexInfo.add("segmentsFile", segmentsFileName);
indexInfo.add("segmentsFileSizeInBytes", getFileLength(indexCommit.getDirectory(), segmentsFileName));
Map<String, String> userData = indexCommit.getUserData();
indexInfo.add("userData", userData);
String s = userData.get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
if (s != null) {
indexInfo.add("lastModified", new Date(Long.parseLong(s)));
}
return indexInfo;
}
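Since getIndexInfo is public and static, it can be exercised directly against any DirectoryReader. A minimal sketch, assuming the Lucene and Solr jars of this era are on the classpath (the in-memory RAMDirectory is used purely for illustration):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.RAMDirectory;
import org.apache.solr.handler.admin.LukeRequestHandler;

public class IndexInfoDemo {
  public static void main(String[] args) throws Exception {
    RAMDirectory dir = new RAMDirectory();
    // Index a single document so the reader has something to report on.
    try (IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
      Document doc = new Document();
      doc.add(new TextField("title", "hello world", Field.Store.YES));
      w.addDocument(doc);
      w.commit();
    }
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      // Prints numDocs, maxDoc, deletedDocs, version, segmentCount, ...
      System.out.println(LukeRequestHandler.getIndexInfo(reader));
    }
  }
}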
Use of org.apache.solr.common.util.SimpleOrderedMap in project lucene-solr by apache.
The class LukeRequestHandler, method getDocumentFieldsInfo.
private static SimpleOrderedMap<Object> getDocumentFieldsInfo(Document doc, int docId, IndexReader reader, IndexSchema schema) throws IOException {
  final CharsRefBuilder spare = new CharsRefBuilder();
  SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<>();
  for (Object o : doc.getFields()) {
    Field field = (Field) o;
    SimpleOrderedMap<Object> f = new SimpleOrderedMap<>();
    SchemaField sfield = schema.getFieldOrNull(field.name());
    FieldType ftype = (sfield == null) ? null : sfield.getType();
    f.add("type", (ftype == null) ? null : ftype.getTypeName());
    f.add("schema", getFieldFlags(sfield));
    f.add("flags", getFieldFlags(field));
    f.add("value", (ftype == null) ? null : ftype.toExternal(field));
    // TODO: this really should be "stored"
    // may be a binary number
    f.add("internal", field.stringValue());
    BytesRef bytes = field.binaryValue();
    if (bytes != null) {
      f.add("binary", Base64.byteArrayToBase64(bytes.bytes, bytes.offset, bytes.length));
    }
    // guard against a null ftype (the schema may not define this field) before the point-field check
    if (ftype != null && !ftype.isPointField()) {
      Term t = new Term(field.name(), ftype.storedToIndexed(field));
      // this can be 0 for non-indexed fields
      f.add("docFreq", t.text() == null ? 0 : reader.docFreq(t));
    }
    // If we have a term vector, return that
    if (field.fieldType().storeTermVectors()) {
      try {
        Terms v = reader.getTermVector(docId, field.name());
        if (v != null) {
          SimpleOrderedMap<Integer> tfv = new SimpleOrderedMap<>();
          final TermsEnum termsEnum = v.iterator();
          BytesRef text;
          while ((text = termsEnum.next()) != null) {
            final int freq = (int) termsEnum.totalTermFreq();
            spare.copyUTF8Bytes(text);
            tfv.add(spare.toString(), freq);
          }
          f.add("termVector", tfv);
        }
      } catch (Exception ex) {
        log.warn("error writing term vector", ex);
      }
    }
    finfo.add(field.name(), f);
  }
  return finfo;
}
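One detail worth noting: binary field values are returned base64-encoded via Solr's own Base64 helper, exactly as in the snippet above. A tiny standalone sketch of that encoding step (the byte values are hypothetical):

import org.apache.solr.common.util.Base64;
import org.apache.solr.common.util.SimpleOrderedMap;

public class BinaryFieldDemo {
  public static void main(String[] args) {
    byte[] raw = {0x48, 0x69}; // hypothetical stored bytes ("Hi")
    SimpleOrderedMap<Object> f = new SimpleOrderedMap<>();
    f.add("binary", Base64.byteArrayToBase64(raw, 0, raw.length));
    System.out.println(f); // {binary=SGk=}
  }
}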