Example 61 with TreeSet

use of java.util.TreeSet in project hadoop by apache.

the class JMXGet method init.

/**
   * Initializes the MBeanServer connection.
   *
   * @throws Exception if the connection cannot be established
   */
public void init() throws Exception {
    err("init: server=" + server + ";port=" + port + ";service=" + service + ";localVMUrl=" + localVMUrl);
    String url_string = null;
    // build connection url
    if (localVMUrl != null) {
        // use
        // jstat -snap <vmpid> | grep sun.management.JMXConnectorServer.address
        // to get url
        url_string = localVMUrl;
        err("url string for local pid = " + localVMUrl + " = " + url_string);
    } else if (!port.isEmpty() && !server.isEmpty()) {
        // using server and port
        url_string = "service:jmx:rmi:///jndi/rmi://" + server + ":" + port + "/jmxrmi";
    }
    if (url_string == null) {
// assume local VM (for example, for testing)
        mbsc = ManagementFactory.getPlatformMBeanServer();
    } else {
        JMXServiceURL url = new JMXServiceURL(url_string);
        err("Create RMI connector and connect to the RMI connector server" + url);
        JMXConnector jmxc = JMXConnectorFactory.connect(url, null);
        // Get an MBeanServerConnection
        //
        err("\nGet an MBeanServerConnection");
        mbsc = jmxc.getMBeanServerConnection();
    }
    // Get domains from MBeanServer
    //
    err("\nDomains:");
    String[] domains = mbsc.getDomains();
    Arrays.sort(domains);
    for (String domain : domains) {
        err("\tDomain = " + domain);
    }
    // Get MBeanServer's default domain
    //
    err("\nMBeanServer default domain = " + mbsc.getDefaultDomain());
    // Get MBean count
    //
    err("\nMBean count = " + mbsc.getMBeanCount());
    // Query MBean names for specific domain "hadoop" and service
    ObjectName query = new ObjectName("Hadoop:service=" + service + ",*");
    hadoopObjectNames = new ArrayList<ObjectName>(5);
    err("\nQuery MBeanServer MBeans:");
    Set<ObjectName> names = new TreeSet<ObjectName>(mbsc.queryNames(query, null));
    for (ObjectName name : names) {
        hadoopObjectNames.add(name);
        err("Hadoop service: " + name);
    }
}
Also used : JMXServiceURL(javax.management.remote.JMXServiceURL) JMXConnector(javax.management.remote.JMXConnector) TreeSet(java.util.TreeSet) ObjectName(javax.management.ObjectName)
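
The TreeSet in this example exists so the queried MBean names come back in a deterministic, sorted order: ObjectName implements Comparable, so no custom comparator is needed. A minimal standalone sketch of that idiom (the MBean names below are hypothetical, not taken from the Hadoop source):

import java.util.Set;
import java.util.TreeSet;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;

public class SortedNamesDemo {
    public static void main(String[] args) throws MalformedObjectNameException {
        // ObjectName implements Comparable, so a TreeSet keeps the names
        // in their natural (lexicographic) order.
        Set<ObjectName> names = new TreeSet<ObjectName>();
        names.add(new ObjectName("Hadoop:service=NameNode,name=FSNamesystem"));
        names.add(new ObjectName("Hadoop:service=DataNode,name=JvmMetrics"));
        for (ObjectName name : names) {
            // Prints the DataNode name first, regardless of insertion order.
            System.out.println(name);
        }
    }
}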

Example 62 with TreeSet

use of java.util.TreeSet in project hadoop by apache.

the class NamenodeFsck method copyBlock.

/*
   * XXX (ab) Bulk of this method is copied verbatim from {@link DFSClient}, which is
   * bad. Both places should be refactored to provide a method to copy blocks
   * around.
   */
private void copyBlock(final DFSClient dfs, LocatedBlock lblock, OutputStream fos) throws Exception {
    int failures = 0;
    InetSocketAddress targetAddr = null;
    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
    BlockReader blockReader = null;
    ExtendedBlock block = lblock.getBlock();
    while (blockReader == null) {
        DatanodeInfo chosenNode;
        try {
            chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
            targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
        } catch (IOException ie) {
            if (failures >= HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
                throw new IOException("Could not obtain block " + lblock, ie);
            }
            LOG.info("Could not obtain block from any node:  " + ie);
            try {
                // Back off before retrying against a fresh set of nodes.
                Thread.sleep(10000);
            } catch (InterruptedException iex) {
                // Ignored; the retry loop simply continues.
            }
            deadNodes.clear();
            failures++;
            continue;
        }
        try {
            String file = BlockReaderFactory.getFileName(targetAddr, block.getBlockPoolId(), block.getBlockId());
            blockReader = new BlockReaderFactory(dfs.getConf())
                    .setFileName(file)
                    .setBlock(block)
                    .setBlockToken(lblock.getBlockToken())
                    .setStartOffset(0)
                    .setLength(block.getNumBytes())
                    .setVerifyChecksum(true)
                    .setClientName("fsck")
                    .setDatanodeInfo(chosenNode)
                    .setInetSocketAddress(targetAddr)
                    .setCachingStrategy(CachingStrategy.newDropBehind())
                    .setClientCacheContext(dfs.getClientContext())
                    .setConfiguration(namenode.getConf())
                    .setTracer(tracer)
                    .setRemotePeerFactory(new RemotePeerFactory() {

                @Override
                public Peer newConnectedPeer(InetSocketAddress addr, Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId) throws IOException {
                    Peer peer = null;
                    Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
                    try {
                        s.connect(addr, HdfsConstants.READ_TIMEOUT);
                        s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
                        peer = DFSUtilClient.peerFromSocketAndKey(dfs.getSaslDataTransferClient(), s, NamenodeFsck.this, blockToken, datanodeId, HdfsConstants.READ_TIMEOUT);
                    } finally {
                        if (peer == null) {
                            IOUtils.closeQuietly(s);
                        }
                    }
                    return peer;
                }
            }).build();
        } catch (IOException ex) {
            // Put chosen node into dead list, continue
            LOG.info("Failed to connect to " + targetAddr + ":" + ex);
            deadNodes.add(chosenNode);
        }
    }
    byte[] buf = new byte[1024];
    int cnt = 0;
    boolean success = true;
    long bytesRead = 0;
    try {
        while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
            fos.write(buf, 0, cnt);
            bytesRead += cnt;
        }
        if (bytesRead != block.getNumBytes()) {
            throw new IOException("Recorded block size is " + block.getNumBytes() + ", but datanode returned " + bytesRead + " bytes");
        }
    } catch (Exception e) {
        LOG.error("Error reading block", e);
        success = false;
    } finally {
        blockReader.close();
    }
    if (!success) {
        throw new Exception("Could not copy block data for " + lblock.getBlock());
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) InetSocketAddress(java.net.InetSocketAddress) BlockReader(org.apache.hadoop.hdfs.BlockReader) Peer(org.apache.hadoop.hdfs.net.Peer) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) IOException(java.io.IOException) UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) BlockTokenIdentifier(org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier) TreeSet(java.util.TreeSet) BlockReaderFactory(org.apache.hadoop.hdfs.client.impl.BlockReaderFactory) RemotePeerFactory(org.apache.hadoop.hdfs.RemotePeerFactory) Socket(java.net.Socket)
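
Here the TreeSet serves as a running blacklist of dead datanodes: every node that fails to serve the block is added to deadNodes, and bestNode skips members of that set on the next pass through the retry loop. A simplified, self-contained sketch of that pattern (the node names and the bestNode stand-in are hypothetical):

import java.util.Set;
import java.util.TreeSet;

public class DeadNodeRetryDemo {
    // Hypothetical stand-in for NamenodeFsck.bestNode(): picks the first
    // replica location that has not already been marked dead.
    static String bestNode(String[] candidates, Set<String> deadNodes) {
        for (String candidate : candidates) {
            if (!deadNodes.contains(candidate)) {
                return candidate;
            }
        }
        throw new IllegalStateException("no live nodes left");
    }

    public static void main(String[] args) {
        String[] replicas = { "dn-b:9866", "dn-a:9866", "dn-c:9866" };
        Set<String> deadNodes = new TreeSet<String>();
        String chosen = bestNode(replicas, deadNodes);
        // Simulate a failed connection: blacklist the node and choose again.
        deadNodes.add(chosen);
        System.out.println("dead so far: " + deadNodes);
        System.out.println("next choice: " + bestNode(replicas, deadNodes));
    }
}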

Example 63 with TreeSet

use of java.util.TreeSet in project Mycat-Server by MyCATApache.

the class DruidMycatRouteStrategy method routeNormalSqlWithAST.

@Override
public RouteResultset routeNormalSqlWithAST(SchemaConfig schema, String stmt, RouteResultset rrs, String charset, LayerCachePool cachePool) throws SQLNonTransientException {
    /**
     * When only MySQL is in use, only MySQL syntax is supported.
     */
    SQLStatementParser parser = null;
    if (schema.isNeedSupportMultiDBType()) {
        parser = new MycatStatementParser(stmt);
    } else {
        parser = new MySqlStatementParser(stmt);
    }
    MycatSchemaStatVisitor visitor = null;
    SQLStatement statement;
    /**
     * Any failure while parsing is rethrown uniformly as a SQL syntax error.
     */
    try {
        statement = parser.parseStatement();
        visitor = new MycatSchemaStatVisitor();
    } catch (Exception t) {
        LOGGER.error("DruidMycatRouteStrategyError", t);
        throw new SQLSyntaxErrorException(t);
    }
    /**
     * Check for unsupported statements.
     */
    checkUnSupportedStatement(statement);
    DruidParser druidParser = DruidParserFactory.create(schema, statement, visitor);
    druidParser.parser(schema, rrs, statement, stmt, cachePool, visitor);
    DruidShardingParseInfo ctx = druidParser.getCtx();
    rrs.setTables(ctx.getTables());
    /**
     * If DruidParser already completed routing during parsing, return directly.
     */
    if (rrs.isFinishedRoute()) {
        return rrs;
    }
    /**
     * SELECT statements without a FROM clause, and similar cases.
     */
    if ((ctx.getTables() == null || ctx.getTables().size() == 0) && (ctx.getTableAliasMap() == null || ctx.getTableAliasMap().isEmpty())) {
        return RouterUtil.routeToSingleNode(rrs, schema.getRandomDataNode(), druidParser.getCtx().getSql());
    }
    if (druidParser.getCtx().getRouteCalculateUnits().size() == 0) {
        RouteCalculateUnit routeCalculateUnit = new RouteCalculateUnit();
        druidParser.getCtx().addRouteCalculateUnit(routeCalculateUnit);
    }
    SortedSet<RouteResultsetNode> nodeSet = new TreeSet<RouteResultsetNode>();
    for (RouteCalculateUnit unit : druidParser.getCtx().getRouteCalculateUnits()) {
        RouteResultset rrsTmp = RouterUtil.tryRouteForTables(schema, druidParser.getCtx(), unit, rrs, isSelect(statement), cachePool);
        if (rrsTmp != null) {
            for (RouteResultsetNode node : rrsTmp.getNodes()) {
                nodeSet.add(node);
            }
        }
    }
    RouteResultsetNode[] nodes = new RouteResultsetNode[nodeSet.size()];
    int i = 0;
    for (RouteResultsetNode aNodeSet : nodeSet) {
        nodes[i] = aNodeSet;
        if (statement instanceof MySqlInsertStatement && ctx.getTables().size() == 1 && schema.getTables().containsKey(ctx.getTables().get(0))) {
            RuleConfig rule = schema.getTables().get(ctx.getTables().get(0)).getRule();
            if (rule != null && rule.getRuleAlgorithm() instanceof SlotFunction) {
                aNodeSet.setStatement(ParseUtil.changeInsertAddSlot(aNodeSet.getStatement(), aNodeSet.getSlot()));
            }
        }
        i++;
    }
    rrs.setNodes(nodes);
    /**
     * subTables="t_order$1-2,t_order3"
     * Sub-table sharding is supported since 1.6. Moreover, only one dataNode
     * may be configured when sub-tables are used, and JOIN is not supported
     * under sub-table sharding.
     */
    if (rrs.isDistTable()) {
        return this.routeDisTable(statement, rrs);
    }
    return rrs;
}
Also used : DruidShardingParseInfo(io.mycat.route.parser.druid.DruidShardingParseInfo) RouteCalculateUnit(io.mycat.route.parser.druid.RouteCalculateUnit) SQLStatementParser(com.alibaba.druid.sql.parser.SQLStatementParser) MycatStatementParser(io.mycat.route.parser.druid.MycatStatementParser) MycatSchemaStatVisitor(io.mycat.route.parser.druid.MycatSchemaStatVisitor) SQLSyntaxErrorException(java.sql.SQLSyntaxErrorException) MySqlInsertStatement(com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlInsertStatement) SQLStatement(com.alibaba.druid.sql.ast.SQLStatement) SQLNonTransientException(java.sql.SQLNonTransientException) SlotFunction(io.mycat.route.function.SlotFunction) DruidParser(io.mycat.route.parser.druid.DruidParser) TreeSet(java.util.TreeSet) RouteResultsetNode(io.mycat.route.RouteResultsetNode) RuleConfig(io.mycat.config.model.rule.RuleConfig) MySqlStatementParser(com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser) RouteResultset(io.mycat.route.RouteResultset)
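
The TreeSet here (held through its SortedSet interface) merges the nodes produced by each RouteCalculateUnit, dropping duplicate routes and keeping a stable order before they are copied into the final array. A reduced sketch of that merge step, with plain strings standing in for RouteResultsetNode:

import java.util.SortedSet;
import java.util.TreeSet;

public class RouteNodeMergeDemo {
    public static void main(String[] args) {
        // Each route-calculate unit may resolve to overlapping data nodes;
        // the TreeSet merges them, deduplicating and sorting as it goes.
        SortedSet<String> nodeSet = new TreeSet<String>();
        String[][] perUnitResults = { { "dn2", "dn1" }, { "dn1", "dn3" } };
        for (String[] unitNodes : perUnitResults) {
            for (String node : unitNodes) {
                nodeSet.add(node);
            }
        }
        // Copy into a fixed array, as the routing code does with its nodes.
        String[] nodes = nodeSet.toArray(new String[nodeSet.size()]);
        System.out.println("routing to " + nodes.length + " nodes: " + nodeSet);
    }
}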

Example 64 with TreeSet

use of java.util.TreeSet in project sonarqube by SonarSource.

the class AnalysisContextReportPublisher method writeSystemProps.

private void writeSystemProps(BufferedWriter fileWriter) throws IOException {
    fileWriter.write("System properties:\n");
    Properties sysProps = system.properties();
    for (String prop : new TreeSet<>(sysProps.stringPropertyNames())) {
        if (prop.startsWith(SONAR_PROP_PREFIX)) {
            continue;
        }
        fileWriter.append(String.format(KEY_VALUE_FORMAT, prop, sysProps.getProperty(prop))).append('\n');
    }
}
Also used : TreeSet(java.util.TreeSet) Properties(java.util.Properties) CoreProperties(org.sonar.api.CoreProperties)
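
Properties returns its keys in hash order, so wrapping stringPropertyNames() in a TreeSet is what makes the written report alphabetically sorted and reproducible across runs. A self-contained sketch of the same idiom (the skipped prefix is an arbitrary stand-in for SONAR_PROP_PREFIX):

import java.util.Properties;
import java.util.TreeSet;

public class SortedPropsDemo {
    public static void main(String[] args) {
        Properties sysProps = System.getProperties();
        // The TreeSet sorts the names; Properties alone gives hash order.
        for (String prop : new TreeSet<String>(sysProps.stringPropertyNames())) {
            if (prop.startsWith("sun.")) {
                continue; // skip one prefix, as the example skips "sonar" keys
            }
            System.out.println(prop + "=" + sysProps.getProperty(prop));
        }
    }
}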

Example 65 with TreeSet

use of java.util.TreeSet in project android_frameworks_base by ParanoidAndroid.

the class DependencyFinder method findClassesDeps.

/**
     * Finds all dependencies for all classes listed in zipClasses.
     * Returns a map of all the dependencies found.
     */
Map<String, Set<String>> findClassesDeps(Map<String, ClassReader> zipClasses) {
    // The dependencies that we'll collect.
    // It's a map Class name => uses class names.
    Map<String, Set<String>> dependencyMap = new TreeMap<String, Set<String>>();
    DependencyVisitor visitor = getVisitor();
    int count = 0;
    try {
        for (Entry<String, ClassReader> entry : zipClasses.entrySet()) {
            String name = entry.getKey();
            TreeSet<String> set = new TreeSet<String>();
            dependencyMap.put(name, set);
            visitor.setDependencySet(set);
            ClassReader cr = entry.getValue();
            cr.accept(visitor, 0);
            visitor.setDependencySet(null);
            mLog.debugNoln("Visited %d classes\r", ++count);
        }
    } finally {
        mLog.debugNoln("\n");
    }
    return dependencyMap;
}
Also used : Set(java.util.Set) TreeSet(java.util.TreeSet) ClassReader(org.objectweb.asm.ClassReader) TreeMap(java.util.TreeMap)
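
Pairing an outer TreeMap with inner TreeSets keeps both the class names and each class's dependency list sorted, which makes the resulting report stable and diff-friendly. A minimal sketch of that structure, with made-up class names:

import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;

public class DependencyMapDemo {
    public static void main(String[] args) {
        // Class name -> names of the classes it uses, both kept sorted.
        Map<String, Set<String>> dependencyMap = new TreeMap<String, Set<String>>();
        Set<String> deps = new TreeSet<String>();
        deps.add("java.util.Set");
        deps.add("java.util.List");
        dependencyMap.put("com.example.Foo", deps);
        for (Map.Entry<String, Set<String>> entry : dependencyMap.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
    }
}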

Aggregations

TreeSet (java.util.TreeSet): 3785
ArrayList (java.util.ArrayList): 833
Test (org.junit.Test): 544
HashMap (java.util.HashMap): 500
HashSet (java.util.HashSet): 428
Set (java.util.Set): 422
Map (java.util.Map): 401
IOException (java.io.IOException): 374
File (java.io.File): 339
List (java.util.List): 320
TreeMap (java.util.TreeMap): 229
Iterator (java.util.Iterator): 189
SortedSet (java.util.SortedSet): 186
LinkedList (java.util.LinkedList): 110
LinkedHashSet (java.util.LinkedHashSet): 106
Date (java.util.Date): 94
Collection (java.util.Collection): 90
Comparator (java.util.Comparator): 85
Test (org.testng.annotations.Test): 81
Text (org.apache.hadoop.io.Text): 79