Search in sources:

Example 11 with ShadowDatabaseConfig

use of com.pamirs.pradar.internal.config.ShadowDatabaseConfig in project LinkAgent by shulieTech.

In the class ShadowDataSourceSPIManager, the method refreshAllShadowDatabaseConfigs:

/**
 * Resolves "$..." credential placeholders in shadow datasource configs and re-keys the
 * resolved entries by their JDBC url + business user name.
 *
 * <p>Entries that need no resolution (shadow-table mode, missing credentials, or plain
 * credentials without a "$" placeholder) are copied through under their original key.
 *
 * @param datasourceConfigs current shadow datasource configs, keyed by datasource key
 * @return a new map containing the pass-through entries plus every successfully
 *         refreshed entry; entries whose refresh failed are dropped
 */
public static Map<String, ShadowDatabaseConfig> refreshAllShadowDatabaseConfigs(Map<String, ShadowDatabaseConfig> datasourceConfigs) {
    Map<String, ShadowDatabaseConfig> refreshed = new HashMap<String, ShadowDatabaseConfig>();
    for (Map.Entry<String, ShadowDatabaseConfig> entry : datasourceConfigs.entrySet()) {
        ShadowDatabaseConfig config = entry.getValue();
        // Shadow-table mode (dsType == 1) carries no shadow credentials to resolve.
        boolean isShadowTable = config.getDsType() == 1;
        // Absent credentials cannot be resolved; keep the entry as-is.
        boolean missingCredentials = StringUtils.isEmpty(config.getShadowUsername())
                || StringUtils.isEmpty(config.getShadowPassword());
        // Only configs where the user name OR the password is a "$..." placeholder need work.
        boolean noPlaceholder = !missingCredentials
                && !config.getShadowUsername().startsWith("$")
                && !config.getShadowPassword().startsWith("$");
        if (isShadowTable || missingCredentials || noPlaceholder) {
            refreshed.put(entry.getKey(), config);
            continue;
        }
        logger.info("start process shadow datasource config :{}", entry.getKey());
        if (refreshShadowDatabaseConfig(config)) {
            // Re-key by resolved business url/user so later lookups hit the refreshed config.
            String resolvedKey = DbUrlUtils.getKey(config.getUrl(), config.getUsername());
            logger.info("success process shadow datasource config, url:{}, shadow userName{}, shadow password length :{}", config.getShadowUrl(), config.getShadowUsername(), config.getShadowPassword().length());
            refreshed.put(resolvedKey, config);
        } else {
            // Failed entries are intentionally not copied into the result map.
            logger.error("failed process shadow datasource config, shadow userName:{}", config.getShadowUsername());
        }
    }
    return refreshed;
}
Also used : ShadowDatabaseConfig(com.pamirs.pradar.internal.config.ShadowDatabaseConfig)

Example 12 with ShadowDatabaseConfig

use of com.pamirs.pradar.internal.config.ShadowDatabaseConfig in project LinkAgent by shulieTech.

In the class MongoExecuteInterceptor, the method getParameter0:

/**
 * Rewrites the MongoNamespace of the intercepted operation to its shadow (pressure-test)
 * namespace before the driver executes it.
 *
 * @param advice interception context; args[0] is the Mongo operation object
 * @return the (possibly namespace-rewritten) original parameter array
 * @throws PressureMeasureError when the operation type is unsupported, or a write
 *         operation runs without a configured shadow database/table
 * @throws Throwable from reflective field access on the operation object
 */
@Override
public Object[] getParameter0(Advice advice) throws Throwable {
    // Only cluster-test (pressure) traffic is redirected to shadow namespaces.
    if (!Pradar.isClusterTest()) {
        return advice.getParameterArray();
    }
    Object[] args = advice.getParameterArray();
    Integer operationNum = operationNumMap.get(args[0].getClass());
    if (operationNum == null) {
        LOGGER.error("not support operation class is {} ", args[0].getClass().getName());
        throw new PressureMeasureError("[2]mongo not support pressure operation class is " + args[0].getClass().getName());
    }
    List<ServerAddress> serverAddresses = ((MongoClient) advice.getTarget()).getAllAddress();
    ShadowDatabaseConfig shadowDatabaseConfig = getShadowDatabaseConfig(serverAddresses);
    // Write operations (operationNum > 7) must not run without a shadow config.
    if (operationNum > 7 && shadowDatabaseConfig == null) {
        ErrorReporter.Error error = ErrorReporter.buildError().setErrorType(ErrorTypeEnum.DataSource).setErrorCode("datasource-0005").setMessage("mongo 未配置对应影子表或者影子库").setDetail("mongo 未配置对应影子表或者影子库");
        error.closePradar(ConfigNames.SHADOW_DATABASE_CONFIGS);
        error.report();
        throw new PressureMeasureError("mongo 未配置对应影子表或者影子库");
    }
    // FIX: the original called shadowDatabaseConfig.isShadowDatabase() unconditionally and
    // threw a NullPointerException for read operations (operationNum <= 7) with no shadow
    // config. A missing config on a read, like shadow-database mode, leaves the namespace
    // untouched — consistent with AbstractDBCollectionInterceptor#getPtCollection, where a
    // read without a shadow config falls back to the business collection.
    if (shadowDatabaseConfig == null || shadowDatabaseConfig.isShadowDatabase()) {
        return advice.getParameterArray();
    }
    // Shadow-table mode: swap the business namespace for its shadow counterpart,
    // per concrete operation type.
    MongoNamespace busMongoNamespace;
    switch(operationNum) {
        case FIND:
            objectFieldMapAdd(FindOperation.class);
            busMongoNamespace = ((FindOperation) args[0]).getNamespace();
            setReadPtMongoNamespace(FindOperation.class, args[0], busMongoNamespace, shadowDatabaseConfig);
            break;
        case COUNT:
            objectFieldMapAdd(CountOperation.class);
            busMongoNamespace = (MongoNamespace) objectFieldMap.get(CountOperation.class).get(args[0]);
            setReadPtMongoNamespace(CountOperation.class, args[0], busMongoNamespace, shadowDatabaseConfig);
            break;
        case DISTINCT:
            objectFieldMapAdd(DistinctOperation.class);
            busMongoNamespace = (MongoNamespace) objectFieldMap.get(DistinctOperation.class).get(args[0]);
            setReadPtMongoNamespace(DistinctOperation.class, args[0], busMongoNamespace, shadowDatabaseConfig);
            break;
        case GROUP:
            objectFieldMapAdd(GroupOperation.class);
            busMongoNamespace = ((GroupOperation) args[0]).getNamespace();
            setReadPtMongoNamespace(GroupOperation.class, args[0], busMongoNamespace, shadowDatabaseConfig);
            break;
        case LIST_INDEXES:
            objectFieldMapAdd(ListIndexesOperation.class);
            busMongoNamespace = (MongoNamespace) objectFieldMap.get(ListIndexesOperation.class).get(args[0]);
            setReadPtMongoNamespace(ListIndexesOperation.class, args[0], busMongoNamespace, shadowDatabaseConfig);
            break;
        case MAP_REDUCE_WITH_INLINE:
            busMongoNamespace = ((MapReduceWithInlineResultsOperation) args[0]).getNamespace();
            setReadPtMongoNamespace(MapReduceWithInlineResultsOperation.class, args[0], busMongoNamespace, shadowDatabaseConfig);
            break;
        case PARALLEL_COLLECTION_SCAN:
            objectFieldMapAdd(ParallelCollectionScanOperation.class);
            busMongoNamespace = (MongoNamespace) objectFieldMap.get(ParallelCollectionScanOperation.class).get(args[0]);
            setReadPtMongoNamespace(ParallelCollectionScanOperation.class, args[0], busMongoNamespace, shadowDatabaseConfig);
            break;
        case MIXED_BULK_WRITE:
            objectFieldMapAdd(MixedBulkWriteOperation.class);
            busMongoNamespace = ((MixedBulkWriteOperation) (args[0])).getNamespace();
            setWritePtMongoNamespace(MixedBulkWriteOperation.class, args[0], busMongoNamespace, shadowDatabaseConfig);
            break;
        case BASE_WRITE:
            objectFieldMapAdd(BaseWriteOperation.class);
            busMongoNamespace = ((BaseWriteOperation) (args[0])).getNamespace();
            setWritePtMongoNamespace(BaseWriteOperation.class, args[0], busMongoNamespace, shadowDatabaseConfig);
            break;
        case FIND_AND_DELETE:
            objectFieldMapAdd(FindAndDeleteOperation.class);
            busMongoNamespace = ((FindAndDeleteOperation) (args[0])).getNamespace();
            setWritePtMongoNamespace(FindAndDeleteOperation.class, args[0], busMongoNamespace, shadowDatabaseConfig);
            break;
        case FIND_AND_REPLACE:
            objectFieldMapAdd(FindAndReplaceOperation.class);
            busMongoNamespace = ((FindAndReplaceOperation) (args[0])).getNamespace();
            setWritePtMongoNamespace(FindAndReplaceOperation.class, args[0], busMongoNamespace, shadowDatabaseConfig);
            break;
        case FIND_AND_UPDATE:
            objectFieldMapAdd(FindAndUpdateOperation.class);
            busMongoNamespace = ((FindAndUpdateOperation) (args[0])).getNamespace();
            setWritePtMongoNamespace(FindAndUpdateOperation.class, args[0], busMongoNamespace, shadowDatabaseConfig);
            break;
        case MAP_REDUCE_TO_COLLECTION:
            objectFieldMapAdd(MapReduceToCollectionOperation.class);
            busMongoNamespace = ((MapReduceToCollectionOperation) (args[0])).getNamespace();
            setWritePtMongoNamespace(MapReduceToCollectionOperation.class, args[0], busMongoNamespace, shadowDatabaseConfig);
            break;
        case INSERT_TO_COLLECTION:
            objectFieldMapAdd(InsertOperation.class);
            busMongoNamespace = ((InsertOperation) (args[0])).getNamespace();
            setWritePtMongoNamespace(InsertOperation.class, args[0], busMongoNamespace, shadowDatabaseConfig);
            break;
        case UPDATE_OPERATION:
            objectFieldMapAdd(UpdateOperation.class);
            busMongoNamespace = ((UpdateOperation) (args[0])).getNamespace();
            setWritePtMongoNamespace(UpdateOperation.class, args[0], busMongoNamespace, shadowDatabaseConfig);
            break;
        case DELETE_OPERATION:
            objectFieldMapAdd(DeleteOperation.class);
            busMongoNamespace = ((DeleteOperation) (args[0])).getNamespace();
            setWritePtMongoNamespace(DeleteOperation.class, args[0], busMongoNamespace, shadowDatabaseConfig);
            break;
        default:
            LOGGER.error("not support operation class is {} ", args[0].getClass().getName());
            throw new PressureMeasureError("[3]mongo not support pressure operation class is " + args[0].getClass().getName());
    }
    return advice.getParameterArray();
}
Also used : ServerAddress(com.mongodb.ServerAddress) ShadowDatabaseConfig(com.pamirs.pradar.internal.config.ShadowDatabaseConfig) MongoNamespace(com.mongodb.MongoNamespace) MongoClient(com.mongodb.MongoClient) ErrorReporter(com.pamirs.pradar.pressurement.agent.shared.service.ErrorReporter) PressureMeasureError(com.pamirs.pradar.exception.PressureMeasureError)

Example 13 with ShadowDatabaseConfig

use of com.pamirs.pradar.internal.config.ShadowDatabaseConfig in project LinkAgent by shulieTech.

In the class SyncDelegateOperationExecutorInterceptor, the method getParameter0:

/**
 * Redirects the intercepted sync Mongo operation to its shadow namespace for
 * cluster-test traffic.
 *
 * @param advice interception context; args[0] is the operation object carrying a
 *               "namespace" field
 * @return the (possibly namespace-rewritten) original parameter array
 * @throws PressureMeasureError when the operation class is unsupported
 * @throws Throwable from reflective field access
 */
@Override
public Object[] getParameter0(Advice advice) throws Throwable {
    if (!Pradar.isClusterTest()) {
        return advice.getParameterArray();
    }
    Object[] args = advice.getParameterArray();
    // 1 = read operation, 2 = write operation (see operationNumMap population).
    Integer operationNum = operationNumMap.get(args[0].getClass().getSimpleName());
    if (operationNum == null) {
        LOGGER.error("not support operation class is {} ", args[0].getClass().getName());
        throw new PressureMeasureError("[4]mongo not support pressure operation class is " + args[0].getClass().getName());
    }
    // Lazily capture the enclosing MongoClientDelegate via the synthetic outer-instance
    // field of the intercepted inner-class executor.
    if (mongoClientDelegate == null) {
        Field field = null;
        try {
            field = advice.getTarget().getClass().getDeclaredField("this$0");
            field.setAccessible(true);
            mongoClientDelegate = (MongoClientDelegate) field.get(advice.getTarget());
        } catch (Throwable e) {
            // FIX: pass the Throwable as the dedicated last argument so SLF4J prints the
            // stack trace; the original "{}" placeholder consumed it and logged only toString().
            LOGGER.error("DelegateOperationExecutorInterceptor error", e);
        } finally {
            if (field != null) {
                field.setAccessible(false);
            }
        }
    }
    // NOTE(review): if the reflection above failed, mongoClientDelegate is still null and the
    // next line will throw — presumably acceptable for pressure traffic, but worth confirming.
    ClusterSettings clusterSettings = (ClusterSettings) ReflectionUtils.getFieldValue(ReflectionUtils.getFieldValue(mongoClientDelegate, "cluster"), "settings");
    List<ServerAddress> serverAddresses = clusterSettings.getHosts();
    // Use the first host that has a shadow database config registered.
    ShadowDatabaseConfig shadowDatabaseConfig = null;
    for (ServerAddress serverAddress : serverAddresses) {
        shadowDatabaseConfig = GlobalConfig.getInstance().getShadowDatabaseConfig(serverAddress.toString());
        if (shadowDatabaseConfig != null) {
            break;
        }
    }
    // Cache the operation class's "namespace" Field for subsequent interceptions.
    final Field field = objectFieldMap.get(args[0].getClass());
    if (field == null) {
        final Field namespace = ReflectionUtils.getDeclaredField(args[0], "namespace");
        namespace.setAccessible(Boolean.TRUE);
        objectFieldMap.put(args[0].getClass(), namespace);
    }
    MongoNamespace busMongoNamespace = (MongoNamespace) objectFieldMap.get(args[0].getClass()).get(args[0]);
    switch(operationNum) {
        case 1:
            setReadPtMongoNamespace(args[0], busMongoNamespace, shadowDatabaseConfig);
            break;
        case 2:
            setWritePtMongoNamespace(args[0], busMongoNamespace, shadowDatabaseConfig);
            break;
        default:
            LOGGER.error("not support operation class is {} ", args[0].getClass().getName());
            throw new PressureMeasureError("[5]mongo not support pressure operation class is " + args[0].getClass().getName());
    }
    return advice.getParameterArray();
}
Also used : Field(java.lang.reflect.Field) ClusterSettings(com.mongodb.connection.ClusterSettings) PressureMeasureError(com.pamirs.pradar.exception.PressureMeasureError) ServerAddress(com.mongodb.ServerAddress) ShadowDatabaseConfig(com.pamirs.pradar.internal.config.ShadowDatabaseConfig) MongoNamespace(com.mongodb.MongoNamespace)

Example 14 with ShadowDatabaseConfig

use of com.pamirs.pradar.internal.config.ShadowDatabaseConfig in project LinkAgent by shulieTech.

In the class AbstractDBCollectionInterceptor, the method getPtCollection:

/**
 * Resolves (and caches) the shadow (pressure-test) collection for a business collection.
 *
 * @param bizDbCollection the business collection being accessed
 * @param advice          interception context, forwarded to shadow-database resolution
 * @return the shadow collection, or {@code null} when no redirection applies
 *         ($cmd, already-shadow names, or an unconfigured read)
 * @throws PressureMeasureError when the collection name is blank, or a write runs
 *         without a shadow database/table configured
 * @throws Throwable from shadow collection construction
 */
protected DBCollection getPtCollection(DBCollection bizDbCollection, Advice advice) throws Throwable {
    String busCollectionName = getCollectionName(bizDbCollection);
    // Administrative pseudo-collection — never redirected.
    if ("$cmd".equals(busCollectionName)) {
        return null;
    }
    // Already a shadow collection/database; avoid double-prefixing.
    if (Pradar.isClusterTestPrefix(busCollectionName) || Pradar.isClusterTestPrefix(bizDbCollection.getDB().getName())) {
        return null;
    }
    if (StringUtils.isBlank(busCollectionName)) {
        throw new PressureMeasureError("mongo压测请求获取业务collection异常");
    }
    DBCollection ptCollection = collectionMapping.get(busCollectionName);
    if (ptCollection == null) {
        synchronized (lock) {
            // FIX: re-read inside the lock. The original re-read the map BEFORE entering
            // synchronized, so the inner null-check tested a stale value and two racing
            // threads could both build (and overwrite) the shadow collection.
            ptCollection = collectionMapping.get(busCollectionName);
            if (ptCollection == null) {
                ShadowDatabaseConfig shadowDatabaseConfig = getShadowDatabaseConfig(bizDbCollection);
                if (shadowDatabaseConfig == null) {
                    if (isRead()) {
                        // Read with no shadow config: fall back to the business collection.
                        return null;
                    } else {
                        ErrorReporter.buildError().setErrorType(ErrorTypeEnum.DataSource).setErrorCode("datasource-0002").setMessage("mongodb影子库/表未配置!").setDetail("业务库配置:::url: " + bizDbCollection.getDB().getMongo().getAddress().toString()).report();
                        throw new PressureMeasureError("mongodb影子库/表未配置");
                    }
                }
                if (shadowDatabaseConfig.isShadowDatabase()) {
                    ptCollection = doShadowDatabase(bizDbCollection, busCollectionName, shadowDatabaseConfig, advice);
                } else {
                    ptCollection = doShadowTable(bizDbCollection, busCollectionName, shadowDatabaseConfig);
                }
                if (ptCollection != null) {
                    // Mirror the business collection's driver-level settings onto the shadow.
                    ptCollection.setWriteConcern(bizDbCollection.getWriteConcern());
                    ptCollection.setDBDecoderFactory(bizDbCollection.getDBDecoderFactory());
                    ptCollection.setDBEncoderFactory(bizDbCollection.getDBEncoderFactory());
                    ptCollection.setObjectClass(bizDbCollection.getObjectClass());
                    ptCollection.setReadPreference(bizDbCollection.getReadPreference());
                    ptCollection.setOptions(bizDbCollection.getOptions());
                    collectionMapping.put(busCollectionName, ptCollection);
                }
            }
        }
    }
    return ptCollection;
}
Also used : PressureMeasureError(com.pamirs.pradar.exception.PressureMeasureError) ShadowDatabaseConfig(com.pamirs.pradar.internal.config.ShadowDatabaseConfig)

Example 15 with ShadowDatabaseConfig

use of com.pamirs.pradar.internal.config.ShadowDatabaseConfig in project LinkAgent by shulieTech.

In the class SqlParser, the method parseAndReplaceSchema:

/**
 * Rewrites every schema/table reference in a SQL statement to its shadow counterpart.
 *
 * @param sql        the business SQL to rewrite
 * @param key        datasource key used to look up the shadow database config
 * @param dbTypeName database type name understood by the Druid parser
 * @return the rewritten SQL, or the input unchanged when no shadow config exists
 * @throws SQLException when the db type is unsupported or the SQL cannot be parsed
 */
public static String parseAndReplaceSchema(String sql, String key, String dbTypeName) throws SQLException {
    // Normalize a spacing artifact the parser would otherwise choke on.
    sql = sql.replaceAll("<  >", "<>");
    ShadowDatabaseConfig config = GlobalConfig.getInstance().getShadowDatabaseConfig(key);
    if (config == null) {
        // No shadow config for this datasource: pass the SQL through untouched.
        return sql;
    }
    DbType dbType = DbType.of(dbTypeName);
    SQLStatementParser parser = SQLParserUtils.createSQLStatementParser(sql, dbTypeName);
    if (parser == null) {
        // Record (capped at 10 entries) and reject SQL for unsupported db types.
        if (GlobalConfig.getInstance().getWrongSqlDetail().size() < 10) {
            GlobalConfig.getInstance().addWrongSqlDetail(StringUtils.substring("dbType not support" + key + " dbType" + dbTypeName + " sql" + sql, 0, 1995));
        }
        throw new SQLException("dbType not support" + key + " dbType" + dbTypeName + " sql" + sql);
    }
    final StringWriter rewritten = new StringWriter();
    try {
        // Parse into an AST, collect every referenced table, build a rename mapping,
        // then re-emit each statement with the mapping applied.
        final List<SQLStatement> statements = parser.parseStatementList();
        for (final SQLStatement statement : statements) {
            SQLASTOutputVisitor outputVisitor = SQLUtils.createOutputVisitor(rewritten, dbType);
            SchemaStatVisitor statVisitor = SQLUtils.createSchemaStatVisitor(dbType);
            statement.accept(statVisitor);
            final Map<TableStat.Name, TableStat> referencedTables = statVisitor.getTables();
            Map<String, String> renameMapping = new HashMap<String, String>();
            for (Map.Entry<TableStat.Name, TableStat> tableEntry : referencedTables.entrySet()) {
                String qualifiedName = StringUtils.replace(tableEntry.getKey().getName(), "\"", "");
                // Skip function-call "tables" reported by the stat visitor.
                if (StringUtils.indexOf(qualifiedName, '(') != -1 && StringUtils.indexOf(qualifiedName, ')') != -1) {
                    continue;
                }
                // Split an optional "schema.table" qualifier.
                final int dotIndex = StringUtils.indexOf(qualifiedName, '.');
                String schema = dotIndex == -1 ? null : StringUtils.substring(qualifiedName, 0, dotIndex);
                String table = dotIndex == -1 ? qualifiedName : StringUtils.substring(qualifiedName, dotIndex + 1);
                String shadowSchema = toShadowSchema(schema, config);
                String shadowTable = toShadowTable(table, config);
                if (StringUtils.isBlank(shadowSchema)) {
                    renameMapping.put(qualifiedName, shadowTable);
                } else {
                    renameMapping.put(qualifiedName, shadowSchema + '.' + shadowTable);
                    renameMapping.put(schema, shadowSchema);
                }
            }
            outputVisitor.setTableMapping(renameMapping);
            statement.accept(outputVisitor);
        }
    } catch (Throwable e) {
        // Record (capped at 10 entries) and surface any parse/rewrite failure.
        if (GlobalConfig.getInstance().getWrongSqlDetail().size() < 10) {
            GlobalConfig.getInstance().addWrongSqlDetail(StringUtils.substring(("Exception:" + e + " sql" + sql), 0, 1995));
        }
        throw new SQLException("Wrong sql:" + sql, e);
    }
    return rewritten.toString();
}
Also used : SQLStatementParser(com.shulie.druid.sql.parser.SQLStatementParser) SQLException(java.sql.SQLException) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) TableStat(com.shulie.druid.stat.TableStat) ShadowDatabaseConfig(com.pamirs.pradar.internal.config.ShadowDatabaseConfig) SQLStatement(com.shulie.druid.sql.ast.SQLStatement) DbType(com.shulie.druid.DbType) StringWriter(java.io.StringWriter) SQLASTOutputVisitor(com.shulie.druid.sql.visitor.SQLASTOutputVisitor) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) SchemaStatVisitor(com.shulie.druid.sql.visitor.SchemaStatVisitor)

Aggregations

ShadowDatabaseConfig (com.pamirs.pradar.internal.config.ShadowDatabaseConfig)39 PressureMeasureError (com.pamirs.pradar.exception.PressureMeasureError)8 ServerAddress (com.mongodb.ServerAddress)6 Map (java.util.Map)4 MongoNamespace (com.mongodb.MongoNamespace)3 ClusterSettings (com.mongodb.connection.ClusterSettings)3 ErrorReporter (com.pamirs.pradar.pressurement.agent.shared.service.ErrorReporter)3 Properties (java.util.Properties)3 DruidDataSource (com.alibaba.druid.pool.DruidDataSource)2 AtomikosNonXADataSourceBean (com.atomikos.jdbc.nonxa.AtomikosNonXADataSourceBean)2 ComboPooledDataSource (com.mchange.v2.c3p0.ComboPooledDataSource)2 MongoClient (com.mongodb.MongoClient)2 Cluster (com.mongodb.internal.connection.Cluster)2 ShadowDataSourceConfigModifyEvent (com.pamirs.pradar.pressurement.agent.event.impl.ShadowDataSourceConfigModifyEvent)2 HikariDataSource (com.zaxxer.hikari.HikariDataSource)2 SQLException (java.sql.SQLException)2 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)2 BasicDataSource (org.apache.commons.dbcp.BasicDataSource)2 BasicDataSource (org.apache.commons.dbcp2.BasicDataSource)2 DataSource (org.apache.tomcat.jdbc.pool.DataSource)2