Use of io.mycat.config.ClusterConfig in project Mycat2 by MyCATApache: class StatisticCenterTest, method init.
@BeforeClass
public static void init() throws Exception {
    // Assemble the minimal runtime context the statistic center needs:
    // a Vertx instance, server config, and a DRDS compiler with no schemas.
    HashMap<Class, Object> context = new HashMap<>();
    context.put(Vertx.class, Vertx.vertx());
    context.put(ServerConfig.class, new ServerConfig());
    context.put(DrdsSqlCompiler.class, new DrdsSqlCompiler(new DrdsConst() {
        @Override
        public NameMap<SchemaHandler> schemas() {
            return new NameMap<>();
        }
    }));
    // Register early so components created below can already resolve the context.
    MetaClusterCurrent.register(context);
    String customerDatasourceProvider = DruidDatasourceProvider.class.getName();
    // Single prototype datasource pointing at a local MySQL instance.
    DatasourceConfig datasourceConfig = new DatasourceConfig();
    datasourceConfig.setDbType("mysql");
    datasourceConfig.setUser("root");
    datasourceConfig.setPassword("123456");
    datasourceConfig.setName("prototypeDs");
    datasourceConfig.setUrl("jdbc:mysql://localhost:3306/mysql");
    Map<String, DatasourceConfig> datasources = Maps.of("prototypeDs", datasourceConfig);
    // One "prototype" cluster whose only master is the datasource above.
    ClusterConfig clusterConfig = new ClusterConfig();
    clusterConfig.setName("prototype");
    clusterConfig.setMasters(Arrays.asList("prototypeDs"));
    Map<String, ClusterConfig> clusterConfigs = Maps.of("prototype", clusterConfig);
    // Capture scheduled heartbeat tasks instead of running them, so the test
    // controls when (if ever) they execute.
    LinkedList<Runnable> runnables = new LinkedList<>();
    ReplicaSelectorManager manager = ReplicaSelectorRuntime.create(new ArrayList<>(clusterConfigs.values()), datasources, new LoadBalanceManager(), name -> 0, (command, initialDelay, period, unit) -> {
        runnables.add(command);
        return () -> {
        };
    });
    context.put(ReplicaSelectorManager.class, manager);
    // Fix: reuse the provider name computed above instead of recomputing it
    // (customerDatasourceProvider was previously an unused local).
    context.put(JdbcConnectionManager.class, jdbcManager = new JdbcConnectionManager(customerDatasourceProvider, datasources));
    // Re-register so the newly added managers are visible through MetaClusterCurrent.
    MetaClusterCurrent.register(context);
    statisticCenter.init();
}
Use of io.mycat.config.ClusterConfig in project Mycat2 by MyCATApache: class SpmTest, method init.
@BeforeClass
public static void init() throws Exception {
    // Assemble the minimal runtime context for SPM tests: Vertx, server config,
    // an IO executor, and a DRDS compiler with no schemas.
    HashMap<Class, Object> context = new HashMap<>();
    context.put(Vertx.class, Vertx.vertx());
    context.put(ServerConfig.class, new ServerConfig());
    context.put(IOExecutor.class, IOExecutor.DEFAULT);
    context.put(DrdsSqlCompiler.class, new DrdsSqlCompiler(new DrdsConst() {
        @Override
        public NameMap<SchemaHandler> schemas() {
            return new NameMap<>();
        }
    }));
    // Register early so components created below can already resolve the context.
    MetaClusterCurrent.register(context);
    String customerDatasourceProvider = DruidDatasourceProvider.class.getName();
    // Single prototype datasource pointing at a local MySQL instance.
    DatasourceConfig datasourceConfig = new DatasourceConfig();
    datasourceConfig.setDbType("mysql");
    datasourceConfig.setUser("root");
    datasourceConfig.setPassword("123456");
    datasourceConfig.setName("prototypeDs");
    datasourceConfig.setUrl("jdbc:mysql://localhost:3306/mysql");
    Map<String, DatasourceConfig> datasources = Maps.of("prototypeDs", datasourceConfig);
    // One "prototype" cluster whose only master is the datasource above.
    ClusterConfig clusterConfig = new ClusterConfig();
    clusterConfig.setName("prototype");
    clusterConfig.setMasters(Arrays.asList("prototypeDs"));
    Map<String, ClusterConfig> clusterConfigs = Maps.of("prototype", clusterConfig);
    // Capture scheduled heartbeat tasks instead of running them, so the test
    // controls when (if ever) they execute.
    LinkedList<Runnable> runnables = new LinkedList<>();
    ReplicaSelectorManager manager = ReplicaSelectorRuntime.create(new ArrayList<>(clusterConfigs.values()), datasources, new LoadBalanceManager(), name -> 0, (command, initialDelay, period, unit) -> {
        runnables.add(command);
        return () -> {
        };
    });
    context.put(ReplicaSelectorManager.class, manager);
    // Fix: reuse the provider name computed above instead of recomputing it
    // (customerDatasourceProvider was previously an unused local).
    context.put(JdbcConnectionManager.class, jdbcManager = new JdbcConnectionManager(customerDatasourceProvider, datasources));
    // Re-register so the newly added managers are visible through MetaClusterCurrent.
    MetaClusterCurrent.register(context);
}
Use of io.mycat.config.ClusterConfig in project Mycat2 by MyCATApache: class RWEntry, method snapshot.
public static RWEntryMap snapshot() {
    // Without both the selector manager and the router config there is
    // nothing meaningful to report, so return an empty snapshot.
    if (!MetaClusterCurrent.exist(ReplicaSelectorManager.class) || !MetaClusterCurrent.exist(MycatRouterConfig.class)) {
        return new RWEntryMap();
    }
    ReplicaSelectorManager selectorManager = MetaClusterCurrent.wrapper(ReplicaSelectorManager.class);
    MycatRouterConfig routerConfig = MetaClusterCurrent.wrapper(MycatRouterConfig.class);
    Map<String, ClusterConfig> clustersByName = routerConfig.getClusters().stream().collect(Collectors.toMap(c -> c.getName(), c -> c));
    Map<String, ReplicaSelector> selectorsByName = selectorManager.getReplicaMap();
    Map<String, RWEntry> result = new HashMap<>();
    for (Map.Entry<String, Entry> counterEntry : map.entrySet()) {
        String replicaName = counterEntry.getKey();
        Entry counters = counterEntry.getValue();
        // A replica is "healthy" only when a selector exists for it and every
        // configured datasource (masters and replicas, deduplicated) is both
        // registered and alive.
        boolean healthy = false;
        ReplicaSelector selector = selectorsByName.get(replicaName);
        if (selector != null) {
            ClusterConfig clusterConfig = clustersByName.get(selector.getName());
            List<String> dsNames = (List) ImmutableList.builder().addAll(clusterConfig.getMasters()).addAll(clusterConfig.getReplicas()).build().stream().distinct().collect(Collectors.toList());
            healthy = true;
            for (String dsName : dsNames) {
                PhysicsInstance instance = selector.getRawDataSourceMap().get(dsName);
                if (instance == null || !instance.isAlive()) {
                    healthy = false;
                    break;
                }
            }
        }
        result.put(replicaName, new RWEntry(counters.MASTER.get(), counters.SLAVE.get(), healthy));
    }
    RWEntryMap snapshot = new RWEntryMap();
    snapshot.rwMap = result;
    return snapshot;
}
Use of io.mycat.config.ClusterConfig in project Mycat2 by MyCATApache: class CreateTableSQLHandler, method createTable.
/**
 * Creates a table definition in the router config, deriving table type
 * (normal/global/range/hash) either from the CREATE TABLE AST or from the
 * supplied hint map. Commits the config change and verifies the table is
 * visible in metadata afterwards.
 *
 * @param hint           optional properties; may carry "createTableSql",
 *                       "type" and "targetName" entries
 * @param schemaName     target schema; resolved from the AST when null
 * @param tableName      target table; resolved from the AST when null
 * @param createTableSql parsed CREATE TABLE statement; resolved from hint when null
 * @throws MycatException when the table is not visible after commit
 */
public synchronized void createTable(Map hint, String schemaName, String tableName, MySqlCreateTableStatement createTableSql) throws Exception {
    // Allow the statement to be supplied through the hint, either already
    // parsed or as raw SQL text.
    if (createTableSql == null && hint != null) {
        Object sql = hint.get("createTableSql");
        if (sql instanceof MySqlCreateTableStatement) {
            createTableSql = (MySqlCreateTableStatement) sql;
        } else {
            createTableSql = (MySqlCreateTableStatement) SQLUtils.parseSingleMysqlStatement(Objects.toString(sql));
        }
    }
    Objects.requireNonNull(createTableSql);
    try (MycatRouterConfigOps ops = ConfigUpdater.getOps()) {
        // Fall back to the names embedded in the statement itself.
        if (schemaName == null || tableName == null) {
            schemaName = SQLUtils.normalize(createTableSql.getSchema());
            tableName = SQLUtils.normalize(createTableSql.getTableName());
        }
        if (hint == null || hint.isEmpty()) {
            // No hint: infer the table type from the statement.
            if (createTableSql.isBroadCast()) {
                ops.putGlobalTable(schemaName, tableName, createTableSql);
            } else if (createTableSql.getDbPartitionBy() == null && createTableSql.getTablePartitionBy() == null) {
                ops.putNormalTable(schemaName, tableName, createTableSql);
            } else {
                MetadataManager metadataManager = MetaClusterCurrent.wrapper(MetadataManager.class);
                int defaultStoreNodeNum = metadataManager.getDefaultStoreNodeNum();
                if (defaultStoreNodeNum == 0) {
                    // No store nodes configured yet: clone the prototype cluster
                    // as a replica so the sharded table has somewhere to live.
                    ops.getOriginal().getClusters().stream().filter(i -> "prototype".equals(i.getName())).findFirst().ifPresent(clusterConfig -> {
                        ClusterConfig config = JsonUtil.from(JsonUtil.toJson(clusterConfig), ClusterConfig.class);
                        ops.putReplica(config);
                    });
                }
                ops.putHashTable(schemaName, tableName, createTableSql, getAutoHashProperties(createTableSql));
            }
        } else {
            // Explicit hint: the "type" entry decides the table kind.
            Map<String, Object> infos = hint;
            switch (Objects.requireNonNull(infos.get("type")).toString()) {
                case "normal": {
                    String targetName = (String) infos.get("targetName");
                    ops.putNormalTable(schemaName, tableName, createTableSql, targetName);
                    break;
                }
                case "global": {
                    ops.putGlobalTable(schemaName, tableName, createTableSql);
                    break;
                }
                case "range": {
                    ops.putRangeTable(schemaName, tableName, createTableSql, infos);
                    break;
                }
                case "hash": {
                    ops.putHashTable(schemaName, tableName, createTableSql, infos);
                    break;
                }
            }
        }
        ops.commit();
        // Verify the committed table actually materialized in metadata.
        MetadataManager metadataManager = MetaClusterCurrent.wrapper(MetadataManager.class);
        TableHandler table = metadataManager.getTable(schemaName, tableName);
        if (table == null) {
            throw new MycatException("create table fail:" + schemaName + "." + tableName);
        }
    }
}
Use of io.mycat.config.ClusterConfig in project Mycat2 by MyCATApache: class StdStorageManagerImpl, method getPrototypeDatasourceConfig.
// Resolves the datasource config to use as the "prototype" connection:
// first a datasource referenced by the "prototype" cluster (masters before
// replicas), then a datasource literally named "prototype" or "prototypeDs",
// and finally any datasource at all. Returns empty only when no datasources
// exist (db access disabled).
// NOTE(review): @Nullable on a method returning Optional is contradictory —
// this method never returns null; the annotation is likely stale.
@Nullable
public static Optional<DatasourceConfig> getPrototypeDatasourceConfig(StorageManager fileStorageManager) {
    KV<ClusterConfig> clusterConfigKV = fileStorageManager.get(ClusterConfig.class);
    KV<DatasourceConfig> datasourceConfigKV = fileStorageManager.get(DatasourceConfig.class);
    Optional<ClusterConfig> prototypeOptional = clusterConfigKV.get("prototype");
    Optional<DatasourceConfig> datasourceConfigOptional = prototypeOptional.flatMap(clusterConfig -> {
        // Collect masters then replicas, tolerating either list being absent.
        List<String> masters = Optional.ofNullable(clusterConfig.getMasters()).orElse(Collections.emptyList());
        List<String> replicas = Optional.ofNullable(clusterConfig.getReplicas()).orElse(Collections.emptyList());
        List<String> strings = new ArrayList<>();
        strings.addAll(masters);
        strings.addAll(replicas);
        // NOTE(review): clusterConfigKV.get returns an Optional above, so if
        // datasourceConfigKV.get does too, this null filter may need to be
        // Optional::isPresent to skip names with no config — verify KV.get's
        // contract before changing.
        return strings.stream().map(i -> datasourceConfigKV.get(i)).filter(i -> i != null).findFirst();
    }).orElse(Optional.ofNullable(datasourceConfigKV.get("prototype")).orElse(datasourceConfigKV.get("prototypeDs")));
    DatasourceConfig configPrototypeDs = datasourceConfigOptional.orElse(null);
    if (configPrototypeDs == null) {
        // Last resort: fall back to the first datasource of any name.
        List<DatasourceConfig> values = datasourceConfigKV.values();
        if (values.isEmpty()) {
            // No datasources configured at all — db access is disabled.
        } else {
            configPrototypeDs = values.get(0);
        }
    }
    return Optional.ofNullable(configPrototypeDs);
}
Aggregations