use of com.jolbox.bonecp.BoneCPDataSource in project hive by apache.
the class TxnHandler method setupJdbcConnectionPool.
private static synchronized void setupJdbcConnectionPool(HiveConf conf) throws SQLException {
    if (connPool != null)
        return;
    String driverUrl = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORECONNECTURLKEY);
    String user = getMetastoreJdbcUser(conf);
    String passwd = getMetastoreJdbcPasswd(conf);
    String connectionPooler = conf.getVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE).toLowerCase();
    if ("bonecp".equals(connectionPooler)) {
        BoneCPConfig config = new BoneCPConfig();
        config.setJdbcUrl(driverUrl);
        // If we are waiting 60s for a connection, something is really wrong;
        // better to raise an error than hang forever.
        config.setConnectionTimeoutInMs(60000);
        config.setMaxConnectionsPerPartition(10);
        config.setPartitionCount(1);
        config.setUser(user);
        config.setPassword(passwd);
        connPool = new BoneCPDataSource(config);
        // Enable retries to work around a BoneCP bug.
        doRetryOnConnPool = true;
    } else if ("dbcp".equals(connectionPooler)) {
        ObjectPool objectPool = new GenericObjectPool();
        ConnectionFactory connFactory = new DriverManagerConnectionFactory(driverUrl, user, passwd);
        // This doesn't get used, but it's still necessary, see
        // http://svn.apache.org/viewvc/commons/proper/dbcp/branches/DBCP_1_4_x_BRANCH/doc/ManualPoolingDataSourceExample.java?view=markup
        PoolableConnectionFactory poolConnFactory = new PoolableConnectionFactory(connFactory, objectPool, null, null, false, true);
        connPool = new PoolingDataSource(objectPool);
    } else if ("hikaricp".equals(connectionPooler)) {
        HikariConfig config = new HikariConfig();
        config.setJdbcUrl(driverUrl);
        config.setUsername(user);
        config.setPassword(passwd);
        connPool = new HikariDataSource(config);
    } else if ("none".equals(connectionPooler)) {
        LOG.info("Choosing not to pool JDBC connections");
        connPool = new NoPoolConnectionPool(conf);
    } else {
        throw new RuntimeException("Unknown JDBC connection pooling " + connectionPooler);
    }
}
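For context, a minimal sketch (not the actual TxnHandler code) of how a pool built this way might be consumed; the getDbConn helper, the retry count, and the isolation level are illustrative assumptions, and the usual java.sql / javax.sql imports are assumed.
// Hypothetical caller: borrow a connection from the configured pool, retrying
// once when doRetryOnConnPool is set. Names and retry policy are assumptions.
private static Connection getDbConn(DataSource pool, boolean retryOnPoolError) throws SQLException {
    int attempts = retryOnPoolError ? 2 : 1;
    SQLException lastError = null;
    for (int i = 0; i < attempts; i++) {
        try {
            Connection dbConn = pool.getConnection();
            dbConn.setAutoCommit(false);
            dbConn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
            return dbConn;
        } catch (SQLException e) {
            lastError = e;
        }
    }
    throw lastError;
}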
use of com.jolbox.bonecp.BoneCPDataSource in project druid by alibaba.
the class Case2 method f_test_2.
// When testOnBorrow is true, BoneCP handles it differently, so this case is skipped.
public void f_test_2() throws Exception {
    BoneCPDataSource dataSource = new BoneCPDataSource();
    // dataSource.setMaxActive(50);
    dataSource.setMinConnectionsPerPartition(minPoolSize);
    dataSource.setMaxConnectionsPerPartition(maxPoolSize);
    dataSource.setDriverClass(driverClass);
    dataSource.setJdbcUrl(jdbcUrl);
    dataSource.setStatementsCacheSize(100);
    // dataSource.setMaxOpenPreparedStatements(100);
    dataSource.setUsername(user);
    dataSource.setPassword(password);
    dataSource.setConnectionTestStatement("SELECT 1");
    dataSource.setPartitionCount(1);
    dataSource.setAcquireIncrement(5);
    dataSource.setIdleConnectionTestPeriod(0L);
    for (int i = 0; i < executeCount; ++i) {
        p0(dataSource, "boneCP", threadCount);
    }
    System.out.println();
}
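The p0 helper invoked above is defined elsewhere in the druid test class. A hypothetical sketch of what such a stress helper could look like (the worker count behavior, iteration count, and timing output are assumptions, with java.sql and java.util.concurrent imports assumed):
// Hypothetical shape of p0: threadCount workers repeatedly borrow a connection,
// run a trivial statement, return it to the pool, and the total time is printed.
private void p0(final DataSource dataSource, String name, int threadCount) throws Exception {
    final CountDownLatch done = new CountDownLatch(threadCount);
    long start = System.currentTimeMillis();
    for (int t = 0; t < threadCount; ++t) {
        new Thread(() -> {
            try {
                for (int i = 0; i < 1000; ++i) {
                    try (Connection conn = dataSource.getConnection();
                         Statement stmt = conn.createStatement()) {
                        stmt.execute("SELECT 1");
                    }
                }
            } catch (Exception e) {
                e.printStackTrace();
            } finally {
                done.countDown();
            }
        }).start();
    }
    done.await();
    System.out.println(name + " took " + (System.currentTimeMillis() - start) + " ms");
}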
use of com.jolbox.bonecp.BoneCPDataSource in project druid by alibaba.
the class TestPSCache method test_boneCP.
public void test_boneCP() throws Exception {
    BoneCPDataSource ds = new BoneCPDataSource();
    ds.setJdbcUrl("jdbc:mock:test");
    ds.setPartitionCount(1);
    ds.setMaxConnectionsPerPartition(10);
    ds.setMinConnectionsPerPartition(0);
    ds.setPreparedStatementsCacheSize(10);
    for (int i = 0; i < 10; ++i) {
        f(ds, 5);
        System.out.println("--------------------------------------------");
    }
}
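f(ds, 5) is another helper from the druid test class. A hypothetical version that would exercise BoneCP's prepared-statement cache might hold several connections open at once and prepare the same SQL on each (the SQL text and overall structure are assumptions):
// Hypothetical f(ds, n): check out n connections, prepare and run the same
// statement on each so later rounds can hit the statement cache, then release.
private void f(DataSource ds, int count) throws SQLException {
    Connection[] connections = new Connection[count];
    for (int i = 0; i < count; ++i) {
        connections[i] = ds.getConnection();
        PreparedStatement ps = connections[i].prepareStatement("SELECT 1");
        ps.execute();
        ps.close();
    }
    for (int i = 0; i < count; ++i) {
        connections[i].close();
    }
}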
use of com.jolbox.bonecp.BoneCPDataSource in project voltdb by VoltDB.
the class JDBCBenchmark method main.
// Application entry point
public static void main(String[] args) {
    try {
        KVConfig config = new KVConfig();
        config.parse(JDBCBenchmark.class.getName(), args);
        System.out.println(config.getConfigDumpString());
        // ---------------------------------------------------------------------------------------------------------------------------------------------------
        // We need only do this once, to "hot cache" the JDBC driver reference so the JVM may realize it's there.
        Class.forName(DRIVER_NAME);
        // Prepare the JDBC URL for the VoltDB driver
        String url = "jdbc:voltdb://" + config.servers;
        // Prepare the DataSource if a connection pool was chosen
        if (config.externalConnectionPool.equalsIgnoreCase(C3P0_CONNECTIONPOOL)) {
            useConnectionPool = true;
            ComboPooledDataSource cpds = new ComboPooledDataSource();
            // loads the JDBC driver
            cpds.setDriverClass(DRIVER_NAME);
            cpds.setJdbcUrl(url);
            Ds = cpds;
        } else if (config.externalConnectionPool.equalsIgnoreCase(TOMCAT_CONNECTIONPOOL)) {
            useConnectionPool = true;
            // read the config file for the connection pool
            String configName = "tomcat.properties";
            boolean useDefaultConnectionPoolConfig = true;
            Properties cpProperties = new Properties();
            try {
                FileInputStream fileInput = new FileInputStream(new File(configName));
                cpProperties.load(fileInput);
                fileInput.close();
                useDefaultConnectionPoolConfig = false;
            } catch (FileNotFoundException e) {
                System.out.println("connection pool property file '" + configName + "' not found, use default settings");
            }
            PoolProperties p = new PoolProperties();
            p.setUrl(url);
            p.setDriverClassName(DRIVER_NAME);
            if (useDefaultConnectionPoolConfig) {
                p.setInitialSize(config.threads + 1);
            } else {
                p.setInitialSize(Integer.parseInt(cpProperties.getProperty("tomcat.initialSize", "40")));
            }
            org.apache.tomcat.jdbc.pool.DataSource tomcatDs = new org.apache.tomcat.jdbc.pool.DataSource();
            tomcatDs.setPoolProperties(p);
            Ds = tomcatDs;
        } else if (config.externalConnectionPool.equalsIgnoreCase(BONECP_CONNECTIONPOOL)) {
            useConnectionPool = true;
            String configName = "bonecp.properties";
            boolean useDefaultConnectionPoolConfig = true;
            Properties cpProperties = new Properties();
            try {
                FileInputStream fileInput = new FileInputStream(new File(configName));
                cpProperties.load(fileInput);
                fileInput.close();
                useDefaultConnectionPoolConfig = false;
            } catch (FileNotFoundException e) {
                System.out.println("connection pool property file '" + configName + "' not found, use default settings");
            }
            BoneCPConfig p;
            if (useDefaultConnectionPoolConfig) {
                p = new BoneCPConfig();
                p.setDefaultReadOnly(false);
                p.setPartitionCount(config.threads / 2);
                p.setMaxConnectionsPerPartition(4);
            } else {
                p = new BoneCPConfig(cpProperties);
            }
            // set the JDBC URL
            p.setJdbcUrl(url + "?enableSetReadOnly=true");
            BoneCPDataSource boneDs = new BoneCPDataSource(p);
            Ds = boneDs;
        } else if (config.externalConnectionPool.equalsIgnoreCase(HIKARI_CONNECTIONPOOL)) {
            useConnectionPool = true;
            HikariConfig p = new HikariConfig("hikari.properties");
            p.setDriverClassName(DRIVER_NAME);
            p.setJdbcUrl(url);
            HikariDataSource hiDs = new HikariDataSource(p);
            Ds = hiDs;
        } else {
            useConnectionPool = false;
            Ds = null;
        }
        // Get a client connection - we retry for a while in case the server hasn't started yet
        System.out.printf("Connecting to: %s\n", url);
        int sleep = 1000;
        while (true) {
            try {
                if (useConnectionPool) {
                    Ds.getConnection();
                    System.out.printf("Using Connection Pool: %s\n", config.externalConnectionPool);
                }
                Con = DriverManager.getConnection(url, "", "");
                break;
            } catch (Exception e) {
                System.err.printf("Connection failed - retrying in %d second(s).\n " + e, sleep / 1000);
                try {
                    Thread.sleep(sleep);
                } catch (Exception tie) {
                }
                if (sleep < 8000)
                    sleep += sleep;
            }
        }
        // Statistics manager objects from the connection, used to generate latency histogram
        ClientStatsContext fullStatsContext = ((IVoltDBConnection) Con).createStatsContext();
        periodicStatsContext = ((IVoltDBConnection) Con).createStatsContext();
        System.out.println("Connected. Starting benchmark.");
        // Get a payload generator to create random Key-Value pairs to store in the database and process (uncompress) pairs retrieved from the database.
        final PayloadProcessor processor = new PayloadProcessor(config.keysize, config.minvaluesize, config.maxvaluesize, config.entropy, config.poolsize, config.usecompression);
        // Initialize the store
        if (config.preload) {
            System.out.print("Initializing data store... ");
            final PreparedStatement removeCS = Con.prepareStatement("DELETE FROM store;");
            final CallableStatement putCS = Con.prepareCall("{call STORE.upsert(?,?)}");
            for (int i = 0; i < config.poolsize; i++) {
                if (i == 0) {
                    removeCS.execute();
                }
                putCS.setString(1, String.format(processor.KeyFormat, i));
                putCS.setBytes(2, processor.generateForStore().getStoreValue());
                putCS.execute();
            }
            System.out.println(" Done.");
        }
        // start the stats
        fullStatsContext.fetchAndResetBaseline();
        periodicStatsContext.fetchAndResetBaseline();
        benchmarkStartTS = System.currentTimeMillis();
        // ---------------------------------------------------------------------------------------------------------------------------------------------------
        // Create a Timer task to display performance data on the operating procedures
        Timer timer = new Timer();
        TimerTask statsPrinting = new TimerTask() {
            @Override
            public void run() {
                printStatistics();
            }
        };
        timer.scheduleAtFixedRate(statsPrinting, config.displayinterval * 1000l, config.displayinterval * 1000l);
        // ---------------------------------------------------------------------------------------------------------------------------------------------------
        // Create multiple processing threads
        ArrayList<Thread> threads = new ArrayList<Thread>();
        for (int i = 0; i < config.threads; i++)
            threads.add(new Thread(new ClientThread(url, processor, config.duration, config.getputratio)));
        // Start threads
        for (Thread thread : threads)
            thread.start();
        // Wait for threads to complete
        for (Thread thread : threads)
            thread.join();
        // ---------------------------------------------------------------------------------------------------------------------------------------------------
        // We're done - stop the performance statistics display task
        timer.cancel();
        // ---------------------------------------------------------------------------------------------------------------------------------------------------
        // Now print application results:
        // stop and fetch the stats
        ClientStats stats = fullStatsContext.fetch().getStats();
        // 1. Store statistics as tracked by the application (ops counts, payload traffic)
System.out.printf("\n-------------------------------------------------------------------------------------\n" + " Store Results\n" + "-------------------------------------------------------------------------------------\n\n" + "A total of %,d operations was posted...\n" + " - GETs: %,9d Operations (%,9d Misses/Failures)\n" + " %,9d MB in compressed store data\n" + " %,9d MB in uncompressed application data\n" + " Network Throughput: %6.3f Gbps*\n\n" + " - PUTs: %,9d Operations (%,9d Failures)\n" + " %,9d MB in compressed store data\n" + " %,9d MB in uncompressed application data\n" + " Network Throughput: %6.3f Gbps*\n\n" + " - Total Network Throughput: %6.3f Gbps*\n\n" + "* Figure includes key & value traffic but not database protocol overhead.\n" + "\n" + "-------------------------------------------------------------------------------------\n", GetStoreResults.get(0) + GetStoreResults.get(1) + PutStoreResults.get(0) + PutStoreResults.get(1), GetStoreResults.get(0), GetStoreResults.get(1), GetCompressionResults.get(0) / 1048576l, GetCompressionResults.get(1) / 1048576l, ((double) GetCompressionResults.get(0) + (GetStoreResults.get(0) + GetStoreResults.get(1)) * config.keysize) / (134217728d * config.duration), PutStoreResults.get(0), PutStoreResults.get(1), PutCompressionResults.get(0) / 1048576l, PutCompressionResults.get(1) / 1048576l, ((double) PutCompressionResults.get(0) + (PutStoreResults.get(0) + PutStoreResults.get(1)) * config.keysize) / (134217728d * config.duration), ((double) GetCompressionResults.get(0) + (GetStoreResults.get(0) + GetStoreResults.get(1)) * config.keysize) / (134217728d * config.duration) + ((double) PutCompressionResults.get(0) + (PutStoreResults.get(0) + PutStoreResults.get(1)) * config.keysize) / (134217728d * config.duration));
System.out.println("\n\n-------------------------------------------------------------------------------------\n" + " Client Latency Statistics\n" + "-------------------------------------------------------------------------------------\n\n");
System.out.printf("Average latency: %,9.2f ms\n", stats.getAverageLatency());
System.out.printf("10th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.1));
System.out.printf("25th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.25));
System.out.printf("50th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.5));
System.out.printf("75th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.75));
System.out.printf("90th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.9));
System.out.printf("95th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.95));
System.out.printf("99th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.99));
System.out.printf("99.5th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.995));
System.out.printf("99.9th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.999));
System.out.println("\n\n" + stats.latencyHistoReport());
// Dump statistics to a CSV file
Con.unwrap(IVoltDBConnection.class).saveStatistics(stats, config.statsfile);
Con.close();
// ---------------------------------------------------------------------------------------------------------------------------------------------------
} catch (Exception x) {
System.out.println("Exception: " + x);
x.printStackTrace();
}
}
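Not shown in this listing is how the worker threads pick a connection source. A minimal illustrative sketch (not the actual ClientThread code) of the choice between the shared pooled DataSource and a direct DriverManager connection, using the Ds and useConnectionPool fields seen above:
// Illustrative helper only: route through the shared pooled DataSource when an
// external pool was configured, otherwise connect directly via DriverManager.
static Connection openConnection(String url) throws SQLException {
    if (useConnectionPool) {
        return Ds.getConnection();
    }
    return DriverManager.getConnection(url, "", "");
}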
use of com.jolbox.bonecp.BoneCPDataSource in project tomee by apache.
the class BoneCPDataSourceCreator method pool.
@Override
public DataSource pool(final String name, final DataSource ds, final Properties properties) {
    final BoneCPDataSource dataSourceProvidedPool = createPool(properties);
    dataSourceProvidedPool.setDatasourceBean(ds);
    if (dataSourceProvidedPool.getPoolName() == null) {
        dataSourceProvidedPool.setPoolName(name);
    }
    return dataSourceProvidedPool;
}
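The same wrapping can be done directly against the BoneCP API outside the TomEE creator. A minimal standalone sketch, where the underlying DataSource, pool name, and sizing values are placeholders:
// 'underlying' stands in for whatever unpooled DataSource the application already has.
BoneCPDataSource pooled = new BoneCPDataSource();
pooled.setDatasourceBean(underlying);       // delegate physical connections to the existing DataSource
pooled.setPoolName("example-pool");         // placeholder name
pooled.setPartitionCount(1);
pooled.setMaxConnectionsPerPartition(10);
try (Connection c = pooled.getConnection()) {
    // work with the pooled connection here
}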