Use of org.apache.phoenix.util.ReadOnlyProps in project phoenix by apache.
The class LocalIndexSplitMergeIT, method doSetup.
@BeforeClass
public static void doSetup() throws Exception {
    Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
    serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
    Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(2);
    clientProps.put(QueryServices.TRANSACTIONS_ENABLED, Boolean.TRUE.toString());
    clientProps.put(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.TRUE.toString());
    // Wrap each mutable map in an immutable ReadOnlyProps view before handing it to the test driver.
    setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
        new ReadOnlyProps(clientProps.entrySet().iterator()));
}
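All five fixtures in this section share one pattern: build an ordinary mutable Map, then freeze it by handing its entry iterator to the ReadOnlyProps constructor. The sketch below illustrates that round trip in isolation; the property key is made up for illustration, and the get(String) accessor is assumed from ReadOnlyProps' usual Configuration-style API.

import java.util.Map;
import org.apache.phoenix.util.ReadOnlyProps;
import com.google.common.collect.Maps;

public class ReadOnlyPropsSketch {
    public static void main(String[] args) {
        // Build the configuration in an ordinary mutable map first.
        Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
        props.put("phoenix.example.flag", Boolean.TRUE.toString()); // illustrative key, not a real Phoenix setting

        // Freeze it: the single-pass iterator input implies the constructor
        // materializes its own copy of the entries.
        ReadOnlyProps readOnly = new ReadOnlyProps(props.entrySet().iterator());

        // Later mutations of the source map do not leak into the frozen view.
        props.put("phoenix.example.flag", Boolean.FALSE.toString());
        System.out.println(readOnly.get("phoenix.example.flag")); // expected: "true" (get(String) assumed)
    }
}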
Use of org.apache.phoenix.util.ReadOnlyProps in project phoenix by apache.
The class UpdateCacheAcrossDifferentClientsIT, method doSetup.
@BeforeClass
public static void doSetup() throws Exception {
    Map<String, String> props = Maps.newConcurrentMap();
    props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
    props.put(QueryServices.MUTATE_BATCH_SIZE_ATTRIB, Integer.toString(3000));
    // When all tests run together they share the global cluster (driver),
    // so to make DROP work we need to re-register the driver with the DROP_METADATA_ATTRIB property.
    destroyDriver();
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
    // Register the real Phoenix driver so that multiple ConnectionQueryServices are created
    // across connections and metadata changes don't get propagated between them.
    DriverManager.registerDriver(PhoenixDriver.INSTANCE);
}
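Once PhoenixDriver.INSTANCE is registered, two plain JDBC connections can stand in for two independent clients. A minimal sketch under that assumption; the helper name and url parameter are hypothetical, with getUrl() from the test base class supplying the real URL in the tests themselves.

import java.sql.Connection;
import java.sql.DriverManager;

// Hypothetical helper: open two connections that act as two independent clients.
static void simulateTwoClients(String url) throws Exception {
    try (Connection client1 = DriverManager.getConnection(url);
            Connection client2 = DriverManager.getConnection(url)) {
        // With the real driver registered, each connection can be backed by its own
        // ConnectionQueryServices, so metadata changes seen by client1 are not
        // automatically propagated to client2, which is the condition this test exercises.
    }
}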
Use of org.apache.phoenix.util.ReadOnlyProps in project phoenix by apache.
The class UserDefinedFunctionsIT, method doSetup.
@BeforeClass
public static void doSetup() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    setUpConfigForMiniCluster(conf);
    util = new HBaseTestingUtility(conf);
    util.startMiniDFSCluster(1);
    util.startMiniZKCluster(1);
    String defaultFs = util.getConfiguration().get("fs.defaultFS");
    // PHOENIX-4675: setting the trailing slash implicitly tests that we're doing some path normalization
    conf.set(DYNAMIC_JARS_DIR_KEY, defaultFs + "/hbase/tmpjars/");
    util.startMiniHBaseCluster(1, 1);
    UDFExpression.setConfig(conf);
    String clientPort = util.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
    url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR
        + clientPort + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
    Map<String, String> props = Maps.newHashMapWithExpectedSize(2);
    props.put(QueryServices.ALLOW_USER_DEFINED_FUNCTIONS_ATTRIB, "true");
    props.put(QueryServices.DYNAMIC_JARS_DIR_KEY, defaultFs + "/hbase/tmpjars/");
    driver = initAndRegisterTestDriver(url, new ReadOnlyProps(props.entrySet().iterator()));
}
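With the usual values of those constants, the concatenation above yields a URL of the shape jdbc:phoenix:localhost:<zk-port>;test=true, where the port is whatever the mini cluster chose. The hedged sketch below shows the same ALLOW_USER_DEFINED_FUNCTIONS_ATTRIB key applied on a per-connection basis; the helper itself is hypothetical.

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
import org.apache.phoenix.query.QueryServices;

// Hypothetical helper: open a connection with user-defined functions enabled client-side.
static Connection connectWithUdfsAllowed(String url) throws Exception {
    Properties info = new Properties();
    info.setProperty(QueryServices.ALLOW_USER_DEFINED_FUNCTIONS_ATTRIB, "true");
    return DriverManager.getConnection(url, info);
}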
Use of org.apache.phoenix.util.ReadOnlyProps in project phoenix by apache.
The class PartialIndexRebuilderIT, method doSetup.
@BeforeClass
public static void doSetup() throws Exception {
    Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(10);
    serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB, Boolean.TRUE.toString());
    serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB, Long.toString(REBUILD_INTERVAL));
    serverProps.put(QueryServices.INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD, "50000000");
    // batch at 50 seconds
    serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_PERIOD, Long.toString(REBUILD_PERIOD));
    serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME_ATTRIB, Long.toString(WAIT_AFTER_DISABLED));
    Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(1);
    clientProps.put(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "2");
    setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
        new ReadOnlyProps(clientProps.entrySet().iterator()));
    // Locate the SYSTEM.CATALOG region's coprocessor environment so the rebuild task can be driven directly.
    indexRebuildTaskRegionEnvironment = (RegionCoprocessorEnvironment) getUtility()
        .getRSForFirstRegionInTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
        .getOnlineRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
        .get(0)
        .getCoprocessorHost()
        .findCoprocessorEnvironment(MetaDataRegionObserver.class.getName());
    MetaDataRegionObserver.initRebuildIndexConnectionProps(indexRebuildTaskRegionEnvironment.getConfiguration());
}
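Note that this fixture uses the two-argument setUpTestDriver overload, keeping server-side and client-side settings in separate ReadOnlyProps bundles, whereas UpdateCacheAcrossDifferentClientsIT above used the single-argument form. Side by side, both calls copied from the snippets rather than new API:

// Single bundle of properties for the test driver (UpdateCacheAcrossDifferentClientsIT)
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));

// Separate server and client bundles (this fixture and LocalIndexSplitMergeIT)
setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
    new ReadOnlyProps(clientProps.entrySet().iterator()));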
Use of org.apache.phoenix.util.ReadOnlyProps in project phoenix by apache.
The class BaseIndexIT, method testTableDescriptorPriority.
/**
 * Ensure that the HBase table descriptor (HTD) contains table priorities correctly.
 */
@Test
public void testTableDescriptorPriority() throws SQLException, IOException {
    String tableName = "TBL_" + generateUniqueName();
    String indexName = "IND_" + generateUniqueName();
    String fullTableName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
    // Check system table priorities.
    try (HBaseAdmin admin = driver.getConnectionQueryServices(null, null).getAdmin();
            Connection c = DriverManager.getConnection(getUrl())) {
        ResultSet rs = c.getMetaData().getTables("",
            "\"" + PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA + "\"", null,
            new String[] { PTableType.SYSTEM.toString() });
        ReadOnlyProps p = c.unwrap(PhoenixConnection.class).getQueryServices().getProps();
        while (rs.next()) {
            String schemaName = rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM);
            String tName = rs.getString(PhoenixDatabaseMetaData.TABLE_NAME);
            org.apache.hadoop.hbase.TableName hbaseTableName =
                SchemaUtil.getPhysicalTableName(SchemaUtil.getTableName(schemaName, tName), p);
            HTableDescriptor htd = admin.getTableDescriptor(hbaseTableName);
            String val = htd.getValue("PRIORITY");
            assertNotNull("PRIORITY is not set for table:" + htd, val);
            assertTrue(Integer.parseInt(val) >= PhoenixRpcSchedulerFactory.getMetadataPriority(config));
        }
        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
        String ddl = "CREATE TABLE " + fullTableName + TestUtil.TEST_TABLE_SCHEMA + tableDDLOptions;
        try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
            conn.setAutoCommit(false);
            Statement stmt = conn.createStatement();
            stmt.execute(ddl);
            BaseTest.populateTestTable(fullTableName);
            ddl = "CREATE " + (localIndex ? "LOCAL" : "") + " INDEX " + indexName + " ON "
                + fullTableName + " (long_col1, long_col2)" + " INCLUDE (decimal_col1, decimal_col2)";
            stmt.execute(ddl);
        }
        HTableDescriptor dataTable =
            admin.getTableDescriptor(org.apache.hadoop.hbase.TableName.valueOf(fullTableName));
        String val = dataTable.getValue("PRIORITY");
        // The data table must not run at high QOS.
        assertTrue(val == null || Integer.parseInt(val) < HConstants.HIGH_QOS);
        if (!localIndex && mutable) {
            HTableDescriptor indexTable =
                admin.getTableDescriptor(org.apache.hadoop.hbase.TableName.valueOf(indexName));
            val = indexTable.getValue("PRIORITY");
            assertNotNull("PRIORITY is not set for table:" + indexTable, val);
            assertTrue(Integer.parseInt(val) >= PhoenixRpcSchedulerFactory.getIndexPriority(config));
        }
    }
}
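Unlike the setup fixtures, this test consumes a ReadOnlyProps instead of building one: getProps() exposes the connection's effective configuration, which is then fed to SchemaUtil.getPhysicalTableName. A minimal sketch of that read path; the helper is hypothetical and the get(String) accessor is assumed from ReadOnlyProps' usual API.

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.util.ReadOnlyProps;

// Hypothetical helper: read one effective setting from an open Phoenix connection.
static String effectiveSetting(String url, String key) throws Exception {
    try (Connection c = DriverManager.getConnection(url)) {
        ReadOnlyProps props = c.unwrap(PhoenixConnection.class).getQueryServices().getProps();
        return props.get(key); // assumed ReadOnlyProps#get(String); may return null if unset
    }
}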