Use of org.apache.hadoop.hbase.client.HBaseAdmin in project phoenix by apache.
Class FlappingLocalIndexIT, method testBuildIndexWhenUserTableAlreadyHasData.
@Test
public void testBuildIndexWhenUserTableAlreadyHasData() throws Exception {
    String tableName = schemaName + "." + generateUniqueName();
    String indexName = "IDX_" + generateUniqueName();
    String indexTableName = schemaName + "." + indexName;
    TableName physicalTableName = SchemaUtil.getPhysicalTableName(tableName.getBytes(), isNamespaceMapped);
    String indexPhysicalTableName = physicalTableName.getNameAsString();
    createBaseTable(tableName, null, "('e','i','o')");
    Connection conn1 = DriverManager.getConnection(getUrl());
    conn1.createStatement().execute("UPSERT INTO " + tableName + " values('b',1,2,4,'z')");
    conn1.createStatement().execute("UPSERT INTO " + tableName + " values('f',1,2,3,'z')");
    conn1.createStatement().execute("UPSERT INTO " + tableName + " values('j',2,4,2,'a')");
    conn1.createStatement().execute("UPSERT INTO " + tableName + " values('q',3,1,1,'c')");
    conn1.commit();
    conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName + " ON " + tableName + "(v1)");
    // The index is built from the pre-existing rows, so it must contain all four of them.
    ResultSet rs = conn1.createStatement().executeQuery("SELECT COUNT(*) FROM " + indexTableName);
    assertTrue(rs.next());
    assertEquals(4, rs.getInt(1));
    HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
    HTable indexTable = new HTable(admin.getConfiguration(), Bytes.toBytes(indexPhysicalTableName));
    Pair<byte[][], byte[][]> startEndKeys = indexTable.getStartEndKeys();
    byte[][] startKeys = startEndKeys.getFirst();
    byte[][] endKeys = startEndKeys.getSecond();
    // Local index data is co-located with the data regions: with split points
    // ('e','i','o') each of the four regions should hold exactly one index row.
    for (int i = 0; i < startKeys.length; i++) {
        Scan s = new Scan();
        s.addFamily(QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES);
        s.setStartRow(startKeys[i]);
        s.setStopRow(endKeys[i]);
        ResultScanner scanner = indexTable.getScanner(s);
        int count = 0;
        for (Result r : scanner) {
            count++;
        }
        scanner.close();
        assertEquals(1, count);
    }
    indexTable.close();
    admin.close();
}
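Because local index data lives in a shadow column family inside each data-table region, the per-region scan above is how co-location is verified. Below is a minimal standalone sketch of the same per-region counting pattern, assuming the deprecated HTable client API these tests use; the table name is a placeholder.

// Count rows per region by scanning each [startKey, endKey) range separately.
Configuration conf = HBaseConfiguration.create();
HTable table = new HTable(conf, TableName.valueOf("MY_TABLE")); // placeholder table name
try {
    Pair<byte[][], byte[][]> keys = table.getStartEndKeys();
    for (int i = 0; i < keys.getFirst().length; i++) {
        Scan scan = new Scan();
        scan.setStartRow(keys.getFirst()[i]);
        scan.setStopRow(keys.getSecond()[i]);
        int count = 0;
        ResultScanner scanner = table.getScanner(scan);
        try {
            for (Result r : scanner) {
                count++;
            }
        } finally {
            scanner.close();
        }
        System.out.println("region " + i + ": " + count + " rows");
    }
} finally {
    table.close();
}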
Use of org.apache.hadoop.hbase.client.HBaseAdmin in project phoenix by apache.
Class IndexExtendedIT, method testLocalIndexScanAfterRegionsMerge.
// Moved from LocalIndexIT because it was causing parallel runs to hang
@Test
public void testLocalIndexScanAfterRegionsMerge() throws Exception {
    // This test just needs to be run once, so skip all but one parameterization
    if (!localIndex || transactional || mutable || directApi) {
        return;
    }
    String schemaName = generateUniqueName();
    String tableName = schemaName + "." + generateUniqueName();
    String indexName = "IDX_" + generateUniqueName();
    TableName physicalTableName = SchemaUtil.getPhysicalTableName(tableName.getBytes(), false);
    String indexPhysicalTableName = physicalTableName.getNameAsString();
    createBaseTable(tableName, "('e','j','o')");
    Connection conn1 = getConnectionForLocalIndexTest();
    try {
        String[] strings = { "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z" };
        for (int i = 0; i < 26; i++) {
            conn1.createStatement().execute("UPSERT INTO " + tableName + " values('" + strings[i] + "'," + i + "," + (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')");
        }
        conn1.commit();
        conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName + " ON " + tableName + "(v1)");
        conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName + "_2 ON " + tableName + "(k3)");
        ResultSet rs = conn1.createStatement().executeQuery("SELECT * FROM " + tableName);
        assertTrue(rs.next());
        HBaseAdmin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
        List<HRegionInfo> regionsOfUserTable = MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(), admin.getConnection(), physicalTableName, false);
        // Merge the first two of the four regions. The merge completes asynchronously
        // on the master, so poll until the region count drops from 4 to 3.
        admin.mergeRegions(regionsOfUserTable.get(0).getEncodedNameAsBytes(), regionsOfUserTable.get(1).getEncodedNameAsBytes(), false);
        regionsOfUserTable = MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(), admin.getConnection(), physicalTableName, false);
        while (regionsOfUserTable.size() != 3) {
            Thread.sleep(100);
            regionsOfUserTable = MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(), admin.getConnection(), physicalTableName, false);
        }
        // The scan over the first local index (on v1) must still return every row after the merge.
        String query = "SELECT t_id,k1,v1 FROM " + tableName;
        rs = conn1.createStatement().executeQuery(query);
        Thread.sleep(1000);
        for (int j = 0; j < 26; j++) {
            assertTrue(rs.next());
            assertEquals(strings[25 - j], rs.getString("t_id"));
            assertEquals(25 - j, rs.getInt("k1"));
            assertEquals(strings[j], rs.getString("V1"));
        }
        rs = conn1.createStatement().executeQuery("EXPLAIN " + query);
        assertEquals("CLIENT PARALLEL " + 3 + "-WAY RANGE SCAN OVER " + indexPhysicalTableName + " [1]\n" + " SERVER FILTER BY FIRST KEY ONLY\n" + "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
        // The second local index (on k3) must likewise survive the merge.
        query = "SELECT t_id,k1,k3 FROM " + tableName;
        rs = conn1.createStatement().executeQuery("EXPLAIN " + query);
        assertEquals("CLIENT PARALLEL " + 3 + "-WAY RANGE SCAN OVER " + indexPhysicalTableName + " [2]\n" + " SERVER FILTER BY FIRST KEY ONLY\n" + "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
        rs = conn1.createStatement().executeQuery(query);
        Thread.sleep(1000);
        for (int j = 0; j < 26; j++) {
            assertTrue(rs.next());
            assertEquals(strings[j], rs.getString("t_id"));
            assertEquals(j, rs.getInt("k1"));
            assertEquals(j + 2, rs.getInt("k3"));
        }
    } finally {
        conn1.close();
    }
}
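The essential admin interaction here is mergeRegions followed by polling, since region merges are asynchronous. The following is a hedged sketch of just that step, using HBaseAdmin.getTableRegions instead of MetaTableAccessor for brevity; the table name is a placeholder, and the deprecated HBaseAdmin constructor matches the API era of these tests.

// Merge the first two regions of a table and wait for the merge to finish.
HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());
try {
    TableName table = TableName.valueOf("MY_TABLE"); // placeholder table name
    List<HRegionInfo> regions = admin.getTableRegions(table);
    int before = regions.size();
    // The trailing 'false' forbids merging regions that are not adjacent.
    admin.mergeRegions(regions.get(0).getEncodedNameAsBytes(), regions.get(1).getEncodedNameAsBytes(), false);
    // The merge runs asynchronously on the master; poll until the count drops.
    while (admin.getTableRegions(table).size() != before - 1) {
        Thread.sleep(100);
    }
} finally {
    admin.close();
}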
Use of org.apache.hadoop.hbase.client.HBaseAdmin in project phoenix by apache.
Class DynamicColumnIT, method initTable.
@Before
public void initTable() throws Exception {
    tableName = generateUniqueName();
    try (PhoenixConnection pconn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
        ConnectionQueryServices services = pconn.getQueryServices();
        try (HBaseAdmin admin = services.getAdmin()) {
            HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
            htd.addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
            htd.addFamily(new HColumnDescriptor(FAMILY_NAME_A));
            htd.addFamily(new HColumnDescriptor(FAMILY_NAME_B));
            admin.createTable(htd);
        }
        try (HTableInterface hTable = services.getTable(Bytes.toBytes(tableName))) {
            // Insert rows using standard HBase mechanism with standard HBase "types"
            List<Row> mutations = new ArrayList<Row>();
            byte[] dv = Bytes.toBytes("DV");
            byte[] first = Bytes.toBytes("F");
            byte[] f1v1 = Bytes.toBytes("F1V1");
            byte[] f1v2 = Bytes.toBytes("F1V2");
            byte[] f2v1 = Bytes.toBytes("F2V1");
            byte[] f2v2 = Bytes.toBytes("F2V2");
            byte[] key = Bytes.toBytes("entry1");
            Put put = new Put(key);
            put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dv, Bytes.toBytes("default"));
            put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, first, Bytes.toBytes("first"));
            put.add(FAMILY_NAME_A, f1v1, Bytes.toBytes("f1value1"));
            put.add(FAMILY_NAME_A, f1v2, Bytes.toBytes("f1value2"));
            put.add(FAMILY_NAME_B, f2v1, Bytes.toBytes("f2value1"));
            put.add(FAMILY_NAME_B, f2v2, Bytes.toBytes("f2value2"));
            mutations.add(put);
            hTable.batch(mutations);
            // Create Phoenix table after HBase table was created through the native APIs
            // The timestamp of the table creation must be later than the timestamp of the data
            pconn.createStatement().execute("create table " + tableName + " (entry varchar not null," + " F varchar," + " A.F1v1 varchar," + " A.F1v2 varchar," + " B.F2v1 varchar" + " CONSTRAINT pk PRIMARY KEY (entry))");
        }
    }
}
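Note that B.F2V2 is written through the native API above but never declared in the Phoenix DDL. Such columns can still be read by declaring them as dynamic columns at query time; below is a hedged sketch of that pattern, reusing the tableName field from the setup above.

// Declare B.F2V2 as a dynamic column so Phoenix can read it without a schema change.
Connection conn = DriverManager.getConnection(getUrl());
try {
    ResultSet rs = conn.createStatement().executeQuery("SELECT entry, F2V2 FROM " + tableName + " (B.F2V2 VARCHAR)");
    while (rs.next()) {
        System.out.println(rs.getString(1) + " -> " + rs.getString(2));
    }
} finally {
    conn.close();
}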
Use of org.apache.hadoop.hbase.client.HBaseAdmin in project phoenix by apache.
Class QueryDatabaseMetaDataIT, method testCreateOnExistingTable.
@Test
public void testCreateOnExistingTable() throws Exception {
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    String tableName = MDTEST_NAME;
    String schemaName = MDTEST_SCHEMA_NAME;
    byte[] cfA = Bytes.toBytes(SchemaUtil.normalizeIdentifier("a"));
    byte[] cfB = Bytes.toBytes(SchemaUtil.normalizeIdentifier("b"));
    byte[] cfC = Bytes.toBytes("c");
    byte[][] familyNames = new byte[][] { cfB, cfC };
    byte[] htableName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
    HBaseAdmin admin = pconn.getQueryServices().getAdmin();
    // Best-effort cleanup from previous runs; enableTable on the just-deleted table
    // throws TableNotFoundException, which is deliberately swallowed.
    try {
        admin.disableTable(htableName);
        admin.deleteTable(htableName);
        admin.enableTable(htableName);
    } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
    }
    @SuppressWarnings("deprecation")
    HTableDescriptor descriptor = new HTableDescriptor(htableName);
    for (byte[] familyName : familyNames) {
        HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);
        descriptor.addFamily(columnDescriptor);
    }
    admin.createTable(descriptor);
    long ts = nextTimestamp();
    Properties props = new Properties();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 5));
    PhoenixConnection conn1 = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class);
    ensureTableCreated(getUrl(), tableName, tableName, ts);
    descriptor = admin.getTableDescriptor(htableName);
    assertEquals(3, descriptor.getColumnFamilies().length);
    HColumnDescriptor cdA = descriptor.getFamily(cfA);
    assertNotEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdA.getKeepDeletedCells()); // Overridden using WITH
    assertEquals(DataBlockEncoding.NONE, cdA.getDataBlockEncoding()); // Overridden using WITH
    assertEquals(1, cdA.getMaxVersions());
    HColumnDescriptor cdB = descriptor.getFamily(cfB);
    // Allow KEEP_DELETED_CELLS to be false for VIEW
    assertEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdB.getKeepDeletedCells());
    // Should keep the original value.
    assertEquals(DataBlockEncoding.NONE, cdB.getDataBlockEncoding());
    // CF c should stay the same since it's not a Phoenix cf.
    HColumnDescriptor cdC = descriptor.getFamily(cfC);
    assertNotNull("Column family not found", cdC);
    assertEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdC.getKeepDeletedCells());
    assertFalse(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING == cdC.getDataBlockEncoding());
    // Phoenix should have installed its coprocessors on the pre-existing table.
    assertTrue(descriptor.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName()));
    assertTrue(descriptor.hasCoprocessor(GroupedAggregateRegionObserver.class.getName()));
    assertTrue(descriptor.hasCoprocessor(ServerCachingEndpointImpl.class.getName()));
    admin.close();
    int rowCount = 5;
    String upsert = "UPSERT INTO " + tableName + "(id,col1,col2) VALUES(?,?,?)";
    PreparedStatement ps = conn1.prepareStatement(upsert);
    for (int i = 0; i < rowCount; i++) {
        ps.setString(1, Integer.toString(i));
        ps.setInt(2, i + 1);
        ps.setInt(3, i + 2);
        ps.execute();
    }
    conn1.commit();
    conn1.close();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 6));
    Connection conn2 = DriverManager.getConnection(getUrl(), props);
    String query = "SELECT count(1) FROM " + tableName;
    ResultSet rs = conn2.createStatement().executeQuery(query);
    assertTrue(rs.next());
    assertEquals(rowCount, rs.getLong(1));
    query = "SELECT id, col1,col2 FROM " + tableName;
    rs = conn2.createStatement().executeQuery(query);
    for (int i = 0; i < rowCount; i++) {
        assertTrue(rs.next());
        assertEquals(Integer.toString(i), rs.getString(1));
        assertEquals(i + 1, rs.getInt(2));
        assertEquals(i + 2, rs.getInt(3));
    }
    assertFalse(rs.next());
    conn2.close();
}
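The descriptor assertions above hinge on Phoenix rewriting the pre-existing table's metadata (column family settings and coprocessors) when CREATE TABLE runs over it. Below is a minimal sketch of inspecting a table descriptor the same way, with a placeholder table name and the same deprecated HBaseAdmin API.

// Dump column family settings and coprocessors from a table descriptor.
HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());
try {
    HTableDescriptor desc = admin.getTableDescriptor(TableName.valueOf("MY_TABLE")); // placeholder table name
    for (HColumnDescriptor cf : desc.getColumnFamilies()) {
        System.out.println(cf.getNameAsString()
            + " maxVersions=" + cf.getMaxVersions()
            + " encoding=" + cf.getDataBlockEncoding()
            + " keepDeleted=" + cf.getKeepDeletedCells());
    }
    System.out.println("coprocessors: " + desc.getCoprocessors());
} finally {
    admin.close();
}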
Use of org.apache.hadoop.hbase.client.HBaseAdmin in project phoenix by apache.
Class ProductMetricsIT, method testFilterOnTrailingKeyColumn.
/**
 * Test to reproduce the ArrayIndexOutOfBoundsException that happens during filtering
 * in BinarySubsetComparator, but only after a flush has been performed.
 * @throws Exception
 */
@Test
public void testFilterOnTrailingKeyColumn() throws Exception {
    long ts = nextTimestamp();
    String tenantId = getOrganizationId();
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 1));
    Connection conn = DriverManager.getConnection(getUrl(), props);
    HBaseAdmin admin = null;
    try {
        initTableValues(tenantId, getSplits(tenantId), ts);
        admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
        // Force the memstore to disk so the subsequent scan filters against HFiles.
        admin.flush(SchemaUtil.getTableNameAsBytes(PRODUCT_METRICS_SCHEMA_NAME, PRODUCT_METRICS_NAME));
        String query = "SELECT SUM(TRANSACTIONS) FROM " + PRODUCT_METRICS_NAME + " WHERE FEATURE=?";
        PreparedStatement statement = conn.prepareStatement(query);
        statement.setString(1, F1);
        ResultSet rs = statement.executeQuery();
        assertTrue(rs.next());
        assertEquals(1200, rs.getInt(1));
    } finally {
        if (admin != null)
            admin.close();
        conn.close();
    }
}
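The flush is what makes the bug reproducible: it pushes the memstore contents into HFiles so the aggregate scan exercises the comparator against on-disk data. A minimal sketch of the flush call on its own, assuming the same 0.98/1.x HBaseAdmin API (flush requests return before the flush completes, which is why such tests occasionally need a short wait afterwards):

// Ask the region servers to flush all memstores for a table.
HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());
try {
    admin.flush(Bytes.toBytes("MY_TABLE")); // placeholder table name; asynchronous
} finally {
    admin.close();
}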