Example usage of org.apache.hadoop.hbase.client.HBaseAdmin in the Apache Phoenix project: class NamespaceSchemaMappingIT, method testBackWardCompatibility.
/**
 * Tests that when: There is a table created with older version of phoenix and a table created with newer version
 * having {@code QueryServices#IS_NAMESPACE_MAPPING_ENABLED} true, then there is only a flag
 * {@code PhoenixDatabaseMetaData#IS_NAMESPACE_MAPPED} differentiates that whether schema of the table is mapped to
 * namespace or not
 */
@Test
@SuppressWarnings("deprecation")
public void testBackWardCompatibility() throws Exception {
    String namespace = "TEST_SCHEMA";
    String schemaName = namespace;
    String tableName = generateUniqueName();
    // Old-style physical name embeds the schema with '.'; namespace-mapped name uses ':'.
    String phoenixFullTableName = schemaName + "." + tableName;
    String hbaseFullTableName = schemaName + ":" + tableName;
    HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
    admin.createNamespace(NamespaceDescriptor.create(namespace).build());
    // Create both physical tables directly in HBase: one namespace-mapped, one old-style.
    admin.createTable(new HTableDescriptor(TableName.valueOf(namespace, tableName)).addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)));
    admin.createTable(new HTableDescriptor(TableName.valueOf(phoenixFullTableName)).addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)));
    // Seed one row into each physical table, keyed by that table's own full name.
    Put put = new Put(PVarchar.INSTANCE.toBytes(phoenixFullTableName));
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
    // try-with-resources guarantees the table handle is closed even if put() throws.
    // (The original code also closed phoenixSchematable a second time after it was
    // already closed — that redundant close() is removed here.)
    try (HTable phoenixSchematable = new HTable(admin.getConfiguration(), phoenixFullTableName)) {
        phoenixSchematable.put(put);
    }
    put = new Put(PVarchar.INSTANCE.toBytes(hbaseFullTableName));
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
    try (HTable namespaceMappedtable = new HTable(admin.getConfiguration(), hbaseFullTableName)) {
        namespaceMappedtable.put(put);
    }
    Properties props = new Properties();
    props.setProperty(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
    Connection conn = DriverManager.getConnection(getUrl(), props);
    String ddl = "create table " + phoenixFullTableName + "(tableName varchar primary key)";
    conn.createStatement().execute(ddl);
    // Initially the Phoenix table resolves to the old-style ("schema.table") physical table.
    String query = "select tableName from " + phoenixFullTableName;
    ResultSet rs = conn.createStatement().executeQuery(query);
    assertTrue(rs.next());
    assertEquals(phoenixFullTableName, rs.getString(1));
    // Flip IS_NAMESPACE_MAPPED directly in SYSTEM.CATALOG to simulate an upgraded table.
    try (HTable metatable = new HTable(admin.getConfiguration(), SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, (conn.unwrap(PhoenixConnection.class).getQueryServices().getProps())))) {
        Put p = new Put(SchemaUtil.getTableKey(null, schemaName, tableName));
        p.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED_BYTES, PBoolean.INSTANCE.toBytes(true));
        metatable.put(p);
    }
    PhoenixConnection phxConn = (conn.unwrap(PhoenixConnection.class));
    // Clear the metadata cache so the flipped flag is re-read on the next query.
    phxConn.getQueryServices().clearCache();
    rs = conn.createStatement().executeQuery(query);
    assertTrue(rs.next());
    // The same query now resolves to the namespace-mapped ("namespace:table") physical table.
    assertEquals(hbaseFullTableName, rs.getString(1));
    admin.disableTable(phoenixFullTableName);
    admin.deleteTable(phoenixFullTableName);
    conn.createStatement().execute("DROP TABLE " + phoenixFullTableName);
    admin.close();
    conn.close();
}
Example usage of org.apache.hadoop.hbase.client.HBaseAdmin in the Apache Phoenix project: class StatisticsCollectionRunTrackerIT, method createTableAndGetRegion.
/**
 * Creates a Phoenix table with the given name and returns the first HBase
 * region backing it.
 */
private HRegionInfo createTableAndGetRegion(String tableName) throws Exception {
    String createSql = "CREATE TABLE " + tableName + " (PK1 VARCHAR PRIMARY KEY, KV1 VARCHAR)";
    try (Connection connection = DriverManager.getConnection(getUrl())) {
        connection.createStatement().execute(createSql);
        PhoenixConnection pconn = connection.unwrap(PhoenixConnection.class);
        try (HBaseAdmin hbaseAdmin = pconn.getQueryServices().getAdmin()) {
            // A freshly created table has at least one region; return the first.
            return hbaseAdmin.getTableRegions(Bytes.toBytes(tableName)).get(0);
        }
    }
}
Example usage of org.apache.hadoop.hbase.client.HBaseAdmin in the Apache Phoenix project: class StatisticsCollectionRunTrackerIT, method runMajorCompaction.
/**
 * Flushes the given table and triggers a major compaction on it, then waits
 * briefly to give the (asynchronous) compaction time to run.
 */
private void runMajorCompaction(String tableName) throws Exception {
    TableName table = TableName.valueOf(tableName);
    try (PhoenixConnection phxConn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
        try (HBaseAdmin hbaseAdmin = phxConn.getQueryServices().getAdmin()) {
            hbaseAdmin.flush(table);
            hbaseAdmin.majorCompact(table);
            // majorCompact is asynchronous; sleep to let it complete before returning.
            Thread.sleep(10000);
        }
    }
}
Example usage of org.apache.hadoop.hbase.client.HBaseAdmin in the Apache Phoenix project: class AlterTableIT, method testSetTTLForTableWithOnlyPKCols.
/**
 * Verifies that a TTL specified at CREATE TABLE time on a table whose columns are all
 * part of the primary key is applied to its single column family, and that a subsequent
 * {@code ALTER TABLE ... SET TTL} updates that family's TTL in the HBase table descriptor.
 */
@Test
public void testSetTTLForTableWithOnlyPKCols() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    // try-with-resources closes the connection on every path; the original
    // created conn before the try block, leaking it if setAutoCommit threw.
    try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
        conn.setAutoCommit(false);
        String ddl = "create table " + dataTableFullName + " (" + " id char(1) NOT NULL," + " col1 integer NOT NULL," + " col2 bigint NOT NULL," + " CONSTRAINT NAME_PK PRIMARY KEY (id, col1, col2)" + " ) " + generateDDLOptions("TTL=86400, SALT_BUCKETS = 4, DEFAULT_COLUMN_FAMILY='XYZ'");
        conn.createStatement().execute(ddl);
        // All columns are in the PK, so exactly one (default) family "XYZ" is created,
        // carrying the table-level TTL.
        try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
            HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
            HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
            assertEquals(1, columnFamilies.length);
            assertEquals("XYZ", columnFamilies[0].getNameAsString());
            assertEquals(86400, columnFamilies[0].getTimeToLive());
        }
        ddl = "ALTER TABLE " + dataTableFullName + " SET TTL=30";
        conn.createStatement().execute(ddl);
        conn.commit();
        // The new TTL must be reflected on the same single column family.
        try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
            HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
            HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
            assertEquals(1, columnFamilies.length);
            assertEquals(30, columnFamilies[0].getTimeToLive());
            assertEquals("XYZ", columnFamilies[0].getNameAsString());
        }
    }
}
Example usage of org.apache.hadoop.hbase.client.HBaseAdmin in the Apache Phoenix project: class FlappingAlterTableIT, method testNewColumnFamilyInheritsTTLOfEmptyCF.
/**
 * Verifies that a column family added via {@code ALTER TABLE ... ADD} inherits the TTL of
 * the empty (default) column family that was established at table-creation time.
 */
@Test
public void testNewColumnFamilyInheritsTTLOfEmptyCF() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    String ddl = "CREATE TABLE " + dataTableFullName + " (\n" + "ID1 VARCHAR(15) NOT NULL,\n" + "ID2 VARCHAR(15) NOT NULL,\n" + "CREATED_DATE DATE,\n" + "CREATION_TIME BIGINT,\n" + "LAST_USED DATE,\n" + "CONSTRAINT PK PRIMARY KEY (ID1, ID2)) SALT_BUCKETS = 8, TTL = 1000";
    // Fix: the original never closed this connection, leaking it on every run.
    try (Connection conn1 = DriverManager.getConnection(getUrl(), props)) {
        conn1.createStatement().execute(ddl);
        ddl = "ALTER TABLE " + dataTableFullName + " ADD CF.STRING VARCHAR";
        conn1.createStatement().execute(ddl);
        try (HBaseAdmin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
            HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
            HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
            // Two families now exist: the default ("0") and the newly added "CF";
            // both must carry the table-level TTL of 1000 seconds.
            assertEquals(2, columnFamilies.length);
            assertEquals("0", columnFamilies[0].getNameAsString());
            assertEquals(1000, columnFamilies[0].getTimeToLive());
            assertEquals("CF", columnFamilies[1].getNameAsString());
            assertEquals(1000, columnFamilies[1].getTimeToLive());
        }
    }
}
Aggregations