Use of org.apache.hadoop.hbase.client.HBaseAdmin in project phoenix by apache.
From the class IndexHandlerIT, method setup.
@Before
public void setup() throws Exception {
    HTableDescriptor desc =
            new HTableDescriptor(org.apache.hadoop.hbase.TableName.valueOf(TestTable.getTableNameString()));
    desc.addFamily(FAM1);
    // create the table
    HBaseAdmin admin = UTIL.getHBaseAdmin();
    admin.createTable(desc);
}
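Not shown in this snippet is the matching cleanup. A minimal sketch of an @After method, assuming the same UTIL and TestTable fields used above (the method name cleanup is illustrative, not from the test):

// Hypothetical teardown for the setup above; assumes the same UTIL and TestTable fields.
@After
public void cleanup() throws Exception {
    HBaseAdmin admin = UTIL.getHBaseAdmin();
    try {
        org.apache.hadoop.hbase.TableName table =
                org.apache.hadoop.hbase.TableName.valueOf(TestTable.getTableNameString());
        if (admin.tableExists(table)) {
            // a table must be disabled before it can be deleted
            admin.disableTable(table);
            admin.deleteTable(table);
        }
    } finally {
        admin.close();
    }
}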
Use of org.apache.hadoop.hbase.client.HBaseAdmin in project phoenix by apache.
From the class SkipScanAfterManualSplitIT, method initTable.
private static void initTable(String tableName) throws Exception {
    Connection conn = getConnection();
    conn.createStatement().execute("CREATE TABLE " + tableName + "("
            + "a VARCHAR PRIMARY KEY, b VARCHAR) "
            + HTableDescriptor.MAX_FILESIZE + "=" + MAX_FILESIZE + ","
            + " SALT_BUCKETS = 4");
    PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?)");
    int rowCount = 0;
    for (int c1 = MIN_CHAR; c1 <= MAX_CHAR; c1++) {
        for (int c2 = MIN_CHAR; c2 <= MAX_CHAR; c2++) {
            String pk = Character.toString((char) c1) + Character.toString((char) c2);
            stmt.setString(1, pk);
            stmt.setString(2, PAYLOAD);
            stmt.execute();
            rowCount++;
            if (rowCount % BATCH_SIZE == 0) {
                conn.commit();
            }
        }
    }
    conn.commit();
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    HBaseAdmin admin = services.getAdmin();
    try {
        admin.flush(tableName);
    } finally {
        admin.close();
    }
    conn.close();
}
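Since recent HBaseAdmin versions implement Closeable, the flush-then-close pattern at the end of initTable can also be written with try-with-resources. A sketch assuming the same services and tableName variables from the method above:

// Equivalent cleanup using try-with-resources; assumes HBaseAdmin implements Closeable
// in the HBase version in use, and the same `services` and `tableName` as above.
try (HBaseAdmin admin = services.getAdmin()) {
    // force pending writes out of the memstore so the manual split has store files to divide
    admin.flush(tableName);
}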
Use of org.apache.hadoop.hbase.client.HBaseAdmin in project phoenix by apache.
From the class SkipScanAfterManualSplitIT, method testManualSplit.
@Test
public void testManualSplit() throws Exception {
    String tableName = generateUniqueName();
    byte[] tableNameBytes = Bytes.toBytes(tableName);
    initTable(tableName);
    Connection conn = getConnection();
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    int nRegions = services.getAllTableRegions(tableNameBytes).size();
    int nInitialRegions = nRegions;
    HBaseAdmin admin = services.getAdmin();
    try {
        admin.split(tableName);
        int nTries = 0;
        while (nRegions == nInitialRegions && nTries < 10) {
            Thread.sleep(1000);
            nRegions = services.getAllTableRegions(tableNameBytes).size();
            nTries++;
        }
        // Split finished by this time, but cache isn't updated until
        // table is accessed
        assertEquals(nRegions, nInitialRegions);
        int nRows = 2;
        String query = "SELECT count(*) FROM " + tableName + " WHERE a IN ('tl','jt',' a',' b',' c',' d')";
        ResultSet rs1 = conn.createStatement().executeQuery(query);
        assertTrue(rs1.next());
        nRegions = services.getAllTableRegions(tableNameBytes).size();
        // Region cache has been updated, as there are more regions now
        assertNotEquals(nRegions, nInitialRegions);
        /*
        if (nRows != rs1.getInt(1)) {
            // Run the same query again and it always passes now
            // (as region cache is up-to-date)
            ResultSet r2 = conn.createStatement().executeQuery(query);
            assertTrue(r2.next());
            assertEquals(nRows, r2.getInt(1));
        }
        */
        assertEquals(nRows, rs1.getInt(1));
    } finally {
        admin.close();
    }
}
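As an aside on the same API, a table can also be pre-split when it is created instead of being split manually afterwards, which avoids the wait-for-split loop above. The sketch below is illustrative only: the table name, column family, and split point are made up and not from the test, and in Phoenix the table above is actually pre-split via SALT_BUCKETS in the CREATE TABLE statement.

// Illustrative only: pre-splitting at creation time with HBaseAdmin.createTable(desc, splitKeys).
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("PRESPLIT_EXAMPLE"));
desc.addFamily(new HColumnDescriptor(Bytes.toBytes("0")));
byte[][] splitPoints = new byte[][] { Bytes.toBytes("m") };
HBaseAdmin admin = services.getAdmin();
try {
    // creates the table already divided into two regions at the key "m"
    admin.createTable(desc, splitPoints);
} finally {
    admin.close();
}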
Use of org.apache.hadoop.hbase.client.HBaseAdmin in project phoenix by apache.
From the class FailForUnsupportedHBaseVersionsIT, method testDoesNotStartRegionServerForUnsupportedCompressionAndVersion.
/**
 * Test that we correctly abort a RegionServer when we run tests with an unsupported HBase
 * version. The 'completeness' of this test requires that we run it against a version of HBase
 * that doesn't support WAL Compression. Currently, this is the default version (0.94.4), so just
 * running 'mvn test' will run the full test. However, this test will not fail when running
 * against a version of HBase with WAL Compression enabled. Therefore, to fully test this
 * functionality, we need to run the test against both a supported and an unsupported version of
 * HBase (as long as we want to support a version of HBase that doesn't support custom WAL
 * Codecs).
 * @throws Exception on failure
 */
@Test(timeout = 300000)
public void testDoesNotStartRegionServerForUnsupportedCompressionAndVersion() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    setUpConfigForMiniCluster(conf);
    IndexTestingUtils.setupConfig(conf);
    // enable WAL Compression
    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
    // check the version to see if it isn't supported
    String version = VersionInfo.getVersion();
    boolean supported = false;
    if (Indexer.validateVersion(version, conf) == null) {
        supported = true;
    }
    // start the minicluster
    HBaseTestingUtility util = new HBaseTestingUtility(conf);
    util.startMiniCluster();
    try {
        // setup the primary table
        @SuppressWarnings("deprecation")
        HTableDescriptor desc = new HTableDescriptor("testDoesNotStartRegionServerForUnsupportedCompressionAndVersion");
        byte[] family = Bytes.toBytes("f");
        desc.addFamily(new HColumnDescriptor(family));
        // enable indexing to a non-existent index table
        String indexTableName = "INDEX_TABLE";
        ColumnGroup fam1 = new ColumnGroup(indexTableName);
        fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
        CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
        builder.addIndexGroup(fam1);
        builder.build(desc);
        // get a reference to the regionserver, so we can ensure it aborts
        HRegionServer server = util.getMiniHBaseCluster().getRegionServer(0);
        // create the primary table
        HBaseAdmin admin = util.getHBaseAdmin();
        if (supported) {
            admin.createTable(desc);
            assertFalse("Hosting region server failed, even though the HBase version (" + version + ") supports WAL Compression.", server.isAborted());
        } else {
            admin.createTableAsync(desc, null);
            // wait for the regionserver to abort; if it never does, the test timeout flags the run as
            // broken.
            while (!server.isAborted()) {
                LOG.debug("Waiting on regionserver to abort..");
            }
        }
    } finally {
        // cleanup
        util.shutdownMiniCluster();
    }
}
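The abort wait in the unsupported branch spins without sleeping and is bounded only by the JUnit timeout. A sketch of a bounded, sleeping variant, assuming the same server, LOG, and version references from the test (the 60-second deadline and 200 ms poll interval are arbitrary choices):

// Illustrative bounded wait; assumes the same `server`, `LOG`, and `version` as the test above.
long deadline = System.currentTimeMillis() + 60000;
while (!server.isAborted() && System.currentTimeMillis() < deadline) {
    LOG.debug("Waiting on regionserver to abort..");
    Thread.sleep(200);
}
assertTrue("Regionserver did not abort for unsupported HBase version " + version, server.isAborted());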
Use of org.apache.hadoop.hbase.client.HBaseAdmin in project phoenix by apache.
From the class EndToEndCoveredColumnsIndexBuilderIT, method setupTest.
/**
 * @param tableName name of the table to create for the test
 * @return the supporting state for the test
 */
private TestState setupTest(String tableName) throws IOException {
    byte[] tableNameBytes = Bytes.toBytes(tableName);
    @SuppressWarnings("deprecation")
    HTableDescriptor desc = new HTableDescriptor(tableNameBytes);
    desc.addFamily(FAM1);
    // add the necessary simple options to create the builder
    Map<String, String> indexerOpts = new HashMap<String, String>();
    // just need to set the codec - we are going to set it later, but we need something here or the
    // initializer blows up.
    indexerOpts.put(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, CoveredIndexCodecForTesting.class.getName());
    Indexer.enableIndexing(desc, NonTxIndexBuilder.class, indexerOpts, Coprocessor.PRIORITY_USER);
    // create the table
    HBaseAdmin admin = UTIL.getHBaseAdmin();
    admin.createTable(desc);
    HTable primary = new HTable(UTIL.getConfiguration(), tableNameBytes);
    // overwrite the codec so we can verify the current state
    Region region = UTIL.getMiniHBaseCluster().getRegions(tableNameBytes).get(0);
    Indexer indexer = (Indexer) region.getCoprocessorHost().findCoprocessor(Indexer.class.getName());
    NonTxIndexBuilder builder = (NonTxIndexBuilder) indexer.getBuilderForTesting();
    VerifyingIndexCodec codec = new VerifyingIndexCodec();
    builder.setIndexCodecForTesting(codec);
    // setup the Puts we want to write
    final long ts = System.currentTimeMillis();
    EnvironmentEdge edge = new EnvironmentEdge() {
        @Override
        public long currentTime() {
            return ts;
        }
    };
    EnvironmentEdgeManager.injectEdge(edge);
    return new TestState(primary, codec, ts);
}
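setupTest injects a fixed-time EnvironmentEdge and opens an HTable, so each test needs a matching teardown. A minimal sketch, assuming the test keeps the HTable it stored in the returned TestState (the method name cleanupTest is illustrative, not from the source):

// Hypothetical cleanup for setupTest; `primary` is the HTable stored in the returned TestState.
private void cleanupTest(HTable primary) throws IOException {
    // put the real clock back so later tests see current timestamps again
    EnvironmentEdgeManager.reset();
    primary.close();
}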