Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.
The class TestIndexWriter, method testShutdownInterruptsAsExpected.
/**
 * Test that if we get an interruption to the thread while doing a batch (e.g. via shutdown),
 * we correctly end the task
 * @throws Exception on failure
 */
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testShutdownInterruptsAsExpected() throws Exception {
    Stoppable stop = Mockito.mock(Stoppable.class);
    Abortable abort = new StubAbortable();
    // single thread factory so the older request gets queued
    ExecutorService exec = Executors.newFixedThreadPool(1);
    Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
    RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration();
    Mockito.when(e.getConfiguration()).thenReturn(conf);
    Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
    FakeTableFactory factory = new FakeTableFactory(tables);
    byte[] tableName = this.testName.getTableName();
    HTableInterface table = Mockito.mock(HTableInterface.class);
    Mockito.when(table.getTableName()).thenReturn(tableName);
    final CountDownLatch writeStartedLatch = new CountDownLatch(1);
    // latch never gets counted down, so we wait forever
    final CountDownLatch waitOnAbortedLatch = new CountDownLatch(1);
    Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            LOG.info("Write started");
            writeStartedLatch.countDown();
            // when we interrupt the thread for shutdown, we should see this throw an interrupt too
            try {
                waitOnAbortedLatch.await();
            } catch (InterruptedException e) {
                LOG.info("Correctly interrupted while writing!");
                throw e;
            }
            return null;
        }
    });
    // add the table to the set of tables, so it's returned to the writer
    tables.put(new ImmutableBytesPtr(tableName), table);
    // update a single table
    Put m = new Put(row);
    m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
    final List<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>();
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName));
    // setup the writer
    ParallelWriterIndexCommitter committer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
    committer.setup(factory, exec, abort, stop, 2, e);
    KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy();
    policy.setup(stop, abort);
    final IndexWriter writer = new IndexWriter(committer, policy);
    final boolean[] failedWrite = new boolean[] { false };
    Thread primaryWriter = new Thread() {
        @Override
        public void run() {
            try {
                writer.write(indexUpdates);
            } catch (IndexWriteException e) {
                failedWrite[0] = true;
            }
        }
    };
    primaryWriter.start();
    // wait for the write to start before intentionally shutting down the pool
    writeStartedLatch.await();
    writer.stop("Shutting down writer for test " + this.testName.getTableNameString());
    primaryWriter.join();
    assertTrue("Writer should have failed because of the stop we issued", failedWrite[0]);
}
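The interrupt mechanism this test relies on can be seen in isolation with a minimal sketch like the one below (the class name, row bytes, and timing are illustrative, not part of the Phoenix test): a mocked HTableInterface whose batch() blocks on a latch, so interrupting the calling thread surfaces an InterruptedException out of the write.

import java.util.Collections;
import java.util.concurrent.CountDownLatch;

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class BlockingBatchSketch {
    public static void main(String[] args) throws Exception {
        // latch never gets counted down, so batch() blocks until interrupted
        final CountDownLatch blockForever = new CountDownLatch(1);
        final HTableInterface table = Mockito.mock(HTableInterface.class);
        Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
            @Override
            public Void answer(InvocationOnMock invocation) throws Throwable {
                // block until the calling thread is interrupted, then let the
                // InterruptedException propagate out of batch()
                blockForever.await();
                return null;
            }
        });
        Thread writer = new Thread() {
            @Override
            public void run() {
                try {
                    table.batch(Collections.singletonList(new Put(Bytes.toBytes("row"))));
                } catch (Exception e) {
                    System.out.println("batch() ended with: " + e);
                }
            }
        };
        writer.start();
        Thread.sleep(200);   // give the writer time to enter batch()
        writer.interrupt();  // stands in for the pool shutdown in the test above
        writer.join();
    }
}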
Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.
The class UpgradeIT, method putUnlockKVInSysMutex.
private void putUnlockKVInSysMutex(byte[] row) throws Exception {
    try (Connection conn = getConnection(false, null)) {
        ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
        try (HTableInterface sysMutexTable = services.getTable(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES)) {
            byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
            byte[] qualifier = UPGRADE_MUTEX;
            Put put = new Put(row);
            put.add(family, qualifier, UPGRADE_MUTEX_UNLOCKED);
            sysMutexTable.put(put);
            sysMutexTable.flushCommits();
        }
    }
}
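Stripped of the Phoenix-specific constants, the write pattern above reduces to a short helper like this sketch (the method name and the family/qualifier/value bytes are placeholders): build a Put with the deprecated Put.add(family, qualifier, value), hand it to the HTableInterface, and call flushCommits() so any client-side buffered mutation reaches the server.

import java.io.IOException;

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

// Minimal write helper; "f", "q", and "UNLOCKED" are placeholders, not the Phoenix constants.
static void putUnlockedMarker(HTableInterface table, byte[] row) throws IOException {
    Put put = new Put(row);
    put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("UNLOCKED"));
    table.put(put);
    table.flushCommits(); // force any client-side buffered writes out before returning
}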
Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.
The class UpsertValuesIT, method testColumnQualifierForUpsertedValues.
@Test
public void testColumnQualifierForUpsertedValues() throws Exception {
    String schemaName = "A";
    String tableName = "TEST";
    String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
    String ddl = "create table " + fullTableName + " (" + " K varchar primary key," + " CF1.V1 varchar, CF2.V2 VARCHAR, CF2.V3 VARCHAR)";
    try (Connection conn = getConnection(nextTimestamp())) {
        conn.createStatement().execute(ddl);
    }
    String dml = "UPSERT INTO " + fullTableName + " VALUES (?, ?, ?, ?)";
    try (Connection conn = getConnection(nextTimestamp())) {
        PreparedStatement stmt = conn.prepareStatement(dml);
        stmt.setString(1, "KEY1");
        stmt.setString(2, "VALUE1");
        stmt.setString(3, "VALUE2");
        stmt.setString(4, "VALUE3");
        stmt.executeUpdate();
        conn.commit();
    }
    // Issue a raw hbase scan and assert that key values have the expected column qualifiers.
    try (Connection conn = getConnection(nextTimestamp())) {
        HTableInterface table = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(fullTableName));
        ResultScanner scanner = table.getScanner(new Scan());
        Result next = scanner.next();
        assertTrue(next.containsColumn(Bytes.toBytes("CF1"), PInteger.INSTANCE.toBytes(1)));
        assertTrue(next.containsColumn(Bytes.toBytes("CF2"), PInteger.INSTANCE.toBytes(2)));
        assertTrue(next.containsColumn(Bytes.toBytes("CF2"), PInteger.INSTANCE.toBytes(3)));
    }
}
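The raw-scan verification at the end of that test can be generalized into a small sketch like the following (the helper name is hypothetical): scan the table through the HTableInterface and print every family/qualifier pair, which makes it easy to inspect which qualifiers Phoenix actually wrote.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

// Dumps family:qualifier for every cell in the table; useful when checking what
// qualifiers (encoded or not) an upsert produced.
static void dumpQualifiers(HTableInterface table) throws Exception {
    ResultScanner scanner = table.getScanner(new Scan());
    try {
        for (Result result : scanner) {
            for (Cell cell : result.rawCells()) {
                System.out.println(Bytes.toStringBinary(CellUtil.cloneFamily(cell)) + ":"
                        + Bytes.toStringBinary(CellUtil.cloneQualifier(cell)));
            }
        }
    } finally {
        scanner.close();
    }
}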
Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.
The class MetaDataEndpointImpl, method findChildViews_4_11.
private TableViewFinder findChildViews_4_11(Region region, byte[] tenantId, byte[] schemaName, byte[] tableName) throws IOException {
    Scan scan = new Scan();
    byte[] startRow = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
    byte[] stopRow = ByteUtil.nextKey(startRow);
    scan.setStartRow(startRow);
    scan.setStopRow(stopRow);
    SingleColumnValueFilter linkFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, CompareOp.EQUAL, CHILD_TABLE_BYTES);
    linkFilter.setFilterIfMissing(true);
    scan.setFilter(linkFilter);
    scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES);
    scan.addColumn(TABLE_FAMILY_BYTES, PARENT_TENANT_ID_BYTES);
    // Original region-only scanner modified due to PHOENIX-1208
    // RegionScanner scanner = region.getScanner(scan);
    // The following *should* work, but doesn't due to HBASE-11837
    // TableName systemCatalogTableName = region.getTableDesc().getTableName();
    // HTableInterface hTable = env.getTable(systemCatalogTableName);
    // These deprecated calls work around the issue
    HTableInterface hTable = ServerUtil.getHTableForCoprocessorScan(env, region.getTableDesc().getTableName().getName());
    try {
        boolean allViewsInCurrentRegion = true;
        int numOfChildViews = 0;
        List<ViewInfo> viewInfoList = Lists.newArrayList();
        ResultScanner scanner = hTable.getScanner(scan);
        try {
            for (Result result = scanner.next(); (result != null); result = scanner.next()) {
                numOfChildViews++;
                ImmutableBytesWritable ptr = new ImmutableBytesWritable();
                ResultTuple resultTuple = new ResultTuple(result);
                resultTuple.getKey(ptr);
                byte[] key = ptr.copyBytes();
                if (checkTableKeyInRegion(key, region) != null) {
                    allViewsInCurrentRegion = false;
                }
                byte[][] rowViewKeyMetaData = new byte[5][];
                getVarChars(result.getRow(), 5, rowViewKeyMetaData);
                byte[] viewTenantId = rowViewKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX];
                byte[] viewSchemaName = SchemaUtil.getSchemaNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
                byte[] viewName = SchemaUtil.getTableNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
                viewInfoList.add(new ViewInfo(viewTenantId, viewSchemaName, viewName));
            }
            TableViewFinder tableViewFinderResult = new TableViewFinder(viewInfoList);
            if (numOfChildViews > 0 && !allViewsInCurrentRegion) {
                tableViewFinderResult.setAllViewsNotInSingleRegion();
            }
            return tableViewFinderResult;
        } finally {
            scanner.close();
        }
    } finally {
        hTable.close();
    }
}
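The reusable part of findChildViews_4_11 is the scan construction. A stripped-down sketch (placeholder family, qualifier, and value bytes instead of the SYSTEM.CATALOG constants) looks like this: limit the scan to a row-key prefix and apply a SingleColumnValueFilter with setFilterIfMissing(true) so rows without the link column are skipped entirely.

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

// Counts rows in [startRow, stopRow) whose "0:LINK_TYPE" cell equals the expected value.
// Family, qualifier, and value here are placeholders, not the real catalog constants.
static int countLinkedRows(HTableInterface hTable, byte[] startRow, byte[] stopRow) throws Exception {
    Scan scan = new Scan();
    scan.setStartRow(startRow);
    scan.setStopRow(stopRow);
    SingleColumnValueFilter linkFilter = new SingleColumnValueFilter(
            Bytes.toBytes("0"), Bytes.toBytes("LINK_TYPE"), CompareOp.EQUAL, Bytes.toBytes("4"));
    linkFilter.setFilterIfMissing(true); // drop rows that have no link column at all
    scan.setFilter(linkFilter);
    int count = 0;
    ResultScanner scanner = hTable.getScanner(scan);
    try {
        while (scanner.next() != null) {
            count++;
        }
    } finally {
        scanner.close();
    }
    return count;
}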
Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.
The class DynamicColumnIT, method initTable.
@Before
public void initTable() throws Exception {
    tableName = generateUniqueName();
    try (PhoenixConnection pconn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
        ConnectionQueryServices services = pconn.getQueryServices();
        try (HBaseAdmin admin = services.getAdmin()) {
            HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
            htd.addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
            htd.addFamily(new HColumnDescriptor(FAMILY_NAME_A));
            htd.addFamily(new HColumnDescriptor(FAMILY_NAME_B));
            admin.createTable(htd);
        }
        try (HTableInterface hTable = services.getTable(Bytes.toBytes(tableName))) {
            // Insert rows using standard HBase mechanism with standard HBase "types"
            List<Row> mutations = new ArrayList<Row>();
            byte[] dv = Bytes.toBytes("DV");
            byte[] first = Bytes.toBytes("F");
            byte[] f1v1 = Bytes.toBytes("F1V1");
            byte[] f1v2 = Bytes.toBytes("F1V2");
            byte[] f2v1 = Bytes.toBytes("F2V1");
            byte[] f2v2 = Bytes.toBytes("F2V2");
            byte[] key = Bytes.toBytes("entry1");
            Put put = new Put(key);
            put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dv, Bytes.toBytes("default"));
            put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, first, Bytes.toBytes("first"));
            put.add(FAMILY_NAME_A, f1v1, Bytes.toBytes("f1value1"));
            put.add(FAMILY_NAME_A, f1v2, Bytes.toBytes("f1value2"));
            put.add(FAMILY_NAME_B, f2v1, Bytes.toBytes("f2value1"));
            put.add(FAMILY_NAME_B, f2v2, Bytes.toBytes("f2value2"));
            mutations.add(put);
            hTable.batch(mutations);
            // Create Phoenix table after HBase table was created through the native APIs
            // The timestamp of the table creation must be later than the timestamp of the data
            pconn.createStatement().execute("create table " + tableName + " (entry varchar not null," + " F varchar," + " A.F1v1 varchar," + " A.F1v2 varchar," + " B.F2v1 varchar" + " CONSTRAINT pk PRIMARY KEY (entry))");
        }
    }
}
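For reference, the batch() call used above accepts any mix of Row operations. A minimal sketch with placeholder rows and families (not the DynamicColumnIT constants) is shown below, using the two-argument overload so per-operation results are captured.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.util.Bytes;

// Submits two Puts in a single batch; results[i] is non-null when mutation i succeeded.
static void writeTwoRows(HTableInterface hTable) throws Exception {
    List<Row> mutations = new ArrayList<Row>();
    Put p1 = new Put(Bytes.toBytes("entry1"));
    p1.add(Bytes.toBytes("A"), Bytes.toBytes("F1V1"), Bytes.toBytes("f1value1"));
    mutations.add(p1);
    Put p2 = new Put(Bytes.toBytes("entry2"));
    p2.add(Bytes.toBytes("B"), Bytes.toBytes("F2V1"), Bytes.toBytes("f2value1"));
    mutations.add(p2);
    Object[] results = new Object[mutations.size()];
    hTable.batch(mutations, results);
}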