use of org.apache.hadoop.hbase.client.HTableInterface in project Solbase by Photobucket.
the class IndexWriter method deleteDocKeyIdMap.
public void deleteDocKeyIdMap(Put mappingPut) {
    // for remote server update via solr update, we want to use
    // getDocTable(), but for now map/red can use local htable
    HTableInterface mappingTable = SolbaseUtil.getDocKeyIdMapTable();
    // delete the doc key/id mapping row from the mapping table
    try {
        Delete delete = new Delete(mappingPut.getRow());
        mappingTable.delete(delete);
    } catch (IOException e) {
        throw new SolbaseException(SolbaseException.ErrorCode.SERVER_ERROR, e.getMessage());
    } finally {
        SolbaseUtil.releaseTable(mappingTable);
    }
}
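For readers unfamiliar with the acquire/use/release pattern above, here is a minimal sketch of the same delete-by-row idiom against a plain HTableInterface; the connection setup, table name, and row key are illustrative assumptions, not Solbase code.
// Sketch only: delete a single row and always release the table and connection.
HConnection connection = HConnectionManager.createConnection(HBaseConfiguration.create());
HTableInterface table = connection.getTable("docKeyIdMap");   // assumed table name
try {
    table.delete(new Delete(Bytes.toBytes("someRowKey")));    // assumed row key
} finally {
    table.close();
    connection.close();
}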
use of org.apache.hadoop.hbase.client.HTableInterface in project honeycomb by altamiracorp.
the class HBaseMetadataTest method testRenameExistingTableNoAutoFlush.
@Test(expected = TableNotFoundException.class)
public void testRenameExistingTableNoAutoFlush() throws Exception {
    String originalName = "OriginalName";
    String newName = "NewName";
    TableSchema origSchema = TABLE_SCHEMA_GEN.next();
    // Configure the table to disable auto flush
    HTableInterface hTableSpy = PowerMockito.spy(MockHTable.create());
    Mockito.when(hTableSpy.isAutoFlush()).thenReturn(false);
    hbaseMetadata.createTable(originalName, origSchema);
    long origId = hbaseMetadata.getTableId(originalName);
    hbaseMetadata.renameExistingTable(originalName, newName);
    long newId = hbaseMetadata.getTableId(newName);
    assertEquals(origId, newId);
    Collection<ColumnSchema> origSchemaColumns = origSchema.getColumns();
    TableSchema newSchema = hbaseMetadata.getSchema(newId);
    for (ColumnSchema columnSchema : newSchema.getColumns()) {
        assertTrue(origSchemaColumns.contains(columnSchema));
    }
    // Trying to access the id of the old table name will result in an exception
    hbaseMetadata.getTableId(originalName);
    hTableSpy.close();
}
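The spy above stubs isAutoFlush() to false; as a reminder of what that flag controls, here is a minimal sketch of buffered writes against an HTableInterface (the connection, table, family, and qualifier names are assumptions, not honeycomb code): with auto-flush off, Puts sit in the client-side write buffer until flushCommits() or close().
HTableInterface table = connection.getTable("metadata");   // assumed connection and table name
table.setAutoFlush(false);                                 // buffer mutations on the client
Put put = new Put(Bytes.toBytes("row1"));
put.add(Bytes.toBytes("cf"), Bytes.toBytes("col"), Bytes.toBytes("value"));
table.put(put);         // may remain in the client-side write buffer
table.flushCommits();   // explicitly push buffered mutations to the region servers
table.close();          // close() also flushes anything still buffered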
use of org.apache.hadoop.hbase.client.HTableInterface in project Cloud9 by lintool.
the class HBaseWordCountFetch method run.
/**
 * Runs this tool.
 */
@SuppressWarnings({ "static-access" })
public int run(String[] args) throws Exception {
    Options options = new Options();
    options.addOption(OptionBuilder.withArgName("table").hasArg().withDescription("HBase table name").create(TABLE));
    options.addOption(OptionBuilder.withArgName("word").hasArg().withDescription("word to look up").create(WORD));
    CommandLine cmdline;
    CommandLineParser parser = new GnuParser();
    try {
        cmdline = parser.parse(options, args);
    } catch (ParseException exp) {
        System.err.println("Error parsing command line: " + exp.getMessage());
        return -1;
    }
    if (!cmdline.hasOption(TABLE) || !cmdline.hasOption(WORD)) {
        System.out.println("args: " + Arrays.toString(args));
        HelpFormatter formatter = new HelpFormatter();
        formatter.setWidth(120);
        formatter.printHelp(this.getClass().getName(), options);
        ToolRunner.printGenericCommandUsage(System.out);
        return -1;
    }
    String tableName = cmdline.getOptionValue(TABLE);
    String word = cmdline.getOptionValue(WORD);
    Configuration conf = getConf();
    conf.addResource(new Path("/etc/hbase/conf/hbase-site.xml"));
    Configuration hbaseConfig = HBaseConfiguration.create(conf);
    HConnection hbaseConnection = HConnectionManager.createConnection(hbaseConfig);
    HTableInterface table = hbaseConnection.getTable(tableName);
    Get get = new Get(Bytes.toBytes(word));
    Result result = table.get(get);
    int count = Bytes.toInt(result.getValue(HBaseWordCount.CF, HBaseWordCount.COUNT));
    LOG.info("word: " + word + ", count: " + count);
    return 0;
}
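Note that the snippet above never closes the table or connection and assumes the word is present; a hedged variant of the same fetch with cleanup and an empty-result check might look like this (same identifiers as above, the defensive handling is an addition, not part of the Cloud9 code):
HTableInterface table = hbaseConnection.getTable(tableName);
try {
    Result result = table.get(new Get(Bytes.toBytes(word)));
    byte[] value = result.getValue(HBaseWordCount.CF, HBaseWordCount.COUNT);
    if (value == null) {
        LOG.info("word not found: " + word);
    } else {
        LOG.info("word: " + word + ", count: " + Bytes.toInt(value));
    }
} finally {
    table.close();
    hbaseConnection.close();
}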
use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.
the class AlterTableWithViewsIT method testMakeBaseTableTransactional.
@Test
public void testMakeBaseTableTransactional() throws Exception {
    try (Connection conn = DriverManager.getConnection(getUrl());
         Connection viewConn = isMultiTenant ? DriverManager.getConnection(TENANT_SPECIFIC_URL1) : conn) {
        String baseTableName = "NONTXNTBL_" + generateUniqueName() + (isMultiTenant ? "0" : "1");
        String viewOfTable = baseTableName + "_VIEW";
        String ddlFormat = "CREATE TABLE IF NOT EXISTS " + baseTableName + " (" +
            " %s ID char(1) NOT NULL," +
            " COL1 integer NOT NULL," +
            " COL2 bigint NOT NULL," +
            " CONSTRAINT NAME_PK PRIMARY KEY (%s ID, COL1, COL2)" +
            " ) %s";
        conn.createStatement().execute(generateDDL(ddlFormat));
        assertTableDefinition(conn, baseTableName, PTableType.TABLE, null, 0, 3, QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT, "ID", "COL1", "COL2");
        viewConn.createStatement().execute("CREATE VIEW " + viewOfTable + " ( VIEW_COL1 DECIMAL(10,2), VIEW_COL2 VARCHAR ) AS SELECT * FROM " + baseTableName);
        assertTableDefinition(conn, viewOfTable, PTableType.VIEW, baseTableName, 0, 5, 3, "ID", "COL1", "COL2", "VIEW_COL1", "VIEW_COL2");
        PName tenantId = isMultiTenant ? PNameFactory.newName("tenant1") : null;
        PhoenixConnection phoenixConn = conn.unwrap(PhoenixConnection.class);
        HTableInterface htable = phoenixConn.getQueryServices().getTable(Bytes.toBytes(baseTableName));
        assertFalse(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
        assertFalse(phoenixConn.getTable(new PTableKey(null, baseTableName)).isTransactional());
        assertFalse(viewConn.unwrap(PhoenixConnection.class).getTable(new PTableKey(tenantId, viewOfTable)).isTransactional());
        // make the base table transactional
        conn.createStatement().execute("ALTER TABLE " + baseTableName + " SET TRANSACTIONAL=true");
        // query the view to force the table cache to be updated
        viewConn.createStatement().execute("SELECT * FROM " + viewOfTable);
        htable = phoenixConn.getQueryServices().getTable(Bytes.toBytes(baseTableName));
        assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
        assertTrue(phoenixConn.getTable(new PTableKey(null, baseTableName)).isTransactional());
        assertTrue(viewConn.unwrap(PhoenixConnection.class).getTable(new PTableKey(tenantId, viewOfTable)).isTransactional());
    }
}
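The transactional assertions above hinge on HTableInterface exposing the server-side table descriptor; here is a stripped-down sketch of that coprocessor check in isolation (it reuses phoenixConn and baseTableName from the test; the printing loop is added for illustration only):
HTableInterface htable = phoenixConn.getQueryServices().getTable(Bytes.toBytes(baseTableName));
try {
    HTableDescriptor descriptor = htable.getTableDescriptor();
    // getCoprocessors() lists the coprocessor class names currently attached to the table
    for (String coprocessor : descriptor.getCoprocessors()) {
        System.out.println("coprocessor: " + coprocessor);
    }
} finally {
    htable.close();
}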
use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.
the class ConnectionQueryServicesImpl method returnSequences.
@SuppressWarnings("deprecation")
@Override
public void returnSequences(List<SequenceKey> keys, long timestamp, SQLException[] exceptions) throws SQLException {
List<Sequence> sequences = Lists.newArrayListWithExpectedSize(keys.size());
for (SequenceKey key : keys) {
Sequence newSequences = new Sequence(key);
Sequence sequence = sequenceMap.putIfAbsent(key, newSequences);
if (sequence == null) {
sequence = newSequences;
}
sequences.add(sequence);
}
try {
for (Sequence sequence : sequences) {
sequence.getLock().lock();
}
// Now that we have all the locks we need, attempt to return the unused sequence values
List<Append> mutations = Lists.newArrayListWithExpectedSize(sequences.size());
List<Sequence> toReturnList = Lists.newArrayListWithExpectedSize(sequences.size());
int[] indexes = new int[sequences.size()];
for (int i = 0; i < sequences.size(); i++) {
Sequence sequence = sequences.get(i);
try {
Append append = sequence.newReturn(timestamp);
toReturnList.add(sequence);
mutations.add(append);
} catch (EmptySequenceCacheException ignore) {
// Nothing to return, so ignore
}
}
if (toReturnList.isEmpty()) {
return;
}
HTableInterface hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
Object[] resultObjects = null;
SQLException sqlE = null;
try {
resultObjects = hTable.batch(mutations);
} catch (IOException e) {
sqlE = ServerUtil.parseServerException(e);
} catch (InterruptedException e) {
// restore the interrupt status
Thread.currentThread().interrupt();
sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
} finally {
try {
hTable.close();
} catch (IOException e) {
if (sqlE == null) {
sqlE = ServerUtil.parseServerException(e);
} else {
sqlE.setNextException(ServerUtil.parseServerException(e));
}
}
if (sqlE != null) {
throw sqlE;
}
}
for (int i = 0; i < resultObjects.length; i++) {
Sequence sequence = toReturnList.get(i);
Result result = (Result) resultObjects[i];
try {
sequence.returnValue(result);
} catch (SQLException e) {
exceptions[indexes[i]] = e;
}
}
} finally {
for (Sequence sequence : sequences) {
sequence.getLock().unlock();
}
}
}
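The HTableInterface-specific part of returnSequences is the batch() call: submit a list of mutations and read back a parallel array of results. A stripped-down sketch of that pattern follows; the helper name is hypothetical, and it uses the same deprecated batch variant that returns the results array.
void returnSequencesSketch(HTableInterface hTable, List<Append> mutations) throws IOException {
    try {
        Object[] results = hTable.batch(mutations);    // one entry per mutation, in submission order
        for (int i = 0; i < results.length; i++) {
            Result result = (Result) results[i];       // successful Appends come back as Results
            // ... apply 'result' to the matching Sequence, as the method above does ...
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();            // restore the interrupt status, as above
    } finally {
        hTable.close();                                // always release the underlying table
    }
}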