Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.
The class IndexUtil, method wrapResultUsingOffset.
public static void wrapResultUsingOffset(final RegionCoprocessorEnvironment environment, List<Cell> result,
    final int offset, ColumnReference[] dataColumns, TupleProjector tupleProjector, Region dataRegion,
    IndexMaintainer indexMaintainer, byte[][] viewConstants, ImmutableBytesWritable ptr) throws IOException {
  if (tupleProjector != null) {
    // Join back to data table here by issuing a local get projecting
    // all of the cq:cf from the KeyValueColumnExpression into the Get.
    Cell firstCell = result.get(0);
    byte[] indexRowKey = firstCell.getRowArray();
    ptr.set(indexRowKey, firstCell.getRowOffset() + offset, firstCell.getRowLength() - offset);
    byte[] dataRowKey = indexMaintainer.buildDataRowKey(ptr, viewConstants);
    Get get = new Get(dataRowKey);
    ImmutableStorageScheme storageScheme = indexMaintainer.getIndexStorageScheme();
    for (int i = 0; i < dataColumns.length; i++) {
      if (storageScheme == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) {
        get.addFamily(dataColumns[i].getFamily());
      } else {
        get.addColumn(dataColumns[i].getFamily(), dataColumns[i].getQualifier());
      }
    }
    Result joinResult = null;
    if (dataRegion != null) {
      joinResult = dataRegion.get(get);
    } else {
      TableName dataTable = TableName.valueOf(
          MetaDataUtil.getLocalIndexUserTableName(environment.getRegion().getTableDesc().getNameAsString()));
      HTableInterface table = null;
      try {
        table = environment.getTable(dataTable);
        joinResult = table.get(get);
      } finally {
        if (table != null) table.close();
      }
    }
    // At this point joinResult has data from the data table. We now need to take this result and
    // add it to the cells that we are returning.
    // TODO: handle null case (but shouldn't happen)
    Tuple joinTuple = new ResultTuple(joinResult);
    // This will create a byte[] that captures all of the values from the data table
    byte[] value = tupleProjector.getSchema().toBytes(joinTuple, tupleProjector.getExpressions(),
        tupleProjector.getValueBitSet(), ptr);
    KeyValue keyValue = KeyValueUtil.newKeyValue(firstCell.getRowArray(), firstCell.getRowOffset(),
        firstCell.getRowLength(), VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER, firstCell.getTimestamp(),
        value, 0, value.length);
    result.add(keyValue);
  }
  ListIterator<Cell> itr = result.listIterator();
  while (itr.hasNext()) {
    final Cell cell = itr.next();
    // TODO: Create DelegateCell class instead
    // The row accessors apply the offset so callers see the row key without the leading prefix;
    // all other accessors delegate to the wrapped cell unchanged.
    Cell newCell = new Cell() {
      @Override public byte[] getRowArray() { return cell.getRowArray(); }
      @Override public int getRowOffset() { return cell.getRowOffset() + offset; }
      @Override public short getRowLength() { return (short) (cell.getRowLength() - offset); }
      @Override public byte[] getFamilyArray() { return cell.getFamilyArray(); }
      @Override public int getFamilyOffset() { return cell.getFamilyOffset(); }
      @Override public byte getFamilyLength() { return cell.getFamilyLength(); }
      @Override public byte[] getQualifierArray() { return cell.getQualifierArray(); }
      @Override public int getQualifierOffset() { return cell.getQualifierOffset(); }
      @Override public int getQualifierLength() { return cell.getQualifierLength(); }
      @Override public long getTimestamp() { return cell.getTimestamp(); }
      @Override public byte getTypeByte() { return cell.getTypeByte(); }
      @Override public long getMvccVersion() { return cell.getMvccVersion(); }
      @Override public long getSequenceId() { return cell.getSequenceId(); }
      @Override public byte[] getValueArray() { return cell.getValueArray(); }
      @Override public int getValueOffset() { return cell.getValueOffset(); }
      @Override public int getValueLength() { return cell.getValueLength(); }
      @Override public byte[] getTagsArray() { return cell.getTagsArray(); }
      @Override public int getTagsOffset() { return cell.getTagsOffset(); }
      @Override public int getTagsLength() { return cell.getTagsLength(); }
      @Override public byte[] getValue() { return cell.getValue(); }
      @Override public byte[] getFamily() { return cell.getFamily(); }
      @Override public byte[] getQualifier() { return cell.getQualifier(); }
      @Override public byte[] getRow() { return cell.getRow(); }
    };
    itr.set(newCell);
  }
}
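When no data Region is available locally, the Phoenix code above falls back to environment.getTable(...), which returns an HTableInterface that the caller must close. That fallback can be read in isolation as a small helper; the method name below is illustrative only (not part of Phoenix), and imports are omitted to match the snippets on this page.

// Minimal sketch of the fallback lookup used above, isolated for clarity.
// The helper name is illustrative and not part of Phoenix.
private static Result getJoinRowFromDataTable(RegionCoprocessorEnvironment environment, Get get) throws IOException {
  TableName dataTable = TableName.valueOf(
      MetaDataUtil.getLocalIndexUserTableName(environment.getRegion().getTableDesc().getNameAsString()));
  HTableInterface table = null;
  try {
    // getTable() hands out a table instance tied to the coprocessor environment;
    // the caller is still responsible for closing it.
    table = environment.getTable(dataTable);
    return table.get(get);
  } finally {
    if (table != null) {
      table.close();
    }
  }
}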
Use of org.apache.hadoop.hbase.client.HTableInterface in project cdap by caskdata.
The class LastReplicateTimeObserver, method start.
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  LOG.info("LastReplicateTimeObserver Start received.");
  String tableName = StatusUtils.getReplicationStateTableName(env.getConfiguration());
  HTableInterface htableInterface = env.getTable(TableName.valueOf(tableName));
  hBase12CDH570TableUpdater = new HBase12CDH570TableUpdater(
      ReplicationConstants.ReplicationStatusTool.REPLICATE_TIME_ROW_TYPE, env.getConfiguration(), htableInterface);
}
Use of org.apache.hadoop.hbase.client.HTableInterface in project cdap by caskdata.
The class LastWriteTimeObserver, method start.
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  LOG.info("LastWriteTimeObserver Start received.");
  String tableName = StatusUtils.getReplicationStateTableName(env.getConfiguration());
  HTableInterface htableInterface = env.getTable(TableName.valueOf(tableName));
  hBase12CDH570TableUpdater = new HBase12CDH570TableUpdater(
      ReplicationConstants.ReplicationStatusTool.WRITE_TIME_ROW_TYPE, env.getConfiguration(), htableInterface);
}
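The two CDAP observers above are identical except for the row-type constant passed to HBase12CDH570TableUpdater. A small helper along the following lines could factor out the shared setup; the method name is hypothetical, and the sketch assumes the row-type constant is a String, as the constructor calls above suggest.

// Hypothetical helper factoring out the setup shared by LastReplicateTimeObserver and
// LastWriteTimeObserver; the name and the String row-type parameter are assumptions.
private HBase12CDH570TableUpdater createTableUpdater(CoprocessorEnvironment env, String rowType) throws IOException {
  String tableName = StatusUtils.getReplicationStateTableName(env.getConfiguration());
  HTableInterface htableInterface = env.getTable(TableName.valueOf(tableName));
  return new HBase12CDH570TableUpdater(rowType, env.getConfiguration(), htableInterface);
}

// Usage sketch inside an observer's start():
// hBase12CDH570TableUpdater = createTableUpdater(env, ReplicationConstants.ReplicationStatusTool.WRITE_TIME_ROW_TYPE);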
Use of org.apache.hadoop.hbase.client.HTableInterface in project cdap by caskdata.
The class TopicMetadataCache, method updateCache.
/**
 * Called in unit tests, and since the refresh thread might invoke a cache update at the same time, we make this
 * method synchronized. Aside from unit tests, synchronization is not required.
 */
@VisibleForTesting
public synchronized void updateCache() throws IOException {
  HTableInterface metadataTable = null;
  long now = System.currentTimeMillis();
  long topicCount = 0;
  try {
    CConfiguration cConf = cConfReader.read();
    if (cConf != null) {
      this.cConf = cConf;
      int metadataScanSize = cConf.getInt(Constants.MessagingSystem.HBASE_SCAN_CACHE_ROWS);
      metadataCacheUpdateFreqInMillis = TimeUnit.SECONDS.toMillis(cConf.getLong(
          Constants.MessagingSystem.COPROCESSOR_METADATA_CACHE_UPDATE_FREQUENCY_SECONDS,
          MessagingUtils.Constants.METADATA_CACHE_UPDATE_FREQUENCY_SECS));
      String tableName = cConf.get(Constants.MessagingSystem.METADATA_TABLE_NAME);
      metadataTable = getMetadataTable(tableName);
      if (metadataTable == null) {
        LOG.warn(String.format("Could not find HTableInterface of metadataTable %s:%s. Cannot update metadata cache",
            hbaseNamespacePrefix, tableName));
        return;
      }
      Map<ByteBuffer, Map<String, String>> newTopicCache = new HashMap<>();
      Scan scan = scanBuilder.setCaching(metadataScanSize).build();
      try (ResultScanner scanner = metadataTable.getScanner(scan)) {
        for (Result result : scanner) {
          ByteBuffer topicId = ByteBuffer.wrap(result.getRow());
          byte[] value = result.getValue(COL_FAMILY, COL);
          Map<String, String> properties = GSON.fromJson(Bytes.toString(value), MAP_TYPE);
          String ttl = properties.get(MessagingUtils.Constants.TTL_KEY);
          long ttlInMes = TimeUnit.SECONDS.toMillis(Long.parseLong(ttl));
          properties.put(MessagingUtils.Constants.TTL_KEY, Long.toString(ttlInMes));
          newTopicCache.put(topicId, properties);
          topicCount++;
        }
      }
      long elapsed = System.currentTimeMillis() - now;
      this.metadataCache = newTopicCache;
      this.lastUpdated = now;
      LOG.debug(String.format("Updated consumer config cache with %d topics, took %d msec", topicCount, elapsed));
    }
  } finally {
    if (metadataTable != null) {
      try {
        metadataTable.close();
      } catch (IOException ex) {
        LOG.error("Error closing table. ", ex);
      }
    }
  }
}
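The javadoc above refers to a refresh thread that may call updateCache() concurrently with tests. A minimal sketch of how such a periodic refresh could be driven is shown below; CDAP's actual wiring may differ, and the executor, the cache reference, and the interval variable are assumptions for illustration only.

// Illustrative only: one way the periodic refresh mentioned in the javadoc could be driven.
// topicMetadataCache and refreshIntervalMillis are assumed fields, not CDAP's actual names.
ScheduledExecutorService refreshExecutor = Executors.newSingleThreadScheduledExecutor();
refreshExecutor.scheduleWithFixedDelay(new Runnable() {
  @Override
  public void run() {
    try {
      topicMetadataCache.updateCache();
    } catch (IOException e) {
      LOG.error("Failed to refresh topic metadata cache", e);
    }
  }
}, 0L, refreshIntervalMillis, TimeUnit.MILLISECONDS);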
Use of org.apache.hadoop.hbase.client.HTableInterface in project bagheera by mozilla-metrics.
The class FlushResult, method flush.
public void flush() throws IOException {
  IOException lastException = null;
  this.currentTimeMillis = System.currentTimeMillis();
  int i;
  for (i = 0; i < getRetryCount(); i++) {
    HTableInterface table = hbasePool.getTable(tableName);
    try {
      table.setAutoFlush(false);
      final TimerContext flushTimerContext = flushTimer.time();
      try {
        List<Row> rows = new ArrayList<Row>(batchSize);
        while (!rowQueue.isEmpty() && rows.size() < batchSize) {
          Row row = rowQueue.poll();
          if (row != null) {
            rows.add(row);
            rowQueueSize.decrementAndGet();
          }
        }
        try {
          FlushResult result = flushTable(table, rows);
          stored.mark(result.successfulPutCount);
          storeFailed.mark(result.failedPutCount);
          deleted.mark(result.successfulDeleteCount);
          deleteFailed.mark(result.failedDeleteCount);
        } catch (InterruptedException e) {
          LOG.error("Error flushing batch of " + batchSize + " messages", e);
        }
      } finally {
        flushTimerContext.stop();
        if (table != null) {
          table.close();
        }
      }
      break;
    } catch (IOException e) {
      LOG.warn(String.format("Error in flush attempt %d of %d, clearing Region cache", (i + 1), getRetryCount()), e);
      lastException = e;
      // connection.clearRegionCache();
      try {
        Thread.sleep(getRetrySleepSeconds() * 1000);
      } catch (InterruptedException e1) {
        // wake up
        LOG.info("woke up by interruption", e1);
      }
    }
  }
  if (i >= getRetryCount() && lastException != null) {
    LOG.error("Error in final flush attempt, giving up.");
    throw lastException;
  }
  LOG.debug("Flush finished");
}
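flushTable(...) itself is not shown in the snippet above. The sketch below is a hypothetical reconstruction built on HTableInterface.batch(List, Object[]); the FlushResult constructor and the way partial failures are counted are assumptions for illustration, not bagheera's actual code.

// Hypothetical sketch of flushTable(...), which the snippet above calls but does not show.
// Assumes FlushResult is a simple holder for the four counters read above.
private FlushResult flushTable(HTableInterface table, List<Row> rows) throws IOException, InterruptedException {
  Object[] results = new Object[rows.size()];
  try {
    // batch() executes the mixed list of Puts and Deletes; per-row outcomes land in results.
    table.batch(rows, results);
  } catch (RetriesExhaustedWithDetailsException e) {
    // Partial failures are reflected in results; count them below instead of failing the whole flush.
    LOG.warn("Some rows failed to flush", e);
  }
  int successfulPuts = 0, failedPuts = 0, successfulDeletes = 0, failedDeletes = 0;
  for (int idx = 0; idx < results.length; idx++) {
    // A null entry or a Throwable is treated here as a failed operation.
    boolean ok = results[idx] != null && !(results[idx] instanceof Throwable);
    if (rows.get(idx) instanceof Put) {
      if (ok) successfulPuts++; else failedPuts++;
    } else {
      if (ok) successfulDeletes++; else failedDeletes++;
    }
  }
  return new FlushResult(successfulPuts, failedPuts, successfulDeletes, failedDeletes);
}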