
Example 56 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

The class IndexUtil, method wrapResultUsingOffset.

public static void wrapResultUsingOffset(final RegionCoprocessorEnvironment environment, List<Cell> result, final int offset, ColumnReference[] dataColumns, TupleProjector tupleProjector, Region dataRegion, IndexMaintainer indexMaintainer, byte[][] viewConstants, ImmutableBytesWritable ptr) throws IOException {
    if (tupleProjector != null) {
        // Join back to data table here by issuing a local get projecting
        // all of the cq:cf from the KeyValueColumnExpression into the Get.
        Cell firstCell = result.get(0);
        byte[] indexRowKey = firstCell.getRowArray();
        ptr.set(indexRowKey, firstCell.getRowOffset() + offset, firstCell.getRowLength() - offset);
        byte[] dataRowKey = indexMaintainer.buildDataRowKey(ptr, viewConstants);
        Get get = new Get(dataRowKey);
        ImmutableStorageScheme storageScheme = indexMaintainer.getIndexStorageScheme();
        for (int i = 0; i < dataColumns.length; i++) {
            if (storageScheme == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) {
                get.addFamily(dataColumns[i].getFamily());
            } else {
                get.addColumn(dataColumns[i].getFamily(), dataColumns[i].getQualifier());
            }
        }
        Result joinResult = null;
        if (dataRegion != null) {
            joinResult = dataRegion.get(get);
        } else {
            TableName dataTable = TableName.valueOf(MetaDataUtil.getLocalIndexUserTableName(environment.getRegion().getTableDesc().getNameAsString()));
            HTableInterface table = null;
            try {
                table = environment.getTable(dataTable);
                joinResult = table.get(get);
            } finally {
                if (table != null)
                    table.close();
            }
        }
        // At this point, joinResult holds the row fetched from the data table. We now need to
        // project it into a single value and append it to the cells we are returning.
        // TODO: handle null case (but shouldn't happen)
        Tuple joinTuple = new ResultTuple(joinResult);
        // This will create a byte[] that captures all of the values from the data table
        byte[] value = tupleProjector.getSchema().toBytes(joinTuple, tupleProjector.getExpressions(), tupleProjector.getValueBitSet(), ptr);
        KeyValue keyValue = KeyValueUtil.newKeyValue(firstCell.getRowArray(), firstCell.getRowOffset(), firstCell.getRowLength(), VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER, firstCell.getTimestamp(), value, 0, value.length);
        result.add(keyValue);
    }
    ListIterator<Cell> itr = result.listIterator();
    while (itr.hasNext()) {
        final Cell cell = itr.next();
        // TODO: Create DelegateCell class instead
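        // The wrapper below adjusts only the row coordinates: it skips the first `offset`
        // bytes of the index row key and shortens the reported row length to match;
        // every other accessor simply delegates to the wrapped cell.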
        Cell newCell = new Cell() {

            @Override
            public byte[] getRowArray() {
                return cell.getRowArray();
            }

            @Override
            public int getRowOffset() {
                return cell.getRowOffset() + offset;
            }

            @Override
            public short getRowLength() {
                return (short) (cell.getRowLength() - offset);
            }

            @Override
            public byte[] getFamilyArray() {
                return cell.getFamilyArray();
            }

            @Override
            public int getFamilyOffset() {
                return cell.getFamilyOffset();
            }

            @Override
            public byte getFamilyLength() {
                return cell.getFamilyLength();
            }

            @Override
            public byte[] getQualifierArray() {
                return cell.getQualifierArray();
            }

            @Override
            public int getQualifierOffset() {
                return cell.getQualifierOffset();
            }

            @Override
            public int getQualifierLength() {
                return cell.getQualifierLength();
            }

            @Override
            public long getTimestamp() {
                return cell.getTimestamp();
            }

            @Override
            public byte getTypeByte() {
                return cell.getTypeByte();
            }

            @Override
            public long getMvccVersion() {
                return cell.getMvccVersion();
            }

            @Override
            public long getSequenceId() {
                return cell.getSequenceId();
            }

            @Override
            public byte[] getValueArray() {
                return cell.getValueArray();
            }

            @Override
            public int getValueOffset() {
                return cell.getValueOffset();
            }

            @Override
            public int getValueLength() {
                return cell.getValueLength();
            }

            @Override
            public byte[] getTagsArray() {
                return cell.getTagsArray();
            }

            @Override
            public int getTagsOffset() {
                return cell.getTagsOffset();
            }

            @Override
            public int getTagsLength() {
                return cell.getTagsLength();
            }

            @Override
            public byte[] getValue() {
                return cell.getValue();
            }

            @Override
            public byte[] getFamily() {
                return cell.getFamily();
            }

            @Override
            public byte[] getQualifier() {
                return cell.getQualifier();
            }

            @Override
            public byte[] getRow() {
                return cell.getRow();
            }
        };
        itr.set(newCell);
    }
}
Also used: TableName (org.apache.hadoop.hbase.TableName), KeyValue (org.apache.hadoop.hbase.KeyValue), Get (org.apache.hadoop.hbase.client.Get), ResultTuple (org.apache.phoenix.schema.tuple.ResultTuple), ImmutableStorageScheme (org.apache.phoenix.schema.PTable.ImmutableStorageScheme), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), Cell (org.apache.hadoop.hbase.Cell), Tuple (org.apache.phoenix.schema.tuple.Tuple), Result (org.apache.hadoop.hbase.client.Result), MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
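
For context, HTableInterface has been deprecated since HBase 1.0 in favor of org.apache.hadoop.hbase.client.Table. Below is a minimal, hypothetical sketch of the same point-get against the data table using the newer client API; the table name, configuration, and column coordinates are placeholders for illustration and are not taken from the Phoenix code above.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;

public class DataTableJoinSketch {

    // Fetches the data-table row for the given row key, projecting a single column.
    // Connection and Table are Closeable, so try-with-resources replaces the explicit
    // finally { table.close(); } pattern used above.
    public static Result fetchDataRow(Configuration conf, byte[] dataRowKey,
                                      byte[] family, byte[] qualifier) throws IOException {
        try (Connection connection = ConnectionFactory.createConnection(conf);
             // "DATA_TABLE" is a placeholder, not the data table resolution shown above
             Table table = connection.getTable(TableName.valueOf("DATA_TABLE"))) {
            Get get = new Get(dataRowKey);
            get.addColumn(family, qualifier);
            return table.get(get);
        }
    }
}

Inside a coprocessor, opening a fresh Connection per request would be expensive, which is why the Phoenix code above reuses the coprocessor environment's own getTable(...) and closes the handle in a finally block.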

Example 57 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project cdap by caskdata.

The class LastReplicateTimeObserver, method start.

@Override
public void start(CoprocessorEnvironment env) throws IOException {
    LOG.info("LastReplicateTimeObserver Start received.");
    String tableName = StatusUtils.getReplicationStateTableName(env.getConfiguration());
    HTableInterface htableInterface = env.getTable(TableName.valueOf(tableName));
    hBase12CDH570TableUpdater = new HBase12CDH570TableUpdater(ReplicationConstants.ReplicationStatusTool.REPLICATE_TIME_ROW_TYPE, env.getConfiguration(), htableInterface);
}
Also used: HTableInterface (org.apache.hadoop.hbase.client.HTableInterface)
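
The start() hook above obtains an HTableInterface but never closes it; the natural place to release such a handle is the coprocessor's stop() hook. A hypothetical fragment for the same class is sketched below; keeping the table reference in a field is an assumption for illustration, since the class shown hands it straight to the table updater.

// Hypothetical field retaining the handle acquired in start().
private HTableInterface htableInterface;

@Override
public void stop(CoprocessorEnvironment env) throws IOException {
    LOG.info("LastReplicateTimeObserver Stop received.");
    // HTableInterface is Closeable; release the handle obtained in start().
    if (htableInterface != null) {
        htableInterface.close();
        htableInterface = null;
    }
}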

Example 58 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project cdap by caskdata.

The class LastWriteTimeObserver, method start.

@Override
public void start(CoprocessorEnvironment env) throws IOException {
    LOG.info("LastWriteTimeObserver Start received.");
    String tableName = StatusUtils.getReplicationStateTableName(env.getConfiguration());
    HTableInterface htableInterface = env.getTable(TableName.valueOf(tableName));
    hBase12CDH570TableUpdater = new HBase12CDH570TableUpdater(ReplicationConstants.ReplicationStatusTool.WRITE_TIME_ROW_TYPE, env.getConfiguration(), htableInterface);
}
Also used: HTableInterface (org.apache.hadoop.hbase.client.HTableInterface)

Example 59 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project cdap by caskdata.

The class TopicMetadataCache, method updateCache.

/**
   * Called from unit tests; since the refresh thread might invoke a cache update at the same time, this method is
   * synchronized. Outside of unit tests, synchronization is not required.
   */
@VisibleForTesting
public synchronized void updateCache() throws IOException {
    HTableInterface metadataTable = null;
    long now = System.currentTimeMillis();
    long topicCount = 0;
    try {
        CConfiguration cConf = cConfReader.read();
        if (cConf != null) {
            this.cConf = cConf;
            int metadataScanSize = cConf.getInt(Constants.MessagingSystem.HBASE_SCAN_CACHE_ROWS);
            metadataCacheUpdateFreqInMillis = TimeUnit.SECONDS.toMillis(cConf.getLong(Constants.MessagingSystem.COPROCESSOR_METADATA_CACHE_UPDATE_FREQUENCY_SECONDS, MessagingUtils.Constants.METADATA_CACHE_UPDATE_FREQUENCY_SECS));
            String tableName = cConf.get(Constants.MessagingSystem.METADATA_TABLE_NAME);
            metadataTable = getMetadataTable(tableName);
            if (metadataTable == null) {
                LOG.warn(String.format("Could not find HTableInterface of metadataTable %s:%s. Cannot update metadata cache", hbaseNamespacePrefix, tableName));
                return;
            }
            Map<ByteBuffer, Map<String, String>> newTopicCache = new HashMap<>();
            Scan scan = scanBuilder.setCaching(metadataScanSize).build();
            try (ResultScanner scanner = metadataTable.getScanner(scan)) {
                for (Result result : scanner) {
                    ByteBuffer topicId = ByteBuffer.wrap(result.getRow());
                    byte[] value = result.getValue(COL_FAMILY, COL);
                    Map<String, String> properties = GSON.fromJson(Bytes.toString(value), MAP_TYPE);
                    String ttl = properties.get(MessagingUtils.Constants.TTL_KEY);
                    long ttlInMillis = TimeUnit.SECONDS.toMillis(Long.parseLong(ttl));
                    properties.put(MessagingUtils.Constants.TTL_KEY, Long.toString(ttlInMillis));
                    newTopicCache.put(topicId, properties);
                    topicCount++;
                }
            }
            long elapsed = System.currentTimeMillis() - now;
            this.metadataCache = newTopicCache;
            this.lastUpdated = now;
            LOG.debug(String.format("Updated consumer config cache with %d topics, took %d msec", topicCount, elapsed));
        }
    } finally {
        if (metadataTable != null) {
            try {
                metadataTable.close();
            } catch (IOException ex) {
                LOG.error("Error closing table. ", ex);
            }
        }
    }
}
Also used: ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), HashMap (java.util.HashMap), IOException (java.io.IOException), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), CConfiguration (co.cask.cdap.common.conf.CConfiguration), ByteBuffer (java.nio.ByteBuffer), Result (org.apache.hadoop.hbase.client.Result), Scan (org.apache.hadoop.hbase.client.Scan), Map (java.util.Map), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
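
The javadoc above mentions a refresh thread that invokes updateCache() periodically. A minimal, hypothetical sketch of such wiring with a standard ScheduledExecutorService is shown below; the field and method names are assumptions for illustration, not part of the CDAP class, and java.io.IOException plus java.util.concurrent.{Executors, ScheduledExecutorService, TimeUnit} are assumed to be imported.

// Hypothetical refresh wiring: run updateCache() at a fixed delay and log failures
// instead of letting an exception cancel the scheduled task.
private final ScheduledExecutorService refreshExecutor =
        Executors.newSingleThreadScheduledExecutor();

public void startRefreshThread(long refreshFrequencyMillis) {
    refreshExecutor.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            try {
                updateCache();
            } catch (IOException e) {
                LOG.error("Failed to update topic metadata cache", e);
            }
        }
    }, refreshFrequencyMillis, refreshFrequencyMillis, TimeUnit.MILLISECONDS);
}

public void stopRefreshThread() {
    refreshExecutor.shutdownNow();
}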

Example 60 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project bagheera by mozilla-metrics.

The class FlushResult, method flush.

public void flush() throws IOException {
    IOException lastException = null;
    this.currentTimeMillis = System.currentTimeMillis();
    int i;
    for (i = 0; i < getRetryCount(); i++) {
        HTableInterface table = hbasePool.getTable(tableName);
        try {
            table.setAutoFlush(false);
            final TimerContext flushTimerContext = flushTimer.time();
            try {
                List<Row> rows = new ArrayList<Row>(batchSize);
                while (!rowQueue.isEmpty() && rows.size() < batchSize) {
                    Row row = rowQueue.poll();
                    if (row != null) {
                        rows.add(row);
                        rowQueueSize.decrementAndGet();
                    }
                }
                try {
                    FlushResult result = flushTable(table, rows);
                    stored.mark(result.successfulPutCount);
                    storeFailed.mark(result.failedPutCount);
                    deleted.mark(result.successfulDeleteCount);
                    deleteFailed.mark(result.failedDeleteCount);
                } catch (InterruptedException e) {
                    LOG.error("Error flushing batch of " + batchSize + " messages", e);
                }
            } finally {
                flushTimerContext.stop();
                if (table != null) {
                    table.close();
                }
            }
            break;
        } catch (IOException e) {
            LOG.warn(String.format("Error in flush attempt %d of %d, clearing Region cache", (i + 1), getRetryCount()), e);
            lastException = e;
            // connection.clearRegionCache();
            try {
                Thread.sleep(getRetrySleepSeconds() * 1000);
            } catch (InterruptedException e1) {
                // wake up
                LOG.info("woke up by interruption", e1);
            }
        }
    }
    if (i >= getRetryCount() && lastException != null) {
        LOG.error("Error in final flush attempt, giving up.");
        throw lastException;
    }
    LOG.debug("Flush finished");
}
Also used: TimerContext (com.yammer.metrics.core.TimerContext), ArrayList (java.util.ArrayList), IOException (java.io.IOException), Row (org.apache.hadoop.hbase.client.Row), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface)
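
table.setAutoFlush(false) buffers writes client-side; since HBase 1.0 that buffering is provided by BufferedMutator rather than by the table handle itself. A minimal sketch of flushing one batch that way is shown below, assuming the queued rows are Puts or Deletes (Mutations); the connection, table name, and batch are supplied by the caller, and the class is illustrative only.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Mutation;

public final class BufferedFlushSketch {

    // Writes one batch through a BufferedMutator, which buffers mutations
    // client-side and sends them to the cluster on flush() or close().
    public static void flushBatch(Connection connection, TableName tableName,
                                  List<Mutation> batch) throws IOException {
        try (BufferedMutator mutator = connection.getBufferedMutator(tableName)) {
            mutator.mutate(batch);
            mutator.flush();
        }
    }
}

The retry loop, metrics, and queue draining from the example above would wrap a call like this in the same way they currently wrap flushTable(table, rows).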

Aggregations

HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 122
Result (org.apache.hadoop.hbase.client.Result): 43
Put (org.apache.hadoop.hbase.client.Put): 42
IOException (java.io.IOException): 38
ArrayList (java.util.ArrayList): 27
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 23
Get (org.apache.hadoop.hbase.client.Get): 21
Scan (org.apache.hadoop.hbase.client.Scan): 21
Test (org.junit.Test): 20
SQLException (java.sql.SQLException): 19
HashMap (java.util.HashMap): 17
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 17
Connection (java.sql.Connection): 15
Delete (org.apache.hadoop.hbase.client.Delete): 12
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 12
Mutation (org.apache.hadoop.hbase.client.Mutation): 12
PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException): 11
ResultSet (java.sql.ResultSet): 10
Map (java.util.Map): 9
Configuration (org.apache.hadoop.conf.Configuration): 9