Example 36 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From the class MetaDataEndpointImpl, method doGetTable:

private PTable doGetTable(byte[] key, long clientTimeStamp, RowLock rowLock) throws IOException, SQLException {
    ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
    Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    // Ask Lars about the expense of this call - if we don't take the lock, we still won't get
    // partial results
    // get the co-processor environment
    // TODO: check that key is within region.getStartKey() and region.getEndKey()
    // and return special code to force client to lookup region from meta.
    Region region = env.getRegion();
    /*
         * Lock directly on key, though it may be an index table. This will just prevent a table
         * from getting rebuilt too often.
         */
    final boolean wasLocked = (rowLock != null);
    boolean blockWriteRebuildIndex = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_BLOCK_WRITE, QueryServicesOptions.DEFAULT_INDEX_FAILURE_BLOCK_WRITE);
    if (!wasLocked) {
        rowLock = region.getRowLock(key, false);
        if (rowLock == null) {
            throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
        }
    }
    try {
        PTable table = (PTable) metaDataCache.getIfPresent(cacheKey);
        // can safely not call this, since we only allow modifications to the latest.
        if (table != null && table.getTimeStamp() < clientTimeStamp) {
            // Table on client is up-to-date with table on server, so just return
            if (isTableDeleted(table)) {
                return null;
            }
            return table;
        }
        // Try cache again in case we were waiting on a lock
        table = (PTable) metaDataCache.getIfPresent(cacheKey);
        // can safely not call this, since we only allow modifications to the latest.
        if (table != null && table.getTimeStamp() < clientTimeStamp) {
            // Table on client is up-to-date with table on server, so just return
            if (isTableDeleted(table)) {
                return null;
            }
            return table;
        }
        // Query for the latest table first, since it's not cached
        table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP);
    if ((table != null && table.getTimeStamp() < clientTimeStamp) || (blockWriteRebuildIndex && table != null && table.getIndexDisableTimestamp() > 0)) {
            return table;
        }
        // Otherwise, query for an older version of the table - it won't be cached
        return buildTable(key, cacheKey, region, clientTimeStamp);
    } finally {
        if (!wasLocked)
            rowLock.release();
    }
}
Also used: PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), Region (org.apache.hadoop.hbase.regionserver.Region), IOException (java.io.IOException), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), PTable (org.apache.phoenix.schema.PTable)
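
This pattern, like most of the examples that follow, relies on ImmutableBytesPtr having value semantics: hashCode() and equals() are computed over the wrapped byte range, so a freshly constructed pointer around equal bytes resolves to the same cache or map entry. A minimal standalone sketch of that property (the key literal and class name are made up for illustration; Phoenix builds real metadata keys with SchemaUtil.getTableKey(...), as in the next example):

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class ImmutableBytesPtrKeyDemo {
    public static void main(String[] args) {
        Map<ImmutableBytesPtr, String> cache = new HashMap<>();

        // arbitrary illustrative key bytes; not a real Phoenix row key
        byte[] key = Bytes.toBytes("TENANT1.MY_TABLE");
        cache.put(new ImmutableBytesPtr(key), "cached value stand-in");

        // a new wrapper around equal bytes hits the same entry
        String hit = cache.get(new ImmutableBytesPtr(Bytes.toBytes("TENANT1.MY_TABLE")));
        System.out.println(hit); // -> cached value stand-in
    }
}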

Example 37 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From the class MetaDataEndpointImpl, method findChildViews:

private TableViewFinder findChildViews(Region region, byte[] tenantId, PTable table) throws IOException, SQLException {
    byte[] tableKey = SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA_BYTES, PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE_BYTES);
    ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(tableKey);
    PTable systemCatalog = loadTable(env, tableKey, cacheKey, MIN_SYSTEM_TABLE_TIMESTAMP, HConstants.LATEST_TIMESTAMP);
    if (systemCatalog.getTimeStamp() < MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0) {
        return findChildViews_deprecated(region, tenantId, table, PHYSICAL_TABLE_BYTES);
    } else {
        return findChildViews_4_11(region, tenantId, table.getSchemaName().getBytes(), table.getTableName().getBytes());
    }
}
Also used: ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), PTable (org.apache.phoenix.schema.PTable)

Example 38 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From the class Indexer, method preBatchMutateWithExceptions:

public void preBatchMutateWithExceptions(ObserverContext<RegionCoprocessorEnvironment> c, MiniBatchOperationInProgress<Mutation> miniBatchOp) throws Throwable {
    // first group all the updates for a single row into a single update to be processed
    Map<ImmutableBytesPtr, MultiMutation> mutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    Durability defaultDurability = Durability.SYNC_WAL;
    if (c.getEnvironment().getRegion() != null) {
        defaultDurability = c.getEnvironment().getRegion().getTableDesc().getDurability();
        defaultDurability = (defaultDurability == Durability.USE_DEFAULT) ? Durability.SYNC_WAL : defaultDurability;
    }
    Durability durability = Durability.SKIP_WAL;
    for (int i = 0; i < miniBatchOp.size(); i++) {
        Mutation m = miniBatchOp.getOperation(i);
        if (this.builder.isAtomicOp(m)) {
            miniBatchOp.setOperationStatus(i, SUCCESS);
            continue;
        }
        // skip this mutation entirely if indexing isn't enabled for it
        if (this.builder.isEnabled(m)) {
            Durability effectiveDurability = (m.getDurability() == Durability.USE_DEFAULT) ? defaultDurability : m.getDurability();
            if (effectiveDurability.ordinal() > durability.ordinal()) {
                durability = effectiveDurability;
            }
            // add the mutation to the batch set
            ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
            MultiMutation stored = mutations.get(row);
            // we haven't seen this row before, so add it
            if (stored == null) {
                stored = new MultiMutation(row);
                mutations.put(row, stored);
            }
            stored.addAll(m);
        }
    }
    // early exit if it turns out we don't have any edits
    if (mutations.isEmpty()) {
        return;
    }
    // dump all the index updates into a single WAL. They will get combined in the end anyways, so
    // don't worry which one we get
    WALEdit edit = miniBatchOp.getWalEdit(0);
    if (edit == null) {
        edit = new WALEdit();
        miniBatchOp.setWalEdit(0, edit);
    }
    // get the current span, or just use a null-span to avoid a bunch of if statements
    try (TraceScope scope = Trace.startSpan("Starting to build index updates")) {
        Span current = scope.getSpan();
        if (current == null) {
            current = NullSpan.INSTANCE;
        }
        // get the index updates for all elements in this batch
        Collection<Pair<Mutation, byte[]>> indexUpdates = this.builder.getIndexUpdate(miniBatchOp, mutations.values());
        current.addTimelineAnnotation("Built index updates, doing preStep");
        TracingUtils.addAnnotation(current, "index update count", indexUpdates.size());
        byte[] tableName = c.getEnvironment().getRegion().getTableDesc().getTableName().getName();
        Iterator<Pair<Mutation, byte[]>> indexUpdatesItr = indexUpdates.iterator();
        List<Mutation> localUpdates = new ArrayList<Mutation>(indexUpdates.size());
        while (indexUpdatesItr.hasNext()) {
            Pair<Mutation, byte[]> next = indexUpdatesItr.next();
            if (Bytes.compareTo(next.getSecond(), tableName) == 0) {
                localUpdates.add(next.getFirst());
                indexUpdatesItr.remove();
            }
        }
        if (!localUpdates.isEmpty()) {
            miniBatchOp.addOperationsFromCP(0, localUpdates.toArray(new Mutation[localUpdates.size()]));
        }
        // write them, either to WAL or the index tables
        doPre(indexUpdates, edit, durability);
    }
}
Also used: HashMap (java.util.HashMap), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), TraceScope (org.apache.htrace.TraceScope), ArrayList (java.util.ArrayList), Durability (org.apache.hadoop.hbase.client.Durability), Span (org.apache.htrace.Span), NullSpan (org.apache.phoenix.trace.util.NullSpan), WALEdit (org.apache.hadoop.hbase.regionserver.wal.WALEdit), Mutation (org.apache.hadoop.hbase.client.Mutation), Pair (org.apache.hadoop.hbase.util.Pair)
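
A detail worth calling out in preBatchMutateWithExceptions is how the batch's durability is chosen: it starts at SKIP_WAL and is escalated to the strongest level requested by any index-enabled mutation, with USE_DEFAULT resolved against the table's default. A hedged standalone sketch of just that rule (the class name, the assumed table default, and the sample Puts are illustrative, not from the Phoenix source):

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class DurabilityEscalationSketch {
    public static void main(String[] args) {
        Durability defaultDurability = Durability.SYNC_WAL; // assumed table default
        Durability durability = Durability.SKIP_WAL;        // weakest starting point

        Put p1 = new Put(Bytes.toBytes("r1"));
        p1.setDurability(Durability.ASYNC_WAL);
        Put p2 = new Put(Bytes.toBytes("r2"));
        p2.setDurability(Durability.USE_DEFAULT);

        for (Mutation m : new Mutation[] { p1, p2 }) {
            Durability effective = (m.getDurability() == Durability.USE_DEFAULT)
                    ? defaultDurability : m.getDurability();
            // keep the strongest durability seen so far (the enum ordinal encodes strength)
            if (effective.ordinal() > durability.ordinal()) {
                durability = effective;
            }
        }
        System.out.println(durability); // -> SYNC_WAL
    }
}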

Example 39 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From the class ColumnProjectionFilter, method readFields:

@Override
public void readFields(DataInput input) throws IOException {
    this.emptyCFName = WritableUtils.readCompressedByteArray(input);
    int familyMapSize = WritableUtils.readVInt(input);
    assert familyMapSize > 0;
    columnsTracker = new TreeMap<ImmutableBytesPtr, NavigableSet<ImmutableBytesPtr>>();
    while (familyMapSize > 0) {
        byte[] cf = WritableUtils.readCompressedByteArray(input);
        int qualifiersSize = WritableUtils.readVInt(input);
        NavigableSet<ImmutableBytesPtr> qualifiers = null;
        if (qualifiersSize > 0) {
            qualifiers = new TreeSet<ImmutableBytesPtr>();
            while (qualifiersSize > 0) {
                qualifiers.add(new ImmutableBytesPtr(WritableUtils.readCompressedByteArray(input)));
                qualifiersSize--;
            }
        }
        columnsTracker.put(new ImmutableBytesPtr(cf), qualifiers);
        familyMapSize--;
    }
    int conditionOnlyCfsSize = WritableUtils.readVInt(input);
    // the usesEncodedColumnNames flag is packed into the sign of the vint written by write()
    usesEncodedColumnNames = conditionOnlyCfsSize > 0;
    emptyKVQualifier = EncodedColumnsUtil.getEmptyKeyValueInfo(usesEncodedColumnNames).getFirst();
    // drop the sign and the +1 offset to restore the actual size
    conditionOnlyCfsSize = Math.abs(conditionOnlyCfsSize) - 1;
    this.conditionOnlyCfs = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    while (conditionOnlyCfsSize > 0) {
        this.conditionOnlyCfs.add(WritableUtils.readCompressedByteArray(input));
        conditionOnlyCfsSize--;
    }
}
Also used: NavigableSet (java.util.NavigableSet), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr)

Example 40 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From the class ColumnProjectionFilter, method write:

@Override
public void write(DataOutput output) throws IOException {
    WritableUtils.writeCompressedByteArray(output, this.emptyCFName);
    WritableUtils.writeVInt(output, this.columnsTracker.size());
    for (Entry<ImmutableBytesPtr, NavigableSet<ImmutableBytesPtr>> entry : this.columnsTracker.entrySet()) {
        // write family name
        WritableUtils.writeCompressedByteArray(output, entry.getKey().copyBytes());
        int qualsSize = entry.getValue() == null ? 0 : entry.getValue().size();
        WritableUtils.writeVInt(output, qualsSize);
        if (qualsSize > 0) {
            for (ImmutableBytesPtr cq : entry.getValue()) {
                // write qualifier name
                WritableUtils.writeCompressedByteArray(output, cq.copyBytes());
            }
        }
    }
    // pack usesEncodedColumnNames into the sign of the conditionOnlyCfs size vint (readFields() above undoes this)
    WritableUtils.writeVInt(output, (this.conditionOnlyCfs.size() + 1) * (usesEncodedColumnNames ? 1 : -1));
    for (byte[] f : this.conditionOnlyCfs) {
        WritableUtils.writeCompressedByteArray(output, f);
    }
}
Also used: NavigableSet (java.util.NavigableSet), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr)
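
The last two examples are a matched serialization pair, and the non-obvious part is that usesEncodedColumnNames never gets its own field on the wire: write() multiplies (conditionOnlyCfs.size() + 1) by +1 or -1 depending on the flag, and readFields() recovers the flag from the sign and the size from Math.abs(value) - 1. A hedged standalone sketch of that encoding (SignFlagCodec and its methods are illustrative names, not Phoenix APIs):

public class SignFlagCodec {

    // pack a boolean flag into the sign of a size; the +1 offset keeps the
    // magnitude non-zero so the sign stays meaningful even when size == 0
    static int encode(int size, boolean flag) {
        return (size + 1) * (flag ? 1 : -1);
    }

    static boolean decodeFlag(int encoded) {
        return encoded > 0;
    }

    static int decodeSize(int encoded) {
        return Math.abs(encoded) - 1;
    }

    public static void main(String[] args) {
        int wire = encode(0, false);            // -> -1
        System.out.println(decodeFlag(wire));   // false
        System.out.println(decodeSize(wire));   // 0
    }
}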

Aggregations

ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 120 usages
Mutation (org.apache.hadoop.hbase.client.Mutation): 31 usages
PTable (org.apache.phoenix.schema.PTable): 28 usages
ArrayList (java.util.ArrayList): 27 usages
Region (org.apache.hadoop.hbase.regionserver.Region): 22 usages
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity): 22 usages
Test (org.junit.Test): 21 usages
Cell (org.apache.hadoop.hbase.Cell): 20 usages
Put (org.apache.hadoop.hbase.client.Put): 18 usages
List (java.util.List): 15 usages
Scan (org.apache.hadoop.hbase.client.Scan): 15 usages
Pair (org.apache.hadoop.hbase.util.Pair): 15 usages
IOException (java.io.IOException): 14 usages
Expression (org.apache.phoenix.expression.Expression): 14 usages
PColumn (org.apache.phoenix.schema.PColumn): 14 usages
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 13 usages
PSmallint (org.apache.phoenix.schema.types.PSmallint): 12 usages
HashMap (java.util.HashMap): 11 usages
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 11 usages
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 11 usages