
Example 56 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

From class TestIndexWriter, method testShutdownInterruptsAsExpected.

/**
   * Test that if the thread is interrupted while doing a batch (e.g. via shutdown),
   * we correctly end the task
   * @throws Exception on failure
   */
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testShutdownInterruptsAsExpected() throws Exception {
    Stoppable stop = Mockito.mock(Stoppable.class);
    Abortable abort = new StubAbortable();
    // single thread factory so the older request gets queued
    ExecutorService exec = Executors.newFixedThreadPool(1);
    Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
    RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration();
    Mockito.when(e.getConfiguration()).thenReturn(conf);
    Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
    FakeTableFactory factory = new FakeTableFactory(tables);
    byte[] tableName = this.testName.getTableName();
    HTableInterface table = Mockito.mock(HTableInterface.class);
    Mockito.when(table.getTableName()).thenReturn(tableName);
    final CountDownLatch writeStartedLatch = new CountDownLatch(1);
    // latch never gets counted down, so we wait forever
    final CountDownLatch waitOnAbortedLatch = new CountDownLatch(1);
    Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            LOG.info("Write started");
            writeStartedLatch.countDown();
            // when we interrupt the thread for shutdown, we should see this throw an interrupt too
            try {
                waitOnAbortedLatch.await();
            } catch (InterruptedException e) {
                LOG.info("Correctly interrupted while writing!");
                throw e;
            }
            return null;
        }
    });
    // add the table to the set of tables so it's returned to the writer
    tables.put(new ImmutableBytesPtr(tableName), table);
    // update a single table
    Put m = new Put(row);
    m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
    final List<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>();
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName));
    // setup the writer
    ParallelWriterIndexCommitter committer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
    committer.setup(factory, exec, abort, stop, 2, e);
    KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy();
    policy.setup(stop, abort);
    final IndexWriter writer = new IndexWriter(committer, policy);
    final boolean[] failedWrite = new boolean[] { false };
    Thread primaryWriter = new Thread() {

        @Override
        public void run() {
            try {
                writer.write(indexUpdates);
            } catch (IndexWriteException e) {
                failedWrite[0] = true;
            }
        }
    };
    primaryWriter.start();
    // wait for the write to start before intentionally shutting down the pool
    writeStartedLatch.await();
    writer.stop("Shutting down writer for test " + this.testName.getTableNameString());
    primaryWriter.join();
    assertTrue("Writer should have failed because of the stop we issued", failedWrite[0]);
}
Also used: StubAbortable (org.apache.phoenix.hbase.index.StubAbortable), Configuration (org.apache.hadoop.conf.Configuration), HashMap (java.util.HashMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), ArrayList (java.util.ArrayList), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment), Abortable (org.apache.hadoop.hbase.Abortable), Pair (org.apache.hadoop.hbase.util.Pair), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), Stoppable (org.apache.hadoop.hbase.Stoppable), CountDownLatch (java.util.concurrent.CountDownLatch), Put (org.apache.hadoop.hbase.client.Put), IndexWriteException (org.apache.phoenix.hbase.index.exception.IndexWriteException), InvocationOnMock (org.mockito.invocation.InvocationOnMock), ExecutorService (java.util.concurrent.ExecutorService), Mutation (org.apache.hadoop.hbase.client.Mutation), Test (org.junit.Test)
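
The mechanism this test exercises is plain java.util.concurrent behaviour: a task blocked inside a single-thread pool receives an interrupt when the pool is shut down, and the caller observes that the work ended. A minimal, self-contained sketch of that pattern (JDK only, no HBase or Phoenix types; class and variable names are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ShutdownInterruptSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService exec = Executors.newFixedThreadPool(1);
        CountDownLatch taskStarted = new CountDownLatch(1);
        Future<?> pending = exec.submit(() -> {
            taskStarted.countDown();
            try {
                // stands in for the mocked table.batch() that blocks forever
                new CountDownLatch(1).await();
            } catch (InterruptedException e) {
                // this is the interrupt the IndexWriter test expects to see
                System.out.println("Correctly interrupted while writing!");
                Thread.currentThread().interrupt();
            }
        });
        taskStarted.await();  // mirrors writeStartedLatch.await()
        exec.shutdownNow();   // mirrors writer.stop(...): interrupts the running task
        pending.get();        // completes once the interrupted task returns
        System.out.println("Writer task ended after shutdown, as the test asserts");
    }
}

In the test above, writer.stop(...) plays the role of shutdownNow(), and the assertion on failedWrite[0] corresponds to observing that the interrupted write surfaced as an IndexWriteException.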

Example 57 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

From class TestIndexUpdateManager, method testMutationComparator.

@Test
public void testMutationComparator() throws Exception {
    IndexUpdateManager manager = new IndexUpdateManager(mockIndexMetaData);
    Comparator<Mutation> comparator = manager.COMPARATOR;
    Put p = new Put(row, 10);
    // lexicographically earlier should sort earlier
    Put p1 = new Put(Bytes.toBytes("ro"), 10);
    assertTrue("lexicographically later sorting first, should be earlier first.", comparator.compare(p, p1) > 0);
    p1 = new Put(Bytes.toBytes("row1"), 10);
    assertTrue("lexicographically later sorting first, should be earlier first.", comparator.compare(p1, p) > 0);
    // larger ts sorts before smaller, for the same row
    p1 = new Put(row, 11);
    assertTrue("Smaller timestamp sorting first, should be larger first.", comparator.compare(p, p1) > 0);
    // still true, even for deletes
    Delete d = new Delete(row, 11);
    assertTrue("Smaller timestamp sorting first, should be larger first.", comparator.compare(p, d) > 0);
    // for the same row and ts, the delete should sort earlier
    d = new Delete(row, 10);
    assertTrue("Delete doesn't sort before put, for the same row and ts", comparator.compare(p, d) > 0);
    // but for different rows, we still respect the row sorting.
    d = new Delete(Bytes.toBytes("row1"), 10);
    assertTrue("Delete doesn't sort before put, for the same row and ts", comparator.compare(p, d) < 0);
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete), IndexUpdateManager (org.apache.phoenix.hbase.index.covered.update.IndexUpdateManager), Mutation (org.apache.hadoop.hbase.client.Mutation), Put (org.apache.hadoop.hbase.client.Put), Test (org.junit.Test)
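
The assertions above pin down three rules: rows sort in ascending byte order, newer timestamps sort before older ones for the same row, and a Delete sorts before a Put at the same row and timestamp. A minimal comparator sketch that satisfies exactly those assertions, assuming the pre-2.0 Mutation.getTimeStamp() accessor; this is only an illustration of the contract being tested, not the actual IndexUpdateManager.COMPARATOR:

import java.util.Comparator;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.util.Bytes;

public class MutationOrderSketch {
    // Illustration only: orders mutations the way the assertions above expect.
    public static final Comparator<Mutation> SKETCH = new Comparator<Mutation>() {
        @Override
        public int compare(Mutation m1, Mutation m2) {
            // 1) lexicographically earlier rows sort first
            int cmp = Bytes.compareTo(m1.getRow(), m2.getRow());
            if (cmp != 0) {
                return cmp;
            }
            // 2) for the same row, larger timestamps sort first
            cmp = Long.compare(m2.getTimeStamp(), m1.getTimeStamp());
            if (cmp != 0) {
                return cmp;
            }
            // 3) for the same row and timestamp, deletes sort before puts
            boolean d1 = m1 instanceof Delete;
            boolean d2 = m2 instanceof Delete;
            return d1 == d2 ? 0 : (d1 ? -1 : 1);
        }
    };
}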

Example 58 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

From class TestIndexUpdateManager, method validate.

private void validate(IndexUpdateManager manager, List<Mutation> pending) {
    for (Pair<Mutation, byte[]> entry : manager.toMap()) {
        assertEquals("Table name didn't match for stored entry!", table, entry.getSecond());
        Mutation m = pending.remove(0);
        // test with == to match the exact entries, Mutation.equals just checks the row
        assertTrue("Didn't get the expected mutation! Expected: " + m + ", but got: " + entry.getFirst(), m == entry.getFirst());
    }
    assertTrue("Missing pending updates: " + pending, pending.isEmpty());
}
Also used: Mutation (org.apache.hadoop.hbase.client.Mutation)
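
The identity check (==) matters because, as the comment in validate() notes, Mutation.equals in this HBase version only compares row keys. A small illustration of that point; the family, qualifiers, and values are made up, and the printed result assumes the row-based equals described above:

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class RowOnlyEqualsSketch {
    public static void main(String[] args) {
        Put a = new Put(Bytes.toBytes("row1"));
        a.add(Bytes.toBytes("fam"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
        Put b = new Put(Bytes.toBytes("row1"));
        b.add(Bytes.toBytes("fam"), Bytes.toBytes("q2"), Bytes.toBytes("v2"));
        // With row-based equals (as the comment in validate() states), this prints "true/false":
        // equal by row, but not the same instance, which is why validate() compares with ==.
        System.out.println(a.equals(b) + "/" + (a == b));
    }
}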

Example 59 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

From class MutationTest, method assertDurability.

private void assertDurability(Connection conn, Durability durability) throws SQLException {
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    Iterator<Pair<byte[], List<Mutation>>> it = pconn.getMutationState().toMutations();
    assertTrue(it.hasNext());
    while (it.hasNext()) {
        Pair<byte[], List<Mutation>> pair = it.next();
        assertFalse(pair.getSecond().isEmpty());
        for (Mutation m : pair.getSecond()) {
            assertEquals(durability, m.getDurability());
        }
    }
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), List (java.util.List), Mutation (org.apache.hadoop.hbase.client.Mutation), Pair (org.apache.hadoop.hbase.util.Pair)
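
A hedged sketch of how such a check might be driven from a test: keep auto-commit off so the UPSERT stays pending in the MutationState, then assert before committing. The JDBC URL, table name, and expected Durability value below are illustrative assumptions, not taken from MutationTest, and assertDurability(...) is assumed to be in scope alongside this snippet:

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.hadoop.hbase.client.Durability;

// Inside a test alongside assertDurability(...); URL, table, and expected durability are assumptions.
try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
    conn.setAutoCommit(false); // keep the mutations pending so toMutations() has entries to check
    conn.createStatement().executeUpdate("UPSERT INTO MY_TABLE (ID, VAL) VALUES (1, 'a')");
    assertDurability(conn, Durability.USE_DEFAULT); // expected value depends on the table's DDL
    conn.rollback();
}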

Example 60 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

From class SequenceRegionObserver, method preIncrement.

/**
     * Use PreIncrement hook of BaseRegionObserver to overcome deficiencies in Increment
     * implementation (HBASE-10254):
     * 1) Lack of recognition and identification of when the key value to increment doesn't exist
     * 2) Lack of the ability to set the timestamp of the updated key value.
     * Works the same as existing region.increment(), except assumes there is a single column to
     * increment and uses Phoenix LONG encoding.
     * 
     * @since 3.0.0
     */
@Override
public Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> e, final Increment increment) throws IOException {
    RegionCoprocessorEnvironment env = e.getEnvironment();
    // We need to set this to prevent region.increment from being called
    e.bypass();
    e.complete();
    Region region = env.getRegion();
    byte[] row = increment.getRow();
    List<RowLock> locks = Lists.newArrayList();
    TimeRange tr = increment.getTimeRange();
    region.startRegionOperation();
    try {
        acquireLock(region, row, locks);
        try {
            long maxTimestamp = tr.getMax();
            boolean validateOnly = true;
            Get get = new Get(row);
            get.setTimeRange(tr.getMin(), tr.getMax());
            for (Map.Entry<byte[], List<Cell>> entry : increment.getFamilyCellMap().entrySet()) {
                byte[] cf = entry.getKey();
                for (Cell cq : entry.getValue()) {
                    long value = Bytes.toLong(cq.getValueArray(), cq.getValueOffset());
                    get.addColumn(cf, CellUtil.cloneQualifier(cq));
                    long cellTimestamp = cq.getTimestamp();
                    // If a timestamp was explicitly set on the Increment or any of its Cells,
                    // use it to cap the time range of the Get.
                    if (cellTimestamp > 0 && cellTimestamp < maxTimestamp) {
                        maxTimestamp = cellTimestamp;
                        get.setTimeRange(MetaDataProtocol.MIN_TABLE_TIMESTAMP, maxTimestamp);
                    }
                    validateOnly &= (Sequence.ValueOp.VALIDATE_SEQUENCE.ordinal() == value);
                }
            }
            Result result = region.get(get);
            if (result.isEmpty()) {
                return getErrorResult(row, maxTimestamp, SQLExceptionCode.SEQUENCE_UNDEFINED.getErrorCode());
            }
            KeyValue currentValueKV = Sequence.getCurrentValueKV(result);
            KeyValue incrementByKV = Sequence.getIncrementByKV(result);
            KeyValue cacheSizeKV = Sequence.getCacheSizeKV(result);
            long currentValue = PLong.INSTANCE.getCodec().decodeLong(currentValueKV.getValueArray(), currentValueKV.getValueOffset(), SortOrder.getDefault());
            long incrementBy = PLong.INSTANCE.getCodec().decodeLong(incrementByKV.getValueArray(), incrementByKV.getValueOffset(), SortOrder.getDefault());
            long cacheSize = PLong.INSTANCE.getCodec().decodeLong(cacheSizeKV.getValueArray(), cacheSizeKV.getValueOffset(), SortOrder.getDefault());
            // Hold timestamp constant for sequences, so that clients always only see the latest
            // value regardless of when they connect.
            long timestamp = currentValueKV.getTimestamp();
            Put put = new Put(row, timestamp);
            int numIncrementKVs = increment.getFamilyCellMap().get(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES).size();
            // creates the list of KeyValues used for the Result that will be returned
            List<Cell> cells = Sequence.getCells(result, numIncrementKVs);
            // if the client is 3.0/4.0, preserve the old behavior (older clients won't have the newer columns present in the increment)
            if (numIncrementKVs != Sequence.NUM_SEQUENCE_KEY_VALUES) {
                currentValue += incrementBy * cacheSize;
                // Hold timestamp constant for sequences, so that clients always only see the latest value
                // regardless of when they connect.
                KeyValue newCurrentValueKV = createKeyValue(row, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, currentValue, timestamp);
                put.add(newCurrentValueKV);
                Sequence.replaceCurrentValueKV(cells, newCurrentValueKV);
            } else {
                KeyValue cycleKV = Sequence.getCycleKV(result);
                KeyValue limitReachedKV = Sequence.getLimitReachedKV(result);
                KeyValue minValueKV = Sequence.getMinValueKV(result);
                KeyValue maxValueKV = Sequence.getMaxValueKV(result);
                boolean increasingSeq = incrementBy > 0;
                // If minValue, maxValue, cycle and limitReached are null, this sequence was upgraded from
                // a lower version. Set them to Long.MIN_VALUE, Long.MAX_VALUE, false and false respectively,
                // to maintain existing behavior and also update the KeyValues on the server.
                boolean limitReached;
                if (limitReachedKV == null) {
                    limitReached = false;
                    KeyValue newLimitReachedKV = createKeyValue(row, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, limitReached, timestamp);
                    put.add(newLimitReachedKV);
                    Sequence.replaceLimitReachedKV(cells, newLimitReachedKV);
                } else {
                    limitReached = (Boolean) PBoolean.INSTANCE.toObject(limitReachedKV.getValueArray(), limitReachedKV.getValueOffset(), limitReachedKV.getValueLength());
                }
                long minValue;
                if (minValueKV == null) {
                    minValue = Long.MIN_VALUE;
                    KeyValue newMinValueKV = createKeyValue(row, PhoenixDatabaseMetaData.MIN_VALUE_BYTES, minValue, timestamp);
                    put.add(newMinValueKV);
                    Sequence.replaceMinValueKV(cells, newMinValueKV);
                } else {
                    minValue = PLong.INSTANCE.getCodec().decodeLong(minValueKV.getValueArray(), minValueKV.getValueOffset(), SortOrder.getDefault());
                }
                long maxValue;
                if (maxValueKV == null) {
                    maxValue = Long.MAX_VALUE;
                    KeyValue newMaxValueKV = createKeyValue(row, PhoenixDatabaseMetaData.MAX_VALUE_BYTES, maxValue, timestamp);
                    put.add(newMaxValueKV);
                    Sequence.replaceMaxValueKV(cells, newMaxValueKV);
                } else {
                    maxValue = PLong.INSTANCE.getCodec().decodeLong(maxValueKV.getValueArray(), maxValueKV.getValueOffset(), SortOrder.getDefault());
                }
                boolean cycle;
                if (cycleKV == null) {
                    cycle = false;
                    KeyValue newCycleKV = createKeyValue(row, PhoenixDatabaseMetaData.CYCLE_FLAG_BYTES, cycle, timestamp);
                    put.add(newCycleKV);
                    Sequence.replaceCycleValueKV(cells, newCycleKV);
                } else {
                    cycle = (Boolean) PBoolean.INSTANCE.toObject(cycleKV.getValueArray(), cycleKV.getValueOffset(), cycleKV.getValueLength());
                }
                long numSlotsToAllocate = calculateNumSlotsToAllocate(increment);
                // We don't support Bulk Allocations on sequences that have the CYCLE flag set to true
                if (cycle && !SequenceUtil.isCycleAllowed(numSlotsToAllocate)) {
                    return getErrorResult(row, maxTimestamp, SQLExceptionCode.NUM_SEQ_TO_ALLOCATE_NOT_SUPPORTED.getErrorCode());
                }
                // Bulk Allocations are expressed by NEXT <n> VALUES FOR
                if (SequenceUtil.isBulkAllocation(numSlotsToAllocate)) {
                    if (SequenceUtil.checkIfLimitReached(currentValue, minValue, maxValue, incrementBy, cacheSize, numSlotsToAllocate)) {
                        // Return an error if we cannot allocate all the slots requested.
                        return getErrorResult(row, maxTimestamp, SequenceUtil.getLimitReachedErrorCode(increasingSeq).getErrorCode());
                    }
                }
                if (validateOnly) {
                    return result;
                }
                // return if we have run out of sequence values 
                if (limitReached) {
                    if (cycle) {
                        // reset currentValue of the Sequence row to minValue/maxValue
                        currentValue = increasingSeq ? minValue : maxValue;
                    } else {
                        return getErrorResult(row, maxTimestamp, SequenceUtil.getLimitReachedErrorCode(increasingSeq).getErrorCode());
                    }
                }
                // check if the limit was reached
                limitReached = SequenceUtil.checkIfLimitReached(currentValue, minValue, maxValue, incrementBy, cacheSize, numSlotsToAllocate);
                // update currentValue
                currentValue += incrementBy * (SequenceUtil.isBulkAllocation(numSlotsToAllocate) ? numSlotsToAllocate : cacheSize);
                // update the currentValue of the Result row
                KeyValue newCurrentValueKV = createKeyValue(row, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, currentValue, timestamp);
                Sequence.replaceCurrentValueKV(cells, newCurrentValueKV);
                put.add(newCurrentValueKV);
                // set the LIMIT_REACHED column to true, so that no new values can be used
                KeyValue newLimitReachedKV = createKeyValue(row, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, limitReached, timestamp);
                put.add(newLimitReachedKV);
            }
            // update the KeyValues on the server
            Mutation[] mutations = new Mutation[] { put };
            region.batchMutate(mutations, HConstants.NO_NONCE, HConstants.NO_NONCE);
            // return a Result with the updated KeyValues
            return Result.create(cells);
        } finally {
            region.releaseRowLocks(locks);
        }
    } catch (Throwable t) {
        ServerUtil.throwIOException("Increment of sequence " + Bytes.toStringBinary(row), t);
        // Impossible
        return null;
    } finally {
        region.closeRegionOperation();
    }
}
Also used: KeyValue (org.apache.hadoop.hbase.KeyValue), Put (org.apache.hadoop.hbase.client.Put), Result (org.apache.hadoop.hbase.client.Result), TimeRange (org.apache.hadoop.hbase.io.TimeRange), RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment), Get (org.apache.hadoop.hbase.client.Get), Region (org.apache.hadoop.hbase.regionserver.Region), List (java.util.List), Mutation (org.apache.hadoop.hbase.client.Mutation), Map (java.util.Map), Cell (org.apache.hadoop.hbase.Cell), RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock)
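
Stripped of the sequence arithmetic, the coprocessor pattern the javadoc describes is: intercept preIncrement, bypass the built-in region.increment(), perform a manual read-modify-write inside a region operation, and return a synthesized Result to the client. A minimal skeleton of that shape using the HBase 1.x coprocessor APIs already visible above; the column family, qualifier, and the increment-by-one logic are placeholders, and row locking (which the real code does via acquireLock) is noted but omitted:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.util.Bytes;

// Skeleton only: shows the bypass + manual read-modify-write shape, not Phoenix's sequence logic.
public class CustomIncrementObserver extends BaseRegionObserver {

    private static final byte[] FAMILY = Bytes.toBytes("f");    // placeholder family
    private static final byte[] QUALIFIER = Bytes.toBytes("q"); // placeholder qualifier

    @Override
    public Result preIncrement(ObserverContext<RegionCoprocessorEnvironment> e, Increment increment)
            throws IOException {
        e.bypass();   // prevent the default region.increment() from running
        e.complete(); // and skip any remaining coprocessors in the chain
        Region region = e.getEnvironment().getRegion();
        byte[] row = increment.getRow();
        region.startRegionOperation();
        try {
            // The real code above also takes a row lock here; omitted to keep the sketch short.
            Result current = region.get(new Get(row));
            long value = current.isEmpty() ? 0L : Bytes.toLong(current.getValue(FAMILY, QUALIFIER));
            long updated = value + 1; // custom increment/encoding rules would go here
            Put put = new Put(row);
            put.addColumn(FAMILY, QUALIFIER, Bytes.toBytes(updated));
            region.put(put);
            // Whatever Result is returned here is what the client sees instead of the default one.
            return region.get(new Get(row));
        } finally {
            region.closeRegionOperation();
        }
    }
}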

Aggregations

Mutation (org.apache.hadoop.hbase.client.Mutation): 139
Put (org.apache.hadoop.hbase.client.Put): 53
ArrayList (java.util.ArrayList): 46
IOException (java.io.IOException): 35
Delete (org.apache.hadoop.hbase.client.Delete): 32
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 31
List (java.util.List): 28
Cell (org.apache.hadoop.hbase.Cell): 25
Pair (org.apache.hadoop.hbase.util.Pair): 23
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 23
HashMap (java.util.HashMap): 19
PTable (org.apache.phoenix.schema.PTable): 18
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 17
MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse): 15
Region (org.apache.hadoop.hbase.regionserver.Region): 14
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 14
Test (org.junit.Test): 14
MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode): 13
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 12
MutationProto (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto): 12