Example 81 with Put

use of org.apache.hadoop.hbase.client.Put in project phoenix by apache.

the class EndToEndCoveredIndexingIT method testDeleteColumnsInThePast.

/**
     * Covering deletes (via {@link Delete#deleteColumns}) cover everything back in time from the
     * given timestamp. If it's modifying the latest state, we don't need to do anything but add the
     * deletes. If it's modifying back-in-time state, we just need to fix up the surrounding
     * elements, since anything else ahead of it will be fixed up by later updates.
     * <p>
     * Similar to {@link #testMultipleTimestampsInSingleDelete()}, but with covering deletes.
     * @throws Exception on failure
     */
@Test
public void testDeleteColumnsInThePast() throws Exception {
    HTable primary = createSetupTables(fam1);
    // do a put to the primary table
    Put p = new Put(row1);
    long ts1 = 10, ts2 = 11, ts3 = 12;
    p.add(FAM, indexed_qualifer, ts1, value1);
    p.add(FAM2, regular_qualifer, ts2, value3);
    primary.put(p);
    primary.flushCommits();
    // now build up a delete with a couple different timestamps
    Delete d = new Delete(row1);
    // these deletes don't need to match the exact ts because they cover everything earlier
    d.deleteColumns(FAM, indexed_qualifer, ts2);
    d.deleteColumns(FAM2, regular_qualifer, ts3);
    primary.delete(d);
    // read the index for the expected values
    HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
    // build the expected kvs
    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
    // check the first entry at ts1
    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);
    // delete at ts2 changes what the put would insert
    pairs.clear();
    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col1));
    pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);
    // final delete clears out everything
    expected = Collections.emptyList();
    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, value1);
    // cleanup
    closeAndCleanupTables(primary, index1);
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) KeyValue(org.apache.hadoop.hbase.KeyValue) ArrayList(java.util.ArrayList) HTable(org.apache.hadoop.hbase.client.HTable) Put(org.apache.hadoop.hbase.client.Put) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.Test) NeedsOwnMiniClusterTest(org.apache.phoenix.end2end.NeedsOwnMiniClusterTest)
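
The covering behavior the test above relies on comes straight from the HBase client API: a Delete.deleteColumns(family, qualifier, ts) marker masks every version of that column at or before ts. A minimal sketch with the same 0.98-era client API (the table and column names here are hypothetical, not part of the test):

HTable t = new HTable(UTIL.getConfiguration(), "demo");
byte[] row = Bytes.toBytes("r"), fam = Bytes.toBytes("f"), qual = Bytes.toBytes("q");
Put p = new Put(row);
// two versions of the same column
p.add(fam, qual, 10, Bytes.toBytes("v10"));
p.add(fam, qual, 12, Bytes.toBytes("v12"));
t.put(p);
Delete d = new Delete(row);
// covers everything at ts <= 11, so only the ts=10 version is masked
d.deleteColumns(fam, qual, 11);
t.delete(d);
Get g = new Get(row);
g.setMaxVersions();
// the result now contains only the ts=12 version, "v12"
Result r = t.get(g);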

Example 82 with Put

use of org.apache.hadoop.hbase.client.Put in project phoenix by apache.

the class CoveredColumnIndexCodec method getIndexKeyValueForTesting.

/**
     * Essentially a short-cut for building a {@link Put}.
     * 
     * @param pk
     *            row key
     * @param timestamp
     *            timestamp of all the keyvalues
     * @param values
     *            expected value--column pairs
     * @return the keyvalues that the index contains for the given row at the given timestamp, with
     *         the given value--column pairs.
     */
public static List<KeyValue> getIndexKeyValueForTesting(byte[] pk, long timestamp, List<Pair<byte[], CoveredColumn>> values) {
    int length = 0;
    List<ColumnEntry> expected = new ArrayList<ColumnEntry>(values.size());
    for (Pair<byte[], CoveredColumn> value : values) {
        ColumnEntry entry = new ColumnEntry(value.getFirst(), value.getSecond());
        length += value.getFirst().length;
        expected.add(entry);
    }
    byte[] rowKey = CoveredColumnIndexCodec.composeRowKey(pk, length, expected);
    Put p = new Put(rowKey, timestamp);
    CoveredColumnIndexCodec.addColumnsToPut(p, expected);
    List<KeyValue> kvs = new ArrayList<KeyValue>();
    for (Entry<byte[], List<KeyValue>> entry : p.getFamilyMap().entrySet()) {
        kvs.addAll(entry.getValue());
    }
    return kvs;
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) ArrayList(java.util.ArrayList) ArrayList(java.util.ArrayList) List(java.util.List) Put(org.apache.hadoop.hbase.client.Put)
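
Note that Put.getFamilyMap() returning a Map of List<KeyValue> is the deprecated 0.98-era accessor; on newer clients the equivalent flattening goes through Mutation.getFamilyCellMap(), as in this hedged sketch (reusing the Put p built above):

// getFamilyCellMap() is the non-deprecated replacement for getFamilyMap()
List<Cell> cells = new ArrayList<Cell>();
for (List<Cell> perFamily : p.getFamilyCellMap().values()) {
    cells.addAll(perFamily);
}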

Example 83 with Put

use of org.apache.hadoop.hbase.client.Put in project phoenix by apache.

the class IndexUpdateManager method toString.

@Override
public String toString() {
    StringBuffer sb = new StringBuffer("Pending Index Updates:\n");
    // one section per target index table
    for (Entry<ImmutableBytesPtr, Collection<Mutation>> entry : map.entrySet()) {
        String tableName = Bytes.toStringBinary(entry.getKey().get());
        sb.append("   Table: '" + tableName + "'\n");
        for (Mutation m : entry.getValue()) {
            sb.append("\t");
            // mutations that will be pruned from the final update set are flagged, not hidden
            if (shouldBeRemoved(m)) {
                sb.append("[REMOVED]");
            }
            // only Puts carry a client-set timestamp worth printing here
            sb.append(m.getClass().getSimpleName() + ":" + ((m instanceof Put) ? m.getTimeStamp() + " " : ""));
            sb.append(" row=" + Bytes.toStringBinary(m.getRow()));
            sb.append("\n");
            if (m.getFamilyCellMap().isEmpty()) {
                sb.append("\t\t=== EMPTY ===\n");
            }
            for (List<Cell> kvs : m.getFamilyCellMap().values()) {
                for (Cell kv : kvs) {
                    sb.append("\t\t" + kv.toString() + "/value=" + Bytes.toStringBinary(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
                    sb.append("\n");
                }
            }
        }
    }
    return sb.toString();
}
Also used : ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Collection(java.util.Collection) Mutation(org.apache.hadoop.hbase.client.Mutation) Cell(org.apache.hadoop.hbase.Cell) Put(org.apache.hadoop.hbase.client.Put)
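
Bytes.toStringBinary is used above because it keeps arbitrary byte[] row keys and values readable: printable ASCII passes through unchanged and everything else is escaped as \xNN. A quick illustrative sketch:

// non-printable bytes are rendered as \xNN hex escapes
byte[] rowKey = new byte[] { 'r', 0x01, (byte) 0xFF };
// yields "r\x01\xFF"
String printable = Bytes.toStringBinary(rowKey);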

Example 84 with Put

use of org.apache.hadoop.hbase.client.Put in project phoenix by apache.

the class TestIndexWriter method testFailureOnRunningUpdateAbortsPending.

/**
   * Index updates can potentially be queued up if there aren't enough writer threads. If a running
   * index write fails, then we should early-exit the pending index update when it comes up (if the
   * pool isn't already shut down).
   * <p>
   * This test is a little bit racy - we could actually have the failure of the first task before
   * the third task is even submitted. However, we should never see the third task attempt to make
   * the batch write, so we should never see a failure here.
   * @throws Exception on failure
   */
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testFailureOnRunningUpdateAbortsPending() throws Exception {
    Abortable abort = new StubAbortable();
    Stoppable stop = Mockito.mock(Stoppable.class);
    // fixed-size pool; the stubbed latch below, not the pool size, controls task ordering
    ExecutorService exec = Executors.newFixedThreadPool(3);
    Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
    FakeTableFactory factory = new FakeTableFactory(tables);
    // updates to two different tables
    byte[] tableName = Bytes.add(this.testName.getTableName(), new byte[] { 1, 2, 3, 4 });
    Put m = new Put(row);
    m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
    // this will sort before the first tablename, since it is a prefix of it
    byte[] tableName2 = this.testName.getTableName();
    List<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>();
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName));
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName2));
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName2));
    // first table will fail
    HTableInterface table = Mockito.mock(HTableInterface.class);
    Mockito.when(table.batch(Mockito.anyList())).thenThrow(new IOException("Intentional IOException for failed first write."));
    Mockito.when(table.getTableName()).thenReturn(tableName);
    RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration();
    Mockito.when(e.getConfiguration()).thenReturn(conf);
    Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
    // second table just blocks to make sure that the abort propagates to the third task
    final CountDownLatch waitOnAbortedLatch = new CountDownLatch(1);
    final boolean[] failed = new boolean[] { false };
    HTableInterface table2 = Mockito.mock(HTableInterface.class);
    Mockito.when(table2.getTableName()).thenReturn(tableName2);
    Mockito.when(table2.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            waitOnAbortedLatch.await();
            return null;
        }
    }).thenAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            failed[0] = true;
            throw new RuntimeException("Unexpected exception - second index table shouldn't have been written to");
        }
    });
    // add the tables to the set of tables, so they're returned to the writer
    tables.put(new ImmutableBytesPtr(tableName), table);
    tables.put(new ImmutableBytesPtr(tableName2), table2);
    ParallelWriterIndexCommitter committer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
    committer.setup(factory, exec, abort, stop, 2, e);
    KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy();
    policy.setup(stop, abort);
    IndexWriter writer = new IndexWriter(committer, policy);
    try {
        writer.write(indexUpdates);
        fail("Should not have successfully completed all index writes");
    } catch (SingleIndexWriteFailureException s) {
        LOG.info("Correctly got a failure to reach the index", s);
        // the abort propagated correctly, so let the next task execute
        waitOnAbortedLatch.countDown();
    }
    assertFalse("Third set of index writes never have been attempted - should have seen the abort before done!", failed[0]);
    writer.stop(this.testName.getTableNameString() + " finished");
    assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
    assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
}
Also used : StubAbortable(org.apache.phoenix.hbase.index.StubAbortable) Configuration(org.apache.hadoop.conf.Configuration) HashMap(java.util.HashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ArrayList(java.util.ArrayList) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) Abortable(org.apache.hadoop.hbase.Abortable) StubAbortable(org.apache.phoenix.hbase.index.StubAbortable) Pair(org.apache.hadoop.hbase.util.Pair) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Stoppable(org.apache.hadoop.hbase.Stoppable) IOException(java.io.IOException) CountDownLatch(java.util.concurrent.CountDownLatch) Put(org.apache.hadoop.hbase.client.Put) Answer(org.mockito.stubbing.Answer) InvocationOnMock(org.mockito.invocation.InvocationOnMock) SingleIndexWriteFailureException(org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException) ExecutorService(java.util.concurrent.ExecutorService) Mutation(org.apache.hadoop.hbase.client.Mutation) Test(org.junit.Test)
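
The latch-gated stubbing above is a reusable pattern for ordering concurrent mock calls. A self-contained sketch against a hypothetical Worker interface (not part of the test):

// Worker is a hypothetical interface used only for this sketch
interface Worker {
    Object run() throws Exception;
}
final CountDownLatch gate = new CountDownLatch(1);
Worker w = Mockito.mock(Worker.class);
Mockito.when(w.run()).thenAnswer(new Answer<Object>() {

    @Override
    public Object answer(InvocationOnMock invocation) throws Throwable {
        // the first call blocks until the test has observed the expected failure
        gate.await();
        return null;
    }
}).thenThrow(new IllegalStateException("second call should never happen"));
// ... trigger the failure path, catch it, then release the blocked first call:
gate.countDown();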

Example 85 with Put

use of org.apache.hadoop.hbase.client.Put in project phoenix by apache.

the class UpgradeIT method putUnlockKVInSysMutex.

private void putUnlockKVInSysMutex(byte[] row) throws Exception {
    try (Connection conn = getConnection(false, null)) {
        ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
        try (HTableInterface sysMutexTable = services.getTable(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES)) {
            byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
            byte[] qualifier = UPGRADE_MUTEX;
            Put put = new Put(row);
            put.add(family, qualifier, UPGRADE_MUTEX_UNLOCKED);
            sysMutexTable.put(put);
            sysMutexTable.flushCommits();
        }
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) DelegateConnectionQueryServices(org.apache.phoenix.query.DelegateConnectionQueryServices) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) Put(org.apache.hadoop.hbase.client.Put)
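
Writing the UNLOCKED cell is only half of a mutex scheme: the acquire side is typically an atomic checkAndPut that flips the cell only while it still holds the unlocked value. A hedged sketch (UPGRADE_MUTEX_LOCKED is assumed here as the counterpart constant to UPGRADE_MUTEX_UNLOCKED):

Put lock = new Put(row);
// UPGRADE_MUTEX_LOCKED is an assumed counterpart value, not defined in this snippet
lock.add(family, qualifier, UPGRADE_MUTEX_LOCKED);
// succeeds only if the cell still holds the UNLOCKED value, so exactly one caller wins
boolean acquired = sysMutexTable.checkAndPut(row, family, qualifier, UPGRADE_MUTEX_UNLOCKED, lock);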

Aggregations

Put (org.apache.hadoop.hbase.client.Put): 1416
Test (org.junit.Test): 672
Table (org.apache.hadoop.hbase.client.Table): 489
ArrayList (java.util.ArrayList): 317
Result (org.apache.hadoop.hbase.client.Result): 279
TableName (org.apache.hadoop.hbase.TableName): 257
IOException (java.io.IOException): 241
Delete (org.apache.hadoop.hbase.client.Delete): 225
Scan (org.apache.hadoop.hbase.client.Scan): 222
Cell (org.apache.hadoop.hbase.Cell): 200
Get (org.apache.hadoop.hbase.client.Get): 196
Configuration (org.apache.hadoop.conf.Configuration): 148
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 139
Connection (org.apache.hadoop.hbase.client.Connection): 122
KeyValue (org.apache.hadoop.hbase.KeyValue): 112
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 110
Admin (org.apache.hadoop.hbase.client.Admin): 89
List (java.util.List): 83
Mutation (org.apache.hadoop.hbase.client.Mutation): 82
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 80