Use of org.apache.hadoop.hbase.client.Put in project phoenix by apache.
The class EndToEndCoveredIndexingIT, method testDeleteColumnsInThePast.
/**
 * Covering deletes (via {@link Delete#deleteColumns}) cover everything back in time from the
 * given time. If it's modifying the latest state, we don't need to do anything but add deletes.
 * If it's modifying back-in-time state, we just need to fix up the surrounding elements, as
 * anything else ahead of it will be fixed up by later updates.
 * <p>
 * Similar to {@link #testMultipleTimestampsInSingleDelete()}, but with covering deletes.
 * @throws Exception on failure
 */
@Test
public void testDeleteColumnsInThePast() throws Exception {
    HTable primary = createSetupTables(fam1);
    // do a put to the primary table
    Put p = new Put(row1);
    long ts1 = 10, ts2 = 11, ts3 = 12;
    p.add(FAM, indexed_qualifer, ts1, value1);
    p.add(FAM2, regular_qualifer, ts2, value3);
    primary.put(p);
    primary.flushCommits();
    // now build up a delete with a couple different timestamps
    Delete d = new Delete(row1);
    // these deletes don't need to match the exact ts because they cover everything earlier
    d.deleteColumns(FAM, indexed_qualifer, ts2);
    d.deleteColumns(FAM2, regular_qualifer, ts3);
    primary.delete(d);
    // read the index for the expected values
    HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
    // build the expected kvs
    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
    // check the first entry at ts1
    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);
    // delete at ts2 changes what the put would insert
    pairs.clear();
    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col1));
    pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);
    // final delete clears out everything
    expected = Collections.emptyList();
    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, value1);
    // cleanup
    closeAndCleanupTables(primary, index1);
}
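The behavior this test relies on can be seen in isolation. Below is a minimal sketch of the covering-delete semantics of Delete#deleteColumns against a plain HBase table, assuming the same 0.98-era client API used above and an existing table "t" with family "f" (all names here are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class CoveringDeleteSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "t");
        // write two versions of the same cell
        Put p = new Put(Bytes.toBytes("row"));
        p.add(Bytes.toBytes("f"), Bytes.toBytes("q"), 10L, Bytes.toBytes("v1"));
        p.add(Bytes.toBytes("f"), Bytes.toBytes("q"), 12L, Bytes.toBytes("v2"));
        table.put(p);
        // deleteColumns at ts=11 covers everything at or before 11:
        // the ts=10 version is masked, the ts=12 version survives
        Delete d = new Delete(Bytes.toBytes("row"));
        d.deleteColumns(Bytes.toBytes("f"), Bytes.toBytes("q"), 11L);
        table.delete(d);
        Get g = new Get(Bytes.toBytes("row"));
        g.setMaxVersions();
        Result r = table.get(g);
        System.out.println("versions left: " + r.size()); // expect 1
        table.close();
    }
}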
Use of org.apache.hadoop.hbase.client.Put in project phoenix by apache.
The class CoveredColumnIndexCodec, method getIndexKeyValueForTesting.
/**
 * Essentially a short-cut to building a {@link Put}.
 *
 * @param pk row key
 * @param timestamp timestamp of all the keyvalues
 * @param values expected value--column pairs
 * @return the keyvalues that the index contains for the given row at the timestamp with the given value--column pairs
 */
public static List<KeyValue> getIndexKeyValueForTesting(byte[] pk, long timestamp, List<Pair<byte[], CoveredColumn>> values) {
    int length = 0;
    List<ColumnEntry> expected = new ArrayList<ColumnEntry>(values.size());
    for (Pair<byte[], CoveredColumn> value : values) {
        ColumnEntry entry = new ColumnEntry(value.getFirst(), value.getSecond());
        length += value.getFirst().length;
        expected.add(entry);
    }
    byte[] rowKey = CoveredColumnIndexCodec.composeRowKey(pk, length, expected);
    Put p = new Put(rowKey, timestamp);
    CoveredColumnIndexCodec.addColumnsToPut(p, expected);
    List<KeyValue> kvs = new ArrayList<KeyValue>();
    for (Entry<byte[], List<KeyValue>> entry : p.getFamilyMap().entrySet()) {
        kvs.addAll(entry.getValue());
    }
    return kvs;
}
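The final loop is a reusable idiom: build a Put, then flatten its family map back into the list of KeyValues it would write. A minimal standalone sketch of that idiom, assuming the HBase 0.94/0.98 client where Put#getFamilyMap() still returns a map of List<KeyValue> (class and names are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class FlattenPutSketch {
    // collect every KeyValue a Put would write, across all families
    public static List<KeyValue> flatten(Put p) {
        List<KeyValue> kvs = new ArrayList<KeyValue>();
        for (Entry<byte[], List<KeyValue>> entry : p.getFamilyMap().entrySet()) {
            kvs.addAll(entry.getValue());
        }
        return kvs;
    }

    public static void main(String[] args) {
        Put p = new Put(Bytes.toBytes("row"), 10L);
        p.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        System.out.println(flatten(p).size()); // 1 KeyValue: row/f:q at ts=10
    }
}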
Use of org.apache.hadoop.hbase.client.Put in project phoenix by apache.
The class IndexUpdateManager, method toString.
@Override
public String toString() {
    StringBuffer sb = new StringBuffer("Pending Index Updates:\n");
    for (Entry<ImmutableBytesPtr, Collection<Mutation>> entry : map.entrySet()) {
        String tableName = Bytes.toStringBinary(entry.getKey().get());
        sb.append(" Table: '" + tableName + "'\n");
        for (Mutation m : entry.getValue()) {
            sb.append("\t");
            if (shouldBeRemoved(m)) {
                sb.append("[REMOVED]");
            }
            sb.append(m.getClass().getSimpleName() + ":" + ((m instanceof Put) ? m.getTimeStamp() + " " : ""));
            sb.append(" row=" + Bytes.toStringBinary(m.getRow()));
            sb.append("\n");
            if (m.getFamilyCellMap().isEmpty()) {
                sb.append("\t\t=== EMPTY ===\n");
            }
            for (List<Cell> kvs : m.getFamilyCellMap().values()) {
                for (Cell kv : kvs) {
                    sb.append("\t\t" + kv.toString() + "/value=" + Bytes.toStringBinary(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
                    sb.append("\n");
                }
            }
        }
    }
    return sb.toString();
}
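Note the per-cell formatting: values are read through the Cell accessors (getValueArray/getValueOffset/getValueLength) rather than copying bytes out. A minimal standalone sketch of that idiom, assuming an HBase 0.98+ client (class and names are illustrative):

import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class CellDumpSketch {
    // render every cell of a mutation the same way the toString() above does
    public static String dump(Put p) {
        StringBuilder sb = new StringBuilder();
        for (List<Cell> cells : p.getFamilyCellMap().values()) {
            for (Cell c : cells) {
                sb.append(c.toString()).append("/value=")
                  .append(Bytes.toStringBinary(c.getValueArray(), c.getValueOffset(), c.getValueLength()))
                  .append("\n");
            }
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        Put p = new Put(Bytes.toBytes("row"));
        p.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        System.out.print(dump(p));
    }
}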
Use of org.apache.hadoop.hbase.client.Put in project phoenix by apache.
The class TestIndexWriter, method testFailureOnRunningUpdateAbortsPending.
/**
 * Index updates can potentially be queued up if there aren't enough writer threads. If a running
 * index write fails, then we should early-exit the pending index update when it comes up (if the
 * pool isn't already shut down).
 * <p>
 * This test is a little bit racy - we could actually have the failure of the first task before
 * the third task is even submitted. However, we should never see the third task attempt to make
 * the batch write, so we should never see a failure here.
 * @throws Exception on failure
 */
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testFailureOnRunningUpdateAbortsPending() throws Exception {
    Abortable abort = new StubAbortable();
    Stoppable stop = Mockito.mock(Stoppable.class);
    // single thread factory so the older request gets queued
    ExecutorService exec = Executors.newFixedThreadPool(3);
    Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
    FakeTableFactory factory = new FakeTableFactory(tables);
    // updates to two different tables
    byte[] tableName = Bytes.add(this.testName.getTableName(), new byte[] { 1, 2, 3, 4 });
    Put m = new Put(row);
    m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
    // this will sort after the first tablename
    byte[] tableName2 = this.testName.getTableName();
    List<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>();
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName));
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName2));
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName2));
    // first table will fail
    HTableInterface table = Mockito.mock(HTableInterface.class);
    Mockito.when(table.batch(Mockito.anyList())).thenThrow(new IOException("Intentional IOException for failed first write."));
    Mockito.when(table.getTableName()).thenReturn(tableName);
    RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration();
    Mockito.when(e.getConfiguration()).thenReturn(conf);
    Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
    // second table just blocks to make sure that the abort propagates to the third task
    final CountDownLatch waitOnAbortedLatch = new CountDownLatch(1);
    final boolean[] failed = new boolean[] { false };
    HTableInterface table2 = Mockito.mock(HTableInterface.class);
    Mockito.when(table2.getTableName()).thenReturn(tableName2);
    Mockito.when(table2.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            waitOnAbortedLatch.await();
            return null;
        }
    }).thenAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            failed[0] = true;
            throw new RuntimeException("Unexpected exception - second index table shouldn't have been written to");
        }
    });
    // add the tables to the set of tables, so it's returned to the writer
    tables.put(new ImmutableBytesPtr(tableName), table);
    tables.put(new ImmutableBytesPtr(tableName2), table2);
    ParallelWriterIndexCommitter committer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
    committer.setup(factory, exec, abort, stop, 2, e);
    KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy();
    policy.setup(stop, abort);
    IndexWriter writer = new IndexWriter(committer, policy);
    try {
        writer.write(indexUpdates);
        fail("Should not have successfully completed all index writes");
    } catch (SingleIndexWriteFailureException s) {
        LOG.info("Correctly got a failure to reach the index", s);
        // should have correctly gotten the abort, so let the next task execute
        waitOnAbortedLatch.countDown();
    }
    assertFalse("Third set of index writes should never have been attempted - should have seen the abort before done!", failed[0]);
    writer.stop(this.testName.getTableNameString() + " finished");
    assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
    assertTrue("ExecutorService isn't terminated after writer#stop!", exec.isShutdown());
}
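The key Mockito idiom above is chaining two Answers on the same stub: the first invocation of batch() parks on a latch until the failure has been observed, and a second invocation (which should never happen) records a failure and throws. A minimal sketch of that pattern against a hypothetical collaborator (the Worker interface below is illustrative, not part of Phoenix or HBase):

import java.util.concurrent.CountDownLatch;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class ChainedAnswerSketch {
    // hypothetical collaborator, used only for this illustration
    interface Worker {
        Object doWork() throws Exception;
    }

    public static void main(String[] args) throws Exception {
        final CountDownLatch releaseLatch = new CountDownLatch(1);
        final boolean[] calledTwice = new boolean[] { false };
        Worker worker = Mockito.mock(Worker.class);
        Mockito.when(worker.doWork()).thenAnswer(new Answer<Object>() {
            @Override
            public Object answer(InvocationOnMock invocation) throws Throwable {
                releaseLatch.await(); // first call parks until the test releases it
                return null;
            }
        }).thenAnswer(new Answer<Object>() {
            @Override
            public Object answer(InvocationOnMock invocation) throws Throwable {
                calledTwice[0] = true; // a second call means the early-exit didn't happen
                throw new RuntimeException("should not have been invoked again");
            }
        });
        // ... hand 'worker' to the code under test, assert on the failure, then:
        releaseLatch.countDown();
    }
}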
Use of org.apache.hadoop.hbase.client.Put in project phoenix by apache.
The class UpgradeIT, method putUnlockKVInSysMutex.
private void putUnlockKVInSysMutex(byte[] row) throws Exception {
    try (Connection conn = getConnection(false, null)) {
        ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
        try (HTableInterface sysMutexTable = services.getTable(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES)) {
            byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
            byte[] qualifier = UPGRADE_MUTEX;
            Put put = new Put(row);
            put.add(family, qualifier, UPGRADE_MUTEX_UNLOCKED);
            sysMutexTable.put(put);
            sysMutexTable.flushCommits();
        }
    }
}
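For reference, Put#add(family, qualifier, value) and HTableInterface#flushCommits() are pre-1.0 client calls. A minimal sketch of the equivalent single-cell write with the HBase 1.x Connection/Table API (the table, family, and qualifier names below are illustrative placeholders, not Phoenix's actual metadata constants):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MutexPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("SOME_MUTEX_TABLE"))) {
            Put put = new Put(Bytes.toBytes("rowKey"));
            // addColumn replaces the deprecated Put#add(family, qualifier, value)
            put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("UNLOCKED"));
            table.put(put); // Table#put flushes immediately; no flushCommits() needed
        }
    }
}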