use of org.apache.phoenix.hbase.index.table.HTableInterfaceReference in project phoenix by apache.
the class ParallelWriterIndexCommitter method write.
@Override
public void write(Multimap<HTableInterfaceReference, Mutation> toWrite, final boolean allowLocalUpdates) throws SingleIndexWriteFailureException {
    /*
     * This bit here is a little odd, so let's explain what's going on. Basically, we want to do the writes in
     * parallel to each index table, so each table gets its own task and is submitted to the pool. Where it gets
     * tricky is that we want to block the calling thread until one of two things happens: (1) all index tables get
     * successfully updated, or (2) any one of the index table writes fails; in either case, we should return as
     * quickly as possible. It gets a little more complicated in that if we do get a single failure, but any of the
     * index writes hasn't been started yet (it's been queued up, but not submitted to a thread), we want that task
     * to fail immediately, since we know that write is a waste and will need to be replayed anyway.
     */
    Set<Entry<HTableInterfaceReference, Collection<Mutation>>> entries = toWrite.asMap().entrySet();
    TaskBatch<Void> tasks = new TaskBatch<Void>(entries.size());
    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
        // get the mutations for each table. We leak the implementation here a little bit to save
        // doing a complete copy of all the index updates for each table.
        final List<Mutation> mutations = kvBuilder.cloneIfNecessary((List<Mutation>) entry.getValue());
        final HTableInterfaceReference tableReference = entry.getKey();
        if (env != null && !allowLocalUpdates
                && tableReference.getTableName().equals(env.getRegion().getTableDesc().getNameAsString())) {
            continue;
        }
        /*
         * Write a batch of index updates to an index table. This operation stops (is cancelable) via two
         * mechanisms: (1) setting aborted or stopped on the IndexWriter, or (2) interrupting the running thread.
         * The former only works if we are not in the midst of writing the current batch to the table, though we
         * do check these status variables before starting and before writing the batch. The latter, interrupting
         * the thread, works in the previous situations as well as at some points while writing the batch,
         * depending on the underlying writer implementation (HTableInterface#batch is blocking, but doesn't
         * specify whether it supports an interrupt).
         */
        tasks.add(new Task<Void>() {

            /**
             * Do the actual write to the index table.
             */
            @SuppressWarnings("deprecation")
            @Override
            public Void call() throws Exception {
                // this task may have been queued, so another task ahead of us may have already
                // failed; in that case we should exit early
                throwFailureIfDone();
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Writing index update:" + mutations + " to table: " + tableReference);
                }
                HTableInterface table = null;
                try {
                    if (allowLocalUpdates && env != null
                            && tableReference.getTableName().equals(env.getRegion().getTableDesc().getNameAsString())) {
                        try {
                            throwFailureIfDone();
                            IndexUtil.writeLocalUpdates(env.getRegion(), mutations, true);
                            return null;
                        } catch (IOException ignored) {
                            // if the local write fails, fall back to the standard (slower) path
                            if (LOG.isDebugEnabled()) {
                                LOG.debug("indexRegion.batchMutate failed and fall back to HTable.batch(). Got error=" + ignored);
                            }
                        }
                    }
                    table = factory.getTable(tableReference.get());
                    throwFailureIfDone();
                    table.batch(mutations);
                } catch (SingleIndexWriteFailureException e) {
                    throw e;
                } catch (IOException e) {
                    throw new SingleIndexWriteFailureException(tableReference.toString(), mutations, e,
                            PhoenixIndexFailurePolicy.getDisableIndexOnFailure(env));
                } catch (InterruptedException e) {
                    // reset the interrupt status on the thread
                    Thread.currentThread().interrupt();
                    throw new SingleIndexWriteFailureException(tableReference.toString(), mutations, e,
                            PhoenixIndexFailurePolicy.getDisableIndexOnFailure(env));
                } finally {
                    if (table != null) {
                        table.close();
                    }
                }
                return null;
            }

            private void throwFailureIfDone() throws SingleIndexWriteFailureException {
                if (this.isBatchFailed() || Thread.currentThread().isInterrupted()) {
                    throw new SingleIndexWriteFailureException("Pool closed, not attempting to write to the index!", null);
                }
            }
        });
    }
    // actually submit the tasks to the pool and wait for them to finish/fail
    try {
        pool.submitUninterruptible(tasks);
    } catch (EarlyExitFailure e) {
        propagateFailure(e);
    } catch (ExecutionException e) {
        LOG.error("Found a failed index update!");
        propagateFailure(e.getCause());
    }
}
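For context, a caller drives write() by grouping index mutations per target table in a Multimap keyed by HTableInterfaceReference. The fragment below is a minimal, hypothetical sketch of that setup; the table name, column values, and the committer variable are illustrative, not from the Phoenix source:

// hypothetical caller-side fragment: group index updates per table, then hand them to the committer
Multimap<HTableInterfaceReference, Mutation> toWrite = ArrayListMultimap.create();
HTableInterfaceReference indexTable =
        new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes("MY_INDEX"))); // illustrative name
Put update = new Put(Bytes.toBytes("row1"));
update.addColumn(Bytes.toBytes("0"), Bytes.toBytes("q"), Bytes.toBytes("v"));
toWrite.put(indexTable, update);
// committer is assumed to be an already set up ParallelWriterIndexCommitter;
// write() blocks until all per-table tasks succeed or throws on the first failure
committer.write(toWrite, false);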
use of org.apache.phoenix.hbase.index.table.HTableInterfaceReference in project phoenix by apache.
the class TestParalleWriterIndexCommitter method testSynchronouslyCompletesAllWrites.
@SuppressWarnings({ "unchecked" })
@Test
public void testSynchronouslyCompletesAllWrites() throws Exception {
    LOGGER.info("Starting " + test.getTableNameString());
    LOGGER.info("Current thread is interrupted: " + Thread.interrupted());
    RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration();
    Mockito.when(e.getConfiguration()).thenReturn(conf);
    Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
    Region mockRegion = Mockito.mock(Region.class);
    Mockito.when(e.getRegion()).thenReturn(mockRegion);
    TableDescriptor mockTableDesc = Mockito.mock(TableDescriptor.class);
    Mockito.when(mockTableDesc.getTableName()).thenReturn(TableName.valueOf("test"));
    Connection mockConnection = Mockito.mock(Connection.class);
    Mockito.when(e.getConnection()).thenReturn(mockConnection);
    Mockito.when(mockRegion.getTableDescriptor()).thenReturn(mockTableDesc);
    Stoppable stop = Mockito.mock(Stoppable.class);
    ExecutorService exec = Executors.newFixedThreadPool(1);
    Map<ImmutableBytesPtr, Table> tables = new LinkedHashMap<ImmutableBytesPtr, Table>();
    FakeTableFactory factory = new FakeTableFactory(tables);
    ImmutableBytesPtr tableName = new ImmutableBytesPtr(this.test.getTableName());
    Put m = new Put(row);
    m.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
    Multimap<HTableInterfaceReference, Mutation> indexUpdates = ArrayListMultimap.<HTableInterfaceReference, Mutation>create();
    indexUpdates.put(new HTableInterfaceReference(tableName), m);
    Table table = Mockito.mock(Table.class);
    final boolean[] completed = new boolean[] { false };
    Mockito.doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            // just keep track that it was called
            completed[0] = true;
            return null;
        }
    }).when(table).batch(Mockito.anyList(), Mockito.any());
    Mockito.when(table.getName()).thenReturn(org.apache.hadoop.hbase.TableName.valueOf(test.getTableName()));
    // add the table to the set of tables, so it's returned to the writer
    tables.put(tableName, table);
    // set up the writer and failure policy
    TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
    writer.setup(factory, exec, stop, e);
    writer.write(indexUpdates, true, ScanUtil.UNKNOWN_CLIENT_VERSION);
    assertTrue("Writer returned before the table batch completed! Likely a race condition tripped", completed[0]);
    writer.stop(this.test.getTableNameString() + " finished");
    assertTrue("Factory didn't get shut down after writer#stop!", factory.shutdown);
    assertTrue("ExecutorService isn't shut down after writer#stop!", exec.isShutdown());
}
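The FakeTableFactory used above is a small test helper whose definition isn't shown in this excerpt. A plausible minimal version, assuming the writer's factory contract amounts to a getTable lookup plus a shutdown hook (the HTableFactory interface name and method signatures here are assumptions), looks roughly like:

// hypothetical sketch of the FakeTableFactory helper: serves Table instances from a map
// and records whether shutdown() was called, so the test can assert on it afterwards
public class FakeTableFactory implements HTableFactory {

    boolean shutdown = false;
    private final Map<ImmutableBytesPtr, Table> tables;

    FakeTableFactory(Map<ImmutableBytesPtr, Table> tables) {
        this.tables = tables;
    }

    public Table getTable(ImmutableBytesPtr tablename) throws IOException {
        // hand back whatever mock the test registered for this name
        return this.tables.get(tablename);
    }

    public void shutdown() {
        this.shutdown = true;
    }
}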
use of org.apache.phoenix.hbase.index.table.HTableInterfaceReference in project phoenix by apache.
the class TestPerRegionIndexWriteCache method testMultipleAddsForSingleRegion.
@Test
public void testMultipleAddsForSingleRegion() {
    PerRegionIndexWriteCache cache = new PerRegionIndexWriteCache();
    HTableInterfaceReference t1 = new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes("t1")));
    List<Mutation> mutations = Lists.<Mutation>newArrayList(p);
    cache.addEdits(r1, t1, mutations);
    // add a second set
    mutations = Lists.<Mutation>newArrayList(p2);
    cache.addEdits(r1, t1, mutations);
    Multimap<HTableInterfaceReference, Mutation> edits = cache.getEdits(r1);
    Set<Entry<HTableInterfaceReference, Collection<Mutation>>> entries = edits.asMap().entrySet();
    assertEquals("Got more than one table in the edit map!", 1, entries.size());
    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
        // ensure that we are still storing a list here - otherwise it breaks the parallel writer
        // implementation
        final List<Mutation> stored = (List<Mutation>) entry.getValue();
        assertEquals("Got an unexpected number of mutations in the entry", 2, stored.size());
        assertEquals("Got an unexpected mutation in the entry", p, stored.get(0));
        assertEquals("Got an unexpected mutation in the entry", p2, stored.get(1));
    }
}
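This test, and the two TestPerRegionIndexWriteCache tests below, rely on fixtures r1, r2, p, and p2 defined outside this excerpt. Since the cache only needs distinct region identities and arbitrary mutations, a plausible setup (field names match the tests, but the values and the use of mocks are assumptions) is simply:

// hypothetical fixture setup; the real test class defines these elsewhere
private static final byte[] row = Bytes.toBytes("row");
private Region r1;
private Region r2;
private Put p;
private Put p2;

@Before
public void setUp() {
    // distinct object identities are all the cache needs to key its per-region entries
    r1 = Mockito.mock(Region.class);
    r2 = Mockito.mock(Region.class);
    p = new Put(row);
    p.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v1"));
    p2 = new Put(row);
    p2.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v2"));
}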
use of org.apache.phoenix.hbase.index.table.HTableInterfaceReference in project phoenix by apache.
the class TestPerRegionIndexWriteCache method testAddRemoveSingleRegion.
@Test
public void testAddRemoveSingleRegion() {
    PerRegionIndexWriteCache cache = new PerRegionIndexWriteCache();
    HTableInterfaceReference t1 = new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes("t1")));
    List<Mutation> mutations = new ArrayList<Mutation>();
    mutations.add(p);
    cache.addEdits(r1, t1, mutations);
    Multimap<HTableInterfaceReference, Mutation> edits = cache.getEdits(r1);
    Set<Entry<HTableInterfaceReference, Collection<Mutation>>> entries = edits.asMap().entrySet();
    assertEquals("Got more than one table in the edit map!", 1, entries.size());
    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
        // ensure that we are still storing a list here - otherwise it breaks the parallel writer implementation
        final List<Mutation> stored = (List<Mutation>) entry.getValue();
        assertEquals("Got an unexpected number of mutations in the entry", 1, stored.size());
        assertEquals("Got an unexpected mutation in the entry", p, stored.get(0));
    }
    // ensure that a second get doesn't return any more edits. getEdits hands the edits off to the
    // caller, so the cache doesn't keep references around and leak memory
    assertNull("Got an entry for a region whose edits were already retrieved", cache.getEdits(r1));
}
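The final assertNull passes because getEdits transfers ownership of the edits out of the cache rather than leaving a reference behind. A minimal sketch of the cache shape these tests imply (an assumption, not necessarily the exact Phoenix implementation) is:

// minimal sketch of the per-region cache shape implied by the tests: a map from region to a
// Multimap of per-table edits, where getEdits() removes the entry so no references linger
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public class PerRegionIndexWriteCache {

    private final Map<Region, Multimap<HTableInterfaceReference, Mutation>> cache =
            new HashMap<Region, Multimap<HTableInterfaceReference, Mutation>>();

    public void addEdits(Region region, HTableInterfaceReference table, Collection<Mutation> edits) {
        Multimap<HTableInterfaceReference, Mutation> stored = cache.get(region);
        if (stored == null) {
            // ArrayListMultimap keeps each table's values as a List, which the parallel
            // writer implementation relies on (see the casts in the tests)
            stored = ArrayListMultimap.create();
            cache.put(region, stored);
        }
        stored.putAll(table, edits);
    }

    public Multimap<HTableInterfaceReference, Mutation> getEdits(Region region) {
        // remove rather than read, so a second call for the same region returns null
        return cache.remove(region);
    }
}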
use of org.apache.phoenix.hbase.index.table.HTableInterfaceReference in project phoenix by apache.
the class TestPerRegionIndexWriteCache method testMultipleRegions.
@Test
public void testMultipleRegions() {
    PerRegionIndexWriteCache cache = new PerRegionIndexWriteCache();
    HTableInterfaceReference t1 = new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes("t1")));
    List<Mutation> mutations = Lists.<Mutation>newArrayList(p);
    List<Mutation> m2 = Lists.<Mutation>newArrayList(p2);
    // add edits for each region
    cache.addEdits(r1, t1, mutations);
    cache.addEdits(r2, t1, m2);
    // check region1
    Multimap<HTableInterfaceReference, Mutation> edits = cache.getEdits(r1);
    Set<Entry<HTableInterfaceReference, Collection<Mutation>>> entries = edits.asMap().entrySet();
    assertEquals("Got more than one table in the edit map!", 1, entries.size());
    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
        // ensure that we are still storing a list here - otherwise it breaks the parallel writer
        // implementation
        final List<Mutation> stored = (List<Mutation>) entry.getValue();
        assertEquals("Got an unexpected number of mutations in the entry for region1", 1, stored.size());
        assertEquals("Got an unexpected mutation in the entry for region1", p, stored.get(0));
    }
    // check region2
    edits = cache.getEdits(r2);
    entries = edits.asMap().entrySet();
    assertEquals("Got more than one table in the edit map!", 1, entries.size());
    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
        // ensure that we are still storing a list here - otherwise it breaks the parallel writer
        // implementation
        final List<Mutation> stored = (List<Mutation>) entry.getValue();
        assertEquals("Got an unexpected number of mutations in the entry for region2", 1, stored.size());
        assertEquals("Got an unexpected mutation in the entry for region2", p2, stored.get(0));
    }
    // ensure that a second get doesn't return any more edits, so the cache isn't holding
    // references to the edits and leaking memory
    assertNull("Got an entry for a region whose edits were already retrieved", cache.getEdits(r1));
}