Use of org.apache.hadoop.hbase.Stoppable in project phoenix by apache.
From the class TestIndexWriter, method testFailureOnRunningUpdateAbortsPending.
/**
 * Index updates can potentially be queued up if there aren't enough writer threads. If a running
 * index write fails, then we should early-exit the pending index update when it comes up (if the
 * pool isn't already shut down).
 * <p>
 * This test is a little bit racy - we could actually have the failure of the first task before
 * the third task is even submitted. However, we should never see the third task attempt to make
 * the batch write, so we should never see a failure here.
* @throws Exception on failure
*/
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testFailureOnRunningUpdateAbortsPending() throws Exception {
Abortable abort = new StubAbortable();
Stoppable stop = Mockito.mock(Stoppable.class);
// thread pool backing the writer tasks
ExecutorService exec = Executors.newFixedThreadPool(3);
Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
FakeTableFactory factory = new FakeTableFactory(tables);
// updates to two different tables
byte[] tableName = Bytes.add(this.testName.getTableName(), new byte[] { 1, 2, 3, 4 });
Put m = new Put(row);
m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
// this sorts before the first table name (it is a prefix of it)
byte[] tableName2 = this.testName.getTableName();
List<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>();
indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName));
indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName2));
indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName2));
// first table will fail
HTableInterface table = Mockito.mock(HTableInterface.class);
Mockito.when(table.batch(Mockito.anyList())).thenThrow(new IOException("Intentional IOException for failed first write."));
Mockito.when(table.getTableName()).thenReturn(tableName);
RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
Configuration conf = new Configuration();
Mockito.when(e.getConfiguration()).thenReturn(conf);
Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
// second table just blocks to make sure that the abort propagates to the third task
final CountDownLatch waitOnAbortedLatch = new CountDownLatch(1);
final boolean[] failed = new boolean[] { false };
HTableInterface table2 = Mockito.mock(HTableInterface.class);
Mockito.when(table2.getTableName()).thenReturn(tableName2);
Mockito.when(table2.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
        waitOnAbortedLatch.await();
        return null;
    }
}).thenAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
        failed[0] = true;
        throw new RuntimeException("Unexpected exception - second index table shouldn't have been written to");
    }
});
// add the tables to the set of tables, so they're returned to the writer
tables.put(new ImmutableBytesPtr(tableName), table);
tables.put(new ImmutableBytesPtr(tableName2), table2);
ParallelWriterIndexCommitter committer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
committer.setup(factory, exec, abort, stop, 2, e);
KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy();
policy.setup(stop, abort);
IndexWriter writer = new IndexWriter(committer, policy);
try {
    writer.write(indexUpdates);
    fail("Should not have successfully completed all index writes");
} catch (SingleIndexWriteFailureException s) {
    LOG.info("Correctly got a failure to reach the index", s);
    // we got the expected abort, so let the next task execute
    waitOnAbortedLatch.countDown();
}
assertFalse("Third set of index writes never have been attempted - should have seen the abort before done!", failed[0]);
writer.stop(this.testName.getTableNameString() + " finished");
assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
}
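These tests lean on a pair of small helpers, FakeTableFactory and StubAbortable, that are not part of this listing. A minimal sketch of what they might look like, inferred only from how the tests use them - the HTableFactory interface name, the method signatures, and the import paths here are assumptions, not the actual Phoenix source:

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

// Sketch: hands back pre-registered (mock) tables and records shutdown,
// so tests can assert on cleanup via the package-visible 'shutdown' flag.
class FakeTableFactory implements HTableFactory { // interface assumed
    boolean shutdown = false;
    private final Map<ImmutableBytesPtr, HTableInterface> tables;

    FakeTableFactory(Map<ImmutableBytesPtr, HTableInterface> tables) {
        this.tables = tables;
    }

    @Override
    public HTableInterface getTable(ImmutableBytesPtr tablename) throws IOException {
        return this.tables.get(tablename);
    }

    @Override
    public void shutdown() {
        this.shutdown = true;
    }
}

// Sketch: an Abortable that records nothing and kills nothing.
class StubAbortable implements Abortable {
    @Override
    public void abort(String why, Throwable e) {
        // intentional no-op for tests
    }

    @Override
    public boolean isAborted() {
        return false;
    }
}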
Use of org.apache.hadoop.hbase.Stoppable in project phoenix by apache.
From the class TestIndexWriter, method testShutdownInterruptsAsExpected.
/**
 * Test that if we get an interruption to the thread while doing a batch (e.g. via shutdown),
* that we correctly end the task
* @throws Exception on failure
*/
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testShutdownInterruptsAsExpected() throws Exception {
Stoppable stop = Mockito.mock(Stoppable.class);
Abortable abort = new StubAbortable();
// single-threaded pool so older requests get queued
ExecutorService exec = Executors.newFixedThreadPool(1);
Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
Configuration conf = new Configuration();
Mockito.when(e.getConfiguration()).thenReturn(conf);
Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
Region mockRegion = Mockito.mock(Region.class);
Mockito.when(e.getRegion()).thenReturn(mockRegion);
HTableDescriptor mockTableDesc = Mockito.mock(HTableDescriptor.class);
Mockito.when(mockRegion.getTableDesc()).thenReturn(mockTableDesc);
FakeTableFactory factory = new FakeTableFactory(tables);
byte[] tableName = this.testName.getTableName();
HTableInterface table = Mockito.mock(HTableInterface.class);
Mockito.when(table.getTableName()).thenReturn(tableName);
final CountDownLatch writeStartedLatch = new CountDownLatch(1);
// latch never gets counted down, so we wait forever
final CountDownLatch waitOnAbortedLatch = new CountDownLatch(1);
Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
        LOG.info("Write started");
        writeStartedLatch.countDown();
        // when we interrupt the thread for shutdown, we should see this throw an interrupt too
        try {
            waitOnAbortedLatch.await();
        } catch (InterruptedException e) {
            LOG.info("Correctly interrupted while writing!");
            throw e;
        }
        return null;
    }
});
// add the table to the set of tables, so it's returned to the writer
tables.put(new ImmutableBytesPtr(tableName), table);
// update a single table
Put m = new Put(row);
m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
final List<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>();
indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName));
// setup the writer
TrackingParallelWriterIndexCommitter committer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
committer.setup(factory, exec, abort, stop, e);
KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy();
policy.setup(stop, abort);
final IndexWriter writer = new IndexWriter(committer, policy);
final boolean[] failedWrite = new boolean[] { false };
Thread primaryWriter = new Thread() {
    @Override
    public void run() {
        try {
            writer.write(indexUpdates);
        } catch (IndexWriteException e) {
            failedWrite[0] = true;
        }
    }
};
primaryWriter.start();
// wait for the write to start before intentionally shutting down the pool
writeStartedLatch.await();
writer.stop("Shutting down writer for test " + this.testName.getTableNameString());
primaryWriter.join();
assertTrue("Writer should have failed because of the stop we issued", failedWrite[0]);
}
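The interruption this test exercises is standard java.util.concurrent behavior: shutting the pool down interrupts the worker thread, and a blocked CountDownLatch.await() surfaces that as an InterruptedException. A self-contained sketch of just that mechanism, independent of the Phoenix classes (assuming writer.stop() ultimately calls something like shutdownNow() on the pool):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownInterruptDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService exec = Executors.newFixedThreadPool(1);
        CountDownLatch started = new CountDownLatch(1);
        CountDownLatch never = new CountDownLatch(1); // never counted down

        exec.submit(() -> {
            started.countDown();
            try {
                never.await(); // blocks until the pool interrupts us
            } catch (InterruptedException e) {
                System.out.println("Correctly interrupted while waiting!");
                Thread.currentThread().interrupt(); // restore the interrupt flag
            }
        });

        started.await(); // mirror writeStartedLatch: don't shut down too early
        exec.shutdownNow(); // interrupts the blocked worker
        exec.awaitTermination(5, TimeUnit.SECONDS);
    }
}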
Use of org.apache.hadoop.hbase.Stoppable in project phoenix by apache.
From the class TestParalleIndexWriter, method testSynchronouslyCompletesAllWrites.
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testSynchronouslyCompletesAllWrites() throws Exception {
LOG.info("Starting " + test.getTableNameString());
LOG.info("Current thread is interrupted: " + Thread.interrupted());
Abortable abort = new StubAbortable();
Stoppable stop = Mockito.mock(Stoppable.class);
ExecutorService exec = Executors.newFixedThreadPool(1);
Map<ImmutableBytesPtr, HTableInterface> tables = new LinkedHashMap<ImmutableBytesPtr, HTableInterface>();
FakeTableFactory factory = new FakeTableFactory(tables);
RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
Configuration conf = new Configuration();
Mockito.when(e.getConfiguration()).thenReturn(conf);
Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
Region mockRegion = Mockito.mock(Region.class);
Mockito.when(e.getRegion()).thenReturn(mockRegion);
HTableDescriptor mockTableDesc = Mockito.mock(HTableDescriptor.class);
Mockito.when(mockRegion.getTableDesc()).thenReturn(mockTableDesc);
ImmutableBytesPtr tableName = new ImmutableBytesPtr(this.test.getTableName());
Put m = new Put(row);
m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
Multimap<HTableInterfaceReference, Mutation> indexUpdates = ArrayListMultimap.<HTableInterfaceReference, Mutation>create();
indexUpdates.put(new HTableInterfaceReference(tableName), m);
HTableInterface table = Mockito.mock(HTableInterface.class);
final boolean[] completed = new boolean[] { false };
Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
        // just keep track that it was called
        completed[0] = true;
        return null;
    }
});
Mockito.when(table.getTableName()).thenReturn(test.getTableName());
// add the table to the set of tables, so it's returned to the writer
tables.put(tableName, table);
// setup the writer and failure policy
TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
writer.setup(factory, exec, abort, stop, e);
writer.write(indexUpdates, true);
assertTrue("Writer returned before the table batch completed! Likely a race condition tripped", completed[0]);
writer.stop(this.test.getTableNameString() + " finished");
assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
}
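The final boolean[] { false } one-element array used in these tests is a common idiom: an anonymous inner class can only capture (effectively) final locals, so a mutable flag is smuggled through a final array reference. An AtomicBoolean expresses the same intent more directly; a small self-contained sketch (written with a lambda Answer, which assumes Mockito 2+; with the older Mockito used in these tests, the anonymous-class form above applies):

import java.util.concurrent.atomic.AtomicBoolean;
import org.mockito.Mockito;

public class AnswerFlagDemo {
    public static void main(String[] args) {
        AtomicBoolean completed = new AtomicBoolean(false);
        Runnable mock = Mockito.mock(Runnable.class);
        // doAnswer/when is the void-method equivalent of when(...).thenAnswer(...)
        Mockito.doAnswer(invocation -> {
            completed.set(true); // mutable even though the reference is effectively final
            return null;
        }).when(mock).run();

        mock.run();
        if (!completed.get()) {
            throw new AssertionError("the Answer should have run synchronously");
        }
    }
}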
Use of org.apache.hadoop.hbase.Stoppable in project phoenix by apache.
From the class TestParalleWriterIndexCommitter, method testCorrectlyCleansUpResources.
@Test
public void testCorrectlyCleansUpResources() throws Exception {
ExecutorService exec = Executors.newFixedThreadPool(1);
FakeTableFactory factory = new FakeTableFactory(Collections.<ImmutableBytesPtr, HTableInterface>emptyMap());
TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
Abortable mockAbort = Mockito.mock(Abortable.class);
Stoppable mockStop = Mockito.mock(Stoppable.class);
RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
Configuration conf = new Configuration();
Mockito.when(e.getConfiguration()).thenReturn(conf);
Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
// create a simple writer
writer.setup(factory, exec, mockAbort, mockStop, e);
// stop the writer
writer.stop(this.test.getTableNameString() + " finished");
assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
Mockito.verifyZeroInteractions(mockAbort, mockStop);
}
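Mockito.verifyZeroInteractions(mockAbort, mockStop) is the crux here: a clean writer shutdown must not trip the abort/stop machinery. The call fails the test if any method was ever invoked on the given mocks (newer Mockito deprecates it in favor of verifyNoInteractions). A minimal illustration:

import org.apache.hadoop.hbase.Abortable;
import org.mockito.Mockito;

public class VerifyZeroDemo {
    public static void main(String[] args) {
        Abortable abort = Mockito.mock(Abortable.class);
        Mockito.verifyZeroInteractions(abort); // passes: nothing has touched the mock

        abort.abort("boom", new RuntimeException("test")); // records an interaction
        try {
            Mockito.verifyZeroInteractions(abort); // now fails
        } catch (AssertionError expected) {
            System.out.println("failed as expected once the mock was touched");
        }
    }
}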
Use of org.apache.hadoop.hbase.Stoppable in project hbase by apache.
From the class TestRegionsRecoveryChore, method testRegionReopensWithoutStoreRefConfig.
@Test
public void testRegionReopensWithoutStoreRefConfig() throws Exception {
regionNo = 0;
ClusterMetrics clusterMetrics = TestRegionsRecoveryChore.getClusterMetrics(10);
final Map<ServerName, ServerMetrics> serverMetricsMap = clusterMetrics.getLiveServerMetrics();
LOG.debug("All Region Names with refCount....");
for (ServerMetrics serverMetrics : serverMetricsMap.values()) {
    Map<byte[], RegionMetrics> regionMetricsMap = serverMetrics.getRegionMetrics();
    for (RegionMetrics regionMetrics : regionMetricsMap.values()) {
        LOG.debug("name: " + new String(regionMetrics.getRegionName()) + " refCount: " + regionMetrics.getStoreRefCount());
    }
}
Mockito.when(hMaster.getClusterMetrics()).thenReturn(clusterMetrics);
Mockito.when(hMaster.getAssignmentManager()).thenReturn(assignmentManager);
for (byte[] regionName : REGION_NAME_LIST) {
    Mockito.when(assignmentManager.getRegionInfo(regionName)).thenReturn(TestRegionsRecoveryChore.getRegionInfo(regionName));
}
Stoppable stoppable = new StoppableImplementation();
Configuration configuration = getCustomConf();
configuration.unset("hbase.regions.recovery.store.file.ref.count");
regionsRecoveryChore = new RegionsRecoveryChore(stoppable, configuration, hMaster);
regionsRecoveryChore.chore();
// Verify that by default the feature is turned off so no regions
// should be reopened
Mockito.verify(hMaster, Mockito.times(0)).reopenRegions(Mockito.any(), Mockito.anyList(), Mockito.anyLong(), Mockito.anyLong());
// default maxCompactedStoreFileRefCount is -1 (no regions to be reopened using AM)
Mockito.verify(hMaster, Mockito.times(0)).getAssignmentManager();
Mockito.verify(assignmentManager, Mockito.times(0)).getRegionInfo(Mockito.any());
}
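The chore is gated on the hbase.regions.recovery.store.file.ref.count setting; unsetting it, as the test does, leaves the threshold at its default of -1, which disables reopening entirely. A minimal sketch of that default-off pattern - the constant names and the exact read logic are assumptions, while the key string comes from the test itself:

import org.apache.hadoop.conf.Configuration;

public class StoreRefThresholdDemo {
    static final String STORE_FILE_REF_COUNT_KEY = "hbase.regions.recovery.store.file.ref.count";
    static final int DEFAULT_THRESHOLD = -1; // non-positive means the feature is off

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.unset(STORE_FILE_REF_COUNT_KEY); // same as the test: fall back to the default
        int threshold = conf.getInt(STORE_FILE_REF_COUNT_KEY, DEFAULT_THRESHOLD);
        if (threshold <= 0) {
            System.out.println("ref-count based recovery disabled; no regions reopened");
        } else {
            System.out.println("regions with store ref count above " + threshold + " get reopened");
        }
    }
}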