Use of org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand in project crate by crate.
The example below is the method testSelectWhileShardsAreRelocating from the class PartitionedTableConcurrentIntegrationTest: it keeps querying a partitioned table while its shards are moved back and forth between two data nodes via reroute commands. A minimal standalone sketch of the reroute step follows the full test.
/**
 * Test depends on 2 data nodes
 */
@Test
public void testSelectWhileShardsAreRelocating() throws Throwable {
    execute("create table t (name string, p string) " +
            "clustered into 2 shards " +
            "partitioned by (p) with (number_of_replicas = 0)");
    ensureYellow();
    execute("insert into t (name, p) values (?, ?)", new Object[][]{
        new Object[]{"Marvin", "a"},
        new Object[]{"Trillian", "a"}});
    execute("refresh table t");
    execute("set global stats.enabled=true");
    final AtomicReference<Throwable> lastThrowable = new AtomicReference<>();
    final CountDownLatch selects = new CountDownLatch(100);
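    // Query thread: run "select * from t" in a loop while shards are relocating.
    // A failed select is only recorded as an error when the failed job logged
    // fewer than three operations (collect / fetchContext) for that job id.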
    Thread t = new Thread(new Runnable() {
        @Override
        public void run() {
            while (selects.getCount() > 0) {
                try {
                    execute("select * from t");
                } catch (Throwable t) {
                    // The failed job should have three started operations
                    SQLResponse res = execute("select id from sys.jobs_log where error is not null order by started desc limit 1");
                    if (res.rowCount() > 0) {
                        String id = (String) res.rows()[0][0];
                        res = execute("select count(*) from sys.operations_log where name = ? or name = ? and job_id = ?",
                            new Object[]{"collect", "fetchContext", id});
                        if ((long) res.rows()[0][0] < 3) {
                            // set the error if there were less than three attempts
                            lastThrowable.set(t);
                        }
                    }
                } finally {
                    selects.countDown();
                }
            }
        }
    });
    t.start();
    PartitionName partitionName = new PartitionName("t", Collections.singletonList(new BytesRef("a")));
    final String indexName = partitionName.asIndexName();
    ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
    DiscoveryNodes nodes = clusterService.state().nodes();
    List<String> nodeIds = new ArrayList<>(2);
    for (DiscoveryNode node : nodes) {
        if (node.dataNode()) {
            nodeIds.add(node.getId());
        }
    }
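    // Map each of the two data nodes to the other one so that every shard move
    // targets the opposite node.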
    final Map<String, String> nodeSwap = new HashMap<>(2);
    nodeSwap.put(nodeIds.get(0), nodeIds.get(1));
    nodeSwap.put(nodeIds.get(1), nodeIds.get(0));
    final CountDownLatch relocations = new CountDownLatch(20);
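    // Relocation thread: repeatedly build a reroute request that moves every
    // started shard of the partition to the other data node, then wait until
    // relocation has finished before counting down.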
    Thread relocatingThread = new Thread(new Runnable() {
        @Override
        public void run() {
            while (relocations.getCount() > 0) {
                ClusterStateResponse clusterStateResponse = admin().cluster().prepareState().setIndices(indexName).execute().actionGet();
                List<ShardRouting> shardRoutings = clusterStateResponse.getState().routingTable().allShards(indexName);
                ClusterRerouteRequestBuilder clusterRerouteRequestBuilder = admin().cluster().prepareReroute();
                int numMoves = 0;
                for (ShardRouting shardRouting : shardRoutings) {
                    if (shardRouting.currentNodeId() == null) {
                        continue;
                    }
                    if (shardRouting.state() != ShardRoutingState.STARTED) {
                        continue;
                    }
                    String toNode = nodeSwap.get(shardRouting.currentNodeId());
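                    // MoveAllocationCommand(shardId, fromNode, toNode): relocate this
                    // started shard from its current node to the swapped node.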
                    clusterRerouteRequestBuilder.add(
                        new MoveAllocationCommand(shardRouting.shardId(), shardRouting.currentNodeId(), toNode));
                    numMoves++;
                }
                if (numMoves > 0) {
                    clusterRerouteRequestBuilder.execute().actionGet();
                    client().admin().cluster().prepareHealth()
                        .setWaitForEvents(Priority.LANGUID)
                        .setWaitForRelocatingShards(0)
                        .setTimeout(ACCEPTABLE_RELOCATION_TIME)
                        .execute().actionGet();
                    relocations.countDown();
                }
            }
        }
    });
    relocatingThread.start();
    relocations.await(SQLTransportExecutor.REQUEST_TIMEOUT.getSeconds() + 1, TimeUnit.SECONDS);
    selects.await(SQLTransportExecutor.REQUEST_TIMEOUT.getSeconds() + 1, TimeUnit.SECONDS);
    Throwable throwable = lastThrowable.get();
    if (throwable != null) {
        throw throwable;
    }
    t.join();
    relocatingThread.join();
}
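The essential pattern the test exercises is small: build a MoveAllocationCommand for a started shard, submit it through a cluster reroute request, and wait for relocation to finish. Below is a minimal sketch of just that step, assuming the same (older, ShardId-based) MoveAllocationCommand constructor and request builders used in the test above; the helper name moveShard, its parameters, and the explicit Client argument are illustrative, not part of the crate code base.

// Minimal sketch (assumes the ES API used above): move one started shard to the
// given target node, then block until no shards are relocating any more.
static void moveShard(Client client, ShardRouting shardRouting, String targetNodeId) {
    client.admin().cluster().prepareReroute()
        .add(new MoveAllocationCommand(shardRouting.shardId(), shardRouting.currentNodeId(), targetNodeId))
        .execute().actionGet();
    client.admin().cluster().prepareHealth()
        .setWaitForEvents(Priority.LANGUID)
        .setWaitForRelocatingShards(0)
        .execute().actionGet();
}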