use of org.apache.accumulo.core.client.impl.ClientContext in project accumulo by apache.
The class DynamicThreadPoolsIT, method test().
@Test
public void test() throws Exception {
  final String[] tables = getUniqueNames(15);
  String firstTable = tables[0];
  Connector c = getConnector();
  c.instanceOperations().setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "5");
  TestIngest.Opts opts = new TestIngest.Opts();
  opts.rows = 500 * 1000;
  opts.createTable = true;
  opts.setTableName(firstTable);
  ClientConfiguration clientConf = cluster.getClientConfig();
  if (clientConf.hasSasl()) {
    opts.updateKerberosCredentials(clientConf);
  } else {
    opts.setPrincipal(getAdminPrincipal());
  }
  TestIngest.ingest(c, opts, new BatchWriterOpts());
  c.tableOperations().flush(firstTable, null, null, true);
  for (int i = 1; i < tables.length; i++)
    c.tableOperations().clone(firstTable, tables[i], true, null, null);
  // time between checks of the thread pool sizes
  sleepUninterruptibly(11, TimeUnit.SECONDS);
  Credentials creds = new Credentials(getAdminPrincipal(), getAdminToken());
  for (int i = 1; i < tables.length; i++)
    c.tableOperations().compact(tables[i], null, null, true, false);
  for (int i = 0; i < 30; i++) {
    int count = 0;
    MasterClientService.Iface client = null;
    MasterMonitorInfo stats = null;
    while (true) {
      try {
        client = MasterClient.getConnectionWithRetry(new ClientContext(c.getInstance(), creds, clientConf));
        stats = client.getMasterStats(Tracer.traceInfo(), creds.toThrift(c.getInstance()));
        break;
      } catch (ThriftNotActiveServiceException e) {
        // Let it loop, fetching a new location
        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
      } finally {
        if (client != null)
          MasterClient.close(client);
      }
    }
    for (TabletServerStatus server : stats.tServerInfo) {
      for (TableInfo table : server.tableMap.values()) {
        count += table.majors.running;
      }
    }
    System.out.println("count " + count);
    if (count > 3)
      return;
    sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
  }
  fail("Could not observe higher number of threads after changing the config");
}
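The ClientContext usage in this test is the construction from the Connector's Instance, admin Credentials, and the cluster ClientConfiguration, followed by a MasterClient RPC. Below is a minimal sketch of that pattern with the retry-on-ThriftNotActiveServiceException handling omitted; fetchMasterStats is a hypothetical helper name, not part of the original test.

// Sketch only: fetches master stats once for a given connector and client configuration.
static MasterMonitorInfo fetchMasterStats(Connector connector, Credentials creds, ClientConfiguration clientConf) throws Exception {
  ClientContext context = new ClientContext(connector.getInstance(), creds, clientConf);
  MasterClientService.Iface client = null;
  try {
    client = MasterClient.getConnectionWithRetry(context); // retries until an active master is reachable
    return client.getMasterStats(Tracer.traceInfo(), creds.toThrift(connector.getInstance()));
  } finally {
    if (client != null)
      MasterClient.close(client); // always release the Thrift client
  }
}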
use of org.apache.accumulo.core.client.impl.ClientContext in project accumulo by apache.
The class WrongTabletTest, method main().
public static void main(String[] args) {
  final Opts opts = new Opts();
  opts.parseArgs(WrongTabletTest.class.getName(), args);
  final HostAndPort location = HostAndPort.fromString(opts.location);
  final Instance inst = opts.getInstance();
  final ServerConfigurationFactory conf = new ServerConfigurationFactory(inst);
  final ClientContext context = new AccumuloServerContext(inst, conf) {
    @Override
    public synchronized Credentials getCredentials() {
      try {
        return new Credentials(opts.getPrincipal(), opts.getToken());
      } catch (AccumuloSecurityException e) {
        throw new RuntimeException(e);
      }
    }
  };
  try {
    TabletClientService.Iface client = ThriftUtil.getTServerClient(location, context);
    Mutation mutation = new Mutation(new Text("row_0003750001"));
    mutation.putDelete(new Text("colf"), new Text("colq"));
    client.update(Tracer.traceInfo(), context.rpcCreds(),
        new KeyExtent(Table.ID.of("!!"), null, new Text("row_0003750000")).toThrift(),
        mutation.toThrift(), TDurability.DEFAULT);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
use of org.apache.accumulo.core.client.impl.ClientContext in project accumulo by apache.
The class TransportCachingIT, method testCachedTransport().
@Test
public void testCachedTransport() {
  Connector conn = getConnector();
  Instance instance = conn.getInstance();
  ClientConfiguration clientConf = cluster.getClientConfig();
  ClientContext context = new ClientContext(instance, new Credentials(getAdminPrincipal(), getAdminToken()), clientConf);
  long rpcTimeout = ConfigurationTypeHelper.getTimeInMillis(Property.GENERAL_RPC_TIMEOUT.getDefaultValue());
  // create list of servers
  ArrayList<ThriftTransportKey> servers = new ArrayList<>();
  // add tservers
  ZooCache zc = new ZooCacheFactory().getZooCache(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut());
  for (String tserver : zc.getChildren(ZooUtil.getRoot(instance) + Constants.ZTSERVERS)) {
    String path = ZooUtil.getRoot(instance) + Constants.ZTSERVERS + "/" + tserver;
    byte[] data = ZooUtil.getLockData(zc, path);
    if (data != null) {
      String strData = new String(data, UTF_8);
      if (!strData.equals("master"))
        servers.add(new ThriftTransportKey(new ServerServices(strData).getAddress(Service.TSERV_CLIENT), rpcTimeout, context));
    }
  }
  ThriftTransportPool pool = ThriftTransportPool.getInstance();
  TTransport first = null;
  while (null == first) {
    try {
      // Get a transport (cached or not)
      first = pool.getAnyTransport(servers, true).getSecond();
    } catch (TTransportException e) {
      log.warn("Failed to obtain transport to {}", servers);
    }
  }
  assertNotNull(first);
  // Return it to unreserve it
  pool.returnTransport(first);
  TTransport second = null;
  while (null == second) {
    try {
      // Get a cached transport (should be the first)
      second = pool.getAnyTransport(servers, true).getSecond();
    } catch (TTransportException e) {
      log.warn("Failed to obtain second transport to {}", servers);
    }
  }
  // We should get the same transport
  assertTrue("Expected the first and second to be the same instance", first == second);
  // Return the 2nd
  pool.returnTransport(second);
  TTransport third = null;
  while (null == third) {
    try {
      // Get a non-cached transport
      third = pool.getAnyTransport(servers, false).getSecond();
    } catch (TTransportException e) {
      log.warn("Failed to obtain third transport to {}", servers);
    }
  }
  assertFalse("Expected second and third transport to be different instances", second == third);
  pool.returnTransport(third);
}
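Stripped of the test assertions, the borrow/return cycle exercised above reduces to the following sketch. It assumes `servers` is a populated list of ThriftTransportKey built as in the loop over tablet servers, and it omits handling of the checked TTransportException.

// Sketch only: borrow a transport from the shared pool and return it when done.
ThriftTransportPool pool = ThriftTransportPool.getInstance();
TTransport transport = pool.getAnyTransport(servers, true).getSecond(); // true = prefer a cached connection
try {
  // ... use the transport for Thrift RPCs ...
} finally {
  pool.returnTransport(transport); // unreserves it so later callers can reuse the cached connection
}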
use of org.apache.accumulo.core.client.impl.ClientContext in project accumulo by apache.
The class CollectTabletStats, method main().
public static void main(String[] args) throws Exception {
  final CollectOptions opts = new CollectOptions();
  final ScannerOpts scanOpts = new ScannerOpts();
  opts.parseArgs(CollectTabletStats.class.getName(), args, scanOpts);
  String[] columnsTmp = new String[] {};
  if (opts.columns != null)
    columnsTmp = opts.columns.split(",");
  final String[] columns = columnsTmp;
  final VolumeManager fs = VolumeManagerImpl.get();
  Instance instance = opts.getInstance();
  final ServerConfigurationFactory sconf = new ServerConfigurationFactory(instance);
  Credentials creds = new Credentials(opts.getPrincipal(), opts.getToken());
  ClientContext context = new ClientContext(instance, creds, sconf.getSystemConfiguration());
  Table.ID tableId = Tables.getTableId(instance, opts.getTableName());
  if (tableId == null) {
    log.error("Unable to find table named {}", opts.getTableName());
    System.exit(-1);
  }
  TreeMap<KeyExtent, String> tabletLocations = new TreeMap<>();
  List<KeyExtent> candidates = findTablets(context, !opts.selectFarTablets, opts.getTableName(), tabletLocations);
  if (candidates.size() < opts.numThreads) {
    System.err.println("ERROR : Unable to find " + opts.numThreads + " " + (opts.selectFarTablets ? "far" : "local") + " tablets");
    System.exit(-1);
  }
  List<KeyExtent> tabletsToTest = selectRandomTablets(opts.numThreads, candidates);
  Map<KeyExtent, List<FileRef>> tabletFiles = new HashMap<>();
  for (KeyExtent ke : tabletsToTest) {
    List<FileRef> files = getTabletFiles(context, ke);
    tabletFiles.put(ke, files);
  }
  System.out.println();
  System.out.println("run location : " + InetAddress.getLocalHost().getHostName() + "/" + InetAddress.getLocalHost().getHostAddress());
  System.out.println("num threads : " + opts.numThreads);
  System.out.println("table : " + opts.getTableName());
  System.out.println("table id : " + tableId);
  for (KeyExtent ke : tabletsToTest) {
    System.out.println("\t *** Information about tablet " + ke.getUUID() + " *** ");
    System.out.println("\t\t# files in tablet : " + tabletFiles.get(ke).size());
    System.out.println("\t\ttablet location : " + tabletLocations.get(ke));
    reportHdfsBlockLocations(tabletFiles.get(ke));
  }
System.out.println("%n*** RUNNING TEST ***%n");
  ExecutorService threadPool = Executors.newFixedThreadPool(opts.numThreads);
  for (int i = 0; i < opts.iterations; i++) {
    ArrayList<Test> tests = new ArrayList<>();
    for (final KeyExtent ke : tabletsToTest) {
      final List<FileRef> files = tabletFiles.get(ke);
      Test test = new Test(ke) {
        @Override
        public int runTest() throws Exception {
          return readFiles(fs, sconf.getSystemConfiguration(), files, ke, columns);
        }
      };
      tests.add(test);
    }
    runTest("read files", tests, opts.numThreads, threadPool);
  }
  for (int i = 0; i < opts.iterations; i++) {
    ArrayList<Test> tests = new ArrayList<>();
    for (final KeyExtent ke : tabletsToTest) {
      final List<FileRef> files = tabletFiles.get(ke);
      Test test = new Test(ke) {
        @Override
        public int runTest() throws Exception {
          return readFilesUsingIterStack(fs, sconf, files, opts.auths, ke, columns, false);
        }
      };
      tests.add(test);
    }
    runTest("read tablet files w/ system iter stack", tests, opts.numThreads, threadPool);
  }
  for (int i = 0; i < opts.iterations; i++) {
    ArrayList<Test> tests = new ArrayList<>();
    for (final KeyExtent ke : tabletsToTest) {
      final List<FileRef> files = tabletFiles.get(ke);
      Test test = new Test(ke) {
        @Override
        public int runTest() throws Exception {
          return readFilesUsingIterStack(fs, sconf, files, opts.auths, ke, columns, true);
        }
      };
      tests.add(test);
    }
    runTest("read tablet files w/ table iter stack", tests, opts.numThreads, threadPool);
  }
  for (int i = 0; i < opts.iterations; i++) {
    ArrayList<Test> tests = new ArrayList<>();
    final Connector conn = opts.getConnector();
    for (final KeyExtent ke : tabletsToTest) {
      Test test = new Test(ke) {
        @Override
        public int runTest() throws Exception {
          return scanTablet(conn, opts.getTableName(), opts.auths, scanOpts.scanBatchSize, ke.getPrevEndRow(), ke.getEndRow(), columns);
        }
      };
      tests.add(test);
    }
    runTest("read tablet data through accumulo", tests, opts.numThreads, threadPool);
  }
  for (final KeyExtent ke : tabletsToTest) {
    final Connector conn = opts.getConnector();
    threadPool.submit(new Runnable() {
      @Override
      public void run() {
        try {
          calcTabletStats(conn, opts.getTableName(), opts.auths, scanOpts.scanBatchSize, ke, columns);
        } catch (Exception e) {
          log.error("Failed to calculate tablet stats.", e);
        }
      }
    });
  }
  threadPool.shutdown();
}
use of org.apache.accumulo.core.client.impl.ClientContext in project accumulo by apache.
The class InputConfigurator, method getTabletLocator().
/**
 * Initializes an Accumulo {@link TabletLocator} based on the configuration.
 *
 * @param implementingClass
 *          the class whose name will be used as a prefix for the property configuration key
 * @param conf
 *          the Hadoop configuration object to configure
 * @param tableId
 *          The table id for which to initialize the {@link TabletLocator}
 * @return an Accumulo tablet locator
 * @throws TableNotFoundException
 *           if the table name set on the configuration doesn't exist
 * @since 1.6.0
 */
public static TabletLocator getTabletLocator(Class<?> implementingClass, Configuration conf, Table.ID tableId) throws TableNotFoundException {
  String instanceType = conf.get(enumToConfKey(implementingClass, InstanceOpts.TYPE));
  if ("MockInstance".equals(instanceType))
    return DeprecationUtil.makeMockLocator();
  Instance instance = getInstance(implementingClass, conf);
  ClientConfiguration clientConf = getClientConfiguration(implementingClass, conf);
  ClientContext context = new ClientContext(instance,
      new Credentials(getPrincipal(implementingClass, conf), getAuthenticationToken(implementingClass, conf)), clientConf);
  return TabletLocator.getLocator(context, tableId);
}
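For callers outside a Hadoop job configuration, a locator can be obtained directly once a ClientContext exists. The following is a minimal sketch under the assumption that an instance name, ZooKeeper hosts, a principal/token pair, and the target table id are already available; all variable names here are illustrative, not part of InputConfigurator.

// Sketch only: instanceName, zooKeepers, principal, token and tableId are assumed to exist.
ClientConfiguration clientConf = ClientConfiguration.loadDefault()
    .withInstance(instanceName).withZkHosts(zooKeepers);
Instance instance = new ZooKeeperInstance(clientConf);
ClientContext context = new ClientContext(instance, new Credentials(principal, token), clientConf);
TabletLocator locator = TabletLocator.getLocator(context, tableId);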