Use of org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException in the Apache Accumulo project:
the balance method of the ChaoticLoadBalancer class.
/**
 * Chaotically rebalances tablets: each online tablet is offered a randomly chosen
 * destination from the set of servers still considered under capacity. (Given the class
 * name, the churn appears intentional — presumably for testing; confirm.)
 *
 * @param current map of live tablet servers to their latest status reports
 * @param migrations tablets currently migrating; no new migrations are created while any are outstanding
 * @param migrationsOut output list to which newly planned migrations are appended
 * @return always 100 — presumably milliseconds until the next balance pass; confirm against the balancer contract
 */
@Override
public long balance(SortedMap<TServerInstance, TabletServerStatus> current, Set<KeyExtent> migrations, List<TabletMigration> migrationsOut) {
  // Running tablet count per server, and the servers still treated as below capacity.
  Map<TServerInstance, Long> numTablets = new HashMap<>();
  List<TServerInstance> underCapacityTServer = new ArrayList<>();
  // Don't plan new migrations while previous ones are outstanding; record the unmet
  // constraint and ask to be called back.
  if (!migrations.isEmpty()) {
    outstandingMigrations.migrations = migrations;
    constraintNotMet(outstandingMigrations);
    return 100;
  }
  resetBalancerErrors();
  // Only occasionally (1 in 4 passes) allow the metadata table's tablets to move.
  boolean moveMetadata = r.nextInt(4) == 0;
  long totalTablets = 0;
  // First pass: tally per-server tablet counts from the status reports.
  for (Entry<TServerInstance, TabletServerStatus> e : current.entrySet()) {
    long tabletCount = 0;
    for (TableInfo ti : e.getValue().getTableMap().values()) {
      tabletCount += ti.tablets;
    }
    numTablets.put(e.getKey(), tabletCount);
    underCapacityTServer.add(e.getKey());
    totalTablets += tabletCount;
  }
  // totalTablets is fuzzy due to asynchronicity of the stats
  // *1.2 to handle fuzziness, and prevent locking for 'perfect' balancing scenarios
  long avg = (long) Math.ceil(((double) totalTablets) / current.size() * 1.2);
  // Second pass: for each online tablet, pick a random under-capacity destination and
  // emit a migration, keeping the capacity bookkeeping up to date as we go.
  for (Entry<TServerInstance, TabletServerStatus> e : current.entrySet()) {
    for (String tableId : e.getValue().getTableMap().keySet()) {
      Table.ID id = Table.ID.of(tableId);
      if (!moveMetadata && MetadataTable.ID.equals(id))
        continue;
      try {
        for (TabletStats ts : getOnlineTabletsForTable(e.getKey(), id)) {
          KeyExtent ke = new KeyExtent(ts.extent);
          int index = r.nextInt(underCapacityTServer.size());
          TServerInstance dest = underCapacityTServer.get(index);
          // A server may randomly pick itself; skip, leaving the tablet in place.
          if (dest.equals(e.getKey()))
            continue;
          migrationsOut.add(new TabletMigration(ke, e.getKey(), dest));
          // NOTE(review): Map.put returns the PREVIOUS value, so both comparisons below
          // test the pre-update count against avg, not the post-update count — presumably
          // acceptable fuzziness for a chaotic balancer, but confirm it is intentional.
          if (numTablets.put(dest, numTablets.get(dest) + 1) > avg)
            underCapacityTServer.remove(index);
          if (numTablets.put(e.getKey(), numTablets.get(e.getKey()) - 1) <= avg && !underCapacityTServer.contains(e.getKey()))
            underCapacityTServer.add(e.getKey());
          // We can get some craziness with only 1 tserver, so lets make sure there's always an option!
          if (underCapacityTServer.isEmpty())
            underCapacityTServer.addAll(numTablets.keySet());
        }
      } catch (ThriftSecurityException e1) {
        // Shouldn't happen, but carry on if it does
        log.debug("Encountered ThriftSecurityException. This should not happen. Carrying on anyway.", e1);
      } catch (TException e1) {
        // Shouldn't happen, but carry on if it does
        log.debug("Encountered TException. This should not happen. Carrying on anyway.", e1);
      }
    }
  }
  return 100;
}
Use of org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException in the Apache Accumulo project:
the checkTableClass method of the ClientServiceHandler class.
/**
 * Checks that {@code className} can be loaded by the table's classloader (honoring the
 * table's {@code table.classpath.context} setting when set), is assignable to
 * {@code interfaceMatch}, and can be instantiated with a no-arg constructor.
 *
 * @param tinfo trace info (unused here beyond the RPC contract)
 * @param credentials caller credentials; authenticated before any class loading happens
 * @param tableName table whose classpath context should be used
 * @param className fully qualified name of the class to check
 * @param interfaceMatch fully qualified name of the interface/class it must implement
 * @return true if the class loads, matches, and instantiates; false on any failure
 *         (the failure is logged, never propagated)
 */
@Override
public boolean checkTableClass(TInfo tinfo, TCredentials credentials, String tableName, String className, String interfaceMatch) throws TException, ThriftTableOperationException, ThriftSecurityException {
  security.authenticateUser(credentials, credentials);
  Table.ID tableId = checkTableId(instance, tableName, null);
  ClassLoader loader = getClass().getClassLoader();
  Class<?> shouldMatch;
  try {
    shouldMatch = loader.loadClass(interfaceMatch);
    AccumuloConfiguration conf = context.getServerConfigurationFactory().getTableConfiguration(tableId);
    // Renamed from "context": the original local shadowed the class field `context`
    // read two lines above, which was confusing and error-prone.
    String classpathContext = conf.get(Property.TABLE_CLASSPATH);
    ClassLoader currentLoader;
    if (classpathContext != null && !classpathContext.isEmpty()) {
      currentLoader = AccumuloVFSClassLoader.getContextManager().getClassLoader(classpathContext);
    } else {
      currentLoader = AccumuloVFSClassLoader.getClassLoader();
    }
    Class<?> test = currentLoader.loadClass(className).asSubclass(shouldMatch);
    // Replaces the deprecated Class.newInstance(), which silently propagates checked
    // exceptions thrown by the constructor. Any instantiation failure still lands in
    // the catch block below and yields false, as before.
    test.getDeclaredConstructor().newInstance();
    return true;
  } catch (Exception e) {
    log.warn("Error checking object types", e);
    return false;
  }
}
Use of org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException in the Apache Accumulo project:
the test method of the TestThrift1474 class.
/**
 * Starts a TThreadPoolServer on an ephemeral port, exercises success/failure RPCs, and
 * verifies that a ThriftSecurityException raised by the handler propagates to the client.
 *
 * Fix: the client TTransport was never closed, and server.stop()/thread.join() were
 * skipped whenever an assertion failed — leaking a socket and a server thread into
 * subsequent tests. Cleanup now runs in a finally block.
 */
@Test
public void test() throws IOException, TException, InterruptedException {
  // Port 0 lets the OS pick a free ephemeral port; read it back for the client.
  TServerSocket serverTransport = new TServerSocket(0);
  serverTransport.listen();
  int port = serverTransport.getServerSocket().getLocalPort();
  TestServer handler = new TestServer();
  ThriftTest.Processor<ThriftTest.Iface> processor = new ThriftTest.Processor<>(handler);
  TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverTransport);
  args.stopTimeoutVal = 10;
  args.stopTimeoutUnit = TimeUnit.MILLISECONDS;
  final TServer server = new TThreadPoolServer(args.processor(processor));
  Thread thread = new Thread() {
    @Override
    public void run() {
      server.serve();
    }
  };
  thread.start();
  // Wait until the server is actually accepting connections before dialing it.
  while (!server.isServing()) {
    sleepUninterruptibly(10, TimeUnit.MILLISECONDS);
  }
  TTransport transport = new TSocket("localhost", port);
  transport.open();
  try {
    TProtocol protocol = new TBinaryProtocol(transport);
    ThriftTest.Client client = new ThriftTest.Client(protocol);
    assertTrue(client.success());
    assertFalse(client.fails());
    try {
      client.throwsError();
      fail("no exception thrown");
    } catch (ThriftSecurityException ex) {
      // expected
    }
  } finally {
    // Always release the socket and shut the server down, even on assertion failure.
    transport.close();
    server.stop();
    thread.join();
  }
}
Use of org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException in the Apache Accumulo project:
the call method of the CleanUp class.
/**
 * Final cleanup step of a table delete: removes the table's metadata entries, problem
 * reports, on-disk directories (only when no other table references its files), ZooKeeper
 * state, and permissions, then releases the fate reservations. Each sub-step catches and
 * logs its own failures so the cleanup proceeds best-effort.
 *
 * @param tid fate transaction id used to release the table/namespace reservations
 * @param master master context providing connector, filesystem, and configuration
 * @return null — no follow-up Repo step
 */
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  master.clearMigrations(tableId);
  // Count references to this table's files from OTHER tables' metadata rows. A failed
  // scan sets -1 ("unknown"), which conservatively blocks directory deletion below.
  int refCount = 0;
  try {
    // look for other tables that references this table's files
    Connector conn = master.getConnector();
    try (BatchScanner bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8)) {
      // Scan the tablets section EXCLUDING this table's own range: everything before it
      // plus everything after it.
      Range allTables = MetadataSchema.TabletsSection.getRange();
      Range tableRange = MetadataSchema.TabletsSection.getRange(tableId);
      Range beforeTable = new Range(allTables.getStartKey(), true, tableRange.getStartKey(), false);
      Range afterTable = new Range(tableRange.getEndKey(), false, allTables.getEndKey(), true);
      bs.setRanges(Arrays.asList(beforeTable, afterTable));
      bs.fetchColumnFamily(DataFileColumnFamily.NAME);
      // Server-side grep narrows results to file entries containing "/<tableId>/";
      // the client-side contains() check below re-verifies against the qualifier.
      IteratorSetting cfg = new IteratorSetting(40, "grep", GrepIterator.class);
      GrepIterator.setTerm(cfg, "/" + tableId + "/");
      bs.addScanIterator(cfg);
      for (Entry<Key, Value> entry : bs) {
        if (entry.getKey().getColumnQualifier().toString().contains("/" + tableId + "/")) {
          refCount++;
        }
      }
    }
  } catch (Exception e) {
    refCount = -1;
    log.error("Failed to scan " + MetadataTable.NAME + " looking for references to deleted table " + tableId, e);
  }
  // remove metadata table entries
  try {
    // Intentionally do not pass master lock. If master loses lock, this operation may complete before master can kill itself.
    // If the master lock passed to deleteTable, it is possible that the delete mutations will be dropped. If the delete operations
    // are dropped and the operation completes, then the deletes will not be repeated.
    MetadataTableUtil.deleteTable(tableId, refCount != 0, master, null);
  } catch (Exception e) {
    log.error("error deleting " + tableId + " from metadata table", e);
  }
  // remove any problem reports the table may have
  try {
    ProblemReports.getInstance(master).deleteProblemReports(tableId);
  } catch (Exception e) {
    log.error("Failed to delete problem reports for table " + tableId, e);
  }
  // Only remove (or archive) directories when refCount is exactly 0: a positive count
  // means other tables still reference these files, -1 means we could not tell.
  if (refCount == 0) {
    final AccumuloConfiguration conf = master.getConfiguration();
    boolean archiveFiles = conf.getBoolean(Property.GC_FILE_ARCHIVE);
    // delete the map files
    try {
      VolumeManager fs = master.getFileSystem();
      for (String dir : ServerConstants.getTablesDirs()) {
        if (archiveFiles) {
          archiveFile(fs, dir, tableId);
        } else {
          fs.deleteRecursively(new Path(dir, tableId.canonicalID()));
        }
      }
    } catch (IOException e) {
      log.error("Unable to remove deleted table directory", e);
    } catch (IllegalArgumentException exception) {
      if (exception.getCause() instanceof UnknownHostException) {
        /* Thrown if HDFS encounters a DNS problem in some edge cases */
        log.error("Unable to remove deleted table directory", exception);
      } else {
        throw exception;
      }
    }
  }
  // remove table from zookeeper
  try {
    TableManager.getInstance().removeTable(tableId);
    Tables.clearCache(master.getInstance());
  } catch (Exception e) {
    log.error("Failed to find table id in zookeeper", e);
  }
  // remove any permissions associated with this table
  try {
    AuditedSecurityOperation.getInstance(master).deleteTable(master.rpcCreds(), tableId, namespaceId);
  } catch (ThriftSecurityException e) {
    log.error("{}", e.getMessage(), e);
  }
  // Release the reservations taken when the delete operation started.
  Utils.unreserveTable(tableId, tid, true);
  Utils.unreserveNamespace(namespaceId, tid, false);
  LoggerFactory.getLogger(CleanUp.class).debug("Deleted table " + tableId);
  return null;
}
Use of org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException in the Apache Accumulo project:
the addSplits method of the TableOperationsImpl class.
/**
 * Adds each split point to the table, retrying per split until the hosting tablet server
 * accepts it. Transient conditions (unknown location, transport errors, tablet not being
 * served) invalidate the relevant cache entry and retry after a short sleep; security and
 * server application errors are rethrown to the caller.
 *
 * @param tableName table name, used in error reporting
 * @param partitionKeys split points to add, processed in sorted order
 * @param tableId id of the table being split
 * @throws TableNotFoundException if the table disappears mid-operation
 * @throws AccumuloSecurityException if the server rejects the caller's credentials
 * @throws AccumuloServerException on a server-side application error
 * @throws AccumuloException declared in the signature; not visibly thrown in this body
 */
private void addSplits(String tableName, SortedSet<Text> partitionKeys, Table.ID tableId) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, AccumuloServerException {
  TabletLocator tabLocator = TabletLocator.getLocator(context, tableId);
  for (Text split : partitionKeys) {
    boolean successful = false;
    int attempt = 0;
    long locationFailures = 0;
    while (!successful) {
      // Back off briefly on every retry after the first attempt.
      if (attempt > 0)
        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
      attempt++;
      TabletLocation tl = tabLocator.locateTablet(context, split, false, false);
      if (tl == null) {
        // No known location: distinguish "table gone" / "table offline" (fatal) from a
        // transient cache miss (retry).
        if (!Tables.exists(context.getInstance(), tableId))
          throw new TableNotFoundException(tableId.canonicalID(), tableName, null);
        else if (Tables.getTableState(context.getInstance(), tableId) == TableState.OFFLINE)
          throw new TableOfflineException(context.getInstance(), tableId.canonicalID());
        continue;
      }
      HostAndPort address = HostAndPort.fromString(tl.tablet_location);
      try {
        TabletClientService.Client client = ThriftUtil.getTServerClient(address, context);
        try {
          OpTimer timer = null;
          if (log.isTraceEnabled()) {
            log.trace("tid={} Splitting tablet {} on {} at {}", Thread.currentThread().getId(), tl.tablet_extent, address, split);
            timer = new OpTimer().start();
          }
          client.splitTablet(Tracer.traceInfo(), context.rpcCreds(), tl.tablet_extent.toThrift(), TextUtil.getByteBuffer(split));
          // just split it, might as well invalidate it in the cache
          tabLocator.invalidateCache(tl.tablet_extent);
          if (timer != null) {
            timer.stop();
            log.trace("Split tablet in {}", String.format("%.3f secs", timer.scale(TimeUnit.SECONDS)));
          }
        } finally {
          // Always return the pooled client, even when the RPC throws.
          ThriftUtil.returnClient(client);
        }
      } catch (TApplicationException tae) {
        throw new AccumuloServerException(address.toString(), tae);
      } catch (TTransportException e) {
        // Transport problem: forget this server's cached location and retry.
        tabLocator.invalidateCache(context.getInstance(), tl.tablet_location);
        continue;
      } catch (ThriftSecurityException e) {
        // The table may have been deleted out from under us; re-check existence before
        // reporting this as a security failure.
        Tables.clearCache(context.getInstance());
        if (!Tables.exists(context.getInstance(), tableId))
          throw new TableNotFoundException(tableId.canonicalID(), tableName, null);
        throw new AccumuloSecurityException(e.user, e.code, e);
      } catch (NotServingTabletException e) {
        // Do not silently spin when we repeatedly fail to get the location for a tablet
        locationFailures++;
        if (5 == locationFailures || 0 == locationFailures % 50) {
          log.warn("Having difficulty locating hosting tabletserver for split {} on table {}. Seen {} failures.", split, tableName, locationFailures);
        }
        tabLocator.invalidateCache(tl.tablet_extent);
        continue;
      } catch (TException e) {
        // Any other Thrift failure: invalidate the cached location and retry.
        tabLocator.invalidateCache(context.getInstance(), tl.tablet_location);
        continue;
      }
      successful = true;
    }
  }
}
Aggregations