Use of org.apache.accumulo.server.fs.VolumeChooserEnvironment in project accumulo by apache.
The call method of the class ChooseDir.
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  // Constants.DEFAULT_TABLET_LOCATION has a leading slash prepended to it, so we don't need to add one here
  VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(tableInfo.tableId);
  tableInfo.dir = master.getFileSystem().choose(chooserEnv, ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR
      + Path.SEPARATOR + tableInfo.tableId + Constants.DEFAULT_TABLET_LOCATION;
  return new CreateDir(tableInfo);
}
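Each of the examples on this page follows the same basic pattern: build a VolumeChooserEnvironment (scoped either to a table ID or to a server scope such as ChooserScope.INIT), hand it to VolumeManager.choose() along with the candidate base URIs, and append the table-relative path to the volume it returns. A minimal sketch of that pattern, assuming a VolumeManager named fs is in scope; the helper name chooseDefaultTabletDir is hypothetical:
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.impl.Table;
import org.apache.accumulo.server.ServerConstants;
import org.apache.accumulo.server.fs.VolumeChooserEnvironment;
import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.hadoop.fs.Path;

static String chooseDefaultTabletDir(VolumeManager fs, Table.ID tableId) {
  // Scope the choice to the table so per-table volume chooser configuration applies
  VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(tableId);
  // choose() picks one of the configured base URIs, e.g. "hdfs://namenode/accumulo"
  String volume = fs.choose(chooserEnv, ServerConstants.getBaseUris());
  // DEFAULT_TABLET_LOCATION already carries a leading slash, so no extra separator is needed
  return volume + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + tableId + Constants.DEFAULT_TABLET_LOCATION;
}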
Use of org.apache.accumulo.server.fs.VolumeChooserEnvironment in project accumulo by apache.
The initialize method of the class Initialize.
private boolean initialize(Opts opts, String instanceNamePath, VolumeManager fs, String rootUser) {
  UUID uuid = UUID.randomUUID();
  // the actual disk locations of the root table and tablets
  String[] configuredVolumes = VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance());
  VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(ChooserScope.INIT);
  final String rootTabletDir = new Path(fs.choose(chooserEnv, configuredVolumes) + Path.SEPARATOR + ServerConstants.TABLE_DIR
      + Path.SEPARATOR + RootTable.ID + RootTable.ROOT_TABLET_LOCATION).toString();
  try {
    initZooKeeper(opts, uuid.toString(), instanceNamePath, rootTabletDir);
  } catch (Exception e) {
    log.error("FATAL: Failed to initialize zookeeper", e);
    return false;
  }
  try {
    initFileSystem(opts, fs, uuid, rootTabletDir);
  } catch (Exception e) {
    log.error("FATAL: Failed to initialize filesystem", e);
    if (SiteConfiguration.getInstance().get(Property.INSTANCE_VOLUMES).trim().equals("")) {
      Configuration fsConf = CachedConfiguration.getInstance();
      final String defaultFsUri = "file:///";
      String fsDefaultName = fsConf.get("fs.default.name", defaultFsUri);
      String fsDefaultFS = fsConf.get("fs.defaultFS", defaultFsUri);
      // Try to determine when we couldn't find an appropriate core-site.xml on the classpath
      if (defaultFsUri.equals(fsDefaultName) && defaultFsUri.equals(fsDefaultFS)) {
        log.error("FATAL: Default filesystem value ('fs.defaultFS' or 'fs.default.name') of '{}' was found in the Hadoop configuration", defaultFsUri);
        log.error("FATAL: Please ensure that the Hadoop core-site.xml is on the classpath using 'general.classpaths' in accumulo-site.xml");
      }
    }
    return false;
  }
  final Instance instance = HdfsZooInstance.getInstance();
  final ServerConfigurationFactory confFactory = new ServerConfigurationFactory(instance);
  try {
    final SiteConfiguration siteConf = confFactory.getSiteConfiguration();
    if (siteConf.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
      final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
      // We don't have any valid creds to talk to HDFS
      if (!ugi.hasKerberosCredentials()) {
        // The user did not log in themselves, so fall back to the credentials in accumulo-site.xml that the servers will use
        final String accumuloKeytab = siteConf.get(Property.GENERAL_KERBEROS_KEYTAB);
        final String accumuloPrincipal = siteConf.get(Property.GENERAL_KERBEROS_PRINCIPAL);
        // Fail if the site configuration doesn't contain appropriate credentials to log in as the servers
        if (StringUtils.isBlank(accumuloKeytab) || StringUtils.isBlank(accumuloPrincipal)) {
          log.error("FATAL: No Kerberos credentials provided, and Accumulo is not properly configured for server login");
          return false;
        }
        log.info("Logging in as {} with {}", accumuloPrincipal, accumuloKeytab);
        // Log in using the keytab as the 'accumulo' user
        UserGroupInformation.loginUserFromKeytab(accumuloPrincipal, accumuloKeytab);
      }
    }
  } catch (IOException e) {
    log.error("FATAL: Failed to get the Kerberos user", e);
    return false;
  }
  try {
    AccumuloServerContext context = new AccumuloServerContext(instance, confFactory);
    initSecurity(context, opts, uuid.toString(), rootUser);
  } catch (Exception e) {
    log.error("FATAL: Failed to initialize security", e);
    return false;
  }
  if (opts.uploadAccumuloSite) {
    try {
      log.info("Uploading properties in accumulo-site.xml to Zookeeper. Properties that cannot be set in Zookeeper will be skipped:");
      Map<String, String> entries = new TreeMap<>();
      SiteConfiguration.getInstance().getProperties(entries, x -> true, false);
      for (Map.Entry<String, String> entry : entries.entrySet()) {
        String key = entry.getKey();
        String value = entry.getValue();
        if (Property.isValidZooPropertyKey(key)) {
          SystemPropUtil.setSystemProperty(key, value);
          log.info("Uploaded - {} = {}", key, Property.isSensitive(key) ? "<hidden>" : value);
        } else {
          log.info("Skipped - {} = {}", key, Property.isSensitive(key) ? "<hidden>" : value);
        }
      }
    } catch (Exception e) {
      log.error("FATAL: Failed to upload accumulo-site.xml to Zookeeper", e);
      return false;
    }
  }
  return true;
}
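Initialization is the one case on this page where no table exists yet, so the chooser environment is built from a server scope instead of a table ID. A short sketch of that variant, assuming the same fs variable as above (in this snapshot the scope enum is VolumeChooserEnvironment.ChooserScope):
// INIT scope lets the configured volume chooser treat the initial root-tablet placement specially
VolumeChooserEnvironment initEnv = new VolumeChooserEnvironment(ChooserScope.INIT);
String rootVolume = fs.choose(initEnv, VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance()));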
Use of org.apache.accumulo.server.fs.VolumeChooserEnvironment in project accumulo by apache.
The createReplicationTable method of the class MetadataTableUtil.
/**
 * During an upgrade from 1.6 to 1.7, we need to add the replication table.
 */
public static void createReplicationTable(ClientContext context) throws IOException {
  VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(ReplicationTable.ID);
  String dir = VolumeManagerImpl.get().choose(chooserEnv, ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR
      + Path.SEPARATOR + ReplicationTable.ID + Constants.DEFAULT_TABLET_LOCATION;
  Mutation m = new Mutation(new Text(KeyExtent.getMetadataEntry(ReplicationTable.ID, null)));
  m.put(DIRECTORY_COLUMN.getColumnFamily(), DIRECTORY_COLUMN.getColumnQualifier(), 0, new Value(dir.getBytes(UTF_8)));
  m.put(TIME_COLUMN.getColumnFamily(), TIME_COLUMN.getColumnQualifier(), 0, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(UTF_8)));
  m.put(PREV_ROW_COLUMN.getColumnFamily(), PREV_ROW_COLUMN.getColumnQualifier(), 0, KeyExtent.encodePrevEndRow(null));
  update(getMetadataTable(context), null, m);
}
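The three puts above seed the single-tablet entry for the replication table in the metadata table: its directory, its time type, and a null previous end row. For reference, KeyExtent.getMetadataEntry(tableId, null) yields the metadata row of a tablet whose end row is null; a small illustration (the rendered value assumes ReplicationTable.ID is the "+rep" ID used in this snapshot):
Text row = KeyExtent.getMetadataEntry(ReplicationTable.ID, null);
// row renders as "+rep<" -- the table ID followed by '<' for the (null end row) default tablet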
Use of org.apache.accumulo.server.fs.VolumeChooserEnvironment in project accumulo by apache.
The cloneTable method of the class MetadataTableUtil.
public static void cloneTable(ClientContext context, Table.ID srcTableId, Table.ID tableId, VolumeManager volumeManager) throws Exception {
  Connector conn = context.getConnector();
  try (BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig())) {
    while (true) {
      try {
        initializeClone(MetadataTable.NAME, srcTableId, tableId, conn, bw);
        while (true) {
          int rewrites = checkClone(MetadataTable.NAME, srcTableId, tableId, conn, bw);
          if (rewrites == 0)
            break;
        }
        bw.flush();
        break;
      } catch (TabletIterator.TabletDeletedException tde) {
        // tablets were merged in the src table
        bw.flush();
        // delete what we have cloned and try again
        deleteTable(tableId, false, context, null);
        log.debug("Tablets merged in table {} while attempting to clone, trying again", srcTableId);
        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
      }
    }
    // delete the clone markers and create directory entries
    Scanner mscanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    mscanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
    mscanner.fetchColumnFamily(ClonedColumnFamily.NAME);
    int dirCount = 0;
    for (Entry<Key, Value> entry : mscanner) {
      Key k = entry.getKey();
      Mutation m = new Mutation(k.getRow());
      m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
      VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(tableId);
      String dir = volumeManager.choose(chooserEnv, ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + tableId
          + Path.SEPARATOR + new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES), UTF_8);
      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(dir.getBytes(UTF_8)));
      bw.addMutation(m);
    }
  }
}
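The clone directory names above come from FastFormat.toZeroPaddedString, which prints dirCount as zero-padded hex behind a prefix. A small illustration, assuming Constants.CLONE_PREFIX_BYTES is the byte form of the "c-" clone prefix:
new String(FastFormat.toZeroPaddedString(0, 8, 16, Constants.CLONE_PREFIX_BYTES), UTF_8);  // "c-00000000"
new String(FastFormat.toZeroPaddedString(26, 8, 16, Constants.CLONE_PREFIX_BYTES), UTF_8); // "c-0000001a"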
Use of org.apache.accumulo.server.fs.VolumeChooserEnvironment in project accumulo by apache.
The randomize method of the class RandomizeVolumes.
public static int randomize(Connector c, String tableName) throws IOException, AccumuloSecurityException, AccumuloException, TableNotFoundException {
  final VolumeManager vm = VolumeManagerImpl.get();
  if (vm.getVolumes().size() < 2) {
    log.error("There are not enough volumes configured");
    return 1;
  }
  String tblStr = c.tableOperations().tableIdMap().get(tableName);
  if (null == tblStr) {
    log.error("Could not determine the table ID for table {}", tableName);
    return 2;
  }
  Table.ID tableId = Table.ID.of(tblStr);
  TableState tableState = TableManager.getInstance().getTableState(tableId);
  if (TableState.OFFLINE != tableState) {
    log.info("Taking {} offline", tableName);
    c.tableOperations().offline(tableName, true);
    log.info("{} offline", tableName);
  }
  SimpleThreadPool pool = new SimpleThreadPool(50, "directory maker");
  log.info("Rewriting entries for {}", tableName);
  Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
  DIRECTORY_COLUMN.fetch(scanner);
  scanner.setRange(TabletsSection.getRange(tableId));
  BatchWriter writer = c.createBatchWriter(MetadataTable.NAME, null);
  int count = 0;
  for (Entry<Key, Value> entry : scanner) {
    String oldLocation = entry.getValue().toString();
    String directory;
    if (oldLocation.contains(":")) {
      // Absolute URI: the directory name is the last path component, preceded by the table ID
      String[] parts = oldLocation.split(Path.SEPARATOR);
      Table.ID tableIdEntry = Table.ID.of(parts[parts.length - 2]);
      if (!tableIdEntry.equals(tableId)) {
        log.error("Unexpected table id found: {}, expected {}; skipping", tableIdEntry, tableId);
        continue;
      }
      directory = parts[parts.length - 1];
    } else {
      // Relative path: strip the leading separator
      directory = oldLocation.substring(Path.SEPARATOR.length());
    }
    Key key = entry.getKey();
    Mutation m = new Mutation(key.getRow());
    VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(tableId);
    final String newLocation = vm.choose(chooserEnv, ServerConstants.getBaseUris()) + Path.SEPARATOR + ServerConstants.TABLE_DIR
        + Path.SEPARATOR + tableId + Path.SEPARATOR + directory;
    m.put(key.getColumnFamily(), key.getColumnQualifier(), new Value(newLocation.getBytes(UTF_8)));
    if (log.isTraceEnabled()) {
      log.trace("Replacing {} with {}", oldLocation, newLocation);
    }
    writer.addMutation(m);
    pool.submit(new Runnable() {
      @Override
      public void run() {
        try {
          vm.mkdirs(new Path(newLocation));
        } catch (IOException ex) {
          // best effort; ignore failures creating the new directory
        }
      }
    });
    count++;
  }
  writer.close();
  pool.shutdown();
  while (!pool.isTerminated()) {
    log.trace("Waiting for mkdir() calls to finish");
    try {
      pool.awaitTermination(5, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      break;
    }
  }
  log.info("Updated {} entries for table {}", count, tableName);
  if (TableState.OFFLINE != tableState) {
    c.tableOperations().online(tableName, true);
    log.info("table {} back online", tableName);
  }
  return 0;
}
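A possible direct invocation, assuming an existing Connector named conn; the table name "mytable" is illustrative:
// Returns 0 on success, 1 if fewer than two volumes are configured, 2 if the table ID cannot be resolved
int rc = RandomizeVolumes.randomize(conn, "mytable");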