Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.
The class SplitLarge, method main.
public static void main(String[] args) throws Exception {
  Configuration conf = CachedConfiguration.getInstance();
  FileSystem fs = FileSystem.get(conf);
  Opts opts = new Opts();
  opts.parseArgs(SplitLarge.class.getName(), args);
  // Split each input RFile into a *_small.rf and a *_large.rf, routing each
  // key/value pair by its size relative to opts.maxSize.
  for (String file : opts.files) {
    AccumuloConfiguration aconf = DefaultConfiguration.getInstance();
    Path path = new Path(file);
    CachableBlockFile.Reader rdr = new CachableBlockFile.Reader(fs, path, conf, null, null, aconf);
    try (Reader iter = new RFile.Reader(rdr)) {
      if (!file.endsWith(".rf")) {
        throw new IllegalArgumentException("File must end with .rf");
      }
      String smallName = file.substring(0, file.length() - 3) + "_small.rf";
      String largeName = file.substring(0, file.length() - 3) + "_large.rf";
      // Use the configured table block size for the output files.
      int blockSize = (int) aconf.getAsBytes(Property.TABLE_FILE_BLOCK_SIZE);
      try (Writer small = new RFile.Writer(new CachableBlockFile.Writer(fs, new Path(smallName), "gz", null, conf, aconf), blockSize);
          Writer large = new RFile.Writer(new CachableBlockFile.Writer(fs, new Path(largeName), "gz", null, conf, aconf), blockSize)) {
        small.startDefaultLocalityGroup();
        large.startDefaultLocalityGroup();
        iter.seek(new Range(), new ArrayList<>(), false);
        while (iter.hasTop()) {
          Key key = iter.getTopKey();
          Value value = iter.getTopValue();
          if (key.getSize() + value.getSize() < opts.maxSize) {
            small.append(key, value);
          } else {
            large.append(key, value);
          }
          iter.next();
        }
      }
    }
  }
}
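For context, a minimal sketch of the configuration lookup used above: DefaultConfiguration.getInstance() returns the shipped defaults, and getAsBytes parses size strings such as "100K" or "1G" into a byte count. The wrapper class below is illustrative and not part of SplitLarge.

import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.conf.Property;

public class BlockSizeExample {
  public static void main(String[] args) {
    // Resolve the default table block size the same way SplitLarge does.
    AccumuloConfiguration aconf = DefaultConfiguration.getInstance();
    long blockSize = aconf.getAsBytes(Property.TABLE_FILE_BLOCK_SIZE);
    System.out.println(Property.TABLE_FILE_BLOCK_SIZE.getKey() + " = " + blockSize + " bytes");
  }
}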
Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.
The class ClientServiceHandler, method getTableConfiguration.
@Override
public Map<String, String> getTableConfiguration(TInfo tinfo, TCredentials credentials, String tableName) throws TException, ThriftTableOperationException {
  Table.ID tableId = checkTableId(instance, tableName, null);
  AccumuloConfiguration config = context.getServerConfigurationFactory().getTableConfiguration(tableId);
  return conf(credentials, config);
}
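The conf(credentials, config) helper is not shown in this snippet. Below is a hypothetical sketch of its flattening step (the name toMap is an assumption, and the permission check it presumably performs is omitted); it relies on AccumuloConfiguration being iterable as key/value entries, which the Accumulo.init example below also uses.

// Hypothetical helper, not the actual ClientServiceHandler.conf(...) implementation.
// Requires java.util.Map, java.util.TreeMap.
private static Map<String, String> toMap(AccumuloConfiguration config) {
  Map<String, String> result = new TreeMap<>();
  for (Map.Entry<String, String> entry : config) {
    result.put(entry.getKey(), entry.getValue());
  }
  return result;
}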
Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.
The class Accumulo, method init.
public static void init(VolumeManager fs, Instance instance, ServerConfigurationFactory serverConfig, String application) throws IOException {
  final AccumuloConfiguration conf = serverConfig.getSystemConfiguration();
  log.info("{} starting", application);
  log.info("Instance {}", instance.getInstanceID());
  int dataVersion = Accumulo.getAccumuloPersistentVersion(fs);
  log.info("Data Version {}", dataVersion);
  Accumulo.waitForZookeeperAndHdfs(fs);
  if (!(canUpgradeFromDataVersion(dataVersion))) {
    throw new RuntimeException("This version of accumulo (" + Constants.VERSION + ") is not compatible with files stored using data version " + dataVersion);
  }
  // Log the effective system configuration in sorted order, masking sensitive values.
  TreeMap<String, String> sortedProps = new TreeMap<>();
  for (Entry<String, String> entry : conf)
    sortedProps.put(entry.getKey(), entry.getValue());
  for (Entry<String, String> entry : sortedProps.entrySet()) {
    String key = entry.getKey();
    log.info("{} = {}", key, (Property.isSensitive(key) ? "<hidden>" : entry.getValue()));
  }
  monitorSwappiness(conf);
  // Encourage users to configure TLS
  final String SSL = "SSL";
  for (Property sslProtocolProperty : Arrays.asList(Property.RPC_SSL_CLIENT_PROTOCOL, Property.RPC_SSL_ENABLED_PROTOCOLS, Property.MONITOR_SSL_INCLUDE_PROTOCOLS)) {
    String value = conf.get(sslProtocolProperty);
    if (value.contains(SSL)) {
      log.warn("It is recommended that {} only allow TLS", sslProtocolProperty);
    }
  }
}
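As a small aside on the masking above, a hedged sketch of the sensitivity check; the property chosen here is only an example of a value Accumulo treats as sensitive.

// Property.isSensitive reports whether a key's value should be hidden in logs.
boolean hidden = Property.isSensitive(Property.INSTANCE_SECRET.getKey());
// hidden is expected to be true, so init() would log "<hidden>" instead of the secret.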
Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.
The class StandaloneAccumuloCluster, method getSiteConfiguration.
@Override
public AccumuloConfiguration getSiteConfiguration() {
  Configuration conf = new Configuration(false);
  Path accumuloSite = new Path(serverAccumuloConfDir, "accumulo-site.xml");
  conf.addResource(accumuloSite);
  return new ConfigurationCopy(Iterables.concat(DefaultConfiguration.getInstance(), conf));
}
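A brief usage sketch for the method above; the cluster variable and the property read are illustrative. Because the accumulo-site.xml entries are concatenated after the defaults, they should take precedence in the resulting ConfigurationCopy.

// Illustrative: read the ZooKeeper host list from the combined site configuration.
// "cluster" stands in for a StandaloneAccumuloCluster instance (assumed).
AccumuloConfiguration site = cluster.getSiteConfiguration();
String zkHosts = site.get(Property.INSTANCE_ZK_HOST);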
Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.
The class ShellSetInstanceTest, method testSetInstance_HdfsZooInstance.
private void testSetInstance_HdfsZooInstance(boolean explicitHdfs, boolean onlyInstance, boolean onlyHosts) throws Exception {
  // Sets up EasyMock/PowerMock expectations so that Shell.setInstance(opts)
  // resolves a ZooKeeperInstance from client configuration and/or HDFS.
  ClientConfiguration clientConf = createMock(ClientConfiguration.class);
  ShellOptionsJC opts = createMock(ShellOptionsJC.class);
  expect(opts.isFake()).andReturn(false);
  expect(opts.getClientConfiguration()).andReturn(clientConf);
  expect(opts.isHdfsZooInstance()).andReturn(explicitHdfs);
  if (!explicitHdfs) {
    expect(opts.getZooKeeperInstance()).andReturn(Collections.emptyList());
    if (onlyInstance) {
      expect(opts.getZooKeeperInstanceName()).andReturn("instance");
      expect(clientConf.withInstance("instance")).andReturn(clientConf);
    } else {
      expect(opts.getZooKeeperInstanceName()).andReturn(null);
    }
    if (onlyHosts) {
      expect(opts.getZooKeeperHosts()).andReturn("host3,host4");
      expect(clientConf.withZkHosts("host3,host4")).andReturn(clientConf);
    } else {
      expect(opts.getZooKeeperHosts()).andReturn(null);
    }
  }
  replay(opts);
  if (!onlyInstance) {
    expect(clientConf.get(ClientProperty.INSTANCE_NAME)).andReturn(null);
  }
  mockStatic(ConfigSanityCheck.class);
  ConfigSanityCheck.validate(EasyMock.<AccumuloConfiguration>anyObject());
  expectLastCall().atLeastOnce();
  replay(ConfigSanityCheck.class);
  if (!onlyHosts) {
    expect(clientConf.containsKey(ClientProperty.INSTANCE_ZK_HOST.getKey())).andReturn(true).atLeastOnce();
    expect(clientConf.get(ClientProperty.INSTANCE_ZK_HOST)).andReturn("host1,host2").atLeastOnce();
    expect(clientConf.withZkHosts("host1,host2")).andReturn(clientConf);
  }
  if (!onlyInstance) {
    expect(clientConf.containsKey(Property.INSTANCE_VOLUMES.getKey())).andReturn(false).atLeastOnce();
    @SuppressWarnings("deprecation")
    String INSTANCE_DFS_DIR_KEY = Property.INSTANCE_DFS_DIR.getKey();
    @SuppressWarnings("deprecation")
    String INSTANCE_DFS_URI_KEY = Property.INSTANCE_DFS_URI.getKey();
    expect(clientConf.containsKey(INSTANCE_DFS_DIR_KEY)).andReturn(true).atLeastOnce();
    expect(clientConf.containsKey(INSTANCE_DFS_URI_KEY)).andReturn(true).atLeastOnce();
    expect(clientConf.getString(INSTANCE_DFS_URI_KEY)).andReturn("hdfs://nn1").atLeastOnce();
    expect(clientConf.getString(INSTANCE_DFS_DIR_KEY)).andReturn("/dfs").atLeastOnce();
  }
  UUID randomUUID = null;
  if (!onlyInstance) {
    mockStatic(ZooUtil.class);
    randomUUID = UUID.randomUUID();
    expect(ZooUtil.getInstanceIDFromHdfs(anyObject(Path.class), anyObject(AccumuloConfiguration.class))).andReturn(randomUUID.toString());
    replay(ZooUtil.class);
    expect(clientConf.withInstance(randomUUID)).andReturn(clientConf);
  }
  replay(clientConf);
  ZooKeeperInstance theInstance = createMock(ZooKeeperInstance.class);
  expectNew(ZooKeeperInstance.class, new Class<?>[] { ClientConfiguration.class }, clientConf).andReturn(theInstance);
  replay(theInstance, ZooKeeperInstance.class);
  shell.setInstance(opts);
  verify(theInstance, ZooKeeperInstance.class);
}