Use of org.apache.accumulo.core.conf.DefaultConfiguration in project accumulo by apache.
The class ExistingMacIT, method testExistingInstance.
@Test
public void testExistingInstance() throws Exception {
    Connector conn = getCluster().getConnector("root", new PasswordToken(ROOT_PASSWORD));
    conn.tableOperations().create("table1");
    BatchWriter bw = conn.createBatchWriter("table1", new BatchWriterConfig());
    Mutation m1 = new Mutation("00081");
    m1.put("math", "sqroot", "9");
    m1.put("math", "sq", "6560");
    bw.addMutation(m1);
    bw.close();
    conn.tableOperations().flush("table1", null, null, true);
    // TODO use constants
    conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
    conn.tableOperations().flush(RootTable.NAME, null, null, true);
    Set<Entry<ServerType, Collection<ProcessReference>>> procs = getCluster().getProcesses().entrySet();
    for (Entry<ServerType, Collection<ProcessReference>> entry : procs) {
        if (entry.getKey() == ServerType.ZOOKEEPER)
            continue;
        for (ProcessReference pr : entry.getValue())
            getCluster().killProcess(entry.getKey(), pr);
    }
    final DefaultConfiguration defaultConfig = DefaultConfiguration.getInstance();
    final long zkTimeout = ConfigurationTypeHelper.getTimeInMillis(
        getCluster().getConfig().getSiteConfig().get(Property.INSTANCE_ZK_TIMEOUT.getKey()));
    IZooReaderWriter zrw = new ZooReaderWriterFactory().getZooReaderWriter(
        getCluster().getZooKeepers(), (int) zkTimeout, defaultConfig.get(Property.INSTANCE_SECRET));
    final String zInstanceRoot = Constants.ZROOT + "/" + conn.getInstance().getInstanceID();
    while (!AccumuloStatus.isAccumuloOffline(zrw, zInstanceRoot)) {
        log.debug("Accumulo services still have their ZK locks held");
        Thread.sleep(1000);
    }
    File hadoopConfDir = createTestDir(ExistingMacIT.class.getSimpleName() + "_hadoop_conf");
    FileUtils.deleteQuietly(hadoopConfDir);
    assertTrue(hadoopConfDir.mkdirs());
    createEmptyConfig(new File(hadoopConfDir, "core-site.xml"));
    createEmptyConfig(new File(hadoopConfDir, "hdfs-site.xml"));
    File testDir2 = createTestDir(ExistingMacIT.class.getSimpleName() + "_2");
    FileUtils.deleteQuietly(testDir2);
    MiniAccumuloConfigImpl macConfig2 = new MiniAccumuloConfigImpl(testDir2, "notused");
    macConfig2.useExistingInstance(new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"), hadoopConfDir);
    MiniAccumuloClusterImpl accumulo2 = new MiniAccumuloClusterImpl(macConfig2);
    accumulo2.start();
    conn = accumulo2.getConnector("root", new PasswordToken(ROOT_PASSWORD));
    try (Scanner scanner = conn.createScanner("table1", Authorizations.EMPTY)) {
        int sum = 0;
        for (Entry<Key, Value> entry : scanner) {
            sum += Integer.parseInt(entry.getValue().toString());
        }
        Assert.assertEquals(6569, sum);
    }
    accumulo2.stop();
}
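In this test DefaultConfiguration only supplies the stock value of INSTANCE_SECRET while the ZooKeeper client is being built. A minimal sketch of that lookup pattern, using only the calls that appear above (variable names are illustrative):

// The compiled-in defaults, independent of any site configuration.
DefaultConfiguration defaults = DefaultConfiguration.getInstance();
String secret = defaults.get(Property.INSTANCE_SECRET);
// Duration-style values such as "30s" are converted to milliseconds.
long zkTimeoutMs = ConfigurationTypeHelper.getTimeInMillis(defaults.get(Property.INSTANCE_ZK_TIMEOUT));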
Use of org.apache.accumulo.core.conf.DefaultConfiguration in project accumulo by apache.
The class UserImpersonationTest, method setup.
@Before
public void setup() {
    cc = new ConfigurationCopy(new HashMap<>());
    conf = new AccumuloConfiguration() {
        DefaultConfiguration defaultConfig = DefaultConfiguration.getInstance();

        @Override
        public String get(Property property) {
            String value = cc.get(property);
            if (null == value) {
                return defaultConfig.get(property);
            }
            return value;
        }

        @Override
        public void getProperties(Map<String, String> props, Predicate<String> filter) {
            cc.getProperties(props, filter);
        }
    };
}
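The anonymous AccumuloConfiguration above answers from the ConfigurationCopy first and falls back to the compiled-in defaults. A hedged usage sketch of that fallback; the property and the set call are illustrative assumptions, not part of the original test:

// No override present yet, so the default value comes back.
String beforeOverride = conf.get(Property.INSTANCE_ZK_TIMEOUT);
// Once the copy holds an explicit value, it shadows the default.
cc.set(Property.INSTANCE_ZK_TIMEOUT, "5s");
String afterOverride = conf.get(Property.INSTANCE_ZK_TIMEOUT);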
Use of org.apache.accumulo.core.conf.DefaultConfiguration in project accumulo by apache.
The class ReplicationWorker, method run.
@Override
public void run() {
    DefaultConfiguration defaultConf = DefaultConfiguration.getInstance();
    long defaultDelay = defaultConf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_DELAY);
    long defaultPeriod = defaultConf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_PERIOD);
    long delay = conf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_DELAY);
    long period = conf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_PERIOD);
    try {
        DistributedWorkQueue workQueue;
        if (defaultDelay != delay && defaultPeriod != period) {
log.debug("Configuration DistributedWorkQueue with delay and period of {} and {}", delay, period);
            workQueue = new DistributedWorkQueue(ZooUtil.getRoot(inst) + ReplicationConstants.ZOO_WORK_QUEUE, conf, delay, period);
        } else {
            log.debug("Configuring DistributedWorkQueue with default delay and period");
            workQueue = new DistributedWorkQueue(ZooUtil.getRoot(inst) + ReplicationConstants.ZOO_WORK_QUEUE, conf);
        }
        workQueue.startProcessing(new ReplicationProcessor(context, conf, fs), executor);
    } catch (KeeperException | InterruptedException e) {
        throw new RuntimeException(e);
    }
}
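Here DefaultConfiguration serves only as a baseline for detecting operator overrides: the delay/period constructor is chosen when the configured values differ from the defaults. A minimal sketch of that comparison, assuming conf is the live AccumuloConfiguration from the snippet:

DefaultConfiguration defaults = DefaultConfiguration.getInstance();
long delay = conf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_DELAY);
long period = conf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_PERIOD);
// As in the snippet, both values must differ from the defaults before the custom schedule is used.
boolean useCustomSchedule = delay != defaults.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_DELAY)
    && period != defaults.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_PERIOD);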
Use of org.apache.accumulo.core.conf.DefaultConfiguration in project accumulo by apache.
The class AccumuloFileOutputFormatIT, method handleWriteTests.
private void handleWriteTests(boolean content) throws Exception {
    File f = folder.newFile(testName.getMethodName());
    if (f.delete()) {
        log.debug("Deleted {}", f);
    }
    MRTester.main(new String[] { content ? TEST_TABLE : EMPTY_TABLE, f.getAbsolutePath() });
    assertTrue(f.exists());
    File[] files = f.listFiles(new FileFilter() {
        @Override
        public boolean accept(File file) {
            return file.getName().startsWith("part-m-");
        }
    });
    assertNotNull(files);
    if (content) {
        assertEquals(1, files.length);
        assertTrue(files[0].exists());
        Configuration conf = CachedConfiguration.getInstance();
        DefaultConfiguration acuconf = DefaultConfiguration.getInstance();
        FileSKVIterator sample = RFileOperations.getInstance().newReaderBuilder()
            .forFile(files[0].toString(), FileSystem.get(conf), conf)
            .withTableConfiguration(acuconf).build()
            .getSample(new SamplerConfigurationImpl(SAMPLER_CONFIG));
        assertNotNull(sample);
    } else {
        assertEquals(0, files.length);
    }
}
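In the content branch, DefaultConfiguration stands in for a table configuration when the produced RFile is reopened. A condensed sketch of that reader setup, using only the builder calls shown above (the file path is hypothetical):

Configuration conf = CachedConfiguration.getInstance();
FileSKVIterator reader = RFileOperations.getInstance().newReaderBuilder()
    .forFile("/tmp/example.rf", FileSystem.get(conf), conf) // hypothetical RFile path
    .withTableConfiguration(DefaultConfiguration.getInstance())
    .build();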
Use of org.apache.accumulo.core.conf.DefaultConfiguration in project accumulo by apache.
The class BlockCacheFactoryTest, method testCreateLruBlockCacheFactory.
@Test
public void testCreateLruBlockCacheFactory() throws Exception {
    DefaultConfiguration dc = DefaultConfiguration.getInstance();
    ConfigurationCopy cc = new ConfigurationCopy(dc);
    cc.set(Property.TSERV_CACHE_MANAGER_IMPL, LruBlockCacheManager.class.getName());
    BlockCacheManagerFactory.getInstance(cc);
}
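Seeding a ConfigurationCopy from the defaults means the test only has to override the single property it cares about; everything else still resolves to its default value. A small illustrative sketch; the read-back property is an assumption for demonstration, not part of the original test:

DefaultConfiguration dc = DefaultConfiguration.getInstance();
ConfigurationCopy cc = new ConfigurationCopy(dc); // starts out holding every default value
cc.set(Property.TSERV_CACHE_MANAGER_IMPL, LruBlockCacheManager.class.getName());
// Untouched properties, e.g. the data cache size, still resolve to their defaults.
String dataCacheSize = cc.get(Property.TSERV_DATACACHE_SIZE);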