Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.
From the class ZooKeeperInstance, the getConnector method:
@Override
public Connector getConnector(String principal, AuthenticationToken token)
    throws AccumuloException, AccumuloSecurityException {
  Properties properties = ClientConfConverter.toProperties(clientConf);
  properties.setProperty(ClientProperty.AUTH_PRINCIPAL.getKey(), principal);
  properties.setProperty(ClientProperty.INSTANCE_NAME.getKey(), getInstanceName());
  ClientInfo info = new ClientInfoImpl(properties, token);
  AccumuloConfiguration serverConf = ClientConfConverter.toAccumuloConf(properties);
  return new org.apache.accumulo.core.clientImpl.ConnectorImpl(
      new ClientContext(SingletonReservation.noop(), info, serverConf));
}
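For context, a minimal caller-side sketch of how this override is typically reached; the instance name, ZooKeeper address, and credentials below are placeholders:

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

// A minimal sketch, assuming an instance named "test" with ZooKeeper at localhost:2181.
Instance instance = new ZooKeeperInstance("test", "localhost:2181");
Connector connector = instance.getConnector("root", new PasswordToken("secret"));

In Accumulo 2.x, ZooKeeperInstance is deprecated in favor of Accumulo.newClient(), which is why the method above bridges the legacy client configuration into the Properties-based ClientInfo used by the newer client internals.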
Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.
From the class RFileOperations, the openWriter method:
@Override
protected FileSKVWriter openWriter(FileOptions options) throws IOException {
  AccumuloConfiguration acuconf = options.getTableConfiguration();
  long blockSize = acuconf.getAsBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE);
  Preconditions.checkArgument(blockSize < Integer.MAX_VALUE && blockSize > 0,
      "table.file.compress.blocksize must be greater than 0 and less than " + Integer.MAX_VALUE);
  long indexBlockSize = acuconf.getAsBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX);
  Preconditions.checkArgument(indexBlockSize < Integer.MAX_VALUE && indexBlockSize > 0,
      "table.file.compress.blocksize.index must be greater than 0 and less than " + Integer.MAX_VALUE);
  SamplerConfigurationImpl samplerConfig = SamplerConfigurationImpl.newSamplerConfig(acuconf);
  Sampler sampler = null;
  if (samplerConfig != null) {
    sampler = SamplerFactory.newSampler(samplerConfig, acuconf, options.isAccumuloStartEnabled());
  }
  // Fall back to the table's configured compression codec when none was requested explicitly.
  String compression = options.getCompression();
  compression = compression == null
      ? options.getTableConfiguration().get(Property.TABLE_FILE_COMPRESSION_TYPE) : compression;
  FSDataOutputStream outputStream = options.getOutputStream();
  Configuration conf = options.getConfiguration();
  if (outputStream == null) {
    // No stream was supplied, so create the file here. Table-level replication and
    // block size settings override the HDFS defaults when they are set (> 0).
    int hrep = conf.getInt("dfs.replication", 3);
    int trep = acuconf.getCount(Property.TABLE_FILE_REPLICATION);
    int rep = hrep;
    if (trep > 0 && trep != hrep) {
      rep = trep;
    }
    long hblock = conf.getLong("dfs.block.size", 1 << 26);
    long tblock = acuconf.getAsBytes(Property.TABLE_FILE_BLOCK_SIZE);
    long block = hblock;
    if (tblock > 0) {
      block = tblock;
    }
    int bufferSize = conf.getInt("io.file.buffer.size", 4096);
    String file = options.getFilename();
    FileSystem fs = options.getFileSystem();
    outputStream = fs.create(new Path(file), false, bufferSize, (short) rep, block);
  }
  BCFile.Writer _cbw = new BCFile.Writer(outputStream, options.getRateLimiter(), compression,
      conf, options.cryptoService);
  return new RFile.Writer(_cbw, (int) blockSize, (int) indexBlockSize, samplerConfig, sampler);
}
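The method above is Accumulo's internal writer path. Applications that just need to produce RFiles normally go through the public builder in org.apache.accumulo.core.client.rfile.RFile; a minimal sketch, with the output path and key fields as placeholders:

import static java.nio.charset.StandardCharsets.UTF_8;

import org.apache.accumulo.core.client.rfile.RFile;
import org.apache.accumulo.core.client.rfile.RFileWriter;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

FileSystem fs = FileSystem.get(new Configuration());
try (RFileWriter writer = RFile.newWriter().to("/tmp/example.rf").withFileSystem(fs).build()) {
  // Keys must be appended in sorted order.
  writer.append(new Key("row1", "cf", "cq"), new Value("v1".getBytes(UTF_8)));
}

The builder ultimately exercises the same block size and compression properties that openWriter reads from the table configuration.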
Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.
From the class AccumuloFileOutputFormatTest, the validateConfiguration method:
@Test
public void validateConfiguration() {
  int a = 7;
  long b = 300L;
  long c = 50L;
  long d = 10L;
  String e = "snappy";
  SamplerConfiguration samplerConfig = new SamplerConfiguration(RowSampler.class.getName());
  samplerConfig.addOption("hasher", "murmur3_32");
  samplerConfig.addOption("modulus", "109");
  JobConf job = new JobConf();
  AccumuloFileOutputFormat.setReplication(job, a);
  AccumuloFileOutputFormat.setFileBlockSize(job, b);
  AccumuloFileOutputFormat.setDataBlockSize(job, c);
  AccumuloFileOutputFormat.setIndexBlockSize(job, d);
  AccumuloFileOutputFormat.setCompressionType(job, e);
  AccumuloFileOutputFormat.setSampler(job, samplerConfig);
  AccumuloConfiguration acuconf = org.apache.accumulo.core.clientImpl.mapreduce.lib.FileOutputConfigurator
      .getAccumuloConfiguration(AccumuloFileOutputFormat.class, job);
  assertEquals(7, acuconf.getCount(Property.TABLE_FILE_REPLICATION));
  assertEquals(300L, acuconf.getAsBytes(Property.TABLE_FILE_BLOCK_SIZE));
  assertEquals(50L, acuconf.getAsBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE));
  assertEquals(10L, acuconf.getAsBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX));
  assertEquals("snappy", acuconf.get(Property.TABLE_FILE_COMPRESSION_TYPE));
  assertEquals(new SamplerConfigurationImpl(samplerConfig), SamplerConfigurationImpl.newSamplerConfig(acuconf));
  // Run the same checks again with different values on a fresh JobConf.
  a = 17;
  b = 1300L;
  c = 150L;
  d = 110L;
  e = "lzo";
  samplerConfig = new SamplerConfiguration(RowSampler.class.getName());
  samplerConfig.addOption("hasher", "md5");
  samplerConfig.addOption("modulus", "100003");
  job = new JobConf();
  AccumuloFileOutputFormat.setReplication(job, a);
  AccumuloFileOutputFormat.setFileBlockSize(job, b);
  AccumuloFileOutputFormat.setDataBlockSize(job, c);
  AccumuloFileOutputFormat.setIndexBlockSize(job, d);
  AccumuloFileOutputFormat.setCompressionType(job, e);
  AccumuloFileOutputFormat.setSampler(job, samplerConfig);
  acuconf = org.apache.accumulo.core.clientImpl.mapreduce.lib.FileOutputConfigurator
      .getAccumuloConfiguration(AccumuloFileOutputFormat.class, job);
  assertEquals(17, acuconf.getCount(Property.TABLE_FILE_REPLICATION));
  assertEquals(1300L, acuconf.getAsBytes(Property.TABLE_FILE_BLOCK_SIZE));
  assertEquals(150L, acuconf.getAsBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE));
  assertEquals(110L, acuconf.getAsBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX));
  assertEquals("lzo", acuconf.get(Property.TABLE_FILE_COMPRESSION_TYPE));
  assertEquals(new SamplerConfigurationImpl(samplerConfig), SamplerConfigurationImpl.newSamplerConfig(acuconf));
}
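The test only verifies that the settings round-trip through the job configuration. For completeness, a sketch of how the same setters fit into a real job definition, assuming the mapred variant of AccumuloFileOutputFormat used by the test; the output path is a placeholder:

import org.apache.accumulo.core.client.mapred.AccumuloFileOutputFormat;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;

JobConf job = new JobConf();
job.setOutputFormat(AccumuloFileOutputFormat.class);
job.setOutputKeyClass(Key.class);
job.setOutputValueClass(Value.class);
FileOutputFormat.setOutputPath(job, new Path("/tmp/rfiles")); // placeholder output directory
AccumuloFileOutputFormat.setCompressionType(job, "snappy");
AccumuloFileOutputFormat.setReplication(job, 3);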
Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.
From the class MultiLevelIndexTest, the runTest method:
private void runTest(int maxBlockSize, int num) throws IOException {
  AccumuloConfiguration aconf = DefaultConfiguration.getInstance();
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  FSDataOutputStream dos = new FSDataOutputStream(baos, new FileSystem.Statistics("a"));
  BCFile.Writer _cbw = new BCFile.Writer(dos, null, "gz", hadoopConf,
      CryptoServiceFactory.newInstance(aconf, JAVA));
  BufferedWriter mliw = new BufferedWriter(new Writer(_cbw, maxBlockSize));
  for (int i = 0; i < num; i++) {
    mliw.add(new Key(String.format("%05d000", i)), i, 0, 0, 0);
  }
  mliw.addLast(new Key(String.format("%05d000", num)), num, 0, 0, 0);
  BCFile.Writer.BlockAppender root = _cbw.prepareMetaBlock("root");
  mliw.close(root);
  root.close();
  _cbw.close();
  dos.close();
  baos.close();
  byte[] data = baos.toByteArray();
  SeekableByteArrayInputStream bais = new SeekableByteArrayInputStream(data);
  FSDataInputStream in = new FSDataInputStream(bais);
  CachableBuilder cb = new CachableBuilder().input(in, "source-1").length(data.length)
      .conf(hadoopConf).cryptoService(CryptoServiceFactory.newInstance(aconf, JAVA));
  CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(cb);
  Reader reader = new Reader(_cbr, RFile.RINDEX_VER_8);
  CachableBlockFile.CachedBlockRead rootIn = _cbr.getMetaBlock("root");
  reader.readFields(rootIn);
  rootIn.close();
  IndexIterator liter = reader.lookup(new Key("000000"));
  int count = 0;
  while (liter.hasNext()) {
    assertEquals(count, liter.nextIndex());
    assertEquals(count, liter.peek().getNumEntries());
    assertEquals(count, liter.next().getNumEntries());
    count++;
  }
  assertEquals(num + 1, count);
  while (liter.hasPrevious()) {
    count--;
    assertEquals(count, liter.previousIndex());
    assertEquals(count, liter.peekPrevious().getNumEntries());
    assertEquals(count, liter.previous().getNumEntries());
  }
  assertEquals(0, count);
  // go past the end
  liter = reader.lookup(new Key(String.format("%05d000", num + 1)));
  assertFalse(liter.hasNext());
  random.ints(100, 0, num * 1_000).forEach(k -> {
    int expected;
    if (k % 1000 == 0) {
      // end key is inclusive
      expected = k / 1000;
    } else {
      expected = k / 1000 + 1;
    }
    try {
      IndexEntry ie = reader.lookup(new Key(String.format("%08d", k))).next();
      assertEquals(expected, ie.getNumEntries());
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }
  });
}
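The expected-value arithmetic in the random lookup loop can be spot-checked by hand; a worked example with hypothetical probe values:

// Keys were written as String.format("%05d000", i), so index entries exist at every
// multiple of 1000, and entry i carries numEntries == i.
// Probe k = 4500: 4500 % 1000 != 0, so the lookup lands in the block whose
//   (inclusive) end key is "00005000", giving expected = 4500 / 1000 + 1 = 5.
// Probe k = 4000: 4000 % 1000 == 0 and end keys are inclusive, so the lookup
//   matches "00004000" itself, giving expected = 4000 / 1000 = 4.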
Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.
From the class CreateCompatTestFile, the main method:
public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  AccumuloConfiguration aconf = DefaultConfiguration.getInstance();
  BCFile.Writer _cbw = new BCFile.Writer(fs.create(new Path(args[0])), null, "gz", conf,
      CryptoServiceFactory.newInstance(aconf, ClassloaderType.JAVA));
  RFile.Writer writer = new RFile.Writer(_cbw, 1000);
  writer.startNewLocalityGroup("lg1", newColFamSequence(formatStr("cf_", 1), formatStr("cf_", 2)));
  for (int i = 0; i < 1000; i++) {
    writer.append(newKey(formatStr("r_", i), formatStr("cf_", 1), formatStr("cq_", 0), "", 1000 - i), newValue(i + ""));
    writer.append(newKey(formatStr("r_", i), formatStr("cf_", 2), formatStr("cq_", 0), "", 1000 - i), newValue(i + ""));
  }
  writer.startNewLocalityGroup("lg2", newColFamSequence(formatStr("cf_", 3)));
  for (int i = 0; i < 1000; i++) {
    writer.append(newKey(formatStr("r_", i), formatStr("cf_", 3), formatStr("cq_", 0), "", 1000 - i), newValue(i + ""));
  }
  writer.startDefaultLocalityGroup();
  for (int i = 0; i < 1000; i++) {
    writer.append(newKey(formatStr("r_", i), formatStr("cf_", 4), formatStr("cq_", 0), "", 1000 - i), newValue(i + ""));
  }
  writer.close();
  _cbw.close();
}
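A sketch of reading the generated file back through the public API, reusing the fs handle from the snippet above; the iteration body is illustrative:

import java.util.Map;

import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.rfile.RFile;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;

try (Scanner scanner = RFile.newScanner().from(args[0]).withFileSystem(fs).build()) {
  for (Map.Entry<Key,Value> entry : scanner) {
    System.out.println(entry.getKey() + " -> " + entry.getValue());
  }
}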