Example 21 with ZooKeeperInstance

Use of org.apache.accumulo.core.client.ZooKeeperInstance in project accumulo by apache.

From the class Shell, method getZooInstance().

/*
   * Takes instanceName and keepers as separate arguments, rather than just packaged into the clientConfig, so that we can fail over to accumulo-site.xml or
   * HDFS config if they're unspecified.
   */
private static Instance getZooInstance(String instanceName, String keepersOption, ClientConfiguration clientConfig) {
    UUID instanceId = null;
    if (instanceName == null) {
        instanceName = clientConfig.get(ClientProperty.INSTANCE_NAME);
    }
    String keepers = getZooKeepers(keepersOption, clientConfig);
    if (instanceName == null) {
        AccumuloConfiguration conf = SiteConfiguration.getInstance();
        Path instanceDir = new Path(VolumeConfiguration.getVolumeUris(conf)[0], "instance_id");
        instanceId = UUID.fromString(ZooUtil.getInstanceIDFromHdfs(instanceDir, conf));
    }
    if (instanceId != null) {
        return new ZooKeeperInstance(clientConfig.withInstance(instanceId).withZkHosts(keepers));
    } else {
        return new ZooKeeperInstance(clientConfig.withInstance(instanceName).withZkHosts(keepers));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) UUID(java.util.UUID) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance)
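
For reference, a minimal, hypothetical sketch of how an Instance resolved this way is typically turned into a Connector. The instance name, ZooKeeper quorum, user, and password are placeholders, not values taken from the Shell code above.

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

public class ZooInstanceUsageSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder instance name and ZooKeeper quorum; in the Shell these come from
        // the command-line options, the client configuration, or accumulo-site.xml/HDFS as above.
        Instance instance = new ZooKeeperInstance("myInstance", "zk1:2181,zk2:2181,zk3:2181");
        // The Connector is the entry point for table, security, and scan operations.
        Connector connector = instance.getConnector("root", new PasswordToken("secret"));
        System.out.println(connector.tableOperations().list());
    }
}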

Example 22 with ZooKeeperInstance

Use of org.apache.accumulo.core.client.ZooKeeperInstance in project accumulo by apache.

From the class ConfiguratorBaseTest, method testSetZooKeeperInstance().

@Test
public void testSetZooKeeperInstance() {
    Configuration conf = new Configuration();
    ConfiguratorBase.setZooKeeperInstance(this.getClass(), conf, ClientConfiguration.create().withInstance("testInstanceName").withZkHosts("testZooKeepers").withSsl(true).withZkTimeout(1234));
    ClientConfiguration clientConf = ClientConfiguration.deserialize(conf.get(ConfiguratorBase.enumToConfKey(this.getClass(), ConfiguratorBase.InstanceOpts.CLIENT_CONFIG)));
    assertEquals("testInstanceName", clientConf.get(ClientProperty.INSTANCE_NAME));
    assertEquals("testZooKeepers", clientConf.get(ClientProperty.INSTANCE_ZK_HOST));
    assertEquals("true", clientConf.get(ClientProperty.INSTANCE_RPC_SSL_ENABLED));
    assertEquals("1234", clientConf.get(ClientProperty.INSTANCE_ZK_TIMEOUT));
    assertEquals(ZooKeeperInstance.class.getSimpleName(), conf.get(ConfiguratorBase.enumToConfKey(this.getClass(), ConfiguratorBase.InstanceOpts.TYPE)));
// We want to test that the correct parameters from the config get passed to the ZKI
// but that keeps us from being able to make assertions on a valid instance name at ZKI creation
// Instance instance = ConfiguratorBase.getInstance(this.getClass(), conf);
// assertEquals(ZooKeeperInstance.class.getName(), instance.getClass().getName());
// assertEquals("testInstanceName", ((ZooKeeperInstance) instance).getInstanceName());
// assertEquals("testZooKeepers", ((ZooKeeperInstance) instance).getZooKeepers());
// assertEquals(1234000, ((ZooKeeperInstance) instance).getZooKeepersSessionTimeOut());
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance) Test(org.junit.Test)
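
For context, a hedged sketch of the corresponding public API usage in a MapReduce job, assuming the Accumulo 1.x org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat, whose static setters are backed by ConfiguratorBase. The principal, token, and table name are placeholders.

import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.hadoop.mapreduce.Job;

public class InputFormatConfigSketch {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance();
        // Same ClientConfiguration shape that the test above serializes into the Hadoop Configuration.
        ClientConfiguration clientConf = ClientConfiguration.create()
            .withInstance("testInstanceName")
            .withZkHosts("testZooKeepers")
            .withSsl(true)
            .withZkTimeout(1234);
        AccumuloInputFormat.setZooKeeperInstance(job, clientConf);
        // Placeholder credentials and table; real jobs supply their own.
        AccumuloInputFormat.setConnectorInfo(job, "root", new PasswordToken("secret"));
        AccumuloInputFormat.setInputTableName(job, "mytable");
    }
}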

Example 23 with ZooKeeperInstance

Use of org.apache.accumulo.core.client.ZooKeeperInstance in project hive by apache.

From the class TestHiveAccumuloTableInputFormat, method testConfigureAccumuloInputFormatWithIterators().

@Test
public void testConfigureAccumuloInputFormatWithIterators() throws Exception {
    AccumuloConnectionParameters accumuloParams = new AccumuloConnectionParameters(conf);
    ColumnMapper columnMapper = new ColumnMapper(conf.get(AccumuloSerDeParameters.COLUMN_MAPPINGS), conf.get(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE), columnNames, columnTypes);
    Set<Pair<Text, Text>> cfCqPairs = inputformat.getPairCollection(columnMapper.getColumnMappings());
    List<IteratorSetting> iterators = new ArrayList<IteratorSetting>();
    Set<Range> ranges = Collections.singleton(new Range());
    String instanceName = "realInstance";
    String zookeepers = "host1:2181,host2:2181,host3:2181";
    IteratorSetting cfg = new IteratorSetting(50, PrimitiveComparisonFilter.class);
    cfg.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, StringCompare.class.getName());
    cfg.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, Equal.class.getName());
    cfg.addOption(PrimitiveComparisonFilter.CONST_VAL, "dave");
    cfg.addOption(PrimitiveComparisonFilter.COLUMN, "person:name");
    iterators.add(cfg);
    cfg = new IteratorSetting(50, PrimitiveComparisonFilter.class);
    cfg.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, IntCompare.class.getName());
    cfg.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, Equal.class.getName());
    cfg.addOption(PrimitiveComparisonFilter.CONST_VAL, "50");
    cfg.addOption(PrimitiveComparisonFilter.COLUMN, "person:age");
    iterators.add(cfg);
    ZooKeeperInstance zkInstance = Mockito.mock(ZooKeeperInstance.class);
    HiveAccumuloTableInputFormat mockInputFormat = Mockito.mock(HiveAccumuloTableInputFormat.class);
    HiveAccumuloHelper helper = Mockito.mock(HiveAccumuloHelper.class);
    // Stub out the ZKI mock
    Mockito.when(zkInstance.getInstanceName()).thenReturn(instanceName);
    Mockito.when(zkInstance.getZooKeepers()).thenReturn(zookeepers);
    // Stub out a mocked Helper instance
    Mockito.when(mockInputFormat.getHelper()).thenReturn(helper);
    // Call out to the real configure method
    Mockito.doCallRealMethod().when(mockInputFormat).configure(conf, zkInstance, con, accumuloParams, columnMapper, iterators, ranges);
    // Also compute the correct cf:cq pairs so we can assert the right argument was passed
    Mockito.doCallRealMethod().when(mockInputFormat).getPairCollection(columnMapper.getColumnMappings());
    mockInputFormat.configure(conf, zkInstance, con, accumuloParams, columnMapper, iterators, ranges);
    // Verify that the correct methods are invoked on AccumuloInputFormat
    Mockito.verify(helper).setInputFormatZooKeeperInstance(conf, instanceName, zookeepers, false);
    Mockito.verify(helper).setInputFormatConnectorInfo(conf, USER, new PasswordToken(PASS));
    Mockito.verify(mockInputFormat).setInputTableName(conf, TEST_TABLE);
    Mockito.verify(mockInputFormat).setScanAuthorizations(conf, con.securityOperations().getUserAuthorizations(USER));
    Mockito.verify(mockInputFormat).addIterators(conf, iterators);
    Mockito.verify(mockInputFormat).setRanges(conf, ranges);
    Mockito.verify(mockInputFormat).fetchColumns(conf, cfCqPairs);
}
Also used : StringCompare(org.apache.hadoop.hive.accumulo.predicate.compare.StringCompare) ArrayList(java.util.ArrayList) Range(org.apache.accumulo.core.data.Range) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance) HiveAccumuloHelper(org.apache.hadoop.hive.accumulo.HiveAccumuloHelper) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) PrimitiveComparisonFilter(org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter) GreaterThanOrEqual(org.apache.hadoop.hive.accumulo.predicate.compare.GreaterThanOrEqual) Equal(org.apache.hadoop.hive.accumulo.predicate.compare.Equal) IntCompare(org.apache.hadoop.hive.accumulo.predicate.compare.IntCompare) AccumuloConnectionParameters(org.apache.hadoop.hive.accumulo.AccumuloConnectionParameters) ColumnMapper(org.apache.hadoop.hive.accumulo.columns.ColumnMapper) Pair(org.apache.accumulo.core.util.Pair) Test(org.junit.Test)
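
Outside of Hive's input format, the same PrimitiveComparisonFilter options can be attached to a plain Scanner. A hedged sketch, assuming an already-connected Connector; the table name "people" is a placeholder.

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter;
import org.apache.hadoop.hive.accumulo.predicate.compare.Equal;
import org.apache.hadoop.hive.accumulo.predicate.compare.StringCompare;

public class FilterScanSketch {
    // The Connector is assumed to exist already; the table "people" is illustrative only.
    static Scanner scanForDave(Connector connector) throws Exception {
        IteratorSetting cfg = new IteratorSetting(50, PrimitiveComparisonFilter.class);
        cfg.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, StringCompare.class.getName());
        cfg.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, Equal.class.getName());
        cfg.addOption(PrimitiveComparisonFilter.CONST_VAL, "dave");
        cfg.addOption(PrimitiveComparisonFilter.COLUMN, "person:name");
        Scanner scanner = connector.createScanner("people", Authorizations.EMPTY);
        // Entries whose person:name value does not equal "dave" are filtered server-side.
        scanner.addScanIterator(cfg);
        return scanner;
    }
}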

Example 24 with ZooKeeperInstance

Use of org.apache.accumulo.core.client.ZooKeeperInstance in project hive by apache.

From the class TestHiveAccumuloTableInputFormat, method testConfigureAccumuloInputFormat().

@Test
public void testConfigureAccumuloInputFormat() throws Exception {
    AccumuloConnectionParameters accumuloParams = new AccumuloConnectionParameters(conf);
    ColumnMapper columnMapper = new ColumnMapper(conf.get(AccumuloSerDeParameters.COLUMN_MAPPINGS), conf.get(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE), columnNames, columnTypes);
    Set<Pair<Text, Text>> cfCqPairs = inputformat.getPairCollection(columnMapper.getColumnMappings());
    List<IteratorSetting> iterators = Collections.emptyList();
    Set<Range> ranges = Collections.singleton(new Range());
    String instanceName = "realInstance";
    String zookeepers = "host1:2181,host2:2181,host3:2181";
    ZooKeeperInstance zkInstance = Mockito.mock(ZooKeeperInstance.class);
    HiveAccumuloTableInputFormat mockInputFormat = Mockito.mock(HiveAccumuloTableInputFormat.class);
    HiveAccumuloHelper helper = Mockito.mock(HiveAccumuloHelper.class);
    // Stub out the ZKI mock
    Mockito.when(zkInstance.getInstanceName()).thenReturn(instanceName);
    Mockito.when(zkInstance.getZooKeepers()).thenReturn(zookeepers);
    // Stub out a mocked Helper instance
    Mockito.when(mockInputFormat.getHelper()).thenReturn(helper);
    // Call out to the real configure method
    Mockito.doCallRealMethod().when(mockInputFormat).configure(conf, zkInstance, con, accumuloParams, columnMapper, iterators, ranges);
    // Also compute the correct cf:cq pairs so we can assert the right argument was passed
    Mockito.doCallRealMethod().when(mockInputFormat).getPairCollection(columnMapper.getColumnMappings());
    mockInputFormat.configure(conf, zkInstance, con, accumuloParams, columnMapper, iterators, ranges);
    // Verify that the correct methods are invoked on AccumuloInputFormat
    Mockito.verify(helper).setInputFormatZooKeeperInstance(conf, instanceName, zookeepers, false);
    Mockito.verify(helper).setInputFormatConnectorInfo(conf, USER, new PasswordToken(PASS));
    Mockito.verify(mockInputFormat).setInputTableName(conf, TEST_TABLE);
    Mockito.verify(mockInputFormat).setScanAuthorizations(conf, con.securityOperations().getUserAuthorizations(USER));
    Mockito.verify(mockInputFormat).addIterators(conf, iterators);
    Mockito.verify(mockInputFormat).setRanges(conf, ranges);
    Mockito.verify(mockInputFormat).fetchColumns(conf, cfCqPairs);
}
Also used : Range(org.apache.accumulo.core.data.Range) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance) HiveAccumuloHelper(org.apache.hadoop.hive.accumulo.HiveAccumuloHelper) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) AccumuloConnectionParameters(org.apache.hadoop.hive.accumulo.AccumuloConnectionParameters) ColumnMapper(org.apache.hadoop.hive.accumulo.columns.ColumnMapper) Pair(org.apache.accumulo.core.util.Pair) Test(org.junit.Test)
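
Both of these tests lean on Mockito's partial-mock pattern: only configure() runs for real, while the methods it delegates to stay stubbed and verifiable. A toy, self-contained sketch of that pattern; the Worker class and its methods are illustrative, not Hive classes.

import static org.mockito.Mockito.doCallRealMethod;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

public class PartialMockSketch {
    // Stand-in for HiveAccumuloTableInputFormat: configure() delegates to apply().
    static class Worker {
        void configure(String setting) {
            apply(setting);
        }
        void apply(String setting) {
            // real side effects; stubbed away when the class is mocked
        }
    }

    public static void main(String[] args) {
        Worker worker = mock(Worker.class);
        // Run the real configure(); apply() stays stubbed so it only records the call.
        doCallRealMethod().when(worker).configure("x");
        worker.configure("x");
        // Assert that configure() delegated with the expected argument.
        verify(worker).apply("x");
    }
}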

Example 25 with ZooKeeperInstance

Use of org.apache.accumulo.core.client.ZooKeeperInstance in project incubator-rya by apache.

From the class IndexWritingTool, method run().

@Override
public int run(final String[] args) throws Exception {
    Preconditions.checkArgument(args.length == 7, "java " + IndexWritingTool.class.getCanonicalName() + " hdfsSaveLocation sparqlFile cbinstance cbzk cbuser cbpassword rdfTablePrefix.");
    final String inputDir = PathUtils.clean(args[0]);
    final String sparqlFile = PathUtils.clean(args[1]);
    final String instStr = args[2];
    final String zooStr = args[3];
    final String userStr = args[4];
    final String passStr = args[5];
    final String tablePrefix = args[6];
    final String sparql = FileUtils.readFileToString(new File(sparqlFile));
    final Job job = new Job(getConf(), "Write HDFS Index to Accumulo");
    job.setJarByClass(this.getClass());
    final Configuration jobConf = job.getConfiguration();
    jobConf.setBoolean("mapred.map.tasks.speculative.execution", false);
    setVarOrders(sparql, jobConf);
    TextInputFormat.setInputPaths(job, inputDir);
    job.setInputFormatClass(TextInputFormat.class);
    job.setMapperClass(MyMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Mutation.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Mutation.class);
    job.setNumReduceTasks(0);
    String tableName;
    if (zooStr.equals("mock")) {
        tableName = tablePrefix;
    } else {
        tableName = tablePrefix + "INDEX_" + UUID.randomUUID().toString().replace("-", "").toUpperCase();
    }
    setAccumuloOutput(instStr, zooStr, userStr, passStr, job, tableName);
    jobConf.set(sparql_key, sparql);
    final int complete = job.waitForCompletion(true) ? 0 : -1;
    if (complete == 0) {
        final String[] varOrders = jobConf.getStrings("varOrders");
        final String orders = Joiner.on("\u0000").join(varOrders);
        Instance inst;
        if (zooStr.equals("mock")) {
            inst = new MockInstance(instStr);
        } else {
            inst = new ZooKeeperInstance(instStr, zooStr);
        }
        final Connector conn = inst.getConnector(userStr, passStr.getBytes(StandardCharsets.UTF_8));
        final BatchWriter bw = conn.createBatchWriter(tableName, 10, 5000, 1);
        final Counters counters = job.getCounters();
        final Counter c1 = counters.findCounter(cardCounter, cardCounter);
        final Mutation m = new Mutation("~SPARQL");
        final Value v = new Value(sparql.getBytes(StandardCharsets.UTF_8));
        m.put(new Text("" + c1.getValue()), new Text(orders), v);
        bw.addMutation(m);
        bw.close();
        return complete;
    } else {
        return complete;
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Configuration(org.apache.hadoop.conf.Configuration) MockInstance(org.apache.accumulo.core.client.mock.MockInstance) Instance(org.apache.accumulo.core.client.Instance) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance) Text(org.apache.hadoop.io.Text) Counter(org.apache.hadoop.mapreduce.Counter) Value(org.apache.accumulo.core.data.Value) Counters(org.apache.hadoop.mapreduce.Counters) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Job(org.apache.hadoop.mapreduce.Job) File(java.io.File)
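
The BatchWriter above is created with the deprecated (maxMemory, maxLatency, maxWriteThreads) overload. For comparison, a hedged sketch of writing the same ~SPARQL marker row through the BatchWriterConfig-based API; the connection details, table name, and cell contents are placeholders.

import java.nio.charset.StandardCharsets;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;

public class SparqlMarkerWriteSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder connection details; IndexWritingTool takes these from its CLI arguments.
        Instance inst = new ZooKeeperInstance("myInstance", "zk1:2181,zk2:2181,zk3:2181");
        Connector conn = inst.getConnector("root", new PasswordToken("secret"));
        // BatchWriterConfig replaces the deprecated numeric-argument overload used above.
        BatchWriterConfig config = new BatchWriterConfig().setMaxWriteThreads(1);
        BatchWriter bw = conn.createBatchWriter("rya_INDEX_EXAMPLE", config);
        Mutation m = new Mutation("~SPARQL");
        // Placeholder cardinality, variable orders, and query text.
        m.put(new Text("42"), new Text("varOrders"), new Value("SELECT ...".getBytes(StandardCharsets.UTF_8)));
        bw.addMutation(m);
        bw.close();
    }
}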

Aggregations

ZooKeeperInstance (org.apache.accumulo.core.client.ZooKeeperInstance)52 PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken)35 Instance (org.apache.accumulo.core.client.Instance)24 Connector (org.apache.accumulo.core.client.Connector)17 AccumuloException (org.apache.accumulo.core.client.AccumuloException)15 MockInstance (org.apache.accumulo.core.client.mock.MockInstance)15 AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException)13 IOException (java.io.IOException)8 Test (org.junit.Test)8 ClientConfiguration (org.apache.accumulo.core.client.ClientConfiguration)7 AccumuloRdfConfiguration (org.apache.rya.accumulo.AccumuloRdfConfiguration)7 Range (org.apache.accumulo.core.data.Range)6 ArrayList (java.util.ArrayList)5 MiniAccumuloCluster (org.apache.accumulo.minicluster.MiniAccumuloCluster)5 Text (org.apache.hadoop.io.Text)5 File (java.io.File)4 IteratorSetting (org.apache.accumulo.core.client.IteratorSetting)4 TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException)4 Pair (org.apache.accumulo.core.util.Pair)4 AccumuloConnectionParameters (org.apache.hadoop.hive.accumulo.AccumuloConnectionParameters)4