Example 6 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

From class CleanTmpIT, method test().

@Test
public void test() throws Exception {
    Connector c = getConnector();
    // make a table
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    // write to it
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("row");
    m.put("cf", "cq", "value");
    bw.addMutation(m);
    bw.flush();
    // Compact memory to make a file
    c.tableOperations().compact(tableName, null, null, true, true);
    // Make sure that we'll have a WAL
    m = new Mutation("row2");
    m.put("cf", "cq", "value");
    bw.addMutation(m);
    bw.close();
    // create a fake _tmp file in its directory
    String id = c.tableOperations().tableIdMap().get(tableName);
    Path file;
    try (Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(Range.prefix(id));
        s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
        Entry<Key, Value> entry = Iterables.getOnlyElement(s);
        file = new Path(entry.getKey().getColumnQualifier().toString());
    }
    FileSystem fs = getCluster().getFileSystem();
    assertTrue("Could not find file: " + file, fs.exists(file));
    Path tabletDir = file.getParent();
    assertNotNull("Tablet dir should not be null", tabletDir);
    Path tmp = new Path(tabletDir, "junk.rf_tmp");
    // Make the file
    fs.create(tmp).close();
    log.info("Created tmp file {}", tmp.toString());
    getCluster().stop();
    getCluster().start();
    try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
        assertEquals(2, Iterators.size(scanner.iterator()));
        // If we performed log recovery, we should have cleaned up any stray files
        assertFalse("File still exists: " + tmp, fs.exists(tmp));
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Connector (org.apache.accumulo.core.client.Connector), Scanner (org.apache.accumulo.core.client.Scanner), FileSystem (org.apache.hadoop.fs.FileSystem), RawLocalFileSystem (org.apache.hadoop.fs.RawLocalFileSystem), Value (org.apache.accumulo.core.data.Value), BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), Key (org.apache.accumulo.core.data.Key), Test (org.junit.Test)
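
Example 6 creates its writer with a default BatchWriterConfig. For context, a minimal sketch of the tuning knobs that config exposes; the values are illustrative assumptions, not settings from the test, and java.util.concurrent.TimeUnit is also needed:

BatchWriterConfig config = new BatchWriterConfig();
// buffer up to 10 MB of mutations on the client before sending
config.setMaxMemory(10 * 1024 * 1024);
// send buffered mutations automatically after at most 2 seconds
config.setMaxLatency(2, TimeUnit.SECONDS);
// background threads used to write to tablet servers
config.setMaxWriteThreads(4);
BatchWriter writer = c.createBatchWriter(tableName, config);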

Example 7 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

From class CleanUpIT, method run().

@Test
public void run() throws Exception {
    String tableName = getUniqueNames(1)[0];
    getConnector().tableOperations().create(tableName);
    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m1 = new Mutation("r1");
    m1.put("cf1", "cq1", 1, "5");
    bw.addMutation(m1);
    bw.flush();
    try (Scanner scanner = getConnector().createScanner(tableName, new Authorizations())) {
        int count = 0;
        for (Entry<Key, Value> entry : scanner) {
            count++;
            if (!entry.getValue().toString().equals("5")) {
                Assert.fail("Unexpected value " + entry.getValue());
            }
        }
        Assert.assertEquals("Unexpected count", 1, count);
        int threadCount = countThreads();
        if (threadCount < 2) {
            printThreadNames();
            Assert.fail("Not seeing expected threads. Saw " + threadCount);
        }
        CleanUp.shutdownNow();
        Mutation m2 = new Mutation("r2");
        m2.put("cf1", "cq1", 1, "6");
        try {
            // after shutdownNow(), new writes should be rejected
            bw.addMutation(m2);
            bw.flush();
            Assert.fail("batch writer did not fail");
        } catch (Exception e) {
            // expected
        }
        try {
            // expect close to fail as well; calling it still cleans up batch writer threads
            bw.close();
            Assert.fail("batch writer close did not fail");
        } catch (Exception e) {
            // expected
        }
        try {
            count = 0;
            Iterator<Entry<Key, Value>> iter = scanner.iterator();
            while (iter.hasNext()) {
                iter.next();
                count++;
            }
            Assert.fail("scanner did not fail");
        } catch (Exception e) {
            // expected: scans should also fail after shutdown
        }
        threadCount = countThreads();
        if (threadCount > 0) {
            printThreadNames();
            Assert.fail("Threads did not go away. Saw " + threadCount);
        }
    }
}
Also used: Scanner (org.apache.accumulo.core.client.Scanner), Authorizations (org.apache.accumulo.core.security.Authorizations), Entry (java.util.Map.Entry), Value (org.apache.accumulo.core.data.Value), BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), Key (org.apache.accumulo.core.data.Key), Test (org.junit.Test)
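
The test intentionally calls CleanUp.shutdownNow() while a writer and a scanner are still open, to prove their threads are torn down. In ordinary client code the orderly sequence is the reverse; a minimal sketch, assuming CleanUp here is org.apache.accumulo.core.util.CleanUp:

// release client resources first, then stop Accumulo's static client threads
bw.close();      // flushes buffered mutations and ends the writer's send threads
scanner.close(); // Scanner is AutoCloseable in this API, as the try-with-resources blocks above show
CleanUp.shutdownNow();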

Example 8 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

From class CloneTestIT, method writeData().

private BatchWriter writeData(String table1, Connector c) throws TableNotFoundException, MutationsRejectedException {
    BatchWriter bw = c.createBatchWriter(table1, new BatchWriterConfig());
    Mutation m1 = new Mutation("001");
    m1.put("data", "x", "9");
    m1.put("data", "y", "7");
    Mutation m2 = new Mutation("008");
    m2.put("data", "x", "3");
    m2.put("data", "y", "4");
    bw.addMutation(m1);
    bw.addMutation(m2);
    bw.flush();
    return bw;
}
Also used: BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation)
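
The helper flushes but deliberately leaves the writer open, so a caller can keep writing to the source table after cloning it. Examples 9 and 10 below use it in exactly this shape; condensed (props and exclude are the map and set those tests build):

BatchWriter bw = writeData(table1, c); // rows 001 and 008 are now flushed to table1
c.tableOperations().clone(table1, table2, true, props, exclude);
Mutation m3 = new Mutation("009");
m3.put("data", "x", "1");
// later writes land only in the source table, not in the clone
bw.addMutation(m3);
bw.close();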

Example 9 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

From class CloneTestIT, method testProps().

@Test
public void testProps() throws Exception {
    String[] tableNames = getUniqueNames(2);
    String table1 = tableNames[0];
    String table2 = tableNames[1];
    Connector c = getConnector();
    c.tableOperations().create(table1);
    c.tableOperations().setProperty(table1, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1M");
    c.tableOperations().setProperty(table1, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX.getKey(), "2M");
    c.tableOperations().setProperty(table1, Property.TABLE_FILE_MAX.getKey(), "23");
    BatchWriter bw = writeData(table1, c);
    Map<String, String> props = new HashMap<>();
    props.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "500K");
    Set<String> exclude = new HashSet<>();
    exclude.add(Property.TABLE_FILE_MAX.getKey());
    c.tableOperations().clone(table1, table2, true, props, exclude);
    Mutation m3 = new Mutation("009");
    m3.put("data", "x", "1");
    m3.put("data", "y", "2");
    bw.addMutation(m3);
    bw.close();
    checkData(table2, c);
    checkMetadata(table2, c);
    HashMap<String, String> tableProps = new HashMap<>();
    for (Entry<String, String> prop : c.tableOperations().getProperties(table2)) {
        tableProps.put(prop.getKey(), prop.getValue());
    }
    Assert.assertEquals("500K", tableProps.get(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey()));
    Assert.assertEquals(Property.TABLE_FILE_MAX.getDefaultValue(), tableProps.get(Property.TABLE_FILE_MAX.getKey()));
    Assert.assertEquals("2M", tableProps.get(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX.getKey()));
    c.tableOperations().delete(table1);
    c.tableOperations().delete(table2);
}
Also used: Connector (org.apache.accumulo.core.client.Connector), HashMap (java.util.HashMap), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), HashSet (java.util.HashSet), Test (org.junit.Test)
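
The three assertions encode the clone property rules: a key in props overrides the source value (500K wins over 1M), a key in exclude reverts to the system default (TABLE_FILE_MAX), and everything else is copied verbatim (the 2M index block size). Reduced to its essentials, with java.util.Collections assumed imported:

Map<String, String> propsToSet = new HashMap<>(); // values here override the source table's settings
propsToSet.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "500K");
Set<String> propsToExclude = Collections.singleton(Property.TABLE_FILE_MAX.getKey()); // these revert to defaults
// the boolean asks for a flush of the source table before the clone is taken
c.tableOperations().clone(table1, table2, true, propsToSet, propsToExclude);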

Example 10 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

From class CloneTestIT, method testDeleteClone().

@Test
public void testDeleteClone() throws Exception {
    String[] tableNames = getUniqueNames(3);
    String table1 = tableNames[0];
    String table2 = tableNames[1];
    String table3 = tableNames[2];
    Connector c = getConnector();
    AccumuloCluster cluster = getCluster();
    Assume.assumeTrue(cluster instanceof MiniAccumuloClusterImpl);
    MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
    String rootPath = mac.getConfig().getDir().getAbsolutePath();
    // verify that deleting a new table removes the files
    c.tableOperations().create(table3);
    writeData(table3, c).close();
    c.tableOperations().flush(table3, null, null, true);
    // check for files
    FileSystem fs = getCluster().getFileSystem();
    String id = c.tableOperations().tableIdMap().get(table3);
    FileStatus[] status = fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id));
    assertTrue(status.length > 0);
    // verify disk usage
    List<DiskUsage> diskUsage = c.tableOperations().getDiskUsage(Collections.singleton(table3));
    assertEquals(1, diskUsage.size());
    assertTrue(diskUsage.get(0).getUsage() > 100);
    // delete the table
    c.tableOperations().delete(table3);
    // verify it's gone from the file system
    Path tablePath = new Path(rootPath + "/accumulo/tables/" + id);
    if (fs.exists(tablePath)) {
        status = fs.listStatus(tablePath);
        assertTrue(status == null || status.length == 0);
    }
    c.tableOperations().create(table1);
    BatchWriter bw = writeData(table1, c);
    Map<String, String> props = new HashMap<>();
    props.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "500K");
    Set<String> exclude = new HashSet<>();
    exclude.add(Property.TABLE_FILE_MAX.getKey());
    c.tableOperations().clone(table1, table2, true, props, exclude);
    Mutation m3 = new Mutation("009");
    m3.put("data", "x", "1");
    m3.put("data", "y", "2");
    bw.addMutation(m3);
    bw.close();
    // delete source table, should not affect clone
    c.tableOperations().delete(table1);
    checkData(table2, c);
    c.tableOperations().compact(table2, null, null, true, true);
    checkData(table2, c);
    c.tableOperations().delete(table2);
}
Also used: Path (org.apache.hadoop.fs.Path), Connector (org.apache.accumulo.core.client.Connector), FileStatus (org.apache.hadoop.fs.FileStatus), HashMap (java.util.HashMap), AccumuloCluster (org.apache.accumulo.cluster.AccumuloCluster), DiskUsage (org.apache.accumulo.core.client.admin.DiskUsage), FileSystem (org.apache.hadoop.fs.FileSystem), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), MiniAccumuloClusterImpl (org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl), HashSet (java.util.HashSet), Test (org.junit.Test)

Aggregations

BatchWriter (org.apache.accumulo.core.client.BatchWriter): 402 uses
Mutation (org.apache.accumulo.core.data.Mutation): 360 uses
Test (org.junit.Test): 264 uses
Value (org.apache.accumulo.core.data.Value): 250 uses
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 246 uses
Text (org.apache.hadoop.io.Text): 194 uses
Key (org.apache.accumulo.core.data.Key): 179 uses
Scanner (org.apache.accumulo.core.client.Scanner): 174 uses
Connector (org.apache.accumulo.core.client.Connector): 169 uses
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 81 uses
Authorizations (org.apache.accumulo.core.security.Authorizations): 68 uses
Range (org.apache.accumulo.core.data.Range): 61 uses
Entry (java.util.Map.Entry): 51 uses
Map (java.util.Map): 50 uses
BatchScanner (org.apache.accumulo.core.client.BatchScanner): 46 uses
MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException): 44 uses
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 40 uses
HashMap (java.util.HashMap): 38 uses
ArrayList (java.util.ArrayList): 36 uses
Status (org.apache.accumulo.server.replication.proto.Replication.Status): 32 uses
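
Taken together, the most frequently co-used classes above make up the canonical write-then-read round trip. A minimal self-contained sketch, assuming a Connector c and a table that already exists (the names are illustrative):

import java.util.Map.Entry;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;

void roundTrip(Connector c, String tableName) throws Exception {
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    try {
        Mutation m = new Mutation("row");
        m.put("cf", "cq", "value");
        bw.addMutation(m);
    } finally {
        bw.close(); // close() flushes any buffered mutations
    }
    try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
        for (Entry<Key, Value> entry : scanner) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
    }
}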