
Example 11 with AccumuloClient

Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.

The class BulkNewIT, method testBulkFileMax.

private void testBulkFileMax(boolean usePlan) throws Exception {
    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
        addSplits(c, tableName, "0333 0666 0999 1333 1666");
        String dir = getDir("/testBulkFileMax-");
        Map<String, Set<String>> hashes = new HashMap<>();
        for (String endRow : Arrays.asList("0333 0666 0999 1333 1666 null".split(" "))) {
            hashes.put(endRow, new HashSet<>());
        }
        // Add a junk file, should be ignored
        FSDataOutputStream out = fs.create(new Path(dir, "junk"));
        out.writeChars("ABCDEFG\n");
        out.close();
        // 1 Tablet 0333-null
        String h1 = writeData(dir + "/f1.", aconf, 0, 333);
        hashes.get("0333").add(h1);
        // 3 Tablets 0666-0334, 0999-0667, 1333-1000
        String h2 = writeData(dir + "/bad-file.", aconf, 334, 1333);
        hashes.get("0666").add(h2);
        hashes.get("0999").add(h2);
        hashes.get("1333").add(h2);
        // 1 Tablet 1666-1334
        String h3 = writeData(dir + "/f3.", aconf, 1334, 1499);
        hashes.get("1666").add(h3);
        // 2 Tablets 1666-1334, >1666
        String h4 = writeData(dir + "/f4.", aconf, 1500, 1999);
        hashes.get("1666").add(h4);
        hashes.get("null").add(h4);
        if (usePlan) {
            LoadPlan loadPlan = LoadPlan.builder()
                .loadFileTo("f1.rf", RangeType.TABLE, null, row(333))
                .loadFileTo("bad-file.rf", RangeType.TABLE, row(333), row(1333))
                .loadFileTo("f3.rf", RangeType.FILE, row(1334), row(1499))
                .loadFileTo("f4.rf", RangeType.FILE, row(1500), row(1999))
                .build();
            c.tableOperations().importDirectory(dir).to(tableName).plan(loadPlan).load();
        } else {
            c.tableOperations().importDirectory(dir).to(tableName).load();
        }
        verifyData(c, tableName, 0, 1999, false);
        verifyMetadata(c, tableName, hashes);
    }
}
Also used: AccumuloClient(org.apache.accumulo.core.client.AccumuloClient), Path(org.apache.hadoop.fs.Path), SortedSet(java.util.SortedSet), Set(java.util.Set), TreeSet(java.util.TreeSet), HashSet(java.util.HashSet), HashMap(java.util.HashMap), LoadPlan(org.apache.accumulo.core.data.LoadPlan), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)
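
The writeData helper called above is part of BulkNewIT and is not shown on this page. As a rough sketch only, assuming the public org.apache.accumulo.core.client.rfile.RFile writer API, rows zero-padded to four digits to match the split points, and made-up family/qualifier/class names (the real helper also takes an AccumuloConfiguration and returns a hash of the file contents), a similar helper could look like this:

import static java.nio.charset.StandardCharsets.UTF_8;

import org.apache.accumulo.core.client.rfile.RFile;
import org.apache.accumulo.core.client.rfile.RFileWriter;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.Text;

class RFileWriteSketch {
    // Writes one key per row in [start, end] to a single RFile. Rows are
    // zero-padded to four digits so they sort the same way as the table's
    // split points ("0333", "0666", ...).
    static void writeRows(FileSystem fs, String file, int start, int end) throws Exception {
        try (RFileWriter writer = RFile.newWriter().to(file).withFileSystem(fs).build()) {
            for (int i = start; i <= end; i++) {
                String row = String.format("%04d", i);
                // keys must be appended in sorted order
                writer.append(new Key(new Text(row), new Text("cf"), new Text("cq")),
                    new Value(("v" + i).getBytes(UTF_8)));
            }
        }
    }
}

Note how the LoadPlan above uses RangeType.TABLE for f1.rf and bad-file.rf (ranges given as existing tablet boundaries, exclusive start row and inclusive end row) and RangeType.FILE for f3.rf and f4.rf (ranges given as the first and last row actually present in the file).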

Example 12 with AccumuloClient

Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.

The class BulkNewIT, method testEmptyDirWithIgnoreOption.

// Test that the ignore option does not throw an exception if the import directory contains
// no files.
@Test
public void testEmptyDirWithIgnoreOption() throws Exception {
    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
        String dir = getDir("/testBulkFile-");
        FileSystem fs = getCluster().getFileSystem();
        fs.mkdirs(new Path(dir));
        c.tableOperations().importDirectory(dir).to(tableName).ignoreEmptyDir(true).load();
    }
}
Also used: AccumuloClient(org.apache.accumulo.core.client.AccumuloClient), Path(org.apache.hadoop.fs.Path), FileSystem(org.apache.hadoop.fs.FileSystem), RawLocalFileSystem(org.apache.hadoop.fs.RawLocalFileSystem), Test(org.junit.Test)

Example 13 with AccumuloClient

Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.

The class BulkNewIT, method testEmptyDir.

@Test
public void testEmptyDir() throws Exception {
    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
        String dir = getDir("/testBulkFile-");
        FileSystem fs = getCluster().getFileSystem();
        fs.mkdirs(new Path(dir));
        assertThrows(IllegalArgumentException.class, () -> c.tableOperations().importDirectory(dir).to(tableName).load());
    }
}
Also used: AccumuloClient(org.apache.accumulo.core.client.AccumuloClient), Path(org.apache.hadoop.fs.Path), FileSystem(org.apache.hadoop.fs.FileSystem), RawLocalFileSystem(org.apache.hadoop.fs.RawLocalFileSystem), Test(org.junit.Test)

Example 14 with AccumuloClient

Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.

The class BulkNewIT, method testBadPermissions.

@Test
public void testBadPermissions() throws Exception {
    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
        addSplits(c, tableName, "0333");
        String dir = getDir("/testBadPermissions-");
        writeData(dir + "/f1.", aconf, 0, 333);
        Path rFilePath = new Path(dir, "f1." + RFile.EXTENSION);
        FsPermission originalPerms = fs.getFileStatus(rFilePath).getPermission();
        fs.setPermission(rFilePath, FsPermission.valueOf("----------"));
        try {
            final var importMappingOptions = c.tableOperations().importDirectory(dir).to(tableName);
            var e = assertThrows(Exception.class, importMappingOptions::load);
            Throwable cause = e.getCause();
            assertTrue(cause instanceof FileNotFoundException || cause.getCause() instanceof FileNotFoundException);
        } finally {
            fs.setPermission(rFilePath, originalPerms);
        }
        originalPerms = fs.getFileStatus(new Path(dir)).getPermission();
        fs.setPermission(new Path(dir), FsPermission.valueOf("dr--r--r--"));
        try {
            final var importMappingOptions = c.tableOperations().importDirectory(dir).to(tableName);
            var ae = assertThrows(AccumuloException.class, importMappingOptions::load);
            assertTrue(ae.getCause() instanceof FileNotFoundException);
        } finally {
            fs.setPermission(new Path(dir), originalPerms);
        }
    }
}
Also used: AccumuloClient(org.apache.accumulo.core.client.AccumuloClient), Path(org.apache.hadoop.fs.Path), FileNotFoundException(java.io.FileNotFoundException), FsPermission(org.apache.hadoop.fs.permission.FsPermission), Test(org.junit.Test)

Example 15 with AccumuloClient

Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.

The class BadLocalityGroupMincIT, method test.

@Test
public void test() throws Exception {
    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
        String tableName = getUniqueNames(1)[0];
        c.tableOperations().create(tableName);
        // intentionally bad locality group config where two groups share a family
        c.tableOperations().setProperty(tableName, Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "g1", "fam1,fam2");
        c.tableOperations().setProperty(tableName, Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "g2", "fam2,fam3");
        c.tableOperations().setProperty(tableName, Property.TABLE_LOCALITY_GROUPS.getKey(), "g1,g2");
        c.tableOperations().offline(tableName, true);
        c.tableOperations().online(tableName, true);
        try (BatchWriter bw = c.createBatchWriter(tableName)) {
            Mutation m = new Mutation(new Text("r1"));
            m.put("acf", tableName, "1");
            bw.addMutation(m);
        }
        FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 0, 0);
        // even with bad locality group config, the minor compaction should still work
        c.tableOperations().flush(tableName, null, null, true);
        FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 1, 1);
        try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
            Entry<Key, Value> entry = Iterables.getOnlyElement(scanner);
            assertEquals("r1", entry.getKey().getRowData().toString());
            assertEquals("acf", entry.getKey().getColumnFamilyData().toString());
            assertEquals(tableName, entry.getKey().getColumnQualifierData().toString());
            assertEquals("1", entry.getValue().toString());
        }
        // this should not hang
        c.tableOperations().delete(tableName);
    }
}
Also used: AccumuloClient(org.apache.accumulo.core.client.AccumuloClient), Scanner(org.apache.accumulo.core.client.Scanner), Value(org.apache.accumulo.core.data.Value), Text(org.apache.hadoop.io.Text), BatchWriter(org.apache.accumulo.core.client.BatchWriter), Mutation(org.apache.accumulo.core.data.Mutation), Key(org.apache.accumulo.core.data.Key), Test(org.junit.Test)
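
For contrast with the intentionally overlapping configuration above (fam2 is listed in both g1 and g2), here is a minimal sketch of a valid locality group setup using TableOperations.setLocalityGroups; the group and family names are illustrative only:

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.hadoop.io.Text;

class LocalityGroupSketch {
    // Each column family is assigned to at most one group; families not listed
    // here fall into the default locality group.
    static void configure(AccumuloClient c, String tableName) throws Exception {
        Map<String, Set<Text>> groups = new HashMap<>();
        groups.put("g1", Set.of(new Text("fam1")));
        groups.put("g2", Set.of(new Text("fam2"), new Text("fam3")));
        c.tableOperations().setLocalityGroups(tableName, groups);
    }
}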

Aggregations

AccumuloClient (org.apache.accumulo.core.client.AccumuloClient): 500
Test (org.junit.Test): 411
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 149
Text (org.apache.hadoop.io.Text): 143
Mutation (org.apache.accumulo.core.data.Mutation): 138
Scanner (org.apache.accumulo.core.client.Scanner): 122
Value (org.apache.accumulo.core.data.Value): 118
Key (org.apache.accumulo.core.data.Key): 108
NewTableConfiguration (org.apache.accumulo.core.client.admin.NewTableConfiguration): 91
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 64
HashMap (java.util.HashMap): 61
Range (org.apache.accumulo.core.data.Range): 51
TreeSet (java.util.TreeSet): 50
ArrayList (java.util.ArrayList): 47
Entry (java.util.Map.Entry): 41
Path (org.apache.hadoop.fs.Path): 39
CompactionConfig (org.apache.accumulo.core.client.admin.CompactionConfig): 34
Authorizations (org.apache.accumulo.core.security.Authorizations): 34
BatchScanner (org.apache.accumulo.core.client.BatchScanner): 32
HashSet (java.util.HashSet): 31