
Example 1 with BlobStoreFile

Use of org.apache.storm.blobstore.BlobStoreFile in project storm by apache.

In the class HdfsBlobStoreImpl, the method listBlobStoreFiles:

protected Iterator<BlobStoreFile> listBlobStoreFiles(Path path) throws IOException {
    ArrayList<BlobStoreFile> ret = new ArrayList<BlobStoreFile>();
    FileStatus[] files = fileSystem.listStatus(new Path[] { path });
    if (files != null) {
        for (FileStatus sub : files) {
            try {
                ret.add(new HdfsBlobStoreFile(sub.getPath().getParent(), sub.getPath().getName(), hadoopConf));
            } catch (IllegalArgumentException e) {
                // Ignored: the file name did not match the expected blob file format, so skip it
                LOG.warn("Found an unexpected file in {} {}", path, sub.getPath().getName());
            }
        }
    }
    return ret.iterator();
}
Also used : FileStatus(org.apache.hadoop.fs.FileStatus) BlobStoreFile(org.apache.storm.blobstore.BlobStoreFile) ArrayList(java.util.ArrayList)

Example 2 with BlobStoreFile

Use of org.apache.storm.blobstore.BlobStoreFile in project storm by apache.

In the class HdfsBlobStoreImplTest, the method testMultiple:

// Be careful about adding additional tests as the dfscluster will be shared
@Test
public void testMultiple() throws Exception {
    String testString = "testingblob";
    String validKey = "validkeyBasic";
    // Will be closed automatically when shutting down the DFS cluster
    FileSystem fs = DFS_CLUSTER_RULE.getDfscluster().getFileSystem();
    Map<String, Object> conf = new HashMap<>();
    try (TestHdfsBlobStoreImpl hbs = new TestHdfsBlobStoreImpl(blobDir, conf, DFS_CLUSTER_RULE.getHadoopConf())) {
        // should have created blobDir
        assertTrue("BlobStore dir wasn't created", fs.exists(blobDir));
        assertEquals("BlobStore dir was created with wrong permissions", HdfsBlobStoreImpl.BLOBSTORE_DIR_PERMISSION, fs.getFileStatus(blobDir).getPermission());
        // test exist with non-existent key
        assertFalse("file exists but shouldn't", hbs.exists("bogus"));
        // test write
        BlobStoreFile pfile = hbs.write(validKey, false);
        // Adding metadata to avoid null pointer exception
        SettableBlobMeta meta = new SettableBlobMeta();
        meta.set_replication_factor(1);
        pfile.setMetadata(meta);
        try (OutputStream ios = pfile.getOutputStream()) {
            ios.write(testString.getBytes(StandardCharsets.UTF_8));
        }
        // test modTime can change
        Long initialModTime = pfile.getModTime();
        try (OutputStream ios = pfile.getOutputStream()) {
            ios.write(testString.getBytes(StandardCharsets.UTF_8));
        }
        Long nextModTime = pfile.getModTime();
        assertTrue(nextModTime > initialModTime);
        // test commit creates properly
        assertTrue("BlobStore key dir wasn't created", fs.exists(fullKeyDir));
        pfile.commit();
        Path dataFile = new Path(new Path(fullKeyDir, validKey), BLOBSTORE_DATA);
        assertTrue("blob data not committed", fs.exists(dataFile));
        assertEquals("BlobStore dir was created with wrong permissions", HdfsBlobStoreFile.BLOBSTORE_FILE_PERMISSION, fs.getFileStatus(dataFile).getPermission());
        assertTrue("key doesn't exist but should", hbs.exists(validKey));
        // test read
        BlobStoreFile readpFile = hbs.read(validKey);
        try (InputStream inStream = readpFile.getInputStream()) {
            String readString = IOUtils.toString(inStream, StandardCharsets.UTF_8);
            assertEquals("string read from blob doesn't match", testString, readString);
        }
        // test listkeys
        Iterator<String> keys = hbs.listKeys();
        assertTrue("blob has one key", keys.hasNext());
        assertEquals("one key in blobstore", validKey, keys.next());
        // delete
        hbs.deleteKey(validKey);
        assertFalse("key not deleted", fs.exists(dataFile));
        assertFalse("key not deleted", hbs.exists(validKey));
        // Now do multiple
        String testString2 = "testingblob2";
        String validKey2 = "validkey2";
        // test write
        pfile = hbs.write(validKey, false);
        pfile.setMetadata(meta);
        try (OutputStream ios = pfile.getOutputStream()) {
            ios.write(testString.getBytes(StandardCharsets.UTF_8));
        }
        // test commit creates properly
        assertTrue("BlobStore key dir wasn't created", fs.exists(fullKeyDir));
        pfile.commit();
        assertTrue("blob data not committed", fs.exists(dataFile));
        assertEquals("BlobStore dir was created with wrong permissions", HdfsBlobStoreFile.BLOBSTORE_FILE_PERMISSION, fs.getFileStatus(dataFile).getPermission());
        assertTrue("key doesn't exist but should", hbs.exists(validKey));
        // test write again
        pfile = hbs.write(validKey2, false);
        pfile.setMetadata(meta);
        try (OutputStream ios2 = pfile.getOutputStream()) {
            ios2.write(testString2.getBytes(StandardCharsets.UTF_8));
        }
        // test commit second creates properly
        pfile.commit();
        Path dataFile2 = new Path(new Path(fullKeyDir, validKey2), BLOBSTORE_DATA);
        assertTrue("blob data not committed", fs.exists(dataFile2));
        assertEquals("BlobStore dir was created with wrong permissions", HdfsBlobStoreFile.BLOBSTORE_FILE_PERMISSION, fs.getFileStatus(dataFile2).getPermission());
        assertTrue("key doesn't exist but should", hbs.exists(validKey2));
        // test listkeys
        keys = hbs.listKeys();
        int total = 0;
        boolean key1Found = false;
        boolean key2Found = false;
        while (keys.hasNext()) {
            total++;
            String key = keys.next();
            if (key.equals(validKey)) {
                key1Found = true;
            } else if (key.equals(validKey2)) {
                key2Found = true;
            } else {
                fail("Found key that wasn't expected: " + key);
            }
        }
        assertEquals("number of keys is wrong", 2, total);
        assertTrue("blobstore missing key1", key1Found);
        assertTrue("blobstore missing key2", key2Found);
        // test read
        readpFile = hbs.read(validKey);
        try (InputStream inStream = readpFile.getInputStream()) {
            String readString = IOUtils.toString(inStream, StandardCharsets.UTF_8);
            assertEquals("string read from blob doesn't match", testString, readString);
        }
        // test read
        readpFile = hbs.read(validKey2);
        try (InputStream inStream = readpFile.getInputStream()) {
            String readString = IOUtils.toString(inStream, StandardCharsets.UTF_8);
            assertEquals("string read from blob doesn't match", testString2, readString);
        }
        hbs.deleteKey(validKey);
        assertFalse("key not deleted", hbs.exists(validKey));
        hbs.deleteKey(validKey2);
        assertFalse("key not deleted", hbs.exists(validKey2));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HashMap(java.util.HashMap) BlobStoreFile(org.apache.storm.blobstore.BlobStoreFile) InputStream(java.io.InputStream) OutputStream(java.io.OutputStream) FileSystem(org.apache.hadoop.fs.FileSystem) SettableBlobMeta(org.apache.storm.generated.SettableBlobMeta) Test(org.junit.Test)
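The test above leans on class-level fixtures that are outside this excerpt: DFS_CLUSTER_RULE, blobDir, fullKeyDir, BLOBSTORE_DATA and TestHdfsBlobStoreImpl. As a rough, hypothetical sketch (names and structure assumed, not copied from HdfsBlobStoreImplTest), a shared-cluster rule of that kind could be built on JUnit 4's ExternalResource and Hadoop's MiniDFSCluster, which is also why the comment warns against carelessly adding tests: every test method in the class reuses the same mini cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.rules.ExternalResource;

// Hypothetical sketch of a shared DFS cluster rule; the real fixture in
// HdfsBlobStoreImplTest may differ in its details.
public class DfsClusterRule extends ExternalResource {
    private Configuration hadoopConf;
    private MiniDFSCluster dfscluster;

    @Override
    protected void before() throws Throwable {
        hadoopConf = new Configuration();
        // One single-node HDFS mini cluster for the whole test class.
        dfscluster = new MiniDFSCluster.Builder(hadoopConf).numDataNodes(1).build();
        dfscluster.waitActive();
    }

    @Override
    protected void after() {
        if (dfscluster != null) {
            // Shutting down the cluster also takes care of file systems obtained from it,
            // which is what the "closed automatically" comment in the test refers to.
            dfscluster.shutdown();
        }
    }

    public MiniDFSCluster getDfscluster() {
        return dfscluster;
    }

    public Configuration getHadoopConf() {
        return hadoopConf;
    }
}

The test class would then expose one instance as a @ClassRule (for example, public static final DfsClusterRule DFS_CLUSTER_RULE = new DfsClusterRule();), so the cluster starts before the first test and is torn down after the last one.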

Example 3 with BlobStoreFile

Use of org.apache.storm.blobstore.BlobStoreFile in project storm by apache.

In the class HdfsBlobStoreImplTest, the method testGetFileLength:

@Test
public void testGetFileLength() throws Exception {
    Map<String, Object> conf = new HashMap<>();
    String validKey = "validkeyBasic";
    String testString = "testingblob";
    try (TestHdfsBlobStoreImpl hbs = new TestHdfsBlobStoreImpl(blobDir, conf, DFS_CLUSTER_RULE.getHadoopConf())) {
        BlobStoreFile pfile = hbs.write(validKey, false);
        // Adding metadata to avoid null pointer exception
        SettableBlobMeta meta = new SettableBlobMeta();
        meta.set_replication_factor(1);
        pfile.setMetadata(meta);
        try (OutputStream ios = pfile.getOutputStream()) {
            ios.write(testString.getBytes(StandardCharsets.UTF_8));
        }
        assertEquals(testString.getBytes(StandardCharsets.UTF_8).length, pfile.getFileLength());
    }
}
Also used : HashMap(java.util.HashMap) BlobStoreFile(org.apache.storm.blobstore.BlobStoreFile) OutputStream(java.io.OutputStream) SettableBlobMeta(org.apache.storm.generated.SettableBlobMeta) Test(org.junit.Test)

Example 4 with BlobStoreFile

Use of org.apache.storm.blobstore.BlobStoreFile in project storm by apache.

In the class HdfsBlobStoreImpl, the method fullCleanup:

public void fullCleanup(long age) throws IOException {
    long cleanUpIfBefore = System.currentTimeMillis() - age;
    Iterator<String> keys = new KeyInHashDirIterator();
    while (keys.hasNext()) {
        String key = keys.next();
        Path keyDir = getKeyDir(key);
        Iterator<BlobStoreFile> i = listBlobStoreFiles(keyDir);
        if (!i.hasNext()) {
            // The dir is empty, so try to delete it; the delete may fail, and that is OK
            try {
                fileSystem.delete(keyDir, true);
            } catch (Exception e) {
                LOG.warn("Could not delete " + keyDir + " will try again later");
            }
        }
        while (i.hasNext()) {
            BlobStoreFile f = i.next();
            if (f.isTmp()) {
                if (f.getModTime() <= cleanUpIfBefore) {
                    f.delete();
                }
            }
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) BlobStoreFile(org.apache.storm.blobstore.BlobStoreFile) NoSuchElementException(java.util.NoSuchElementException) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException)
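fullCleanup deletes only temporary blob files whose modification time is older than the supplied age, after first trying to remove key directories that have become empty. How Storm schedules this internally is not shown in this excerpt; the helper below is a hedged caller-side sketch (the timer, the method name and the HdfsBlobStoreImpl parameter are illustrative assumptions, not the project's own scheduling code).

import java.io.IOException;
import java.util.Timer;
import java.util.TimerTask;
import org.apache.storm.hdfs.blobstore.HdfsBlobStoreImpl;

// Hypothetical helper (not part of HdfsBlobStoreImpl): run fullCleanup periodically so
// temporary files left behind by interrupted writes are eventually purged.
static Timer schedulePeriodicCleanup(HdfsBlobStoreImpl blobStoreImpl, long ageMillis) {
    Timer cleanupTimer = new Timer("blobstore-cleanup", true);
    cleanupTimer.scheduleAtFixedRate(new TimerTask() {
        @Override
        public void run() {
            try {
                // Delete tmp blob files older than ageMillis; empty key dirs go too.
                blobStoreImpl.fullCleanup(ageMillis);
            } catch (IOException e) {
                // Cleanup is best effort; a failure here is simply retried on the next tick.
            }
        }
    }, ageMillis, ageMillis);
    return cleanupTimer;
}

A caller would keep the returned Timer and cancel() it during shutdown.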

Example 5 with BlobStoreFile

Use of org.apache.storm.blobstore.BlobStoreFile in project storm by apache.

In the class HdfsBlobStore, the method getBlobMeta:

@Override
public ReadableBlobMeta getBlobMeta(String key, Subject who) throws AuthorizationException, KeyNotFoundException {
    who = checkAndGetSubject(who);
    validateKey(key);
    SettableBlobMeta meta = extractBlobMeta(key);
    aclHandler.validateUserCanReadMeta(meta.get_acl(), who, key);
    ReadableBlobMeta rbm = new ReadableBlobMeta();
    rbm.set_settable(meta);
    try {
        BlobStoreFile pf = hbs.read(DATA_PREFIX + key);
        rbm.set_version(pf.getModTime());
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    return rbm;
}
Also used : BlobStoreFile(org.apache.storm.blobstore.BlobStoreFile) ReadableBlobMeta(org.apache.storm.generated.ReadableBlobMeta) IOException(java.io.IOException) SettableBlobMeta(org.apache.storm.generated.SettableBlobMeta)
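Because getBlobMeta surfaces the data file's modification time as the blob version, callers can tell whether a blob has changed without downloading it again. A minimal caller-side sketch follows; the helper is an assumption for illustration, not code from HdfsBlobStore.

import javax.security.auth.Subject;
import org.apache.storm.blobstore.BlobStore;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.KeyNotFoundException;
import org.apache.storm.generated.ReadableBlobMeta;

// Hypothetical staleness check: compare the version reported by getBlobMeta
// (the mod time of the committed data file) with the version we recorded last time.
static boolean isBlobStale(BlobStore blobStore, String key, Subject who, long cachedVersion)
        throws AuthorizationException, KeyNotFoundException {
    ReadableBlobMeta meta = blobStore.getBlobMeta(key, who);
    return meta.get_version() != cachedVersion;
}

If the versions differ, the caller would re-fetch the blob and record the new version.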

Aggregations

BlobStoreFile (org.apache.storm.blobstore.BlobStoreFile) 9
IOException (java.io.IOException) 6
SettableBlobMeta (org.apache.storm.generated.SettableBlobMeta) 5
FileNotFoundException (java.io.FileNotFoundException) 2
InputStream (java.io.InputStream) 2
OutputStream (java.io.OutputStream) 2
HashMap (java.util.HashMap) 2
Path (org.apache.hadoop.fs.Path) 2
Test (org.junit.Test) 2
ByteArrayOutputStream (java.io.ByteArrayOutputStream) 1
ArrayList (java.util.ArrayList) 1
NoSuchElementException (java.util.NoSuchElementException) 1
FileStatus (org.apache.hadoop.fs.FileStatus) 1
FileSystem (org.apache.hadoop.fs.FileSystem) 1
ReadableBlobMeta (org.apache.storm.generated.ReadableBlobMeta) 1
WrappedKeyAlreadyExistsException (org.apache.storm.utils.WrappedKeyAlreadyExistsException) 1
WrappedKeyNotFoundException (org.apache.storm.utils.WrappedKeyNotFoundException) 1