Search in sources:

Example 6 with FileDiff

Use of org.smartdata.model.FileDiff in the project SSM by Intel-bigdata.

Source: class TestAlluxioEntryApplier, method testDeleteFileApplier.

@Test
public void testDeleteFileApplier() throws Exception {
    FileSystem mockFs = Mockito.mock(FileSystem.class);
    AlluxioEntryApplier applier = new AlluxioEntryApplier(metaStore, mockFs);
    // Seed the metastore with the file the delete journal entry will refer to.
    FileInfo fileToDelete = FileInfo.newBuilder()
        .setFileId(100663295)
        .setIsdir(false)
        .setPath("/foo/foobar_del")
        .build();
    metaStore.insertFile(fileToDelete);
    // Register a backup rule covering the path so the applier records a diff.
    BackUpInfo backup = new BackUpInfo(1L, "/foo/foobar_del", "remote/dest/", 10);
    metaStore.insertBackUpInfo(backup);
    // Stub the Alluxio status lookup for the path being deleted.
    alluxio.wire.FileInfo wireInfo = new alluxio.wire.FileInfo()
        .setFileId(100663295)
        .setPath("/foo/foobar_del")
        .setLength(500L)
        .setFolder(false)
        .setBlockSizeBytes(510000)
        .setLastModificationTimeMs(1515665270681L)
        .setCreationTimeMs(1515665270681L)
        .setMode(493)
        .setOwner("user1")
        .setGroup("group1");
    URIStatus stubbedStatus = new URIStatus(wireInfo);
    Mockito.when(mockFs.getStatus(new AlluxioURI("/foo/foobar_del"))).thenReturn(stubbedStatus);
    // Apply a DeleteFile journal entry for the backed-up file.
    DeleteFileEntry deleteEntry = DeleteFileEntry.newBuilder()
        .setId(100663295)
        .setOpTimeMs(1515737580798L)
        .setAlluxioOnly(true)
        .setRecursive(false)
        .build();
    JournalEntry journalEntry = JournalEntry.newBuilder().setDeleteFile(deleteEntry).build();
    applier.apply(journalEntry);
    // A DELETE FileDiff with the original path as source must have been stored.
    List<FileDiff> diffs = metaStore.getFileDiffsByFileName("/foo/foobar_del");
    Assert.assertTrue(diffs.size() > 0);
    for (FileDiff diff : diffs) {
        if (diff.getDiffType().equals(FileDiffType.DELETE)) {
            Assert.assertEquals("/foo/foobar_del", diff.getSrc());
        }
    }
}
Also used : AlluxioEntryApplier(org.smartdata.alluxio.metric.fetcher.AlluxioEntryApplier) URIStatus(alluxio.client.file.URIStatus) JournalEntry(alluxio.proto.journal.Journal.JournalEntry) FileInfo(org.smartdata.model.FileInfo) BackUpInfo(org.smartdata.model.BackUpInfo) FileSystem(alluxio.client.file.FileSystem) FileDiff(org.smartdata.model.FileDiff) AlluxioURI(alluxio.AlluxioURI) Test(org.junit.Test)

Example 7 with FileDiff

Use of org.smartdata.model.FileDiff in the project SSM by Intel-bigdata.

Source: class TestFileDiffDao, method testInsertAndGetSingleRecord.

@Test
public void testInsertAndGetSingleRecord() {
    // Build a minimal PENDING/APPEND diff with one parameter set.
    FileDiff diff = new FileDiff();
    HashMap<String, String> params = new HashMap<String, String>();
    diff.setParameters(params);
    params.put("-test", "test");
    diff.setSrc("test");
    diff.setState(FileDiffState.PENDING);
    diff.setDiffType(FileDiffType.APPEND);
    diff.setCreateTime(1);
    // Insert and verify the record round-trips through the DAO unchanged.
    fileDiffDao.insert(diff);
    Assert.assertTrue(fileDiffDao.getAll().get(0).equals(diff));
}
Also used : FileDiff(org.smartdata.model.FileDiff) Test(org.junit.Test)

Example 8 with FileDiff

Use of org.smartdata.model.FileDiff in the project SSM by Intel-bigdata.

Source: class AlluxioEntryApplier, method deleteFromEntry.

/**
 * Converts a DeleteFile journal entry into the SQL that removes the file's
 * row from the metastore; when the path is covered by a backup rule, a
 * DELETE FileDiff is also recorded so the deletion can be replayed remotely.
 *
 * @param deleteFileEntry the journal entry describing the deleted file
 * @return the SQL DELETE statement for the metastore's file table
 * @throws MetaStoreException if the backup check or diff insertion fails
 */
private String deleteFromEntry(DeleteFileEntry deleteFileEntry) throws MetaStoreException {
    String path = getPathByFileId(deleteFileEntry.getId());
    if (inBackup(path)) {
        // Backed-up path: persist a DELETE diff keyed by the original source path.
        FileDiff deleteDiff = new FileDiff(FileDiffType.DELETE);
        deleteDiff.setSrc(path);
        metaStore.insertFileDiff(deleteDiff);
    }
    return String.format("DELETE FROM file WHERE fid =%s;", deleteFileEntry.getId());
}
Also used : FileDiff(org.smartdata.model.FileDiff)

Example 9 with FileDiff

Use of org.smartdata.model.FileDiff in the project SSM by Intel-bigdata.

Source: class TestInotifyEventApplier, method testApplier.

@Test
public void testApplier() throws Exception {
    DFSClient client = Mockito.mock(DFSClient.class);
    // Seed the metastore with the root directory entry.
    FileInfo root = HadoopUtil.convertFileStatus(getDummyDirStatus("/", 1000), "/");
    metaStore.insertFile(root);
    // Back up "/file" so applied events also produce FileDiff records.
    BackUpInfo backUpInfo = new BackUpInfo(1L, "/file", "remote/dest/", 10);
    metaStore.insertBackUpInfo(backUpInfo);
    InotifyEventApplier applier = new InotifyEventApplier(metaStore, client);
    // CREATE: a new file "/file"; the applier resolves it via the mocked client.
    Event.CreateEvent createEvent = new Event.CreateEvent.Builder().iNodeType(Event.CreateEvent.INodeType.FILE).ctime(1).defaultBlockSize(1024).groupName("cg1").overwrite(true).ownerName("user1").path("/file").perms(new FsPermission("777")).replication(3).build();
    HdfsFileStatus status1 = CompatibilityHelperLoader.getHelper().createHdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission("777"), "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0);
    Mockito.when(client.getFileInfo(Matchers.startsWith("/file"))).thenReturn(status1);
    Mockito.when(client.getFileInfo(Matchers.startsWith("/dir"))).thenReturn(getDummyDirStatus("", 1010));
    applier.apply(new Event[] { createEvent });
    FileInfo result1 = metaStore.getFile().get(1);
    // JUnit convention: expected value first, actual second (fixes reversed
    // arguments so failure messages report the right roles).
    Assert.assertEquals("/file", result1.getPath());
    Assert.assertEquals(1010L, result1.getFileId());
    Assert.assertEquals(511, result1.getPermission());
    // CLOSE: the file is finalized with length 1024 at mtime 0.
    Event close = new Event.CloseEvent("/file", 1024, 0);
    applier.apply(new Event[] { close });
    FileInfo result2 = metaStore.getFile().get(1);
    Assert.assertEquals(1024, result2.getLength());
    Assert.assertEquals(0L, result2.getModificationTime());
    // Event truncate = new Event.TruncateEvent("/file", 512, 16);
    // applier.apply(new Event[] {truncate});
    // ResultSet result3 = metaStore.executeQuery("SELECT * FROM files");
    // Assert.assertEquals(result3.getLong("length"), 512);
    // Assert.assertEquals(result3.getLong("modification_time"), 16L);
    // METADATA/TIMES: update both mtime and atime.
    Event meta = new Event.MetadataUpdateEvent.Builder().path("/file").metadataType(Event.MetadataUpdateEvent.MetadataType.TIMES).mtime(2).atime(3).replication(4).ownerName("user2").groupName("cg2").build();
    applier.apply(new Event[] { meta });
    FileInfo result4 = metaStore.getFile().get(1);
    Assert.assertEquals(3, result4.getAccessTime());
    Assert.assertEquals(2, result4.getModificationTime());
    // METADATA/OWNER: update owner and group.
    Event meta1 = new Event.MetadataUpdateEvent.Builder().path("/file").metadataType(Event.MetadataUpdateEvent.MetadataType.OWNER).ownerName("user1").groupName("cg1").build();
    applier.apply(new Event[] { meta1 });
    result4 = metaStore.getFile().get(1);
    Assert.assertEquals("user1", result4.getOwner());
    Assert.assertEquals("cg1", result4.getGroup());
    // RENAME: "/dir" -> "/dir2" must also relocate the nested "/dir/file".
    Event.CreateEvent createEvent2 = new Event.CreateEvent.Builder().iNodeType(Event.CreateEvent.INodeType.DIRECTORY).ctime(1).groupName("cg1").overwrite(true).ownerName("user1").path("/dir").perms(new FsPermission("777")).replication(3).build();
    Event.CreateEvent createEvent3 = new Event.CreateEvent.Builder().iNodeType(Event.CreateEvent.INodeType.FILE).ctime(1).groupName("cg1").overwrite(true).ownerName("user1").path("/dir/file").perms(new FsPermission("777")).replication(3).build();
    Event rename = new Event.RenameEvent.Builder().dstPath("/dir2").srcPath("/dir").timestamp(5).build();
    applier.apply(new Event[] { createEvent2, createEvent3, rename });
    List<FileInfo> result5 = metaStore.getFile();
    List<String> expectedPaths = Arrays.asList("/dir2", "/dir2/file", "/file");
    List<String> actualPaths = new ArrayList<>();
    for (FileInfo s : result5) {
        actualPaths.add(s.getPath());
    }
    Collections.sort(actualPaths);
    // 4 = root "/" plus the three expected paths.
    Assert.assertEquals(4, actualPaths.size());
    Assert.assertTrue(actualPaths.containsAll(expectedPaths));
    // UNLINK of "/": everything is removed; deletion appears to be applied
    // asynchronously, hence the sleep. NOTE(review): sleep-based waits are
    // potentially flaky — consider polling with a timeout.
    Event unlink = new Event.UnlinkEvent.Builder().path("/").timestamp(6).build();
    applier.apply(new Event[] { unlink });
    Thread.sleep(1200);
    Assert.assertEquals(0, metaStore.getFile().size());
    System.out.println("Files in table " + metaStore.getFile().size());
    // The backed-up "/file" accumulated four pending diffs over the scenario.
    List<FileDiff> fileDiffList = metaStore.getPendingDiff();
    Assert.assertEquals(4, fileDiffList.size());
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) ArrayList(java.util.ArrayList) FileInfo(org.smartdata.model.FileInfo) BackUpInfo(org.smartdata.model.BackUpInfo) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Event(org.apache.hadoop.hdfs.inotify.Event) FileDiff(org.smartdata.model.FileDiff) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)

Example 10 with FileDiff

Use of org.smartdata.model.FileDiff in the project SSM by Intel-bigdata.

Source: class InotifyEventApplier, method getMetaDataUpdateSql.

/**
 * Builds the SQL UPDATE statement that mirrors an HDFS metadata-update
 * inotify event into the metastore's "file" table. When the event's path is
 * covered by a backup rule, a METADATA {@link FileDiff} carrying the changed
 * attribute is also inserted before returning, so the change can be replayed
 * on the backup destination.
 *
 * NOTE(review): the path is interpolated directly into the SQL string; if a
 * path could ever contain a single quote the statement would break — confirm
 * paths are sanitized upstream.
 *
 * @param metadataUpdateEvent the inotify event describing which attribute changed
 * @return the SQL string to execute, or "" when there is nothing to update
 * @throws MetaStoreException if the backup lookup or diff insertion fails
 */
private String getMetaDataUpdateSql(Event.MetadataUpdateEvent metadataUpdateEvent) throws MetaStoreException {
    // Non-null only when the path is under a backup rule.
    FileDiff fileDiff = null;
    if (inBackup(metadataUpdateEvent.getPath())) {
        fileDiff = new FileDiff(FileDiffType.METADATA);
        fileDiff.setSrc(metadataUpdateEvent.getPath());
    }
    switch(metadataUpdateEvent.getMetadataType()) {
        case TIMES:
            // Only positive times are treated as "present" in the event.
            if (metadataUpdateEvent.getMtime() > 0 && metadataUpdateEvent.getAtime() > 0) {
                if (fileDiff != null) {
                    fileDiff.getParameters().put("-mtime", "" + metadataUpdateEvent.getMtime());
                    // fileDiff.getParameters().put("-access_time", "" + metadataUpdateEvent.getAtime());
                    metaStore.insertFileDiff(fileDiff);
                }
                return String.format("UPDATE file SET modification_time = %s, access_time = %s WHERE path = '%s';", metadataUpdateEvent.getMtime(), metadataUpdateEvent.getAtime(), metadataUpdateEvent.getPath());
            } else if (metadataUpdateEvent.getMtime() > 0) {
                if (fileDiff != null) {
                    fileDiff.getParameters().put("-mtime", "" + metadataUpdateEvent.getMtime());
                    metaStore.insertFileDiff(fileDiff);
                }
                return String.format("UPDATE file SET modification_time = %s WHERE path = '%s';", metadataUpdateEvent.getMtime(), metadataUpdateEvent.getPath());
            } else if (metadataUpdateEvent.getAtime() > 0) {
                // NOTE(review): for access-time-only updates no FileDiff is
                // inserted (matching the commented-out -access_time parameter
                // above) — looks intentional, but confirm atime changes need
                // not be replicated.
                // }
                return String.format("UPDATE file SET access_time = %s WHERE path = '%s';", metadataUpdateEvent.getAtime(), metadataUpdateEvent.getPath());
            } else {
                // Neither time present: nothing to update.
                return "";
            }
        case OWNER:
            if (fileDiff != null) {
                fileDiff.getParameters().put("-owner", "" + metadataUpdateEvent.getOwnerName());
                metaStore.insertFileDiff(fileDiff);
            }
            return String.format("UPDATE file SET owner = '%s', owner_group = '%s' WHERE path = '%s';", metadataUpdateEvent.getOwnerName(), metadataUpdateEvent.getGroupName(), metadataUpdateEvent.getPath());
        case PERMS:
            if (fileDiff != null) {
                fileDiff.getParameters().put("-permission", "" + metadataUpdateEvent.getPerms().toShort());
                metaStore.insertFileDiff(fileDiff);
            }
            return String.format("UPDATE file SET permission = %s WHERE path = '%s';", metadataUpdateEvent.getPerms().toShort(), metadataUpdateEvent.getPath());
        case REPLICATION:
            if (fileDiff != null) {
                fileDiff.getParameters().put("-replication", "" + metadataUpdateEvent.getReplication());
                metaStore.insertFileDiff(fileDiff);
            }
            return String.format("UPDATE file SET block_replication = %s WHERE path = '%s';", metadataUpdateEvent.getReplication(), metadataUpdateEvent.getPath());
        case XATTRS:
            // Only the erasure-coding policy xattr is handled below.
            final String EC_POLICY = "hdfs.erasurecoding.policy";
            // Todo
            if (LOG.isDebugEnabled()) {
                String message = "\n";
                for (XAttr xAttr : metadataUpdateEvent.getxAttrs()) {
                    message += xAttr.toString() + "\n";
                }
                LOG.debug(message);
            }
            // The following code should be executed merely on HDFS3.x.
            for (XAttr xAttr : metadataUpdateEvent.getxAttrs()) {
                if (xAttr.getName().equals(EC_POLICY)) {
                    try {
                        // The xattr value encodes the policy name as a writable string.
                        String ecPolicyName = WritableUtils.readString(new DataInputStream(new ByteArrayInputStream(xAttr.getValue())));
                        byte ecPolicyId = CompatibilityHelperLoader.getHelper().getErasureCodingPolicyByName(client, ecPolicyName);
                        // NOTE(review): -1 (unrecognized policy) is logged but
                        // still written to the table below — confirm intended.
                        if (ecPolicyId == (byte) -1) {
                            LOG.error("Unrecognized EC policy for updating!");
                        }
                        return String.format("UPDATE file SET ec_policy_id = %s WHERE path = '%s'", ecPolicyId, metadataUpdateEvent.getPath());
                    } catch (IOException ex) {
                        LOG.error("Error occurred for updating ecPolicy!", ex);
                    }
                }
            }
            break;
        case ACLS:
            // ACL changes are not mirrored into the metastore.
            return "";
    }
    return "";
}
Also used : ByteArrayInputStream(java.io.ByteArrayInputStream) FileDiff(org.smartdata.model.FileDiff) IOException(java.io.IOException) DataInputStream(java.io.DataInputStream) XAttr(org.apache.hadoop.fs.XAttr)

Aggregations

FileDiff (org.smartdata.model.FileDiff)24 Test (org.junit.Test)12 FileInfo (org.smartdata.model.FileInfo)12 BackUpInfo (org.smartdata.model.BackUpInfo)9 AlluxioURI (alluxio.AlluxioURI)8 URIStatus (alluxio.client.file.URIStatus)8 ArrayList (java.util.ArrayList)6 FileSystem (alluxio.client.file.FileSystem)5 JournalEntry (alluxio.proto.journal.Journal.JournalEntry)5 AlluxioEntryApplier (org.smartdata.alluxio.metric.fetcher.AlluxioEntryApplier)5 IOException (java.io.IOException)4 AlluxioException (alluxio.exception.AlluxioException)3 HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)3 FsPermission (org.apache.hadoop.fs.permission.FsPermission)2 DFSClient (org.apache.hadoop.hdfs.DFSClient)2 Event (org.apache.hadoop.hdfs.inotify.Event)2 ByteArrayInputStream (java.io.ByteArrayInputStream)1 DataInputStream (java.io.DataInputStream)1 URI (java.net.URI)1 URISyntaxException (java.net.URISyntaxException)1