Example 6 with BackUpInfo

use of org.smartdata.model.BackUpInfo in project SSM by Intel-bigdata.

the class MetaStore method listSyncRules.

public List<DetailedRuleInfo> listSyncRules() throws MetaStoreException {
    List<RuleInfo> ruleInfos = getRuleInfo();
    List<DetailedRuleInfo> detailedRuleInfos = new ArrayList<>();
    for (RuleInfo ruleInfo : ruleInfos) {
        if (ruleInfo.getState() == RuleState.DELETED) {
            continue;
        }
        int lastIndex = ruleInfo.getRuleText().lastIndexOf("|");
        String lastPart = ruleInfo.getRuleText().substring(lastIndex + 1);
        if (lastPart.contains("sync")) {
            DetailedRuleInfo detailedRuleInfo = new DetailedRuleInfo(ruleInfo);
            // Add sync progress
            BackUpInfo backUpInfo = getBackUpInfo(ruleInfo.getId());
            // Get total matched files
            if (backUpInfo != null) {
                detailedRuleInfo.setBaseProgress(getFilesByPrefix(backUpInfo.getSrc()).size());
                long count = fileDiffDao.getPendingDiff(backUpInfo.getSrc()).size();
                count += fileDiffDao.getByState(backUpInfo.getSrc(), FileDiffState.RUNNING).size();
                if (count > detailedRuleInfo.baseProgress) {
                    count = detailedRuleInfo.baseProgress;
                }
                detailedRuleInfo.setRunningProgress(count);
            } else {
                detailedRuleInfo.setBaseProgress(0);
                detailedRuleInfo.setRunningProgress(0);
            }
            if (detailedRuleInfo.getState() != RuleState.DELETED) {
                detailedRuleInfos.add(detailedRuleInfo);
            }
        }
    }
    return detailedRuleInfos;
}
Also used : DetailedRuleInfo(org.smartdata.model.DetailedRuleInfo) BackUpInfo(org.smartdata.model.BackUpInfo) ArrayList(java.util.ArrayList) RuleInfo(org.smartdata.model.RuleInfo)
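
As a usage note, here is a minimal caller sketch for listSyncRules (a hypothetical helper, not part of the SSM sources). It assumes MetaStore lives in org.smartdata.metastore, consistent with MetaStoreException in the aggregations below, and that DetailedRuleInfo inherits getId() from RuleInfo, matching the getState() call in the method above.

import java.util.List;
import org.smartdata.metastore.MetaStore;           // package assumed from MetaStoreException
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.DetailedRuleInfo;

public class SyncProgressPrinter {

    // Print the matched-file count for every active sync rule.
    public static void printSyncProgress(MetaStore metaStore) throws MetaStoreException {
        List<DetailedRuleInfo> syncRules = metaStore.listSyncRules();
        for (DetailedRuleInfo info : syncRules) {
            // baseProgress is read as a public field, mirroring the direct access
            // in listSyncRules above; a running-progress getter is not shown there.
            System.out.println("Rule " + info.getId() + ": matched files = " + info.baseProgress);
        }
    }
}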

Example 7 with BackUpInfo

use of org.smartdata.model.BackUpInfo in project SSM by Intel-bigdata.

the class TestMetaStore method testMoveSyncRules.

@Test
public void testMoveSyncRules() throws Exception {
    String pathString = "/src/1";
    long length = 123L;
    boolean isDir = false;
    int blockReplication = 1;
    long blockSize = 128 * 1024L;
    long modTime = 123123123L;
    long accessTime = 123123120L;
    String owner = "root";
    String group = "admin";
    long fileId = 56L;
    byte storagePolicy = 0;
    byte erasureCodingPolicy = 0;
    FileInfo fileInfo = new FileInfo(pathString, fileId, length, isDir, (short) blockReplication, blockSize, modTime, accessTime, (short) 1, owner, group, storagePolicy, erasureCodingPolicy);
    metaStore.insertFile(fileInfo);
    Map<String, String> args = new HashMap<>();
    args.put("-file", "/src/1");
    String rule = "file : accessCount(10m) > 20 \n\n" + "and length() > 3 | ";
    long submitTime = System.currentTimeMillis();
    RuleInfo ruleInfo = new RuleInfo(0, submitTime, rule + "sync -dest /dest/", RuleState.ACTIVE, 0, 0, 0);
    metaStore.insertNewRule(ruleInfo);
    metaStore.insertBackUpInfo(new BackUpInfo(ruleInfo.getId(), "/src/", "/dest/", 100));
    metaStore.insertNewRule(new RuleInfo(1, submitTime, rule + "allssd", RuleState.ACTIVE, 0, 0, 0));
    metaStore.insertNewRule(new RuleInfo(2, submitTime, rule + "archive", RuleState.ACTIVE, 0, 0, 0));
    metaStore.insertNewRule(new RuleInfo(2, submitTime, rule + "onessd", RuleState.ACTIVE, 0, 0, 0));
    metaStore.insertNewRule(new RuleInfo(2, submitTime, rule + "cache", RuleState.ACTIVE, 0, 0, 0));
    Assert.assertTrue(metaStore.listMoveRules().size() == 3);
    Assert.assertTrue(metaStore.listSyncRules().size() == 1);
    CmdletInfo cmdletInfo = new CmdletInfo(1, ruleInfo.getId(), CmdletState.EXECUTING, "test", 123123333L, 232444444L);
    cmdletInfo.setAids(Collections.singletonList(1L));
    metaStore.insertCmdlet(cmdletInfo);
    metaStore.insertAction(new ActionInfo(1, 1, "allssd", args, "Test", "Test", true, 123213213L, true, 123123L, 100));
    Assert.assertTrue(metaStore.listFileActions(ruleInfo.getId(), 0).size() >= 0);
}
Also used : FileInfo(org.smartdata.model.FileInfo) HashMap(java.util.HashMap) BackUpInfo(org.smartdata.model.BackUpInfo) ActionInfo(org.smartdata.model.ActionInfo) RuleInfo(org.smartdata.model.RuleInfo) CmdletInfo(org.smartdata.model.CmdletInfo) Test(org.junit.Test)
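
Distilled from the test, a minimal sketch of registering a sync rule together with its BackUpInfo, using the same MetaStore API as above. Reading the BackUpInfo arguments as (ruleId, src, dest, period) is inferred from these examples, not from its documentation.

import org.smartdata.metastore.MetaStore;           // package assumed
import org.smartdata.model.BackUpInfo;
import org.smartdata.model.RuleInfo;
import org.smartdata.model.RuleState;               // package assumed

public class SyncRuleRegistration {

    public static void registerSyncRule(MetaStore metaStore) throws Exception {
        // Rule text reused from the test; everything after the last '|' is the command
        // part that listSyncRules in Example 6 inspects for "sync".
        String rule = "file : accessCount(10m) > 20 \n\n" + "and length() > 3 | " + "sync -dest /dest/";
        RuleInfo ruleInfo = new RuleInfo(0, System.currentTimeMillis(), rule, RuleState.ACTIVE, 0, 0, 0);
        // insertNewRule is expected to assign the generated id back to ruleInfo,
        // since the test relies on ruleInfo.getId() right after the insert.
        metaStore.insertNewRule(ruleInfo);
        metaStore.insertBackUpInfo(new BackUpInfo(ruleInfo.getId(), "/src/", "/dest/", 100));
    }
}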

Example 8 with BackUpInfo

use of org.smartdata.model.BackUpInfo in project SSM by Intel-bigdata.

the class TestBackUpInfoDao method testBatchInsert.

@Test
public void testBatchInsert() {
    BackUpInfo[] backUpInfos = new BackUpInfo[2];
    backUpInfos[0] = new BackUpInfo(1, "test", "test", 1);
    backUpInfos[1] = new BackUpInfo(2, "test", "test", 1);
    backUpInfoDao.insert(backUpInfos);
    Assert.assertTrue(backUpInfoDao.getByRid(1).equals(backUpInfos[0]));
    Assert.assertTrue(backUpInfoDao.getByRid(2).equals(backUpInfos[1]));
}
Also used : BackUpInfo(org.smartdata.model.BackUpInfo) Test(org.junit.Test)
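
A companion sketch (hypothetical test, assuming the same backUpInfoDao fixture as above) that spells out the constructor argument order these tests rely on; treating the arguments as (ruleId, src, dest, period) is an inference from the examples here.

@Test
public void testSingleInsertAndLookup() {
    // (ruleId, src, dest, period) reading of the arguments is assumed, not documented here.
    BackUpInfo info = new BackUpInfo(3, "/src/", "/backup/dest/", 500);
    // Reuse the array-based insert shown in testBatchInsert above.
    backUpInfoDao.insert(new BackUpInfo[] { info });
    Assert.assertEquals(info, backUpInfoDao.getByRid(3));
}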

Example 9 with BackUpInfo

use of org.smartdata.model.BackUpInfo in project SSM by Intel-bigdata.

the class TestBackUpInfoDao method testgetBySrc.

@Test
public void testgetBySrc() {
    Assert.assertTrue(backUpInfoDao.getByDest("1").size() == 0);
    BackUpInfo[] backUpInfos = new BackUpInfo[2];
    backUpInfos[0] = new BackUpInfo(1, "test", "test", 1);
    backUpInfos[1] = new BackUpInfo(2, "test", "test", 1);
    backUpInfoDao.insert(backUpInfos);
    List<BackUpInfo> list = backUpInfoDao.getBySrc("test");
    Assert.assertTrue(list.size() == 2);
    Assert.assertTrue(list.get(0).equals(backUpInfos[0]));
    Assert.assertTrue(list.get(1).equals(backUpInfos[1]));
    Assert.assertTrue(backUpInfoDao.getCountByRid(1) == 0);
}
Also used : BackUpInfo(org.smartdata.model.BackUpInfo) Test(org.junit.Test)

Example 10 with BackUpInfo

use of org.smartdata.model.BackUpInfo in project SSM by Intel-bigdata.

the class TestInotifyEventApplier method testApplier.

@Test
public void testApplier() throws Exception {
    DFSClient client = Mockito.mock(DFSClient.class);
    FileInfo root = HadoopUtil.convertFileStatus(getDummyDirStatus("/", 1000), "/");
    metaStore.insertFile(root);
    BackUpInfo backUpInfo = new BackUpInfo(1L, "/file", "remote/dest/", 10);
    metaStore.insertBackUpInfo(backUpInfo);
    InotifyEventApplier applier = new InotifyEventApplier(metaStore, client);
    Event.CreateEvent createEvent = new Event.CreateEvent.Builder().iNodeType(Event.CreateEvent.INodeType.FILE).ctime(1).defaultBlockSize(1024).groupName("cg1").overwrite(true).ownerName("user1").path("/file").perms(new FsPermission("777")).replication(3).build();
    HdfsFileStatus status1 = CompatibilityHelperLoader.getHelper().createHdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission("777"), "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0);
    Mockito.when(client.getFileInfo(Matchers.startsWith("/file"))).thenReturn(status1);
    Mockito.when(client.getFileInfo(Matchers.startsWith("/dir"))).thenReturn(getDummyDirStatus("", 1010));
    applier.apply(new Event[] { createEvent });
    FileInfo result1 = metaStore.getFile().get(1);
    Assert.assertEquals(result1.getPath(), "/file");
    Assert.assertEquals(result1.getFileId(), 1010L);
    Assert.assertEquals(result1.getPermission(), 511);
    Event close = new Event.CloseEvent("/file", 1024, 0);
    applier.apply(new Event[] { close });
    FileInfo result2 = metaStore.getFile().get(1);
    Assert.assertEquals(result2.getLength(), 1024);
    Assert.assertEquals(result2.getModificationTime(), 0L);
    // Event truncate = new Event.TruncateEvent("/file", 512, 16);
    // applier.apply(new Event[] {truncate});
    // ResultSet result3 = metaStore.executeQuery("SELECT * FROM files");
    // Assert.assertEquals(result3.getLong("length"), 512);
    // Assert.assertEquals(result3.getLong("modification_time"), 16L);
    Event meta = new Event.MetadataUpdateEvent.Builder().path("/file").metadataType(Event.MetadataUpdateEvent.MetadataType.TIMES).mtime(2).atime(3).replication(4).ownerName("user2").groupName("cg2").build();
    applier.apply(new Event[] { meta });
    FileInfo result4 = metaStore.getFile().get(1);
    Assert.assertEquals(result4.getAccessTime(), 3);
    Assert.assertEquals(result4.getModificationTime(), 2);
    Event meta1 = new Event.MetadataUpdateEvent.Builder().path("/file").metadataType(Event.MetadataUpdateEvent.MetadataType.OWNER).ownerName("user1").groupName("cg1").build();
    applier.apply(new Event[] { meta1 });
    result4 = metaStore.getFile().get(1);
    Assert.assertEquals(result4.getOwner(), "user1");
    Assert.assertEquals(result4.getGroup(), "cg1");
    Event.CreateEvent createEvent2 = new Event.CreateEvent.Builder().iNodeType(Event.CreateEvent.INodeType.DIRECTORY).ctime(1).groupName("cg1").overwrite(true).ownerName("user1").path("/dir").perms(new FsPermission("777")).replication(3).build();
    Event.CreateEvent createEvent3 = new Event.CreateEvent.Builder().iNodeType(Event.CreateEvent.INodeType.FILE).ctime(1).groupName("cg1").overwrite(true).ownerName("user1").path("/dir/file").perms(new FsPermission("777")).replication(3).build();
    Event rename = new Event.RenameEvent.Builder().dstPath("/dir2").srcPath("/dir").timestamp(5).build();
    applier.apply(new Event[] { createEvent2, createEvent3, rename });
    List<FileInfo> result5 = metaStore.getFile();
    List<String> expectedPaths = Arrays.asList("/dir2", "/dir2/file", "/file");
    List<String> actualPaths = new ArrayList<>();
    for (FileInfo s : result5) {
        actualPaths.add(s.getPath());
    }
    Collections.sort(actualPaths);
    Assert.assertTrue(actualPaths.size() == 4);
    Assert.assertTrue(actualPaths.containsAll(expectedPaths));
    Event unlink = new Event.UnlinkEvent.Builder().path("/").timestamp(6).build();
    applier.apply(new Event[] { unlink });
    Thread.sleep(1200);
    Assert.assertEquals(metaStore.getFile().size(), 0);
    System.out.println("Files in table " + metaStore.getFile().size());
    List<FileDiff> fileDiffList = metaStore.getPendingDiff();
    Assert.assertTrue(fileDiffList.size() == 4);
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) ArrayList(java.util.ArrayList) FileInfo(org.smartdata.model.FileInfo) BackUpInfo(org.smartdata.model.BackUpInfo) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Event(org.apache.hadoop.hdfs.inotify.Event) FileDiff(org.smartdata.model.FileDiff) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)
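
The test registers a BackUpInfo for "/file" so that the applier queues FileDiff entries for events under that path, which the final assertion counts. A short sketch (hypothetical, using only the MetaStore calls shown above) of reading those pending diffs back:

// Sketch only: inspect the diffs queued for the backed-up path after the events
// have been applied. Only toString() is relied on; FileDiff getters are not shown here.
List<FileDiff> pending = metaStore.getPendingDiff();
for (FileDiff diff : pending) {
    System.out.println(diff);
}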

Aggregations

BackUpInfo (org.smartdata.model.BackUpInfo): 20
Test (org.junit.Test): 16
FileDiff (org.smartdata.model.FileDiff): 9
FileInfo (org.smartdata.model.FileInfo): 7
AlluxioURI (alluxio.AlluxioURI): 5
FileSystem (alluxio.client.file.FileSystem): 5
URIStatus (alluxio.client.file.URIStatus): 5
JournalEntry (alluxio.proto.journal.Journal.JournalEntry): 5
AlluxioEntryApplier (org.smartdata.alluxio.metric.fetcher.AlluxioEntryApplier): 5
ArrayList (java.util.ArrayList): 3
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 2
DFSClient (org.apache.hadoop.hdfs.DFSClient): 2
Event (org.apache.hadoop.hdfs.inotify.Event): 2
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 2
RuleInfo (org.smartdata.model.RuleInfo): 2
URI (java.net.URI): 1
URISyntaxException (java.net.URISyntaxException): 1
HashMap (java.util.HashMap): 1
MetaStoreException (org.smartdata.metastore.MetaStoreException): 1
ActionInfo (org.smartdata.model.ActionInfo): 1