Use of org.apache.hadoop.ozone.om.OMMetadataManager in project ozone by apache.
From the class TestOzoneFileSystemWithFSO, method testCreateFile:
@Test
public void testCreateFile() throws Exception {
  // Op 1. create file -> /d1/d2/file1
  Path parent = new Path("/d1/d2/");
  Path file = new Path(parent, "file1");
  FSDataOutputStream outputStream = getFs().create(file);
  OMMetadataManager omMgr = getCluster().getOzoneManager().getMetadataManager();
  OmBucketInfo omBucketInfo = omMgr.getBucketTable()
      .get(omMgr.getBucketKey(getVolumeName(), getBucketName()));
  Assert.assertNotNull("Failed to find bucketInfo", omBucketInfo);
  ArrayList<String> dirKeys = new ArrayList<>();
  long d1ObjectID = verifyDirKey(omBucketInfo.getObjectID(), "d1", "/d1", dirKeys, omMgr);
  long d2ObjectID = verifyDirKey(d1ObjectID, "d2", "/d1/d2", dirKeys, omMgr);
  String openFileKey = d2ObjectID + OzoneConsts.OM_KEY_PREFIX + file.getName();
  // closing the stream triggers CommitKeyRequest
  outputStream.close();
  OmKeyInfo omKeyInfo = omMgr.getKeyTable(getBucketLayout()).get(openFileKey);
  Assert.assertNotNull("Invalid Key!", omKeyInfo);
  verifyOMFileInfoFormat(omKeyInfo, file.getName(), d2ObjectID);
  // wait until the committed key's entry is cleaned out of the open key table
  GenericTestUtils.waitFor(() -> {
    try {
      return omMgr.getOpenKeyTable(getBucketLayout()).isEmpty();
    } catch (IOException e) {
      LOG.error("DB failure!", e);
      Assert.fail("DB failure!");
      return false;
    }
  }, 1000, 120000);
}
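verifyDirKey is a helper defined elsewhere in TestOzoneFileSystemWithFSO. A minimal sketch of what it plausibly does, assuming the FSO directory table is keyed by parentObjectID + OM_KEY_PREFIX + dirName as the openFileKey above suggests (a hypothetical reconstruction, not the verbatim helper):
private long verifyDirKey(long parentId, String dirKey, String absolutePath,
    ArrayList<String> dirKeys, OMMetadataManager omMgr) throws Exception {
  // FSO directory rows are keyed by the parent's objectID plus the dir name
  String dbKey = parentId + OzoneConsts.OM_KEY_PREFIX + dirKey;
  dirKeys.add(dbKey);
  OmDirectoryInfo dirInfo = omMgr.getDirectoryTable().get(dbKey);
  Assert.assertNotNull("Failed to find the directory: " + absolutePath, dirInfo);
  Assert.assertEquals("Wrong directory name", dirKey, dirInfo.getName());
  Assert.assertEquals("Wrong parent objectID", parentId, dirInfo.getParentObjectID());
  return dirInfo.getObjectID();
}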
Use of org.apache.hadoop.ozone.om.OMMetadataManager in project ozone by apache.
From the class TestOzoneFileSystemWithFSO, method testMultiLevelDirs:
@Test
public void testMultiLevelDirs() throws Exception {
  // reset metrics
  long numKeys = getCluster().getOzoneManager().getMetrics().getNumKeys();
  getCluster().getOzoneManager().getMetrics().decNumKeys(numKeys);
  Assert.assertEquals(0, getCluster().getOzoneManager().getMetrics().getNumKeys());
  // Op 1. create dir -> /d1/d2/d3/d4/
  // Op 2. create dir -> /d1/d2/d3/d4/d5
  // Op 3. create dir -> /d1/d2/d3/d4/d6
  Path parent = new Path("/d1/d2/d3/d4/");
  getFs().mkdirs(parent);
  OMMetadataManager omMgr = getCluster().getOzoneManager().getMetadataManager();
  OmBucketInfo omBucketInfo = omMgr.getBucketTable()
      .get(omMgr.getBucketKey(getVolumeName(), getBucketName()));
  Assert.assertNotNull("Failed to find bucketInfo", omBucketInfo);
  ArrayList<String> dirKeys = new ArrayList<>();
  long d1ObjectID = verifyDirKey(omBucketInfo.getObjectID(), "d1", "/d1", dirKeys, omMgr);
  long d2ObjectID = verifyDirKey(d1ObjectID, "d2", "/d1/d2", dirKeys, omMgr);
  long d3ObjectID = verifyDirKey(d2ObjectID, "d3", "/d1/d2/d3", dirKeys, omMgr);
  long d4ObjectID = verifyDirKey(d3ObjectID, "d4", "/d1/d2/d3/d4", dirKeys, omMgr);
  Assert.assertEquals("Wrong OM numKeys metrics", 4,
      getCluster().getOzoneManager().getMetrics().getNumKeys());
  // create sub-dirs under the same parent
  Path subDir5 = new Path("/d1/d2/d3/d4/d5");
  getFs().mkdirs(subDir5);
  Path subDir6 = new Path("/d1/d2/d3/d4/d6");
  getFs().mkdirs(subDir6);
  long d5ObjectID = verifyDirKey(d4ObjectID, "d5", "/d1/d2/d3/d4/d5", dirKeys, omMgr);
  long d6ObjectID = verifyDirKey(d4ObjectID, "d6", "/d1/d2/d3/d4/d6", dirKeys, omMgr);
  Assert.assertTrue("Wrong objectIds for sub-dirs[" + d5ObjectID + "/d5, "
      + d6ObjectID + "/d6] of same parent!", d5ObjectID != d6ObjectID);
  Assert.assertEquals("Wrong OM numKeys metrics", 6,
      getCluster().getOzoneManager().getMetrics().getNumKeys());
}
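A quick way to see the resulting layout, assuming the directory table exposes the standard hdds Table/TableIterator API used elsewhere in Ozone (a sketch, not part of the original test):
try (TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
    it = omMgr.getDirectoryTable().iterator()) {
  while (it.hasNext()) {
    Table.KeyValue<String, OmDirectoryInfo> kv = it.next();
    // rows are keyed "parentObjectID/dirName"; d5 and d6 share d4's objectID
    // as their key prefix but carry distinct objectIDs of their own
    System.out.println(kv.getKey() + " -> " + kv.getValue().getObjectID());
  }
}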
Use of org.apache.hadoop.ozone.om.OMMetadataManager in project ozone by apache.
From the class TestOzoneFileInterfaces, method testOzFsReadWrite:
@Test
public void testOzFsReadWrite() throws IOException {
  long currentTime = Time.now();
  int stringLen = 20;
  OMMetadataManager metadataManager = cluster.getOzoneManager().getMetadataManager();
  String lev1dir = "l1dir";
  Path lev1path = createPath("/" + lev1dir);
  String lev1key = metadataManager.getOzoneDirKey(volumeName, bucketName,
      o3fs.pathToKey(lev1path));
  String lev2dir = "l2dir";
  Path lev2path = createPath("/" + lev1dir + "/" + lev2dir);
  String lev2key = metadataManager.getOzoneDirKey(volumeName, bucketName,
      o3fs.pathToKey(lev2path));
  String data = RandomStringUtils.randomAlphanumeric(stringLen);
  String filePath = RandomStringUtils.randomAlphanumeric(5);
  Path path = createPath("/" + lev1dir + "/" + lev2dir + "/" + filePath);
  String fileKey = metadataManager.getOzoneKey(volumeName, bucketName,
      o3fs.pathToKey(path));
  // verify that the prefix directories and the file do not already exist
  assertNull(metadataManager.getKeyTable(getBucketLayout()).get(lev1key));
  assertNull(metadataManager.getKeyTable(getBucketLayout()).get(lev2key));
  assertNull(metadataManager.getKeyTable(getBucketLayout()).get(fileKey));
  try (FSDataOutputStream stream = fs.create(path)) {
    stream.writeBytes(data);
  }
  assertEquals(1, statistics.getLong(StorageStatistics.CommonStatisticNames.OP_CREATE).longValue());
  assertEquals(1, statistics.getLong("objects_created").longValue());
  FileStatus status = fs.getFileStatus(path);
  assertEquals(1, statistics.getLong(StorageStatistics.CommonStatisticNames.OP_GET_FILE_STATUS).longValue());
  assertEquals(1, statistics.getLong("objects_query").longValue());
  // The timestamp of the newly created file should always be greater than
  // the time when the test was started.
  assertTrue("Modification time has not been recorded: " + status,
      status.getModificationTime() > currentTime);
  assertFalse(status.isDirectory());
  assertEquals(FsPermission.getFileDefault(), status.getPermission());
  verifyOwnerGroup(status);
  // verify that the prefix directories got created when creating the file
  assertEquals("l1dir/", metadataManager.getKeyTable(getBucketLayout()).get(lev1key).getKeyName());
  assertEquals("l1dir/l2dir/", metadataManager.getKeyTable(getBucketLayout()).get(lev2key).getKeyName());
  FileStatus lev1status = getDirectoryStat(lev1path);
  FileStatus lev2status = getDirectoryStat(lev2path);
  assertNotNull(lev1status);
  assertNotNull(lev2status);
  try (FSDataInputStream inputStream = fs.open(path)) {
    byte[] buffer = new byte[stringLen];
    // This positional read does not change the offset inside the file.
    int readBytes = inputStream.read(0, buffer, 0, buffer.length);
    String out = new String(buffer, 0, buffer.length, UTF_8);
    assertEquals(data, out);
    assertEquals(buffer.length, readBytes);
    assertEquals(0, inputStream.getPos());
    // The following read advances the internal offset.
    readBytes = inputStream.read(buffer, 0, buffer.length);
    out = new String(buffer, 0, buffer.length, UTF_8);
    assertEquals(data, out);
    assertEquals(buffer.length, readBytes);
    assertEquals(buffer.length, inputStream.getPos());
  }
  assertEquals(1, statistics.getLong(StorageStatistics.CommonStatisticNames.OP_OPEN).longValue());
  assertEquals(1, statistics.getLong("objects_read").longValue());
}
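createPath and getDirectoryStat are helpers defined outside this excerpt. A minimal sketch of getDirectoryStat, under the assumption that it simply fetches and validates a directory's FileStatus (a hypothetical reconstruction, not the verbatim helper):
private FileStatus getDirectoryStat(Path path) throws IOException {
  FileStatus status = fs.getFileStatus(path);
  // presumably guarantees callers a non-null status for an existing directory
  assertTrue("Not a directory: " + path, status.isDirectory());
  return status;
}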
Use of org.apache.hadoop.ozone.om.OMMetadataManager in project ozone by apache.
From the class TestOzoneManagerDoubleBufferWithDummyResponse, method setup:
@Before
public void setup() throws IOException {
  OzoneConfiguration configuration = new OzoneConfiguration();
  configuration.set(OZONE_METADATA_DIRS, folder.newFolder().getAbsolutePath());
  omMetadataManager = new OmMetadataManagerImpl(configuration);
  OzoneManagerRatisSnapshot ozoneManagerRatisSnapshot = index -> {
    lastAppliedIndex = index.get(index.size() - 1);
  };
  doubleBuffer = new OzoneManagerDoubleBuffer.Builder()
      .setOmMetadataManager(omMetadataManager)
      .setOzoneManagerRatisSnapShot(ozoneManagerRatisSnapshot)
      .enableRatis(true)
      .setIndexToTerm((val) -> term)
      .build();
}
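The matching teardown is not shown in this excerpt; assuming OzoneManagerDoubleBuffer exposes a stop() method, it would plausibly look like:
@After
public void stop() {
  // shut down the double buffer's flush daemon between tests
  doubleBuffer.stop();
}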
Use of org.apache.hadoop.ozone.om.OMMetadataManager in project ozone by apache.
From the class TestFileSizeCountTask, method testReprocessAtScale:
@Test
public void testReprocessAtScale() throws IOException {
  // generate mocks for 2 volumes, 500 buckets per volume,
  // and 42 keys in each bucket.
  List<OmKeyInfo> omKeyInfoList = new ArrayList<>();
  List<Boolean> hasNextAnswer = new ArrayList<>();
  for (int volIndex = 1; volIndex <= 2; volIndex++) {
    for (int bktIndex = 1; bktIndex <= 500; bktIndex++) {
      for (int keyIndex = 1; keyIndex <= 42; keyIndex++) {
        OmKeyInfo omKeyInfo = mock(OmKeyInfo.class);
        given(omKeyInfo.getKeyName()).willReturn("key" + keyIndex);
        given(omKeyInfo.getVolumeName()).willReturn("vol" + volIndex);
        given(omKeyInfo.getBucketName()).willReturn("bucket" + bktIndex);
        // place each key in its own bin: size 2^(keyIndex + 9) - 1 sits just
        // below the power-of-two upper bound of bin keyIndex
        long fileSize = (long) Math.pow(2, keyIndex + 9) - 1L;
        given(omKeyInfo.getDataSize()).willReturn(fileSize);
        omKeyInfoList.add(omKeyInfo);
        hasNextAnswer.add(true);
      }
    }
  }
  hasNextAnswer.add(false);
  OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class);
  TypedTable<String, OmKeyInfo> keyTable = mock(TypedTable.class);
  TypedTable.TypedTableIterator mockKeyIter = mock(TypedTable.TypedTableIterator.class);
  TypedTable.TypedKeyValue mockKeyValue = mock(TypedTable.TypedKeyValue.class);
  when(keyTable.iterator()).thenReturn(mockKeyIter);
  when(omMetadataManager.getKeyTable(getBucketLayout())).thenReturn(keyTable);
  when(mockKeyIter.hasNext()).thenAnswer(AdditionalAnswers.returnsElementsOf(hasNextAnswer));
  when(mockKeyIter.next()).thenReturn(mockKeyValue);
  when(mockKeyValue.getValue()).thenAnswer(AdditionalAnswers.returnsElementsOf(omKeyInfoList));
  Pair<String, Boolean> result = fileSizeCountTask.reprocess(omMetadataManager);
  assertTrue(result.getRight());
  // 2 volumes * 500 buckets * 42 bins = 42000 rows
  assertEquals(42000, fileCountBySizeDao.count());
  Record3<String, String, Long> recordToFind = dslContext
      .newRecord(FILE_COUNT_BY_SIZE.VOLUME, FILE_COUNT_BY_SIZE.BUCKET,
          FILE_COUNT_BY_SIZE.FILE_SIZE)
      .value1("vol1")
      .value2("bucket1")
      .value3(1024L);
  assertEquals(1L, fileCountBySizeDao.findById(recordToFind).getCount().longValue());
  // a 131071-byte key (keyIndex = 8) falls in the 131072 bin, the next
  // highest power of two
  recordToFind.value1("vol1");
  recordToFind.value3(131072L);
  assertEquals(1L, fileCountBySizeDao.findById(recordToFind).getCount().longValue());
  recordToFind.value2("bucket500");
  recordToFind.value3(Long.MAX_VALUE);
  assertEquals(1L, fileCountBySizeDao.findById(recordToFind).getCount().longValue());
}
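The bin boundaries are powers of two, so every mocked size 2^(keyIndex + 9) - 1 lands one byte below its bin's upper bound. A worked illustration of that arithmetic (binUpperBound is an invented name; the real binning logic lives in FileSizeCountTask):
// for a size one below a power of two, the next power of two is its bin
static long binUpperBound(long fileSize) {
  return Long.highestOneBit(fileSize) << 1;
}
// binUpperBound(1023L)   == 1024L    (keyIndex = 1, asserted above)
// binUpperBound(131071L) == 131072L  (keyIndex = 8)
// sizes past the task's largest tracked bin are counted under Long.MAX_VALUE,
// which is why the bucket500 lookup above uses that value.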