Use of org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo in project Ozone by Apache.
From the class OMKeyRenameResponseWithFSO, method addToDBBatch:
@Override
public void addToDBBatch(OMMetadataManager omMetadataManager,
    BatchOperation batchOperation) throws IOException {
  if (isRenameDirectory) {
    // A directory rename is a delete of the old path key plus a put of the
    // new one, queued on the same batch so both commit atomically.
    omMetadataManager.getDirectoryTable()
        .deleteWithBatch(batchOperation, getFromKeyName());
    OmDirectoryInfo renameDirInfo =
        OMFileRequest.getDirectoryInfo(getRenameKeyInfo());
    omMetadataManager.getDirectoryTable()
        .putWithBatch(batchOperation, getToKeyName(), renameDirInfo);
  } else {
    // File rename: the same delete-then-put pattern against the key table.
    omMetadataManager.getKeyTable(getBucketLayout())
        .deleteWithBatch(batchOperation, getFromKeyName());
    omMetadataManager.getKeyTable(getBucketLayout())
        .putWithBatch(batchOperation, getToKeyName(), getRenameKeyInfo());
  }
}
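None of these addToDBBatch implementations commits anything itself; Ozone Manager's double buffer collects responses and flushes them in a batch. A minimal sketch of that driving loop for a single response, assuming a DBStore obtained from the metadata manager (variable names are illustrative; the real double buffer adds error handling and flushes many responses per batch):

// Queue the response's mutations on a batch, then commit them atomically.
// "response" stands in for any of the OMClientResponse subclasses shown here.
try (BatchOperation batch =
         omMetadataManager.getStore().initBatchOperation()) {
  response.addToDBBatch(omMetadataManager, batch);
  omMetadataManager.getStore().commitBatchOperation(batch);
}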
Use of org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo in project Ozone by Apache.
From the class OMDirectoryCreateResponseWithFSO, method addToDirectoryTable:
private void addToDirectoryTable(OMMetadataManager omMetadataManager,
    BatchOperation batchOperation) throws IOException {
  if (dirInfo != null) {
    final long volumeId = omMetadataManager.getVolumeId(volume);
    final long bucketId = omMetadataManager.getBucketId(volume, bucket);
    if (parentDirInfos != null) {
      // Persist any implicitly created parent directories first.
      for (OmDirectoryInfo parentDirInfo : parentDirInfos) {
        String parentKey = omMetadataManager.getOzonePathKey(volumeId,
            bucketId, parentDirInfo.getParentObjectID(),
            parentDirInfo.getName());
        LOG.debug("putWithBatch parent : dir {} info : {}", parentKey,
            parentDirInfo);
        omMetadataManager.getDirectoryTable()
            .putWithBatch(batchOperation, parentKey, parentDirInfo);
      }
    }
    String dirKey = omMetadataManager.getOzonePathKey(volumeId, bucketId,
        dirInfo.getParentObjectID(), dirInfo.getName());
    omMetadataManager.getDirectoryTable()
        .putWithBatch(batchOperation, dirKey, dirInfo);
  } else {
    // When the directory already exists we don't add it to the cache.
    // That is not an error; dirInfo will simply be null in this case.
    LOG.debug("Response Status is OK, dirKeyInfo is null in "
        + "OMDirectoryCreateResponseWithFSO");
  }
}
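Several of these snippets lean on getOzonePathKey to build the FSO path key. In Ozone the key is the slash-separated tuple of volume id, bucket id, parent object id, and the entry's leaf name. A simplified sketch of that layout (OM_KEY_PREFIX is Ozone's "/" separator constant; the body below is an illustration, not the verbatim OmMetadataManagerImpl implementation):

// Sketch of the FSO path-key layout: /<volumeId>/<bucketId>/<parentId>/<name>.
public String getOzonePathKey(long volumeId, long bucketId,
    long parentObjectId, String pathComponentName) {
  return OM_KEY_PREFIX + volumeId
      + OM_KEY_PREFIX + bucketId
      + OM_KEY_PREFIX + parentObjectId
      + OM_KEY_PREFIX + pathComponentName;
}

Because the parent object id is part of the key, a rename or lookup only needs the immediate parent, not the full path string.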
Use of org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo in project Ozone by Apache.
From the class OMKeyAclResponseWithFSO, method addToDBBatch:
@Override
public void addToDBBatch(OMMetadataManager omMetadataManager,
    BatchOperation batchOperation) throws IOException {
  final long volumeId = omMetadataManager.getVolumeId(
      getOmKeyInfo().getVolumeName());
  final long bucketId = omMetadataManager.getBucketId(
      getOmKeyInfo().getVolumeName(), getOmKeyInfo().getBucketName());
  String ozoneDbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId,
      getOmKeyInfo().getParentObjectID(), getOmKeyInfo().getFileName());
  // The same path key addresses either table; the entry type decides whether
  // the updated ACLs land in the directory table or the key table.
  if (isDirectory) {
    OmDirectoryInfo dirInfo = OMFileRequest.getDirectoryInfo(getOmKeyInfo());
    omMetadataManager.getDirectoryTable()
        .putWithBatch(batchOperation, ozoneDbKey, dirInfo);
  } else {
    omMetadataManager.getKeyTable(getBucketLayout())
        .putWithBatch(batchOperation, ozoneDbKey, getOmKeyInfo());
  }
}
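Two of the directory branches above funnel through OMFileRequest.getDirectoryInfo, which repackages an OmKeyInfo as an OmDirectoryInfo before the write. A hedged sketch of what that conversion does, assuming OmDirectoryInfo exposes a builder with the setters below (verify the exact builder API against your Ozone tree; this is not the verbatim implementation):

// Illustrative conversion: copy identity and metadata fields from the key
// record into a directory record.
static OmDirectoryInfo toDirectoryInfo(OmKeyInfo keyInfo) {
  return new OmDirectoryInfo.Builder()
      .setName(keyInfo.getFileName())          // leaf name only, no path
      .setParentObjectID(keyInfo.getParentObjectID())
      .setObjectID(keyInfo.getObjectID())
      .setUpdateID(keyInfo.getUpdateID())
      .setCreationTime(keyInfo.getCreationTime())
      .setModificationTime(keyInfo.getModificationTime())
      .setAcls(keyInfo.getAcls())
      .build();
}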
Use of org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo in project Ozone by Apache.
From the class S3InitiateMultipartUploadResponseWithFSO, method addToDBBatch:
@Override
public void addToDBBatch(OMMetadataManager omMetadataManager,
    BatchOperation batchOperation) throws IOException {
  // Create the parent directory entries while initiating the multipart
  // upload; do not wait for the file commit request.
  if (parentDirInfos != null) {
    final OmKeyInfo keyInfo = getOmKeyInfo();
    final long volumeId = omMetadataManager.getVolumeId(
        keyInfo.getVolumeName());
    final long bucketId = omMetadataManager.getBucketId(
        keyInfo.getVolumeName(), keyInfo.getBucketName());
    for (OmDirectoryInfo parentDirInfo : parentDirInfos) {
      final String parentKey = omMetadataManager.getOzonePathKey(volumeId,
          bucketId, parentDirInfo.getParentObjectID(),
          parentDirInfo.getName());
      omMetadataManager.getDirectoryTable()
          .putWithBatch(batchOperation, parentKey, parentDirInfo);
    }
  }
  OMFileRequest.addToOpenFileTable(omMetadataManager, batchOperation,
      getOmKeyInfo(), getOmMultipartKeyInfo().getUploadID());
  omMetadataManager.getMultipartInfoTable()
      .putWithBatch(batchOperation, mpuDBKey, getOmMultipartKeyInfo());
}
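Because everything above rides on one batch, the parent directory entries, the open-file entry, and the multipart info record become visible together or not at all. A test-style sketch of checking that after the batch commits (hypothetical test code; volumeName and bucketName are placeholders, and Table.get may throw IOException, so the enclosing test would declare it):

// Hypothetical post-commit check: every parent written by the response
// should now be readable straight from the directory table.
long volumeId = omMetadataManager.getVolumeId(volumeName);
long bucketId = omMetadataManager.getBucketId(volumeName, bucketName);
for (OmDirectoryInfo parent : parentDirInfos) {
  String key = omMetadataManager.getOzonePathKey(volumeId, bucketId,
      parent.getParentObjectID(), parent.getName());
  Assert.assertNotNull("parent dir missing: " + key,
      omMetadataManager.getDirectoryTable().get(key));
}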
Use of org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo in project Ozone by Apache.
From the class NSSummaryTask, method process:
@Override
public Pair<String, Boolean> process(OMUpdateEventBatch events) {
  Iterator<OMDBUpdateEvent> eventIterator = events.getIterator();
  final Collection<String> taskTables = getTaskTables();
  while (eventIterator.hasNext()) {
    OMDBUpdateEvent<String, ? extends WithParentObjectId> omdbUpdateEvent =
        eventIterator.next();
    OMDBUpdateEvent.OMDBUpdateAction action = omdbUpdateEvent.getAction();
    // We only process updates on OM's FileTable and DirTable.
    String table = omdbUpdateEvent.getTable();
    boolean updateOnFileTable = table.equals(FILE_TABLE);
    if (!taskTables.contains(table)) {
      continue;
    }
    String updatedKey = omdbUpdateEvent.getKey();
    try {
      if (updateOnFileTable) {
        // Key update on the FileTable.
        OMDBUpdateEvent<String, OmKeyInfo> keyTableUpdateEvent =
            (OMDBUpdateEvent<String, OmKeyInfo>) omdbUpdateEvent;
        OmKeyInfo updatedKeyInfo = keyTableUpdateEvent.getValue();
        OmKeyInfo oldKeyInfo = keyTableUpdateEvent.getOldValue();
        switch (action) {
        case PUT:
          writeOmKeyInfoOnNamespaceDB(updatedKeyInfo);
          break;
        case DELETE:
          deleteOmKeyInfoOnNamespaceDB(updatedKeyInfo);
          break;
        case UPDATE:
          if (oldKeyInfo != null) {
            // Delete first, then put.
            deleteOmKeyInfoOnNamespaceDB(oldKeyInfo);
          } else {
            LOG.warn("Update event does not have the old keyInfo for {}.",
                updatedKey);
          }
          writeOmKeyInfoOnNamespaceDB(updatedKeyInfo);
          break;
        default:
          LOG.debug("Skipping DB update event : {}",
              omdbUpdateEvent.getAction());
        }
      } else {
        // Directory update on the DirTable.
        OMDBUpdateEvent<String, OmDirectoryInfo> dirTableUpdateEvent =
            (OMDBUpdateEvent<String, OmDirectoryInfo>) omdbUpdateEvent;
        OmDirectoryInfo updatedDirectoryInfo = dirTableUpdateEvent.getValue();
        OmDirectoryInfo oldDirectoryInfo = dirTableUpdateEvent.getOldValue();
        switch (action) {
        case PUT:
          writeOmDirectoryInfoOnNamespaceDB(updatedDirectoryInfo);
          break;
        case DELETE:
          deleteOmDirectoryInfoOnNamespaceDB(updatedDirectoryInfo);
          break;
        case UPDATE:
          if (oldDirectoryInfo != null) {
            // Delete first, then put.
            deleteOmDirectoryInfoOnNamespaceDB(oldDirectoryInfo);
          } else {
            LOG.warn("Update event does not have the old dirInfo for {}.",
                updatedKey);
          }
          writeOmDirectoryInfoOnNamespaceDB(updatedDirectoryInfo);
          break;
        default:
          LOG.debug("Skipping DB update event : {}",
              omdbUpdateEvent.getAction());
        }
      }
    } catch (IOException ioEx) {
      LOG.error("Unable to process Namespace Summary data in Recon DB. ",
          ioEx);
      return new ImmutablePair<>(getTaskName(), false);
    }
  }
  LOG.info("Completed a process run of NSSummaryTask");
  return new ImmutablePair<>(getTaskName(), true);
}
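Both UPDATE branches above encode the same idiom: subtract the old value's contribution from the derived namespace summary, then add the new one, so aggregates such as counts and sizes are never double-counted. Stripped of the Ozone types, the pattern looks like this (the functional interface and handler names are placeholders, not Recon's API):

// Generic delete-then-put update over a derived aggregate.
interface IoConsumer<V> {
  void accept(V value) throws IOException;
}

static <V> void applyUpdate(V oldValue, V newValue,
    IoConsumer<V> delete, IoConsumer<V> put) throws IOException {
  if (oldValue != null) {
    delete.accept(oldValue); // remove the old contribution
  }
  put.accept(newValue);      // add the new contribution
}

When the old value is missing, the task logs a warning and applies only the put, accepting a possibly stale aggregate rather than failing the whole batch.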