Use of org.finra.herd.model.jpa.StorageUnitEntity in project herd by FINRAOS.
In the class BusinessObjectDataServiceGetBusinessObjectDataTest, the method testGetBusinessObjectDataIncludeStorageUnitStatusHistory:
@Test
public void testGetBusinessObjectDataIncludeStorageUnitStatusHistory() {
    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE, SUBPARTITION_VALUES, DATA_VERSION);
    // Create a business object data storage unit key.
    BusinessObjectDataStorageUnitKey businessObjectDataStorageUnitKey =
        new BusinessObjectDataStorageUnitKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE, SUBPARTITION_VALUES,
            DATA_VERSION, STORAGE_NAME);
    // Create a storage unit entity.
    StorageUnitEntity storageUnitEntity = storageUnitDaoTestHelper.createStorageUnitEntity(STORAGE_NAME, businessObjectDataKey, StorageUnitStatusEntity.DISABLED);
    // Execute a storage unit status change.
    businessObjectDataStorageUnitStatusService.updateBusinessObjectDataStorageUnitStatus(businessObjectDataStorageUnitKey,
        new BusinessObjectDataStorageUnitStatusUpdateRequest(StorageUnitStatusEntity.ENABLED));
    // Retrieve the business object data with the include storage unit status history flag enabled.
    BusinessObjectData resultBusinessObjectData = businessObjectDataService
        .getBusinessObjectData(businessObjectDataKey, PARTITION_KEY, NO_BDATA_STATUS, NO_INCLUDE_BUSINESS_OBJECT_DATA_STATUS_HISTORY, INCLUDE_STORAGE_UNIT_STATUS_HISTORY);
    // Build the expected response object. The storage unit history record is expected to have the system username for the createdBy auditable field.
    BusinessObjectData expectedBusinessObjectData =
        new BusinessObjectData(storageUnitEntity.getBusinessObjectData().getId(), BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION,
            PARTITION_KEY, PARTITION_VALUE, SUBPARTITION_VALUES, DATA_VERSION, LATEST_VERSION_FLAG_SET, BDATA_STATUS, Arrays.asList(
                new StorageUnit(new Storage(STORAGE_NAME, StoragePlatformEntity.S3, null), NO_STORAGE_DIRECTORY, null, StorageUnitStatusEntity.ENABLED, Arrays.asList(
                    new StorageUnitStatusChangeEvent(StorageUnitStatusEntity.ENABLED,
                        HerdDateUtils.getXMLGregorianCalendarValue(IterableUtils.get(storageUnitEntity.getHistoricalStatuses(), 0).getCreatedOn()),
                        HerdDaoSecurityHelper.SYSTEM_USER)),
                    NO_STORAGE_POLICY_TRANSITION_FAILED_ATTEMPTS, NO_RESTORE_EXPIRATION_ON)),
            NO_ATTRIBUTES, NO_BUSINESS_OBJECT_DATA_PARENTS, NO_BUSINESS_OBJECT_DATA_CHILDREN, NO_BUSINESS_OBJECT_DATA_STATUS_HISTORY);
    // Validate the returned response object.
    assertEquals(expectedBusinessObjectData, resultBusinessObjectData);
}
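For reference, a caller that only needs the history can read it straight off the returned object. A minimal sketch follows; the getter names are inferred from the constructor arguments used above and should be treated as assumptions, not as the documented herd model API:

// Inspect the status history of the first (and only) storage unit in the response.
StorageUnit storageUnit = resultBusinessObjectData.getStorageUnits().get(0);
for (StorageUnitStatusChangeEvent event : storageUnit.getStorageUnitStatusHistory()) {
    System.out.println(event.getStatus() + " set by " + event.getUserId() + " at " + event.getEventTime());
}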
Use of org.finra.herd.model.jpa.StorageUnitEntity in project herd by FINRAOS.
In the class BusinessObjectDataServiceRestoreBusinessObjectDataTest, the method testRestoreBusinessObjectDataNonGlacierStorageClass:
@Test
public void testRestoreBusinessObjectDataNonGlacierStorageClass() throws Exception {
    // Create an S3FileTransferRequestParamsDto to access the S3 bucket.
    // Since the test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto glacierS3FileTransferRequestParamsDto =
        S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(S3_BUCKET_NAME + "/" + TEST_S3_KEY_PREFIX + "/").build();
    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE, NO_SUBPARTITION_VALUES, DATA_VERSION);
    // Create database entities required for testing.
    BusinessObjectDataEntity businessObjectDataEntity = businessObjectDataServiceTestHelper.createDatabaseEntitiesForInitiateRestoreTesting(businessObjectDataKey);
    // Get the storage unit entity.
    StorageUnitEntity storageUnitEntity = storageUnitDaoHelper.getStorageUnitEntity(STORAGE_NAME, businessObjectDataEntity);
    try {
        // Put relative non-Glacier storage class files into the S3 bucket.
        for (StorageFileEntity storageFileEntity : storageUnitEntity.getStorageFiles()) {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Standard);
            metadata.setOngoingRestore(false);
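            // Upload a zero-filled placeholder of the registered file size under the storage file's S3 key.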
            s3Operations.putObject(
                new PutObjectRequest(S3_BUCKET_NAME, storageFileEntity.getPath(), new ByteArrayInputStream(new byte[storageFileEntity.getFileSizeBytes().intValue()]), metadata),
                NO_S3_CLIENT);
        }
        // Initiate a restore request for the business object data.
        BusinessObjectData businessObjectData = businessObjectDataService.restoreBusinessObjectData(businessObjectDataKey, EXPIRATION_IN_DAYS);
        // Validate the returned object.
        businessObjectDataServiceTestHelper
            .validateBusinessObjectData(businessObjectDataEntity.getId(), businessObjectDataKey, LATEST_VERSION_FLAG_SET, BDATA_STATUS, businessObjectData);
        // Validate that the origin storage unit status is RESTORING.
        assertEquals(StorageUnitStatusEntity.RESTORING, storageUnitEntity.getStatus().getCode());
        // Validate that there is still no ongoing restore request for any of the non-Glacier objects.
        for (StorageFileEntity storageFileEntity : storageUnitEntity.getStorageFiles()) {
            ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(S3_BUCKET_NAME, storageFileEntity.getPath(), NO_S3_CLIENT);
            assertFalse(objectMetadata.getOngoingRestore());
        }
    } finally {
        // Delete the test files from S3 storage.
        if (!s3Dao.listDirectory(glacierS3FileTransferRequestParamsDto).isEmpty()) {
            s3Dao.deleteDirectory(glacierS3FileTransferRequestParamsDto);
        }
        s3Operations.rollback();
    }
}
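The non-Glacier setup above is the mirror image of what a Glacier-path restore test would do. To simulate an already-archived object, the storage class header is flipped to Glacier; a minimal sketch using the same AWS SDK calls, not taken verbatim from the herd sources:

// Simulate an S3 object archived to Glacier, with no restore currently in progress.
ObjectMetadata glacierMetadata = new ObjectMetadata();
glacierMetadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
glacierMetadata.setOngoingRestore(false);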
Use of org.finra.herd.model.jpa.StorageUnitEntity in project herd by FINRAOS.
In the class BusinessObjectDataServiceGenerateBusinessObjectDataDdlTest, the method testGenerateBusinessObjectDataDdlLargePartitionValueListWithAutoDiscovery:
@Test
@Ignore
public void testGenerateBusinessObjectDataDdlLargePartitionValueListWithAutoDiscovery() {
    final int PRIMARY_PARTITION_VALUE_LIST_SIZE = 10000;
    final int SECOND_LEVEL_PARTITION_VALUES_PER_BUSINESS_OBJECT_DATA = 1;
    final int STORAGE_FILES_PER_PARTITION = 1;
    // Prepare test data and build a list of partition values to generate business object data DDL for.
    // Build a list of schema columns.
    List<SchemaColumn> schemaColumns = new ArrayList<>();
    schemaColumns.add(new SchemaColumn(PARTITION_KEY, "DATE", NO_COLUMN_SIZE, COLUMN_REQUIRED, NO_COLUMN_DEFAULT_VALUE, NO_COLUMN_DESCRIPTION));
    schemaColumns.add(new SchemaColumn(COLUMN_NAME, "NUMBER", COLUMN_SIZE, NO_COLUMN_REQUIRED, NO_COLUMN_DEFAULT_VALUE, COLUMN_DESCRIPTION));
    schemaColumns.add(new SchemaColumn(COLUMN_NAME_2, "STRING", NO_COLUMN_SIZE, NO_COLUMN_REQUIRED, NO_COLUMN_DEFAULT_VALUE, NO_COLUMN_DESCRIPTION));
    // Use the first two columns as partition columns.
    List<SchemaColumn> partitionColumns = schemaColumns.subList(0, 2);
    // Create a business object format entity with the schema.
    BusinessObjectFormatEntity businessObjectFormatEntity = businessObjectFormatDaoTestHelper
        .createBusinessObjectFormatEntity(NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FileTypeEntity.TXT_FILE_TYPE, FORMAT_VERSION, FORMAT_DESCRIPTION, LATEST_VERSION_FLAG_SET,
            PARTITION_KEY, NO_PARTITION_KEY_GROUP, NO_ATTRIBUTES, SCHEMA_DELIMITER_PIPE, SCHEMA_ESCAPE_CHARACTER_BACKSLASH, SCHEMA_NULL_VALUE_BACKSLASH_N, schemaColumns,
            partitionColumns);
    // Create an S3 storage entity.
    StorageEntity storageEntity = storageDaoTestHelper
        .createStorageEntity(STORAGE_NAME, StoragePlatformEntity.S3, configurationHelper.getProperty(ConfigurationValue.S3_ATTRIBUTE_NAME_BUCKET_NAME), S3_BUCKET_NAME);
    // Create relative business object data, storage unit, and storage file entities.
    List<String> partitionValues = new ArrayList<>();
    for (int i = 0; i < PRIMARY_PARTITION_VALUE_LIST_SIZE; i++) {
        String partitionValue = String.format("%s-%03d", PARTITION_VALUE, i);
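        // e.g. "<PARTITION_VALUE>-000", "<PARTITION_VALUE>-001", ..., "<PARTITION_VALUE>-9999" across the 10,000 iterations.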
        partitionValues.add(partitionValue);
        // Create a business object data entity.
        BusinessObjectDataEntity businessObjectDataEntity = businessObjectDataDaoTestHelper
            .createBusinessObjectDataEntity(businessObjectFormatEntity, partitionValue, NO_SUBPARTITION_VALUES, DATA_VERSION, true, BusinessObjectDataStatusEntity.VALID);
        // Build an S3 key prefix according to the herd S3 naming convention.
        String s3KeyPrefix = getExpectedS3KeyPrefix(NAMESPACE, DATA_PROVIDER_NAME, BDEF_NAME, FORMAT_USAGE_CODE, FileTypeEntity.TXT_FILE_TYPE, FORMAT_VERSION, PARTITION_KEY,
            partitionValue, null, null, DATA_VERSION);
        // Create a storage unit with a storage directory path.
        StorageUnitEntity storageUnitEntity =
            storageUnitDaoTestHelper.createStorageUnitEntity(storageEntity, businessObjectDataEntity, StorageUnitStatusEntity.ENABLED, s3KeyPrefix);
        // Create storage file entities.
        for (int j = 0; j < SECOND_LEVEL_PARTITION_VALUES_PER_BUSINESS_OBJECT_DATA; j++) {
            // Build a storage file directory path that includes the relative second level partition value - needed for auto discovery.
            String storageFileDirectoryPath = String.format("%s/%s=%s-%03d", s3KeyPrefix, COLUMN_NAME, PARTITION_VALUE_2, j);
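            // With j = 0 this resolves to "<s3KeyPrefix>/<COLUMN_NAME>=<PARTITION_VALUE_2>-000", i.e. a Hive-style "<column>=<value>" sub-directory.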
            for (int k = 0; k < STORAGE_FILES_PER_PARTITION; k++) {
                String storageFilePath = String.format("%s/%03d.data", storageFileDirectoryPath, k);
                storageFileDaoTestHelper.createStorageFileEntity(storageUnitEntity, storageFilePath, FILE_SIZE_1_KB, ROW_COUNT_1000);
            }
        }
        herdDao.saveAndRefresh(storageUnitEntity);
    }
    // Retrieve business object data DDL for the entire list of partition values.
    BusinessObjectDataDdl businessObjectDataDdl = businessObjectDataService.generateBusinessObjectDataDdl(
        new BusinessObjectDataDdlRequest(NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FileTypeEntity.TXT_FILE_TYPE, FORMAT_VERSION,
            Arrays.asList(new PartitionValueFilter(PARTITION_KEY, partitionValues, NO_PARTITION_VALUE_RANGE, NO_LATEST_BEFORE_PARTITION_VALUE, NO_LATEST_AFTER_PARTITION_VALUE)),
            NO_STANDALONE_PARTITION_VALUE_FILTER, DATA_VERSION, NO_STORAGE_NAMES, STORAGE_NAME, BusinessObjectDataDdlOutputFormatEnum.HIVE_13_DDL, TABLE_NAME, NO_CUSTOM_DDL_NAME,
            INCLUDE_DROP_TABLE_STATEMENT, INCLUDE_IF_NOT_EXISTS_OPTION, NO_INCLUDE_DROP_PARTITIONS, NO_ALLOW_MISSING_DATA, NO_INCLUDE_ALL_REGISTERED_SUBPARTITIONS,
            NO_SUPPRESS_SCAN_FOR_UNREGISTERED_SUBPARTITIONS));
    // Validate the results.
    assertNotNull(businessObjectDataDdl);
}
Use of org.finra.herd.model.jpa.StorageUnitEntity in project herd by FINRAOS.
In the class BusinessObjectDataServiceGenerateBusinessObjectDataDdlTest, the method testGenerateBusinessObjectDataDdlSuppressScanForUnregisteredSubPartitionsNoDirectoryPath:
@Test
public void testGenerateBusinessObjectDataDdlSuppressScanForUnregisteredSubPartitionsNoDirectoryPath() {
    // Create two VALID sub-partitions both with "available" storage units in a non-Glacier storage.
    List<StorageUnitEntity> storageUnitEntities = businessObjectDataServiceTestHelper.createDatabaseEntitiesForBusinessObjectDataDdlTestingTwoPartitionLevels(
        Arrays.asList(Arrays.asList(PARTITION_VALUE, SUB_PARTITION_VALUE_1), Arrays.asList(PARTITION_VALUE, SUB_PARTITION_VALUE_2)));
    // Update both storage units to remove the directory path values.
    for (StorageUnitEntity storageUnitEntity : storageUnitEntities) {
        storageUnitEntity.setDirectoryPath(NO_STORAGE_DIRECTORY_PATH);
    }
    // Retrieve business object data DDL with the flag set to suppress the scan for unregistered sub-partitions.
    BusinessObjectDataDdl resultBusinessObjectDataDdl = businessObjectDataService.generateBusinessObjectDataDdl(
        new BusinessObjectDataDdlRequest(NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FileTypeEntity.TXT_FILE_TYPE, FORMAT_VERSION,
            Arrays.asList(new PartitionValueFilter(FIRST_PARTITION_COLUMN_NAME, Arrays.asList(PARTITION_VALUE), NO_PARTITION_VALUE_RANGE, NO_LATEST_BEFORE_PARTITION_VALUE,
                NO_LATEST_AFTER_PARTITION_VALUE)), NO_STANDALONE_PARTITION_VALUE_FILTER, NO_DATA_VERSION, NO_STORAGE_NAMES, STORAGE_NAME,
            BusinessObjectDataDdlOutputFormatEnum.HIVE_13_DDL, TABLE_NAME, NO_CUSTOM_DDL_NAME, INCLUDE_DROP_TABLE_STATEMENT, INCLUDE_IF_NOT_EXISTS_OPTION,
            INCLUDE_DROP_PARTITIONS, NO_ALLOW_MISSING_DATA, NO_INCLUDE_ALL_REGISTERED_SUBPARTITIONS, SUPPRESS_SCAN_FOR_UNREGISTERED_SUBPARTITIONS));
    // Validate the response object. Both sub-partitions should be present in the generated DDL.
    assertEquals(new BusinessObjectDataDdl(NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FileTypeEntity.TXT_FILE_TYPE, FORMAT_VERSION,
        Arrays.asList(new PartitionValueFilter(FIRST_PARTITION_COLUMN_NAME, Arrays.asList(PARTITION_VALUE), NO_PARTITION_VALUE_RANGE, NO_LATEST_BEFORE_PARTITION_VALUE,
            NO_LATEST_AFTER_PARTITION_VALUE)), NO_STANDALONE_PARTITION_VALUE_FILTER, NO_DATA_VERSION, NO_STORAGE_NAMES, STORAGE_NAME,
        BusinessObjectDataDdlOutputFormatEnum.HIVE_13_DDL, TABLE_NAME, NO_CUSTOM_DDL_NAME,
        businessObjectDataServiceTestHelper.getExpectedBusinessObjectDataDdlTwoPartitionLevels(
            Arrays.asList(Arrays.asList(PARTITION_VALUE, SUB_PARTITION_VALUE_1), Arrays.asList(PARTITION_VALUE, SUB_PARTITION_VALUE_2)))),
        resultBusinessObjectDataDdl);
}
Use of org.finra.herd.model.jpa.StorageUnitEntity in project herd by FINRAOS.
In the class BusinessObjectDataServiceGenerateBusinessObjectDataDdlTest, the method testGenerateBusinessObjectDataDdlIncludeAllRegisteredSubPartitionsSecondSubPartitionDeletedBdataArchived:
@Test
public void testGenerateBusinessObjectDataDdlIncludeAllRegisteredSubPartitionsSecondSubPartitionDeletedBdataArchived() {
    // Create two VALID sub-partitions both with "available" storage units in a non-Glacier storage.
    List<StorageUnitEntity> storageUnitEntities = businessObjectDataServiceTestHelper.createDatabaseEntitiesForBusinessObjectDataDdlTestingTwoPartitionLevels(
        Arrays.asList(Arrays.asList(PARTITION_VALUE, SUB_PARTITION_VALUE_1), Arrays.asList(PARTITION_VALUE, SUB_PARTITION_VALUE_2)));
    // Update the second sub-partition's business object data status to DELETED.
    storageUnitEntities.get(1).getBusinessObjectData().setStatus(businessObjectDataStatusDao.getBusinessObjectDataStatusByCode(BusinessObjectDataStatusEntity.DELETED));
    // Update the second sub-partition's storage unit status to ARCHIVED.
    storageUnitEntities.get(1).setStatus(storageUnitStatusDao.getStorageUnitStatusByCode(StorageUnitStatusEntity.ARCHIVED));
    // Retrieve business object data DDL with the "IncludeAllRegisteredSubPartitions" option enabled.
    BusinessObjectDataDdl resultBusinessObjectDataDdl = businessObjectDataService.generateBusinessObjectDataDdl(
        new BusinessObjectDataDdlRequest(NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FileTypeEntity.TXT_FILE_TYPE, FORMAT_VERSION,
            Arrays.asList(new PartitionValueFilter(FIRST_PARTITION_COLUMN_NAME, Arrays.asList(PARTITION_VALUE), NO_PARTITION_VALUE_RANGE, NO_LATEST_BEFORE_PARTITION_VALUE,
                NO_LATEST_AFTER_PARTITION_VALUE)), NO_STANDALONE_PARTITION_VALUE_FILTER, NO_DATA_VERSION, NO_STORAGE_NAMES, STORAGE_NAME,
            BusinessObjectDataDdlOutputFormatEnum.HIVE_13_DDL, TABLE_NAME, NO_CUSTOM_DDL_NAME, INCLUDE_DROP_TABLE_STATEMENT, INCLUDE_IF_NOT_EXISTS_OPTION,
            INCLUDE_DROP_PARTITIONS, NO_ALLOW_MISSING_DATA, INCLUDE_ALL_REGISTERED_SUBPARTITIONS, NO_SUPPRESS_SCAN_FOR_UNREGISTERED_SUBPARTITIONS));
    // Validate the response object. Only the first sub-partition should be present in the generated DDL.
    assertEquals(new BusinessObjectDataDdl(NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FileTypeEntity.TXT_FILE_TYPE, FORMAT_VERSION,
        Arrays.asList(new PartitionValueFilter(FIRST_PARTITION_COLUMN_NAME, Arrays.asList(PARTITION_VALUE), NO_PARTITION_VALUE_RANGE, NO_LATEST_BEFORE_PARTITION_VALUE,
            NO_LATEST_AFTER_PARTITION_VALUE)), NO_STANDALONE_PARTITION_VALUE_FILTER, NO_DATA_VERSION, NO_STORAGE_NAMES, STORAGE_NAME,
        BusinessObjectDataDdlOutputFormatEnum.HIVE_13_DDL, TABLE_NAME, NO_CUSTOM_DDL_NAME,
        businessObjectDataServiceTestHelper.getExpectedBusinessObjectDataDdlTwoPartitionLevels(Arrays.asList(Arrays.asList(PARTITION_VALUE, SUB_PARTITION_VALUE_1)))),
        resultBusinessObjectDataDdl);
}
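A rough way to confirm the exclusion, assuming the generated script is exposed through a getDdl() accessor on BusinessObjectDataDdl (an assumption; the test above compares whole response objects instead), would be:

// Only the first sub-partition should be referenced in the generated DDL (getDdl() assumed from the model).
assertTrue(resultBusinessObjectDataDdl.getDdl().contains(SUB_PARTITION_VALUE_1));
assertFalse(resultBusinessObjectDataDdl.getDdl().contains(SUB_PARTITION_VALUE_2));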