Use of com.github.ambry.utils.Pair in project ambry by LinkedIn.
The class LogSegmentTest, method getSegmentAndFileChannel.
/**
 * Creates a {@link LogSegment} whose file channel is wrapped in a Mockito spy, so tests can stub or
 * verify channel-level behavior.
 * @param name the name of the log segment file to create under {@code tempDir}.
 * @return a pair that contains the log segment and the spied file channel backing it.
 * @throws Exception if the file cannot be (re)created or the segment cannot be constructed.
 */
private Pair<LogSegment, FileChannel> getSegmentAndFileChannel(String name) throws Exception {
  File segmentFile = new File(tempDir, name);
  // Start from a clean slate: a leftover file from a previous run must be removable.
  if (segmentFile.exists()) {
    assertTrue(segmentFile.getAbsolutePath() + " already exists and could not be deleted", segmentFile.delete());
  }
  assertTrue("Segment file could not be created at path " + segmentFile.getAbsolutePath(),
      segmentFile.createNewFile());
  segmentFile.deleteOnExit();
  // Spy on the real channel so reads/writes still hit disk unless explicitly stubbed.
  FileChannel spiedChannel = Mockito.spy(Utils.openChannel(segmentFile, true));
  LogSegment segment = new LogSegment(segmentFile, STANDARD_SEGMENT_SIZE, config, metrics, spiedChannel);
  return new Pair<>(segment, spiedChannel);
}
Use of com.github.ambry.utils.Pair in project ambry by LinkedIn.
The class LogTest, method constructionFailureTest.
/**
 * Tests cases where construction of {@link Log} fails either because of bad arguments or because of a store
 * exception.
 * @throws IOException
 * @throws StoreException
 */
@Test
public void constructionFailureTest() throws IOException, StoreException {
  // Each pair is (log capacity, segment capacity) that must be rejected with IllegalArgumentException.
  List<Pair<Long, Long>> logAndSegmentSizes = new ArrayList<>();
  // <=0 values for capacities
  logAndSegmentSizes.add(new Pair<>(-1L, SEGMENT_CAPACITY));
  logAndSegmentSizes.add(new Pair<>(LOG_CAPACITY, -1L));
  logAndSegmentSizes.add(new Pair<>(0L, SEGMENT_CAPACITY));
  logAndSegmentSizes.add(new Pair<>(LOG_CAPACITY, 0L));
  // log capacity is not perfectly divisible by segment capacity
  logAndSegmentSizes.add(new Pair<>(LOG_CAPACITY, LOG_CAPACITY - 1));
  for (Pair<Long, Long> logAndSegmentSize : logAndSegmentSizes) {
    try {
      new Log(tempDir.getAbsolutePath(), logAndSegmentSize.getFirst(), StoreTestUtils.DEFAULT_DISK_SPACE_ALLOCATOR,
          createStoreConfig(logAndSegmentSize.getSecond(), setFilePermissionEnabled), metrics, null);
      fail("Construction should have failed");
    } catch (IllegalArgumentException e) {
      // expected. Nothing to do.
    }
  }
  // file which is not a directory
  File file = create(LogSegmentName.generateFirstSegmentName(false).toFilename());
  try {
    new Log(file.getAbsolutePath(), 1, StoreTestUtils.DEFAULT_DISK_SPACE_ALLOCATOR,
        createStoreConfig(1, setFilePermissionEnabled), metrics, null);
    fail("Construction should have failed");
  } catch (StoreException e) {
    // expected. Nothing to do.
  }
  // Store exception occurred during construction: the allocator throws on every allocate() call.
  DiskSpaceAllocator mockDiskAllocator = Mockito.mock(DiskSpaceAllocator.class);
  doThrow(new IOException(StoreException.IO_ERROR_STR)).when(mockDiskAllocator)
      .allocate(any(File.class), anyLong(), anyString(), anyBoolean());
  List<LogSegment> segmentsToLoad = Collections.emptyList();
  try {
    // Type-safe empty iterator instead of the raw-typed Collections.EMPTY_LIST.iterator().
    new Log(tempDir.getAbsolutePath(), LOG_CAPACITY, mockDiskAllocator,
        createStoreConfig(SEGMENT_CAPACITY, setFilePermissionEnabled), metrics, true, segmentsToLoad,
        Collections.emptyIterator(), null);
    fail("Should have failed when constructing log");
  } catch (StoreException e) {
    assertEquals("Mismatch in error code", StoreErrorCodes.IOError, e.getErrorCode());
  }
}
Use of com.github.ambry.utils.Pair in project ambry by LinkedIn.
The class LogTest, method ensureCapacityFailureTest.
/**
 * Tests several failure scenarios where an exception occurs while instantiating a new log segment in the
 * ensureCapacity() method. The method should catch the exception, free the just-allocated segment and
 * restore the remaining-unallocated-segments counter.
 * @throws Exception
 */
@Test
public void ensureCapacityFailureTest() throws Exception {
// Total number of segments the log can hold.
int numSegments = (int) (LOG_CAPACITY / SEGMENT_CAPACITY);
// Pre-existing first segment at position 0, generation 0.
LogSegment segment = getLogSegment(LogSegmentName.fromPositionAndGeneration(0, 0), SEGMENT_CAPACITY, true);
LogSegmentName lastName = LogSegmentName.fromPositionAndGeneration(0, 0);
List<Pair<LogSegmentName, String>> segmentNameAndFileNamesDesired = new ArrayList<>();
// Pre-compute the (name, file name) pairs the log will hand out for subsequently allocated segments.
for (int i = 0; i < numSegments; i++) {
lastName = lastName.getNextPositionName();
String fileName = lastName.toFilename();
segmentNameAndFileNamesDesired.add(new Pair<>(lastName, fileName));
}
// A file whose exists() is stubbed to false, so LogSegment instantiation on it fails with FileNotFound.
File file = new File("1_0_log");
File mockFile = Mockito.spy(file);
when(mockFile.exists()).thenReturn(false);
DiskSpaceAllocator mockDiskAllocator = Mockito.spy(StoreTestUtils.DEFAULT_DISK_SPACE_ALLOCATOR);
Log log = new Log(tempDir.getAbsolutePath(), LOG_CAPACITY, mockDiskAllocator, createStoreConfig(SEGMENT_CAPACITY, setFilePermissionEnabled), metrics, true, Collections.singletonList(segment), segmentNameAndFileNamesDesired.iterator(), null);
Log mockLog = Mockito.spy(log);
// Every allocation returns the non-existent mock file, forcing the failure path inside ensureCapacity().
when(mockLog.allocate(anyString(), anyLong(), anyBoolean())).thenReturn(mockFile);
long initialUnallocatedSegments = mockLog.getRemainingUnallocatedSegments();
// write enough so that all segments are allocated
ByteBuffer buffer = ByteBuffer.allocate((int) (segment.getCapacityInBytes() - segment.getStartOffset()));
// write first segment
buffer.rewind();
CHANNEL_APPENDER.append(mockLog, buffer);
// Test 1: try to write second segment triggering roll over and calls ensureCapacity()
// The (real, spied) diskSpaceAllocator.free() also fails here, so the failed segment is NOT released
// and the counter ends up at initialUnallocatedSegments - 1.
try {
buffer.rewind();
CHANNEL_APPENDER.append(mockLog, buffer);
fail("Should fail because log segment instantiation encounters FileNotFound exception");
} catch (StoreException e) {
assertEquals("Mismatch in store error code", StoreErrorCodes.Unknown_Error, e.getErrorCode());
assertEquals("The number of unallocated segments should decrease because exception occurred when freeing the segment", initialUnallocatedSegments - 1, mockLog.getRemainingUnallocatedSegments());
}
// Test 2: the segment with exception is freed successfully and remainingUnallocatedSegments counter is restored.
// Stub free() to succeed so that this time the failed segment is released and the counter restored.
doAnswer((Answer) invocation -> null).when(mockDiskAllocator).free(any(File.class), any(Long.class), anyString(), anyBoolean());
try {
buffer.rewind();
CHANNEL_APPENDER.append(mockLog, buffer);
fail("Should fail because log segment instantiation encounters FileNotFound exception");
} catch (StoreException e) {
assertEquals("Mismatch in store error code", StoreErrorCodes.File_Not_Found, e.getErrorCode());
// Note that the number should be initialUnallocatedSegments - 1 because in previous test the segment with exception
// didn't get freed due to exception in diskSpaceAllocator.free().
assertEquals("The number of unallocated segments should stay unchanged because exception segment is successfully freed", initialUnallocatedSegments - 1, mockLog.getRemainingUnallocatedSegments());
}
}
Use of com.github.ambry.utils.Pair in project ambry by LinkedIn.
The class AzureCompactionTool, method main.
/**
 * Entry point for the Azure compaction tool. In test mode (the default, i.e. without the purge option) it
 * only prints compaction progress per partition; with the purge option it compacts (deletes dead blobs from)
 * the specified partitions.
 * @param args command line arguments; requires a properties file path and, for purging, partition paths.
 * @throws Exception on startup failures such as a missing or unreadable properties file.
 */
public static void main(String[] args) throws Exception {
  OptionParser parser = new OptionParser();
  ArgumentAcceptingOptionSpec<String> propsFileOpt =
      parser.accepts(PROPS_FILE, "Properties file path").withRequiredArg().describedAs(PROPS_FILE).ofType(String.class);
  String commandName = AzureCompactionTool.class.getSimpleName();
  parser.accepts(PURGE_OPTION, "Flag to purge dead blobs from the partition");
  parser.nonOptions("The partitions to compact").ofType(String.class);
  OptionSet optionSet = parser.parse(args);
  String propsFilePath = optionSet.valueOf(propsFileOpt);
  if (propsFilePath == null) {
    printHelpAndExit(parser);
  }
  Properties properties = Utils.loadProps(propsFilePath);
  ToolUtils.addClusterMapProperties(properties);
  VerifiableProperties verifiableProperties = new VerifiableProperties(properties);
  // User needs to specify this option to actually delete blobs
  boolean testMode = !optionSet.has(PURGE_OPTION);
  List<String> partitions = (List<String>) optionSet.nonOptionArguments();
  if (!testMode && partitions.isEmpty()) {
    printHelpAndExit(parser);
  }
  Set<PartitionId> partitionIdSet = partitions.stream().map(PartitionPathId::new).collect(Collectors.toSet());
  AzureCloudDestination azureDest = null;
  // BUGFIX: the original called System.exit() inside the try block. System.exit() does not run finally
  // blocks, so azureDest.close() was skipped on the success path. Record the exit code instead and exit
  // only after cleanup has completed.
  int exitCode = 0;
  try {
    azureDest = (AzureCloudDestination) new AzureCloudDestinationFactory(verifiableProperties, new MetricRegistry(),
        null).getCloudDestination();
    CloudConfig cloudConfig = new CloudConfig(verifiableProperties);
    CloudStorageCompactor compactor =
        new CloudStorageCompactor(azureDest, cloudConfig, partitionIdSet, new VcrMetrics(new MetricRegistry()));
    // Attempt clean shutdown if someone Ctrl-C's us.
    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
      logger.info("Received shutdown signal. Shutting down compactor.");
      compactor.shutdown();
    }));
    if (testMode) {
      // Report each partition's compaction progress as an ISO-8601 timestamp in the local zone.
      DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ISO_OFFSET_DATE_TIME.withZone(ZoneId.systemDefault());
      List<Pair<String, Long>> progressList = azureDest.getAzureStorageCompactor().getAllCompactionProgress();
      progressList.forEach(pair -> {
        String progress = dateTimeFormatter.format(Instant.ofEpochMilli(pair.getSecond()));
        // TODO: write to user specified output file
        System.out.println(pair.getFirst() + "\t" + progress);
      });
    } else {
      compactor.compactPartitions();
    }
  } catch (Exception ex) {
    logger.error("Command {} failed", commandName, ex);
    exitCode = 1;
  } finally {
    if (azureDest != null) {
      azureDest.close();
    }
  }
  System.exit(exitCode);
}
Use of com.github.ambry.utils.Pair in project ambry by LinkedIn.
The class ServerAdminTool, method getBlobProperties.
/**
 * Gets {@link BlobProperties} for {@code blobId}.
 * @param dataNodeId the {@link DataNodeId} to contact.
 * @param blobId the {@link BlobId} to operate on.
 * @param getOption the {@link GetOption} to send with the {@link GetRequest}.
 * @param clusterMap the {@link ClusterMap} to use.
 * @return the {@link ServerErrorCode} and {@link BlobProperties} of {@code blobId}; the properties are
 *         {@code null} if the server returned no payload stream.
 * @throws Exception
 */
public Pair<ServerErrorCode, BlobProperties> getBlobProperties(DataNodeId dataNodeId, BlobId blobId, GetOption getOption, ClusterMap clusterMap) throws Exception {
  Pair<ServerErrorCode, InputStream> errorCodeAndStream =
      getGetResponse(dataNodeId, blobId, MessageFormatFlags.BlobProperties, getOption, clusterMap);
  InputStream payloadStream = errorCodeAndStream.getSecond();
  // Only deserialize when the server actually returned a payload.
  BlobProperties blobProperties = null;
  if (payloadStream != null) {
    blobProperties = MessageFormatRecord.deserializeBlobProperties(payloadStream);
  }
  return new Pair<>(errorCodeAndStream.getFirst(), blobProperties);
}
Aggregations