Example 56 with Pair

use of com.github.ambry.utils.Pair in project ambry by linkedin.

the class LogSegmentTest method getSegmentAndFileChannel.

/**
 * Creates a {@link LogSegment} and a mock (Mockito spy) file channel associated with it for testing.
 * @param name the name of the log segment file.
 * @return a {@link Pair} containing the log segment and the mock file channel.
 * @throws Exception if the segment file cannot be created or opened.
 */
private Pair<LogSegment, FileChannel> getSegmentAndFileChannel(String name) throws Exception {
    File file = new File(tempDir, name);
    if (file.exists()) {
        assertTrue(file.getAbsolutePath() + " already exists and could not be deleted", file.delete());
    }
    assertTrue("Segment file could not be created at path " + file.getAbsolutePath(), file.createNewFile());
    file.deleteOnExit();
    FileChannel fileChannel = Utils.openChannel(file, true);
    FileChannel mockFileChannel = Mockito.spy(fileChannel);
    LogSegment segment = new LogSegment(file, STANDARD_SEGMENT_SIZE, config, metrics, mockFileChannel);
    return new Pair<>(segment, mockFileChannel);
}
Also used : FileChannel(java.nio.channels.FileChannel) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Pair(com.github.ambry.utils.Pair)
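
The returned pair's elements come back through getFirst() and getSecond(). A hypothetical follow-up (not part of the Ambry test above) could use the spied channel to inject I/O failures into the segment under test; the segment name and the stubbed write(ByteBuffer, long) overload are illustrative assumptions:

Pair<LogSegment, FileChannel> segmentAndChannel = getSegmentAndFileChannel("0_0_log");
LogSegment segment = segmentAndChannel.getFirst();
FileChannel spyChannel = segmentAndChannel.getSecond();
// Make positional writes fail so the segment's error-handling path can be exercised.
doThrow(new IOException("simulated disk error")).when(spyChannel).write(any(ByteBuffer.class), anyLong());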

Example 57 with Pair

use of com.github.ambry.utils.Pair in project ambry by linkedin.

the class LogTest method constructionFailureTest.

/**
 * Tests cases where construction of a {@link Log} fails, either because of bad arguments or because of a store exception.
 * @throws IOException
 * @throws StoreException
 */
@Test
public void constructionFailureTest() throws IOException, StoreException {
    List<Pair<Long, Long>> logAndSegmentSizes = new ArrayList<>();
    // <=0 values for capacities
    logAndSegmentSizes.add(new Pair<>(-1L, SEGMENT_CAPACITY));
    logAndSegmentSizes.add(new Pair<>(LOG_CAPACITY, -1L));
    logAndSegmentSizes.add(new Pair<>(0L, SEGMENT_CAPACITY));
    logAndSegmentSizes.add(new Pair<>(LOG_CAPACITY, 0L));
    // log capacity is not perfectly divisible by segment capacity
    logAndSegmentSizes.add(new Pair<>(LOG_CAPACITY, LOG_CAPACITY - 1));
    for (Pair<Long, Long> logAndSegmentSize : logAndSegmentSizes) {
        try {
            new Log(tempDir.getAbsolutePath(), logAndSegmentSize.getFirst(), StoreTestUtils.DEFAULT_DISK_SPACE_ALLOCATOR, createStoreConfig(logAndSegmentSize.getSecond(), setFilePermissionEnabled), metrics, null);
            fail("Construction should have failed");
        } catch (IllegalArgumentException e) {
        // expected. Nothing to do.
        }
    }
    // file which is not a directory
    File file = create(LogSegmentName.generateFirstSegmentName(false).toFilename());
    try {
        new Log(file.getAbsolutePath(), 1, StoreTestUtils.DEFAULT_DISK_SPACE_ALLOCATOR, createStoreConfig(1, setFilePermissionEnabled), metrics, null);
        fail("Construction should have failed");
    } catch (StoreException e) {
    // expected. Nothing to do.
    }
    // Store exception occurred during construction
    DiskSpaceAllocator mockDiskAllocator = Mockito.mock(DiskSpaceAllocator.class);
    doThrow(new IOException(StoreException.IO_ERROR_STR)).when(mockDiskAllocator).allocate(any(File.class), anyLong(), anyString(), anyBoolean());
    List<LogSegment> segmentsToLoad = Collections.emptyList();
    try {
        new Log(tempDir.getAbsolutePath(), LOG_CAPACITY, mockDiskAllocator, createStoreConfig(SEGMENT_CAPACITY, setFilePermissionEnabled), metrics, true, segmentsToLoad, Collections.EMPTY_LIST.iterator(), null);
        fail("Should have failed to when constructing log");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.IOError, e.getErrorCode());
    }
}
Also used : ArrayList(java.util.ArrayList) IOException(java.io.IOException) File(java.io.File) Pair(com.github.ambry.utils.Pair) Test(org.junit.Test)
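
The IllegalArgumentException cases exercise capacity validation. Here is a minimal sketch of the checks this test implies, written as a hypothetical validateCapacities helper rather than Ambry's actual constructor code:

private static void validateCapacities(long logCapacity, long segmentCapacity) {
    if (logCapacity <= 0 || segmentCapacity <= 0) {
        // Rejects the four pairs with <= 0 values added above.
        throw new IllegalArgumentException("Capacities must be positive: log=" + logCapacity + ", segment=" + segmentCapacity);
    }
    if (logCapacity % segmentCapacity != 0) {
        // Rejects new Pair<>(LOG_CAPACITY, LOG_CAPACITY - 1).
        throw new IllegalArgumentException("Log capacity " + logCapacity + " is not a multiple of segment capacity " + segmentCapacity);
    }
}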

Example 58 with Pair

use of com.github.ambry.utils.Pair in project ambry by linkedin.

the class LogTest method ensureCapacityFailureTest.

/**
 * Tests several failure scenarios where an exception occurs while instantiating a new log segment in the ensureCapacity() method.
 * The method should catch the exception, free the log segment and restore the counter.
 * @throws Exception
 */
@Test
public void ensureCapacityFailureTest() throws Exception {
    int numSegments = (int) (LOG_CAPACITY / SEGMENT_CAPACITY);
    LogSegment segment = getLogSegment(LogSegmentName.fromPositionAndGeneration(0, 0), SEGMENT_CAPACITY, true);
    LogSegmentName lastName = LogSegmentName.fromPositionAndGeneration(0, 0);
    List<Pair<LogSegmentName, String>> segmentNameAndFileNamesDesired = new ArrayList<>();
    for (int i = 0; i < numSegments; i++) {
        lastName = lastName.getNextPositionName();
        String fileName = lastName.toFilename();
        segmentNameAndFileNamesDesired.add(new Pair<>(lastName, fileName));
    }
    File file = new File("1_0_log");
    File mockFile = Mockito.spy(file);
    when(mockFile.exists()).thenReturn(false);
    DiskSpaceAllocator mockDiskAllocator = Mockito.spy(StoreTestUtils.DEFAULT_DISK_SPACE_ALLOCATOR);
    Log log = new Log(tempDir.getAbsolutePath(), LOG_CAPACITY, mockDiskAllocator, createStoreConfig(SEGMENT_CAPACITY, setFilePermissionEnabled), metrics, true, Collections.singletonList(segment), segmentNameAndFileNamesDesired.iterator(), null);
    Log mockLog = Mockito.spy(log);
    when(mockLog.allocate(anyString(), anyLong(), anyBoolean())).thenReturn(mockFile);
    long initialUnallocatedSegments = mockLog.getRemainingUnallocatedSegments();
    // prepare a buffer that fills a segment exactly, so that repeated appends force new segment allocations
    ByteBuffer buffer = ByteBuffer.allocate((int) (segment.getCapacityInBytes() - segment.getStartOffset()));
    // write first segment
    buffer.rewind();
    CHANNEL_APPENDER.append(mockLog, buffer);
    // Test 1: try to write a second segment, which triggers a rollover and calls ensureCapacity()
    try {
        buffer.rewind();
        CHANNEL_APPENDER.append(mockLog, buffer);
        fail("Should fail because log segment instantiation encounters FileNotFound exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in store error code", StoreErrorCodes.Unknown_Error, e.getErrorCode());
        assertEquals("The number of unallocated segments should decrease because exception occurred when freeing the segment", initialUnallocatedSegments - 1, mockLog.getRemainingUnallocatedSegments());
    }
    // Test 2: the segment with exception is freed successfully and remainingUnallocatedSegments counter is restored.
    doAnswer((Answer) invocation -> null).when(mockDiskAllocator).free(any(File.class), any(Long.class), anyString(), anyBoolean());
    try {
        buffer.rewind();
        CHANNEL_APPENDER.append(mockLog, buffer);
        fail("Should fail because log segment instantiation encounters FileNotFound exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in store error code", StoreErrorCodes.File_Not_Found, e.getErrorCode());
        // Note that the expected number is initialUnallocatedSegments - 1 because in the previous test the failed
        // segment didn't get freed due to an exception in diskSpaceAllocator.free().
        assertEquals("The number of unallocated segments should stay unchanged because the failed segment was successfully freed", initialUnallocatedSegments - 1, mockLog.getRemainingUnallocatedSegments());
    }
}
Also used : Arrays(java.util.Arrays) ArgumentMatchers(org.mockito.ArgumentMatchers) RunWith(org.junit.runner.RunWith) ByteBuffer(java.nio.ByteBuffer) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Answer(org.mockito.stubbing.Answer) TestUtils(com.github.ambry.utils.TestUtils) After(org.junit.After) Parameterized(org.junit.runners.Parameterized) StoreConfig(com.github.ambry.config.StoreConfig) MetricRegistry(com.codahale.metrics.MetricRegistry) Pair(com.github.ambry.utils.Pair) Iterator(java.util.Iterator) Files(java.nio.file.Files) Channels(java.nio.channels.Channels) StoreTestUtils(com.github.ambry.store.StoreTestUtils) Set(java.util.Set) Utils(com.github.ambry.utils.Utils) IOException(java.io.IOException) Test(org.junit.Test) File(java.io.File) Mockito(org.mockito.Mockito) List(java.util.List) ByteBufferInputStream(com.github.ambry.utils.ByteBufferInputStream) Assert(org.junit.Assert) Collections(java.util.Collections)
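
The pattern under test can be sketched in isolation: decrement the unallocated-segment counter, attempt allocation, and on failure free the segment before restoring the counter. All names below are hypothetical; this is a simplified illustration, not Ambry's actual ensureCapacity() implementation:

import java.util.concurrent.atomic.AtomicLong;

class EnsureCapacitySketch {
    private final AtomicLong remainingUnallocatedSegments = new AtomicLong(10);

    void ensureCapacity() throws Exception {
        remainingUnallocatedSegments.decrementAndGet();
        try {
            allocateSegment(); // may throw, e.g. when the segment file is missing
        } catch (Exception e) {
            // Restore the counter only after the half-created segment is freed. If free itself
            // throws (Test 1 above), the counter stays at initialUnallocatedSegments - 1.
            freeSegment();
            remainingUnallocatedSegments.incrementAndGet();
            throw e;
        }
    }

    void allocateSegment() throws Exception { /* instantiate a file-backed log segment */ }
    void freeSegment() throws Exception { /* return the segment file to the disk space allocator */ }
}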

Example 59 with Pair

use of com.github.ambry.utils.Pair in project ambry by linkedin.

the class AzureCompactionTool method main.

public static void main(String[] args) throws Exception {
    OptionParser parser = new OptionParser();
    ArgumentAcceptingOptionSpec<String> propsFileOpt = parser.accepts(PROPS_FILE, "Properties file path").withRequiredArg().describedAs(PROPS_FILE).ofType(String.class);
    String commandName = AzureCompactionTool.class.getSimpleName();
    parser.accepts(PURGE_OPTION, "Flag to purge dead blobs from the partition");
    parser.nonOptions("The partitions to compact").ofType(String.class);
    OptionSet optionSet = parser.parse(args);
    String propsFilePath = optionSet.valueOf(propsFileOpt);
    if (propsFilePath == null) {
        printHelpAndExit(parser);
    }
    Properties properties = Utils.loadProps(propsFilePath);
    ToolUtils.addClusterMapProperties(properties);
    VerifiableProperties verifiableProperties = new VerifiableProperties(properties);
    // User needs to specify this option to actually delete blobs
    boolean testMode = !optionSet.has(PURGE_OPTION);
    List<String> partitions = (List<String>) optionSet.nonOptionArguments();
    if (!testMode && partitions.isEmpty()) {
        printHelpAndExit(parser);
    }
    Set<PartitionId> partitionIdSet = partitions.stream().map(path -> new PartitionPathId(path)).collect(Collectors.toSet());
    AzureCloudDestination azureDest = null;
    try {
        azureDest = (AzureCloudDestination) new AzureCloudDestinationFactory(verifiableProperties, new MetricRegistry(), null).getCloudDestination();
        CloudConfig cloudConfig = new CloudConfig(verifiableProperties);
        CloudStorageCompactor compactor = new CloudStorageCompactor(azureDest, cloudConfig, partitionIdSet, new VcrMetrics(new MetricRegistry()));
        // Attempt clean shutdown if someone Ctrl-C's us.
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            logger.info("Received shutdown signal. Shutting down compactor.");
            compactor.shutdown();
        }));
        if (testMode) {
            DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ISO_OFFSET_DATE_TIME.withZone(ZoneId.systemDefault());
            List<Pair<String, Long>> progressList = azureDest.getAzureStorageCompactor().getAllCompactionProgress();
            progressList.forEach(pair -> {
                String progress = dateTimeFormatter.format(Instant.ofEpochMilli(pair.getSecond()));
                // TODO: write to user specified output file
                System.out.println(pair.getFirst() + "\t" + progress);
            });
        } else {
            compactor.compactPartitions();
        }
        System.exit(0);
    } catch (Exception ex) {
        logger.error("Command {} failed", commandName, ex);
        System.exit(1);
    } finally {
        if (azureDest != null) {
            azureDest.close();
        }
    }
}
Also used : LoggerFactory(org.slf4j.LoggerFactory) VcrMetrics(com.github.ambry.cloud.VcrMetrics) CloudConfig(com.github.ambry.config.CloudConfig) JSONObject(org.json.JSONObject) Map(java.util.Map) OptionParser(joptsimple.OptionParser) OptionSet(joptsimple.OptionSet) ReplicaState(com.github.ambry.clustermap.ReplicaState) MetricRegistry(com.codahale.metrics.MetricRegistry) Properties(java.util.Properties) Logger(org.slf4j.Logger) Pair(com.github.ambry.utils.Pair) PartitionState(com.github.ambry.clustermap.PartitionState) VerifiableProperties(com.github.ambry.config.VerifiableProperties) Set(java.util.Set) Utils(com.github.ambry.utils.Utils) IOException(java.io.IOException) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) CloudStorageCompactor(com.github.ambry.cloud.CloudStorageCompactor) ZoneId(java.time.ZoneId) ArgumentAcceptingOptionSpec(joptsimple.ArgumentAcceptingOptionSpec) ToolUtils(com.github.ambry.tools.util.ToolUtils) List(java.util.List) ReplicaId(com.github.ambry.clustermap.ReplicaId) DateTimeFormatter(java.time.format.DateTimeFormatter) PartitionId(com.github.ambry.clustermap.PartitionId)
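
In test mode the tool only reports per-partition compaction progress. Here is a self-contained sketch of that formatting step, using made-up sample data in place of getAllCompactionProgress():

DateTimeFormatter fmt = DateTimeFormatter.ISO_OFFSET_DATE_TIME.withZone(ZoneId.systemDefault());
List<Pair<String, Long>> progressList = Arrays.asList(new Pair<>("partition-1", 1600000000000L), new Pair<>("partition-2", 1600000060000L));
// One line per partition: partition name, a tab, then the last compaction time as an ISO-8601 timestamp.
progressList.forEach(pair -> System.out.println(pair.getFirst() + "\t" + fmt.format(Instant.ofEpochMilli(pair.getSecond()))));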

Example 60 with Pair

use of com.github.ambry.utils.Pair in project ambry by linkedin.

the class ServerAdminTool method getBlobProperties.

/**
 * Gets {@link BlobProperties} for {@code blobId}.
 * @param dataNodeId the {@link DataNodeId} to contact.
 * @param blobId the {@link BlobId} to operate on.
 * @param getOption the {@link GetOption} to send with the {@link GetRequest}.
 * @param clusterMap the {@link ClusterMap} to use.
 * @return the {@link ServerErrorCode} and {@link BlobProperties} of {@code blobId}.
 * @throws Exception
 */
public Pair<ServerErrorCode, BlobProperties> getBlobProperties(DataNodeId dataNodeId, BlobId blobId, GetOption getOption, ClusterMap clusterMap) throws Exception {
    Pair<ServerErrorCode, InputStream> response = getGetResponse(dataNodeId, blobId, MessageFormatFlags.BlobProperties, getOption, clusterMap);
    InputStream stream = response.getSecond();
    BlobProperties blobProperties = stream != null ? MessageFormatRecord.deserializeBlobProperties(stream) : null;
    return new Pair<>(response.getFirst(), blobProperties);
}
Also used : DataInputStream(java.io.DataInputStream) NettyByteBufDataInputStream(com.github.ambry.utils.NettyByteBufDataInputStream) InputStream(java.io.InputStream) BlobProperties(com.github.ambry.messageformat.BlobProperties) ServerErrorCode(com.github.ambry.server.ServerErrorCode) Pair(com.github.ambry.utils.Pair)
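
A hypothetical caller checks the returned error code before reading the properties; GetOption.Include_All and the No_Error comparison below are typical-usage assumptions, not taken from the tool itself:

Pair<ServerErrorCode, BlobProperties> result = serverAdminTool.getBlobProperties(dataNodeId, blobId, GetOption.Include_All, clusterMap);
if (result.getFirst() == ServerErrorCode.No_Error && result.getSecond() != null) {
    System.out.println("Blob size in bytes: " + result.getSecond().getBlobSize());
}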

Aggregations

Pair (com.github.ambry.utils.Pair) 64
ArrayList (java.util.ArrayList) 29
HashMap (java.util.HashMap) 28
Map (java.util.Map) 28
Test (org.junit.Test) 20
IOException (java.io.IOException) 15
MetricRegistry (com.codahale.metrics.MetricRegistry) 14
List (java.util.List) 14
ByteBuffer (java.nio.ByteBuffer) 13
Collections (java.util.Collections) 13
File (java.io.File) 12
Assert (org.junit.Assert) 12
VerifiableProperties (com.github.ambry.config.VerifiableProperties) 11
Utils (com.github.ambry.utils.Utils) 10
HashSet (java.util.HashSet) 10
Properties (java.util.Properties) 10
Container (com.github.ambry.account.Container) 9
TestUtils (com.github.ambry.utils.TestUtils) 9
Arrays (java.util.Arrays) 9
Set (java.util.Set) 9