Example 36 with InMemAccountService

Use of com.github.ambry.account.InMemAccountService in project ambry by LinkedIn.

From class PutOperationTest, method testSlippedPutsWithServerErrors.

/**
 * Test a PUT operation where a chunk put fails on some replicas but succeeds on others, so the
 * retried put produces slipped puts whose blob ids must be tracked for later cleanup.
 * @throws Exception
 */
@Test
public void testSlippedPutsWithServerErrors() throws Exception {
    Properties properties = new Properties();
    properties.setProperty("router.hostname", "localhost");
    properties.setProperty("router.datacenter.name", "DC1");
    properties.setProperty("router.max.put.chunk.size.bytes", Integer.toString(chunkSize));
    properties.setProperty("router.put.request.parallelism", Integer.toString(requestParallelism));
    // Expect at least two successes so that you can create slipped puts.
    properties.setProperty("router.put.success.target", Integer.toString(2));
    VerifiableProperties vProps = new VerifiableProperties(properties);
    RouterConfig routerConfig = new RouterConfig(vProps);
    int numChunks = 1;
    BlobProperties blobProperties = new BlobProperties(-1, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false, null, null, null);
    byte[] userMetadata = new byte[10];
    byte[] content = new byte[chunkSize * numChunks];
    random.nextBytes(content);
    ReadableStreamChannel channel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(content));
    MockNetworkClient mockNetworkClient = new MockNetworkClient();
    PutOperation op = PutOperation.forUpload(routerConfig, routerMetrics, mockClusterMap, new LoggingNotificationSystem(), new InMemAccountService(true, false), userMetadata, channel, PutBlobOptions.DEFAULT, new FutureResult<>(), null, new RouterCallback(mockNetworkClient, new ArrayList<>()), null, null, null, null, time, blobProperties, MockClusterMap.DEFAULT_PARTITION_CLASS, quotaChargeCallback);
    op.startOperation();
    List<RequestInfo> requestInfos = new ArrayList<>();
    requestRegistrationCallback.setRequestsToSend(requestInfos);
    // fill chunks would end up filling the maximum number of PutChunks.
    op.fillChunks();
    Assert.assertTrue("ReadyForPollCallback should have been invoked as chunks were fully filled", mockNetworkClient.getAndClearWokenUpStatus());
    // poll to populate request
    op.poll(requestRegistrationCallback);
    // Set up server errors such that the put fails on 2 out of 3 nodes, creating a slipped put on the succeeding node.
    // Second attempts on all nodes succeed.
    List<ServerErrorCode> serverErrorList = new ArrayList<>();
    // Success on the first host, slipped put
    serverErrorList.add(ServerErrorCode.No_Error);
    // Fail on the second host
    serverErrorList.add(ServerErrorCode.Unknown_Error);
    // Fail on the third host
    serverErrorList.add(ServerErrorCode.Unknown_Error);
    // Success on the second attempt on all hosts
    serverErrorList.add(ServerErrorCode.No_Error);
    serverErrorList.add(ServerErrorCode.No_Error);
    serverErrorList.add(ServerErrorCode.No_Error);
    mockServer.setServerErrors(serverErrorList);
    // Send all requests.
    for (int i = 0; i < requestInfos.size(); i++) {
        ResponseInfo responseInfo = getResponseInfo(requestInfos.get(i));
        PutResponse putResponse = responseInfo.getError() == null ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
        op.handleResponse(responseInfo, putResponse);
        requestInfos.get(i).getRequest().release();
        responseInfo.release();
    }
    Assert.assertEquals("Number of slipped puts should be 1", 1, op.getSlippedPutBlobIds().size());
    // fill chunks again.
    op.fillChunks();
    requestInfos.clear();
    // poll to populate request
    op.poll(requestRegistrationCallback);
    // Send all requests again.
    for (int i = 0; i < requestInfos.size(); i++) {
        ResponseInfo responseInfo = getResponseInfo(requestInfos.get(i));
        PutResponse putResponse = responseInfo.getError() == null ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
        op.handleResponse(responseInfo, putResponse);
        requestInfos.get(i).getRequest().release();
        responseInfo.release();
    }
    Assert.assertEquals("Number of slipped puts should be 1", 1, op.getSlippedPutBlobIds().size());
    PutOperation.PutChunk putChunk = op.getPutChunks().get(0);
    // Make sure the chunk blob id which has been put successfully is not part of the slipped puts.
    Assert.assertFalse(op.getSlippedPutBlobIds().contains(putChunk.chunkBlobId));
}
Also used : ResponseInfo(com.github.ambry.network.ResponseInfo) NettyByteBufDataInputStream(com.github.ambry.utils.NettyByteBufDataInputStream) ByteBufferReadableStreamChannel(com.github.ambry.commons.ByteBufferReadableStreamChannel) VerifiableProperties(com.github.ambry.config.VerifiableProperties) ArrayList(java.util.ArrayList) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) RequestInfo(com.github.ambry.network.RequestInfo) PutResponse(com.github.ambry.protocol.PutResponse) RouterConfig(com.github.ambry.config.RouterConfig) ServerErrorCode(com.github.ambry.server.ServerErrorCode) InMemAccountService(com.github.ambry.account.InMemAccountService) ByteBufferReadableStreamChannel(com.github.ambry.commons.ByteBufferReadableStreamChannel) LoggingNotificationSystem(com.github.ambry.commons.LoggingNotificationSystem) BlobProperties(com.github.ambry.messageformat.BlobProperties) Test(org.junit.Test)
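
The send/handle/release loop above appears twice in this test (once before and once after the slipped put is created). As a sketch only (not part of the Ambry source; getResponseInfo is the same test-local helper used above), the loop could be factored into one test-local method:

    // Sketch of a test-local helper; it relies only on calls already used in the test above.
    private void sendAndHandleAll(PutOperation op, List<RequestInfo> requestInfos) throws Exception {
        for (RequestInfo requestInfo : requestInfos) {
            ResponseInfo responseInfo = getResponseInfo(requestInfo);
            // Deserialize the PutResponse only when the network layer reported no error.
            PutResponse putResponse = responseInfo.getError() == null ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
            op.handleResponse(responseInfo, putResponse);
            requestInfo.getRequest().release();
            responseInfo.release();
        }
    }

Each loop in the test would then reduce to a single call such as sendAndHandleAll(op, requestInfos).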

Example 37 with InMemAccountService

Use of com.github.ambry.account.InMemAccountService in project ambry by LinkedIn.

From class PutOperationTest, method testHandleResponseWithServerErrors.

/**
 * Test PUT operation that handles ServerErrorCode = Temporarily_Disabled and Replica_Unavailable
 * @throws Exception
 */
@Test
public void testHandleResponseWithServerErrors() throws Exception {
    int numChunks = routerConfig.routerMaxInMemPutChunks + 1;
    BlobProperties blobProperties = new BlobProperties(-1, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false, null, null, null);
    byte[] userMetadata = new byte[10];
    byte[] content = new byte[chunkSize * numChunks];
    random.nextBytes(content);
    ReadableStreamChannel channel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(content));
    PutOperation op = PutOperation.forUpload(routerConfig, routerMetrics, mockClusterMap, new LoggingNotificationSystem(), new InMemAccountService(true, false), userMetadata, channel, PutBlobOptions.DEFAULT, new FutureResult<>(), null, new RouterCallback(new MockNetworkClient(), new ArrayList<>()), null, null, null, null, time, blobProperties, MockClusterMap.DEFAULT_PARTITION_CLASS, quotaChargeCallback);
    op.startOperation();
    List<RequestInfo> requestInfos = new ArrayList<>();
    requestRegistrationCallback.setRequestsToSend(requestInfos);
    // fill chunks would end up filling the maximum number of PutChunks.
    op.fillChunks();
    // poll to populate request
    op.poll(requestRegistrationCallback);
    // make 1st request of first chunk encounter Temporarily_Disabled
    mockServer.setServerErrorForAllRequests(ServerErrorCode.Temporarily_Disabled);
    ResponseInfo responseInfo = getResponseInfo(requestInfos.get(0));
    PutResponse putResponse = responseInfo.getError() == null ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
    op.handleResponse(responseInfo, putResponse);
    responseInfo.release();
    PutOperation.PutChunk putChunk = op.getPutChunks().get(0);
    SimpleOperationTracker operationTracker = (SimpleOperationTracker) putChunk.getOperationTrackerInUse();
    Assert.assertEquals("Disabled count should be 1", 1, operationTracker.getDisabledCount());
    Assert.assertEquals("Disabled count should be 0", 0, operationTracker.getFailedCount());
    // make 2nd request of first chunk encounter Replica_Unavailable
    mockServer.setServerErrorForAllRequests(ServerErrorCode.Replica_Unavailable);
    responseInfo = getResponseInfo(requestInfos.get(1));
    putResponse = responseInfo.getError() == null ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
    op.handleResponse(responseInfo, putResponse);
    responseInfo.release();
    putChunk = op.getPutChunks().get(0);
    Assert.assertEquals("Failure count should be 1", 1, ((SimpleOperationTracker) putChunk.getOperationTrackerInUse()).getFailedCount());
    mockServer.resetServerErrors();
    // Release all the other requests
    requestInfos.forEach(info -> info.getRequest().release());
}
Also used : ResponseInfo(com.github.ambry.network.ResponseInfo) NettyByteBufDataInputStream(com.github.ambry.utils.NettyByteBufDataInputStream) ByteBufferReadableStreamChannel(com.github.ambry.commons.ByteBufferReadableStreamChannel) ArrayList(java.util.ArrayList) RequestInfo(com.github.ambry.network.RequestInfo) PutResponse(com.github.ambry.protocol.PutResponse) InMemAccountService(com.github.ambry.account.InMemAccountService) ByteBufferReadableStreamChannel(com.github.ambry.commons.ByteBufferReadableStreamChannel) LoggingNotificationSystem(com.github.ambry.commons.LoggingNotificationSystem) BlobProperties(com.github.ambry.messageformat.BlobProperties) Test(org.junit.Test)

Example 38 with InMemAccountService

Use of com.github.ambry.account.InMemAccountService in project ambry by LinkedIn.

From class StatsManagerTest, method testReplicaFromOfflineToDropped.

/**
 * Test Offline-To-Dropped transition (both failure and success cases)
 * @throws Exception
 */
@Test
public void testReplicaFromOfflineToDropped() throws Exception {
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    ReplicationConfig replicationConfig = new ReplicationConfig(verifiableProperties);
    StoreConfig storeConfig = new StoreConfig(verifiableProperties);
    MockClusterMap clusterMap = new MockClusterMap();
    DataNodeId currentNode = clusterMap.getDataNodeIds().get(0);
    List<ReplicaId> localReplicas = clusterMap.getReplicaIds(currentNode);
    StorageManager storageManager = new StorageManager(storeConfig, new DiskManagerConfig(verifiableProperties), Utils.newScheduler(1, true), new MetricRegistry(), null, clusterMap, currentNode, null, Collections.singletonList(clusterParticipant), new MockTime(), null, new InMemAccountService(false, false));
    storageManager.start();
    MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
    storeKeyConverterFactory.setConversionMap(new HashMap<>());
    MockReplicationManager mockReplicationManager = new MockReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, clusterMap, currentNode, storeKeyConverterFactory, clusterParticipant);
    MockStatsManager mockStatsManager = new MockStatsManager(storageManager, localReplicas, new MetricRegistry(), statsManagerConfig, clusterParticipant);
    // 1. attempt to remove replica while store is still running (remove store failure case)
    ReplicaId replicaToDrop = localReplicas.get(0);
    try {
        clusterParticipant.onPartitionBecomeDroppedFromOffline(replicaToDrop.getPartitionId().toPathString());
        fail("should fail because store is still running");
    } catch (StateTransitionException e) {
        assertEquals("Error code doesn't match", ReplicaOperationFailure, e.getErrorCode());
    }
    // 2. shut down the store but introduce a file deletion failure (put an invalid dir in the store dir)
    storageManager.shutdownBlobStore(replicaToDrop.getPartitionId());
    File invalidDir = new File(replicaToDrop.getReplicaPath(), "invalidDir");
    invalidDir.deleteOnExit();
    assertTrue("Couldn't create dir within store dir", invalidDir.mkdir());
    assertTrue("Could not make unreadable", invalidDir.setReadable(false));
    try {
        clusterParticipant.onPartitionBecomeDroppedFromOffline(replicaToDrop.getPartitionId().toPathString());
        fail("should fail because store deletion fails");
    } catch (StateTransitionException e) {
        assertEquals("Error code doesn't match", ReplicaOperationFailure, e.getErrorCode());
    }
    // reset permission to allow deletion to succeed.
    assertTrue("Could not make readable", invalidDir.setReadable(true));
    assertTrue("Could not delete invalid dir", invalidDir.delete());
    // 3. success case (remove another replica because previous replica has been removed from in-mem data structures)
    ReplicaId replica = localReplicas.get(1);
    storageManager.shutdownBlobStore(replica.getPartitionId());
    MockHelixParticipant mockHelixParticipant = Mockito.spy(clusterParticipant);
    doNothing().when(mockHelixParticipant).setPartitionDisabledState(anyString(), anyBoolean());
    mockHelixParticipant.onPartitionBecomeDroppedFromOffline(replica.getPartitionId().toPathString());
    // verify that the replica is no longer present in StorageManager
    assertNull("Store of removed replica should not exist", storageManager.getStore(replica.getPartitionId(), true));
    // purposely remove the same replica in ReplicationManager again to verify it no longer exists
    assertFalse("Should return false because replica no longer exists", mockReplicationManager.removeReplica(replica));
    // purposely remove the same replica in StatsManager again to verify it no longer exists
    assertFalse("Should return false because replica no longer exists", mockStatsManager.removeReplica(replica));
    verify(mockHelixParticipant).setPartitionDisabledState(replica.getPartitionId().toPathString(), false);
    storageManager.shutdown();
    mockStatsManager.shutdown();
}
Also used : DiskManagerConfig(com.github.ambry.config.DiskManagerConfig) MockStoreKeyConverterFactory(com.github.ambry.store.MockStoreKeyConverterFactory) ReplicationConfig(com.github.ambry.config.ReplicationConfig) MetricRegistry(com.codahale.metrics.MetricRegistry) StorageManager(com.github.ambry.store.StorageManager) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) ReplicaId(com.github.ambry.clustermap.ReplicaId) MockReplicationManager(com.github.ambry.replication.MockReplicationManager) InMemAccountService(com.github.ambry.account.InMemAccountService) MockHelixParticipant(com.github.ambry.clustermap.MockHelixParticipant) StoreConfig(com.github.ambry.config.StoreConfig) DataNodeId(com.github.ambry.clustermap.DataNodeId) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) File(java.io.File) MockTime(com.github.ambry.utils.MockTime) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) StateTransitionException(com.github.ambry.clustermap.StateTransitionException) Test(org.junit.Test)
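
The try/fail/catch block asserting a failed Offline-To-Dropped transition appears twice above. A test-local helper along these lines (a sketch, not part of the Ambry source; the method name is illustrative) would capture the pattern once:

    // Sketch: attempt the Offline-To-Dropped transition and assert that it fails with ReplicaOperationFailure.
    private void assertDroppedFromOfflineFails(MockHelixParticipant participant, ReplicaId replica, String reason) {
        try {
            participant.onPartitionBecomeDroppedFromOffline(replica.getPartitionId().toPathString());
            fail(reason);
        } catch (StateTransitionException e) {
            assertEquals("Error code doesn't match", ReplicaOperationFailure, e.getErrorCode());
        }
    }

The two failure checks would then read, for example, assertDroppedFromOfflineFails(clusterParticipant, replicaToDrop, "should fail because store is still running").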

Example 39 with InMemAccountService

Use of com.github.ambry.account.InMemAccountService in project ambry by LinkedIn.

From class BlobStoreTest, method storeIoErrorCountTest.

/**
 * Tests that {@link BlobStore#onError()} and {@link BlobStore#onSuccess()} can correctly capture
 * disk-related I/O errors and properly shut down the store.
 * @throws StoreException
 * @throws IOException
 */
@Test
public void storeIoErrorCountTest() throws StoreException, IOException {
    // setup testing environment
    store.shutdown();
    properties.put("store.io.error.count.to.trigger.shutdown", "2");
    MockId id1 = getUniqueId();
    MockId id2 = getUniqueId();
    MockId id3 = getUniqueId();
    MessageInfo corruptedInfo = new MessageInfo(getUniqueId(), PUT_RECORD_SIZE, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), Utils.Infinite_Time);
    MessageInfo info1 = new MessageInfo(id1, PUT_RECORD_SIZE, 3 * 24 * 60 * 60 * 1000, id1.getAccountId(), id1.getContainerId(), Utils.Infinite_Time);
    MessageInfo info2 = new MessageInfo(id2, PUT_RECORD_SIZE, id2.getAccountId(), id2.getContainerId(), Utils.Infinite_Time);
    MessageInfo info3 = new MessageInfo(id3, PUT_RECORD_SIZE, id3.getAccountId(), id3.getContainerId(), Utils.Infinite_Time);
    MessageWriteSet corruptedWriteSet = new MockMessageWriteSet(Collections.singletonList(corruptedInfo), Collections.singletonList(ByteBuffer.allocate(PUT_RECORD_SIZE)), new StoreException(StoreException.IO_ERROR_STR, StoreErrorCodes.IOError));
    MessageWriteSet validWriteSet1 = new MockMessageWriteSet(Collections.singletonList(info1), Collections.singletonList(ByteBuffer.allocate(PUT_RECORD_SIZE)), null);
    MessageWriteSet validWriteSet2 = new MockMessageWriteSet(Collections.singletonList(info2), Collections.singletonList(ByteBuffer.allocate(PUT_RECORD_SIZE)), null);
    MessageWriteSet validWriteSet3 = new MockMessageWriteSet(Collections.singletonList(info3), Collections.singletonList(ByteBuffer.allocate(PUT_RECORD_SIZE)), null);
    ReplicaStatusDelegate mockDelegate = mock(ReplicaStatusDelegate.class);
    // Test1: simulate StoreErrorCodes.IOError triggered by corrupted write set.
    // verify that store can capture disk I/O errors in Put/Delete/TtlUpdate methods and take proper actions.
    BlobStore testStore1 = createBlobStore(getMockReplicaId(tempDirStr), new StoreConfig(new VerifiableProperties(properties)), Collections.singletonList(mockDelegate));
    testStore1.start();
    assertTrue("Store should start successfully", testStore1.isStarted());
    // verify store can keep track of real I/O errors for Put operation and shutdown properly.
    try {
        testStore1.put(corruptedWriteSet);
        fail("should throw exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.IOError, e.getErrorCode());
    }
    assertTrue("Store should be up", testStore1.isStarted());
    // verify error count would be reset after successful Put operation
    testStore1.put(validWriteSet1);
    assertEquals("Error count should be reset", 0, testStore1.getErrorCount().get());
    // trigger a normal shutdown to persist data (otherwise the following delete/ttl update operations will encounter ID_Not_Found errors)
    testStore1.shutdown();
    // restart for subsequent tests
    testStore1.start();
    // verify that two consecutive failed Puts shut the store down (storeIoErrorCountToTriggerShutdown = 2)
    for (int i = 0; i < 2; ++i) {
        try {
            testStore1.put(corruptedWriteSet);
        } catch (StoreException e) {
            assertEquals("Mismatch in error code", StoreErrorCodes.IOError, e.getErrorCode());
        }
    }
    assertFalse("Store should shutdown because error count exceeded threshold", testStore1.isStarted());
    testStore1.start();
    // verify store can keep track of real I/O errors for Delete and TtlUpdate operations and shutdown properly.
    assertEquals("Error count should be reset", 0, testStore1.getErrorCount().get());
    testStore1.shutdown();
    // Test2: Simulate StoreErrorCodes.IOError occurred in getStoreKey step even though WriteSet is valid
    // verify that store can capture disk I/O errors in GET method and take proper actions. Put/Delete/TtlUpdates are also tested.
    properties.put("store.index.max.number.of.inmem.elements", "1");
    properties.put("store.io.error.count.to.trigger.shutdown", "3");
    MetricRegistry registry = new MetricRegistry();
    StoreMetrics metrics = new StoreMetrics(registry);
    StoreKeyFactory mockStoreKeyFactory = Mockito.spy(STORE_KEY_FACTORY);
    BlobStore testStore2 = new BlobStore(getMockReplicaId(tempDirStr), new StoreConfig(new VerifiableProperties(properties)), scheduler, storeStatsScheduler, diskIOScheduler, diskSpaceAllocator, metrics, metrics, mockStoreKeyFactory, recovery, hardDelete, Collections.singletonList(mockDelegate), time, new InMemAccountService(false, false), null);
    testStore2.start();
    assertTrue("Store should start up", testStore2.isStarted());
    testStore2.put(validWriteSet2);
    testStore2.put(validWriteSet3);
    // shut down and restart to make the segments memory mapped (this is used to simulate the IOException generated by mockStoreKeyFactory)
    testStore2.shutdown();
    testStore2.start();
    doThrow(new IOException(StoreException.IO_ERROR_STR)).when(mockStoreKeyFactory).getStoreKey(any(DataInputStream.class));
    // verify that store exceptions (caused by IOException and InternalError) could be captured by Get operation
    try {
        testStore2.get(Collections.singletonList(id2), EnumSet.noneOf(StoreGetOptions.class));
        fail("should throw exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.IOError, e.getErrorCode());
    }
    doThrow(new InternalError(StoreException.INTERNAL_ERROR_STR)).when(mockStoreKeyFactory).getStoreKey(any(DataInputStream.class));
    try {
        testStore2.get(Collections.singletonList(id2), EnumSet.noneOf(StoreGetOptions.class));
        fail("should throw exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.IOError, e.getErrorCode());
    }
    assertEquals("Mismatch in error count", 2, testStore2.getErrorCount().get());
    // test that when InternalError's error message is null, the error code should be Unknown_Error and store error count
    // stays unchanged.
    doThrow(new InternalError()).when(mockStoreKeyFactory).getStoreKey(any(DataInputStream.class));
    try {
        testStore2.get(Collections.singletonList(id2), EnumSet.noneOf(StoreGetOptions.class));
        fail("should throw exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.Unknown_Error, e.getErrorCode());
    }
    assertEquals("Mismatch in error count", 2, testStore2.getErrorCount().get());
    // verify that StoreException.Unknown_Error could be captured by Get and error count stays unchanged.
    doThrow(new IOException("Unknown exception")).when(mockStoreKeyFactory).getStoreKey(any(DataInputStream.class));
    try {
        testStore2.get(Collections.singletonList(id2), EnumSet.noneOf(StoreGetOptions.class));
        fail("should throw exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.Unknown_Error, e.getErrorCode());
    }
    doThrow(new InternalError("Unknown exception")).when(mockStoreKeyFactory).getStoreKey(any(DataInputStream.class));
    try {
        testStore2.get(Collections.singletonList(id2), EnumSet.noneOf(StoreGetOptions.class));
        fail("should throw exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.Unknown_Error, e.getErrorCode());
    }
    assertEquals("Mismatch in error count", 2, testStore2.getErrorCount().get());
    // verify error count would be reset after successful Get operation
    Mockito.reset(mockStoreKeyFactory);
    StoreInfo storeInfo = testStore2.get(Collections.singletonList(id2), EnumSet.noneOf(StoreGetOptions.class));
    assertNotNull(storeInfo);
    assertEquals("Error count should be reset", 0, testStore2.getErrorCount().get());
    doThrow(new IOException(StoreException.IO_ERROR_STR)).when(mockStoreKeyFactory).getStoreKey(any(DataInputStream.class));
    // call put method to trigger StoreException
    try {
        testStore2.put(validWriteSet1);
        fail("should throw exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.IOError, e.getErrorCode());
    }
    // call TtlUpdate method to trigger StoreException
    MessageInfo ttlUpdateInfo = new MessageInfo(id2, TTL_UPDATE_RECORD_SIZE, false, true, Utils.Infinite_Time, id2.getAccountId(), id2.getContainerId(), time.milliseconds());
    try {
        testStore2.updateTtl(Collections.singletonList(ttlUpdateInfo));
        fail("should throw exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.IOError, e.getErrorCode());
    }
    // call delete method to trigger StoreException
    MessageInfo deleteInfo = new MessageInfo(id2, DELETE_RECORD_SIZE, id2.getAccountId(), id2.getContainerId(), time.milliseconds());
    try {
        testStore2.delete(Collections.singletonList(deleteInfo));
        fail("should throw exception");
    } catch (StoreException e) {
        assertEquals("Mismatch in error code", StoreErrorCodes.IOError, e.getErrorCode());
    }
    // verify that the error count keeps track of StoreExceptions and that the store shuts down properly
    assertEquals("Mismatch in triggered shutdown counter", 1, metrics.storeIoErrorTriggeredShutdownCount.getCount());
    assertFalse("Store should shutdown because error count exceeded threshold", testStore2.isStarted());
    reloadStore();
}
Also used : VerifiableProperties(com.github.ambry.config.VerifiableProperties) MetricRegistry(com.codahale.metrics.MetricRegistry) IOException(java.io.IOException) DataInputStream(java.io.DataInputStream) ReplicaStatusDelegate(com.github.ambry.clustermap.ReplicaStatusDelegate) InMemAccountService(com.github.ambry.account.InMemAccountService) StoreConfig(com.github.ambry.config.StoreConfig) Test(org.junit.Test)
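
The inject-throwable-then-assert-on-Get sequence above repeats for each IOException and InternalError case. As a sketch (not part of the Ambry source; the helper name is illustrative and only calls already present in the test are used), the pattern could be expressed once:

    // Sketch: stub the spied StoreKeyFactory to throw, then assert that a Get surfaces the expected error code.
    private void assertGetFailsWith(BlobStore store, StoreKeyFactory spiedFactory, MockId id, Throwable toThrow,
        StoreErrorCodes expectedCode) throws Exception {
        doThrow(toThrow).when(spiedFactory).getStoreKey(any(DataInputStream.class));
        try {
            store.get(Collections.singletonList(id), EnumSet.noneOf(StoreGetOptions.class));
            fail("should throw exception");
        } catch (StoreException e) {
            assertEquals("Mismatch in error code", expectedCode, e.getErrorCode());
        }
    }

Each case above would then reduce to a call such as assertGetFailsWith(testStore2, mockStoreKeyFactory, id2, new InternalError(), StoreErrorCodes.Unknown_Error).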

Example 40 with InMemAccountService

Use of com.github.ambry.account.InMemAccountService in project ambry by LinkedIn.

From class BlobStoreTest, method deleteStoreFilesTest.

/**
 * Test both success and failure cases when deleting store files.
 * @throws Exception
 */
@Test
public void deleteStoreFilesTest() throws Exception {
    store.shutdown();
    // create test store directory
    File storeDir = StoreTestUtils.createTempDirectory("store-" + storeId);
    File reserveDir = StoreTestUtils.createTempDirectory("reserve-pool");
    reserveDir.deleteOnExit();
    DiskSpaceAllocator diskAllocator = new DiskSpaceAllocator(true, reserveDir, 0, new StorageManagerMetrics(new MetricRegistry()));
    StoreConfig config = new StoreConfig(new VerifiableProperties(properties));
    MetricRegistry registry = new MetricRegistry();
    StoreMetrics metrics = new StoreMetrics(registry);
    BlobStore testStore = new BlobStore(getMockReplicaId(storeDir.getAbsolutePath()), config, scheduler, storeStatsScheduler, diskIOScheduler, diskAllocator, metrics, metrics, STORE_KEY_FACTORY, recovery, hardDelete, null, time, new InMemAccountService(false, false), null);
    testStore.start();
    DiskSpaceRequirements diskSpaceRequirements = testStore.getDiskSpaceRequirements();
    diskAllocator.initializePool(diskSpaceRequirements == null ? Collections.emptyList() : Collections.singletonList(testStore.getDiskSpaceRequirements()));
    // ensure store directory and file exist
    assertTrue("Store directory doesn't exist", storeDir.exists());
    File storeSegmentDir = new File(reserveDir, DiskSpaceAllocator.STORE_DIR_PREFIX + storeId);
    if (isLogSegmented) {
        assertTrue("Store segment directory doesn't exist", storeSegmentDir.exists());
        assertTrue("In-mem store file map should contain entry associated with test store", diskAllocator.getStoreReserveFileMap().containsKey(storeId));
    }
    // test that deletion on a started store fails
    try {
        testStore.deleteStoreFiles();
    } catch (IllegalStateException e) {
    // expected
    }
    // create an unreadable dir in the store dir to induce deletion failure
    File invalidDir = new File(storeDir, "invalidDir");
    assertTrue("Couldn't create dir within store dir", invalidDir.mkdir());
    assertTrue("Could not make unreadable", invalidDir.setReadable(false));
    testStore.shutdown();
    try {
        testStore.deleteStoreFiles();
        fail("should fail because one invalid dir is unreadable");
    } catch (Exception e) {
    // expected
    }
    assertTrue("store directory should exist because deletion failed", storeDir.exists());
    // reset permission to allow deletion to succeed.
    assertTrue("Could not make readable", invalidDir.setReadable(true));
    // put a swap segment into store dir
    File tempFile = File.createTempFile("sample-swap", LogSegmentName.SUFFIX + BlobStoreCompactor.TEMP_LOG_SEGMENT_NAME_SUFFIX, storeDir);
    // test success case (swap segment is returned and store dir is correctly deleted)
    assertEquals("Swap reserve dir should be empty initially", 0, diskAllocator.getSwapReserveFileMap().getFileSizeSet().size());
    testStore.deleteStoreFiles();
    assertFalse("swap segment still exists", tempFile.exists());
    assertEquals("Swap reserve dir should have one swap segment", 1, diskAllocator.getSwapReserveFileMap().getFileSizeSet().size());
    assertFalse("store directory shouldn't exist", storeDir.exists());
    assertFalse("store segment directory shouldn't exist", storeSegmentDir.exists());
    assertFalse("test store entry should have been removed from in-mem store file map ", diskAllocator.getStoreReserveFileMap().containsKey(storeId));
    reloadStore();
}
Also used : InMemAccountService(com.github.ambry.account.InMemAccountService) VerifiableProperties(com.github.ambry.config.VerifiableProperties) MetricRegistry(com.codahale.metrics.MetricRegistry) StoreConfig(com.github.ambry.config.StoreConfig) File(java.io.File) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) Test(org.junit.Test)

Aggregations

InMemAccountService (com.github.ambry.account.InMemAccountService)40 Test (org.junit.Test)30 VerifiableProperties (com.github.ambry.config.VerifiableProperties)24 ArrayList (java.util.ArrayList)16 Properties (java.util.Properties)16 MockClusterMap (com.github.ambry.clustermap.MockClusterMap)14 LoggingNotificationSystem (com.github.ambry.commons.LoggingNotificationSystem)14 BlobProperties (com.github.ambry.messageformat.BlobProperties)14 PartitionId (com.github.ambry.clustermap.PartitionId)11 IOException (java.io.IOException)10 MockPartitionId (com.github.ambry.clustermap.MockPartitionId)9 BlobId (com.github.ambry.commons.BlobId)9 MockTime (com.github.ambry.utils.MockTime)9 Map (java.util.Map)9 AccountService (com.github.ambry.account.AccountService)8 ByteBufferReadableStreamChannel (com.github.ambry.commons.ByteBufferReadableStreamChannel)8 RouterConfig (com.github.ambry.config.RouterConfig)8 StoreConfig (com.github.ambry.config.StoreConfig)8 MetricRegistry (com.codahale.metrics.MetricRegistry)7 MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId)7
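
Across these examples, InMemAccountService is constructed either as new InMemAccountService(true, false) (the router tests) or new InMemAccountService(false, false) (the store tests) and handed straight to the component under test. For completeness, here is a minimal standalone sketch of how the service itself is typically exercised in Ambry tests; the flag semantics and the createAndAddRandomAccount helper are described as they appear to be used, so treat them as assumptions rather than documented API:

import com.github.ambry.account.Account;
import com.github.ambry.account.Container;
import com.github.ambry.account.InMemAccountService;

public class InMemAccountServiceSketch {
    public static void main(String[] args) {
        // The second flag appears to control whether registered account-update consumers are notified.
        InMemAccountService accountService = new InMemAccountService(false, true);
        // createAndAddRandomAccount() is the helper widely used in Ambry tests to obtain a valid
        // account/container pair instead of random ids.
        Account account = accountService.createAndAddRandomAccount();
        System.out.println("account=" + account.getName() + " id=" + account.getId());
        for (Container container : account.getAllContainers()) {
            System.out.println("container=" + container.getName() + " id=" + container.getId());
        }
        // Look the account up again by id, as production code would through the AccountService interface.
        Account lookedUp = accountService.getAccountById(account.getId());
        System.out.println("lookup matches: " + account.equals(lookedUp));
    }
}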