Use of org.apache.samza.AzureException in the Apache Samza project: class TestAzureBlobOutputStream, method testRespectFlushTimeout.
/**
 * Ensures the flush timeout is respected even when a block upload to Azure is stuck or
 * takes longer than the flush timeout.
 *
 * A CountDownLatch that is never counted down mimics a stuck upload. If the timeout is
 * respected, an AzureException ("upload failed") is thrown once the 10 ms flush timeout
 * expires; if the timeout is not respected (i.e. the bug is present), no exception is
 * thrown and this test hangs.
 * @throws Exception if test setup or the stream operations fail unexpectedly
 * @throws InterruptedException if the test thread is interrupted while waiting
 */
@Test(expected = AzureException.class)
public void testRespectFlushTimeout() throws Exception, InterruptedException {
// Use a thread pool configured exactly like the one AzureBlobSystemProducer passes down to
// AzureBlobOutputStream (single thread, bounded queue of 1, CallerRunsPolicy).
threadPool = new ThreadPoolExecutor(1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingDeque<Runnable>(1), new ThreadPoolExecutor.CallerRunsPolicy());
// Very small flush timeout (10 ms) so the test completes quickly instead of waiting long.
azureBlobOutputStream = spy(new AzureBlobOutputStream(mockBlobAsyncClient, threadPool, mockMetrics, blobMetadataGeneratorFactory, blobMetadataGeneratorConfig, FAKE_STREAM, 10, THRESHOLD, mockByteArrayOutputStream, mockCompression));
doNothing().when(azureBlobOutputStream).clearAndMarkClosed();
doReturn(mockBlobMetadataGenerator).when(azureBlobOutputStream).getBlobMetadataGenerator();
when(mockCompression.compress(BYTES)).thenReturn(COMPRESSED_BYTES, COMPRESSED_BYTES, COMPRESSED_BYTES, COMPRESSED_BYTES);
// Latch (never counted down) to mimic uploads getting stuck,
// and hence unable to honor the flush timeout without the fix in stageBlock.
// Fix in stageBlock = subscribeOn(Schedulers.boundedElastic()).block(flushtimeout).
CountDownLatch latch = new CountDownLatch(1);
doAnswer(invocation -> {
String blockid = invocation.getArgumentAt(0, String.class);
// Return a Mono that, when subscribed, blocks indefinitely on the latch — a hung upload.
return Mono.just(1).map(integer -> {
try {
LOG.info("For block id = " + blockid + " start waiting on the countdown latch ");
// Indefinite wait -> mimics the upload being stuck.
latch.await();
// The log below is never reached because the latch is never counted down.
LOG.info("For block id = " + blockid + " done waiting on the countdown latch ");
} catch (Exception e) {
LOG.info("For block id = " + blockid + " an exception was caught " + e);
}
return "One";
});
}).when(azureBlobOutputStream).invokeBlobClientStageBlock(anyString(), anyObject(), anyInt());
doAnswer(invocation -> {
LOG.info("commit block ");
return null;
}).when(azureBlobOutputStream).commitBlob(anyObject(), anyMap());
// Writes in THRESHOLD/2 chunks; presumably each pair of writes fills the buffer to
// THRESHOLD and triggers an async stage-block upload (which then hangs on the latch)
// — confirm against AzureBlobOutputStream's buffering logic.
azureBlobOutputStream.write(BYTES, 0, THRESHOLD / 2);
azureBlobOutputStream.write(BYTES, THRESHOLD / 2, THRESHOLD / 2);
azureBlobOutputStream.write(BYTES, 0, THRESHOLD / 2);
azureBlobOutputStream.write(BYTES, THRESHOLD / 2, THRESHOLD / 2);
azureBlobOutputStream.write(BYTES, 0, THRESHOLD / 2);
azureBlobOutputStream.write(BYTES, THRESHOLD / 2, THRESHOLD / 2);
// close() waits for all pending uploads; since they are "stuck" (waiting for a latch
// countdown that never happens), the flush timeout triggers and an AzureException
// saying the upload failed is thrown — satisfying the expected exception above.
azureBlobOutputStream.close();
}
Use of org.apache.samza.AzureException in the Apache Samza project: class BlobUtils, method getLiveProcessorList.
/**
 * Reads the list of live processors published on the blob.
 * @return String list of live processors.
 * @throws AzureException If an Azure storage service error occurred.
 * @throws SamzaException If data retrieved from blob could not be parsed by SamzaObjectMapper.
 */
public List<String> getLiveProcessorList() {
LOG.info("Read the list of live processors from blob.");
byte[] data = new byte[(int) PROCESSOR_LIST_BLOCK_SIZE];
try {
// The processor list block is laid out after the job model and barrier state blocks.
blob.downloadRangeToByteArray(JOB_MODEL_BLOCK_SIZE + BARRIER_STATE_BLOCK_SIZE, PROCESSOR_LIST_BLOCK_SIZE, data, 0);
} catch (StorageException e) {
// Log the original cause directly; wrapping it in a new AzureException just for the
// log line loses nothing and avoids constructing a throwaway exception.
LOG.error("Failed to read the list of live processors from the blob.", e);
throw new AzureException(e);
}
try {
return SamzaObjectMapper.getObjectMapper().readValue(data, List.class);
} catch (IOException e) {
LOG.error("Failed to parse byte data: " + Arrays.toString(data) + " for live processor list retrieved from the blob", e);
throw new SamzaException(e);
}
}
Use of org.apache.samza.AzureException in the Apache Samza project: class TableUtils, method addProcessorEntity.
/**
 * Add a row which denotes an active processor to the processor table.
 * @param jmVersion Job model version that the processor is operating on.
 * @param pid Unique processor ID.
 * @param isLeader Denotes whether the processor is a leader or not.
 * @throws AzureException If an Azure storage service error occurred.
 */
public void addProcessorEntity(String jmVersion, String pid, boolean isLeader) {
ProcessorEntity entity = new ProcessorEntity(jmVersion, pid);
entity.setIsLeader(isLeader);
// Refresh the liveness timestamp before inserting so the row starts out "alive".
entity.updateLiveness();
TableOperation add = TableOperation.insert(entity);
try {
table.execute(add);
} catch (StorageException e) {
// Space before "and" so the message reads "... version: X and pid: Y" (was "Xand pid").
LOG.error("Azure storage exception while adding processor entity with job model version: " + jmVersion + " and pid: " + pid, e);
throw new AzureException(e);
}
}
Use of org.apache.samza.AzureException in the Apache Samza project: class TableUtils, method deleteProcessorEntity.
/**
 * Deletes a specified row in the processor table.
 *
 * Note: Table service uses optimistic locking by default. In order to disable it, set the ETag on the ProcessorEntity
 * to "*" before invoking this method.
 *
 * @param entity ProcessorEntity that has to be deleted
 * @throws AzureException If an Azure storage service error occurred.
 */
public void deleteProcessorEntity(ProcessorEntity entity) {
try {
TableOperation remove = TableOperation.delete(entity);
table.execute(remove);
} catch (StorageException e) {
// Space before "and" so the message reads "... version: X and pid: Y" (was "Xand pid").
LOG.error("Azure storage exception while deleting processor entity with job model version: " + entity.getJobModelVersion() + " and pid: " + entity.getProcessorId(), e);
throw new AzureException(e);
}
}
Use of org.apache.samza.AzureException in the Apache Samza project: class TableUtils, method updateIsLeader.
/**
 * Updates the isLeader value when the processor starts or stops being a leader.
 * @param jmVersion Job model version of the processor row to be updated.
 * @param pid Unique processor ID of the processor row to be updated.
 * @param isLeader Denotes whether the processor is a leader or not.
 * @throws AzureException If an Azure storage service error occurred.
 */
public void updateIsLeader(String jmVersion, String pid, boolean isLeader) {
try {
// Retrieve-modify-replace: relies on the entity's ETag for optimistic concurrency.
TableOperation retrieveEntity = TableOperation.retrieve(jmVersion, pid, ProcessorEntity.class);
// NOTE(review): getResultAsType() may return null if no row matches (jmVersion, pid),
// which would NPE on setIsLeader below rather than throw AzureException — confirm callers
// only invoke this for rows known to exist.
ProcessorEntity entity = table.execute(retrieveEntity).getResultAsType();
entity.setIsLeader(isLeader);
TableOperation update = TableOperation.replace(entity);
table.execute(update);
} catch (StorageException e) {
// Space before "and" so the message reads "... version: X and pid: Y" (was "Xand pid").
LOG.error("Azure storage exception while updating isLeader value for job model version: " + jmVersion + " and pid: " + pid, e);
throw new AzureException(e);
}
}
Aggregations