Use of org.junit.Assert.assertEquals in project flink by Apache.
The class HiveTableSinkITCase, method testBatchAppend.
@Test
public void testBatchAppend() throws Exception {
    TableEnvironment tEnv = HiveTestUtils.createTableEnvInBatchMode(SqlDialect.HIVE);
    tEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
    tEnv.useCatalog(hiveCatalog.getName());
    tEnv.executeSql("create database db1");
    tEnv.useDatabase("db1");
    try {
        tEnv.executeSql("create table append_table (i int, j int)");
        tEnv.executeSql("insert into append_table select 1, 1").await();
        tEnv.executeSql("insert into append_table select 2, 2").await();
        List<Row> rows =
                CollectionUtil.iteratorToList(
                        tEnv.executeSql("select * from append_table").collect());
        rows.sort(Comparator.comparingInt(o -> (int) o.getField(0)));
        Assert.assertEquals(Arrays.asList(Row.of(1, 1), Row.of(2, 2)), rows);
    } finally {
        tEnv.executeSql("drop database db1 cascade");
    }
}
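All of the examples on this page call assertEquals with the expected value first and the actual value second. For reference, the same assertion is often written with a static import, which is the form the page title refers to; the class and values below are made up for illustration, a minimal self-contained sketch:

import static org.junit.Assert.assertEquals;

import java.util.Arrays;
import java.util.List;
import org.junit.Test;

public class AssertEqualsExampleTest {

    @Test
    public void expectedComesFirst() {
        List<Integer> actual = Arrays.asList(1, 2);
        // Expected value first, actual value second; an optional failure message can be
        // passed as a leading argument: assertEquals("lists differ", expected, actual).
        assertEquals(Arrays.asList(1, 2), actual);
    }
}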
Use of org.junit.Assert.assertEquals in project flink by Apache.
The class StateAssignmentOperationTest, method verifyAndCollectStateInfo.
// ------------------------------------------------------------------------
/**
 * Verifies that, after the states are repartitioned, state with each distribution mode behaves
 * as expected, and collects, per state name, how many operator state handles the new sub-tasks
 * would use in total.
 */
private void verifyAndCollectStateInfo(
        OperatorState operatorState,
        OperatorID operatorID,
        int oldParallelism,
        int newParallelism,
        Map<String, Integer> stateInfoCounts) {
    final Map<OperatorInstanceID, List<OperatorStateHandle>> newManagedOperatorStates =
            new HashMap<>();
    StateAssignmentOperation.reDistributePartitionableStates(
            Collections.singletonMap(operatorID, operatorState),
            newParallelism,
            OperatorSubtaskState::getManagedOperatorState,
            RoundRobinOperatorStateRepartitioner.INSTANCE,
            newManagedOperatorStates);
    // Verify the repartitioned managed operator states per sub-task.
    for (List<OperatorStateHandle> operatorStateHandles : newManagedOperatorStates.values()) {
        final EnumMap<OperatorStateHandle.Mode, Map<String, Integer>> stateModeOffsets =
                new EnumMap<>(OperatorStateHandle.Mode.class);
        for (OperatorStateHandle.Mode mode : OperatorStateHandle.Mode.values()) {
            stateModeOffsets.put(mode, new HashMap<>());
        }
        for (OperatorStateHandle operatorStateHandle : operatorStateHandles) {
            for (Map.Entry<String, OperatorStateHandle.StateMetaInfo> stateNameToMetaInfo :
                    operatorStateHandle.getStateNameToPartitionOffsets().entrySet()) {
                String stateName = stateNameToMetaInfo.getKey();
                stateInfoCounts.merge(stateName, 1, (count, inc) -> count + inc);
                OperatorStateHandle.StateMetaInfo stateMetaInfo = stateNameToMetaInfo.getValue();
                stateModeOffsets
                        .get(stateMetaInfo.getDistributionMode())
                        .merge(stateName, stateMetaInfo.getOffsets().length, (count, inc) -> count + inc);
            }
        }
        for (Map.Entry<OperatorStateHandle.Mode, Map<String, Integer>> modeMapEntry :
                stateModeOffsets.entrySet()) {
            OperatorStateHandle.Mode mode = modeMapEntry.getKey();
            Map<String, Integer> stateOffsets = modeMapEntry.getValue();
            if (OperatorStateHandle.Mode.SPLIT_DISTRIBUTE.equals(mode)) {
                if (oldParallelism < newParallelism) {
                    // SPLIT_DISTRIBUTE: when rescaling up, the state is split and re-distributed,
                    // so each new sub-task receives one offset per state.
                    stateOffsets.values().forEach(length -> Assert.assertEquals(1, (int) length));
                } else {
                    // SPLIT_DISTRIBUTE: when rescaling down to 1 or not rescaling at all,
                    // the state is not re-distributed.
                    stateOffsets.values().forEach(length -> Assert.assertEquals(2, (int) length));
                }
            } else if (OperatorStateHandle.Mode.UNION.equals(mode)) {
                // UNION: replicated all-to-all.
                stateOffsets.values().forEach(length -> Assert.assertEquals(2, (int) length));
            } else {
                // BROADCAST: also replicated all-to-all.
                stateOffsets.values().forEach(length -> Assert.assertEquals(3, (int) length));
            }
        }
    }
}
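The per-name counting in this helper relies on Map.merge to accumulate totals. A minimal, self-contained sketch of that idiom with plain strings (the state names here are made up for illustration; Integer::sum is equivalent to the (count, inc) -> count + inc lambda used above):

import java.util.HashMap;
import java.util.Map;

public class MergeCountExample {

    public static void main(String[] args) {
        Map<String, Integer> counts = new HashMap<>();
        for (String stateName : new String[] {"buffered-elements", "offsets", "offsets"}) {
            // If the key is absent, store 1; otherwise add 1 to the existing count.
            counts.merge(stateName, 1, Integer::sum);
        }
        System.out.println(counts.get("offsets"));           // 2
        System.out.println(counts.get("buffered-elements")); // 1
    }
}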
Use of org.junit.Assert.assertEquals in project flink by Apache.
The class ReporterSetupTest, method testSeveralReportersWithArgumentForwarding.
/**
* Verifies that multiple reporters can be configured with all their arguments being forwarded.
*/
@Test
public void testSeveralReportersWithArgumentForwarding() {
    final Configuration config = new Configuration();
    configureReporter1(config);
    configureReporter2(config);
    final List<ReporterSetup> reporterSetups = ReporterSetup.fromConfiguration(config, null);
    Assert.assertEquals(2, reporterSetups.size());
    final Optional<ReporterSetup> reporter1Config =
            reporterSetups.stream().filter(c -> "reporter1".equals(c.getName())).findFirst();
    Assert.assertTrue(reporter1Config.isPresent());
    assertReporter1Configured(reporter1Config.get());
    final Optional<ReporterSetup> reporter2Config =
            reporterSetups.stream().filter(c -> "reporter2".equals(c.getName())).findFirst();
    Assert.assertTrue(reporter2Config.isPresent());
    assertReporter2Configured(reporter2Config.get());
}
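The helpers configureReporter1/2 and assertReporter1/2Configured are not shown in this excerpt. Flink resolves reporters from options under the metrics.reporter.<name>. prefix, so a hypothetical version of the first helper pair, as it might appear inside ReporterSetupTest, could look roughly like the sketch below; the concrete keys, the reporter class name, and the MetricConfig accessors are assumptions, not the actual test code.

// Hypothetical sketch only; key names and the reporter class are assumptions.
private static void configureReporter1(Configuration config) {
    config.setString("metrics.reporter.reporter1.class", "org.example.TestReporter1"); // assumed reporter implementation
    config.setString("metrics.reporter.reporter1.arg1", "value1"); // assumed forwarded argument
    config.setString("metrics.reporter.reporter1.arg2", "value2"); // assumed forwarded argument
}

private static void assertReporter1Configured(ReporterSetup setup) {
    Assert.assertEquals("reporter1", setup.getName());
    // The forwarded options are exposed on the setup; exact accessor assumed here.
    Assert.assertEquals("value1", setup.getConfiguration().getString("arg1", null));
    Assert.assertEquals("value2", setup.getConfiguration().getString("arg2", null));
}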
Use of org.junit.Assert.assertEquals in project flink by Apache.
The class ZooKeeperUtilsTest, method testStartCuratorFrameworkFailed.
@Test
public void testStartCuratorFrameworkFailed() throws Exception {
    TestingFatalErrorHandler handler = new TestingFatalErrorHandler();
    String errorMsg = "unexpected exception";
    final CuratorFrameworkFactory.Builder curatorFrameworkBuilder =
            CuratorFrameworkFactory.builder()
                    .connectString("localhost:2181")
                    .retryPolicy(new ExponentialBackoffRetry(1, 1))
                    .zookeeperFactory(
                            (s, i, watcher, b) -> {
                                throw new RuntimeException(errorMsg);
                            })
                    .namespace("flink");
    ZooKeeperUtils.startCuratorFramework(curatorFrameworkBuilder, handler);
    Assert.assertEquals(errorMsg, handler.getErrorFuture().get().getMessage());
}
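The test above forces the ZooKeeper client factory to fail so the error surfaces through the fatal error handler. For contrast, a minimal sketch of the normal path when building a Curator client directly (plain Curator API rather than the Flink wrapper; the connect string, znode path, and retry settings are assumptions for illustration):

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class CuratorHappyPath {

    public static void main(String[] args) throws Exception {
        CuratorFramework client = CuratorFrameworkFactory.builder()
                .connectString("localhost:2181") // assumed local ZooKeeper
                .retryPolicy(new ExponentialBackoffRetry(1000, 3))
                .namespace("flink")
                .build();
        client.start();
        try {
            // All paths are resolved under the /flink namespace configured above.
            client.create().creatingParentsIfNeeded().forPath("/example", "hello".getBytes());
            System.out.println(new String(client.getData().forPath("/example")));
        } finally {
            client.close();
        }
    }
}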
Use of org.junit.Assert.assertEquals in project hbase by Apache.
The class TestAsyncTableGetMultiThreaded, method test.
@Test
public void test() throws Exception {
    LOG.info("====== Test started ======");
    int numThreads = 7;
    AtomicBoolean stop = new AtomicBoolean(false);
    ExecutorService executor =
            Executors.newFixedThreadPool(
                    numThreads,
                    new ThreadFactoryBuilder()
                            .setNameFormat("TestAsyncGet-pool-%d")
                            .setDaemon(true)
                            .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER)
                            .build());
    List<Future<?>> futures = new ArrayList<>();
    IntStream.range(0, numThreads).forEach(i -> futures.add(executor.submit(() -> {
        run(stop);
        return null;
    })));
    LOG.info("====== Scheduled {} read threads ======", numThreads);
    Collections.shuffle(Arrays.asList(SPLIT_KEYS), new Random(123));
    Admin admin = TEST_UTIL.getAdmin();
    for (byte[] splitPoint : SPLIT_KEYS) {
        int oldRegionCount = admin.getRegions(TABLE_NAME).size();
        LOG.info("====== Splitting at {} ======, region count before splitting is {}",
                Bytes.toStringBinary(splitPoint), oldRegionCount);
        admin.split(TABLE_NAME, splitPoint);
        TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {
            @Override
            public boolean evaluate() throws Exception {
                return TEST_UTIL.getMiniHBaseCluster().getRegions(TABLE_NAME).size() > oldRegionCount;
            }

            @Override
            public String explainFailure() throws Exception {
                return "Split has not finished yet";
            }
        });
        List<HRegion> regions = TEST_UTIL.getMiniHBaseCluster().getRegions(TABLE_NAME);
        LOG.info("====== Split at {} ======, region count after splitting is {}",
                Bytes.toStringBinary(splitPoint), regions.size());
        for (HRegion region : regions) {
            LOG.info("====== Compact {} ======", region.getRegionInfo());
            region.compact(true);
        }
        for (HRegion region : regions) {
            // Wait for the compaction to complete and for references to the compacted files
            // to be cleaned up.
            LOG.info("====== Waiting for compaction on {} ======", region.getRegionInfo());
            RetryCounter retrier = new RetryCounter(30, 1, TimeUnit.SECONDS);
            for (;;) {
                try {
                    if (admin.getCompactionStateForRegion(region.getRegionInfo().getRegionName())
                            == CompactionState.NONE) {
                        break;
                    }
                } catch (IOException e) {
                    LOG.warn("Failed to query compaction state");
                }
                if (!retrier.shouldRetry()) {
                    throw new IOException("Compaction did not finish in time after "
                            + retrier.getAttemptTimes() + " attempts");
                }
                retrier.sleepUntilNextRetry();
            }
            LOG.info("====== Compaction on {} finished, close and archive compacted files ======",
                    region.getRegionInfo());
            region.getStores().get(0).closeAndArchiveCompactedFiles();
            LOG.info("====== Close and archive compacted files on {} done ======",
                    region.getRegionInfo());
        }
        Thread.sleep(5000);
        LOG.info("====== Balancing cluster ======");
        admin.balance(BalanceRequest.newBuilder().setIgnoreRegionsInTransition(true).build());
        LOG.info("====== Balance cluster done ======");
        Thread.sleep(5000);
        ServerName metaServer = TEST_UTIL.getHBaseCluster().getServerHoldingMeta();
        ServerName newMetaServer = TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream()
                .map(t -> t.getRegionServer().getServerName())
                .filter(s -> !s.equals(metaServer))
                .findAny()
                .get();
        LOG.info("====== Moving meta from {} to {} ======", metaServer, newMetaServer);
        admin.move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), newMetaServer);
        LOG.info("====== Move meta done ======");
        Thread.sleep(5000);
    }
    List<LogEntry> balancerDecisionRecords =
            admin.getLogEntries(null, "BALANCER_DECISION", ServerType.MASTER, 2, null);
    Assert.assertEquals(2, balancerDecisionRecords.size());
    LOG.info("====== Read test finished, shutdown thread pool ======");
    stop.set(true);
    executor.shutdown();
    for (int i = 0; i < numThreads; i++) {
        LOG.info("====== Waiting for {} threads to finish, remaining {} ======", numThreads,
                numThreads - i);
        futures.get(i).get();
    }
    LOG.info("====== Test finished ======");
}
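The reader workers submitted at the top of this test (the run(stop) calls) are not shown in this excerpt; the surrounding scaffolding is the common submit-then-signal-then-join pattern. A stripped-down, self-contained sketch of just that pattern, with a placeholder workload standing in for the HBase reads (the class name and the busy loop are illustrative assumptions):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.IntStream;

public class StopFlagWorkersExample {

    // Placeholder for the test's run(stop): loop until asked to stop.
    private static void run(AtomicBoolean stop) {
        while (!stop.get()) {
            Thread.onSpinWait(); // real code would issue a read here
        }
    }

    public static void main(String[] args) throws Exception {
        int numThreads = 7;
        AtomicBoolean stop = new AtomicBoolean(false);
        ExecutorService executor = Executors.newFixedThreadPool(numThreads);
        List<Future<?>> futures = new ArrayList<>();
        IntStream.range(0, numThreads).forEach(i -> futures.add(executor.submit(() -> {
            run(stop);
            return null;
        })));

        Thread.sleep(100); // stand-in for the splits, compactions, and moves exercised above

        stop.set(true);   // signal the workers to stop
        executor.shutdown();
        for (Future<?> future : futures) {
            future.get(); // re-throws any exception a worker hit, which would fail the test
        }
    }
}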