| focal_method | test_case |
|---|---|
// Trains a linear epsilon-SVR on (x, y) and wraps the resulting kernel machine
// in a lightweight Regression whose predict() evaluates the explicit linear form.
// eps: epsilon-insensitive loss width; C: soft-margin penalty; tol: solver tolerance.
public static Regression<double[]> fit(double[][] x, double[] y, double eps, double C, double tol) {
smile.base.svm.SVR<double[]> svr = new smile.base.svm.SVR<>(new LinearKernel(), eps, C, tol);
KernelMachine<double[]> svm = svr.fit(x, y);
return new Regression<>() {
// Converted once to a linear weight vector so predict() avoids per-call kernel sums.
final LinearKernelMachine model = LinearKernelMachine.of(svm);
@Override
public double predict(double[] x) {
return model.f(x);
}
};
} | @Test
public void tesProstate() {
System.out.println("Prostate");
GaussianKernel kernel = new GaussianKernel(6.0);
RegressionValidation<Regression<double[]>> result = RegressionValidation.of(Prostate.x, Prostate.y,
Prostate.testx, Prostate.testy, (x, y) -> SVM.fit(x, y, kernel, 0.5, 5, 1E-3));
System.out.println(result);
assertEquals(0.9112183360712871, result.metrics.rmse, 1E-4);
} |
// Loads "describe <table>" column metadata for each requested table. For tag
// columns it also probes the tag's value, keeping it only when the query returns
// exactly one row (multiple rows would make the value ambiguous). Tables whose
// metadata comes back empty are logged and omitted from the returned map.
public Map<String, List<ColumnMeta>> loadColumnMetas(List<String> tables) throws DataXException {
Map<String, List<ColumnMeta>> ret = new HashMap<>();
for (String table : tables) {
List<ColumnMeta> columnMetaList = new ArrayList<>();
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("describe " + table);
// i == 0 flags the first described column — presumably the primary
// timestamp column in TDengine; TODO confirm against buildColumnMeta.
for (int i = 0; rs.next(); i++) {
ColumnMeta columnMeta = buildColumnMeta(rs, i == 0);
columnMetaList.add(columnMeta);
}
} catch (SQLException e) {
throw DataXException.asDataXException(TDengineWriterErrorCode.RUNTIME_EXCEPTION, e.getMessage());
}
if (columnMetaList.isEmpty()) {
LOG.error("column metadata of " + table + " is empty!");
continue;
}
columnMetaList.stream().filter(colMeta -> colMeta.isTag).forEach(colMeta -> {
String sql = "select " + colMeta.field + " from " + table;
Object value = null;
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery(sql);
for (int i = 0; rs.next(); i++) {
value = rs.getObject(colMeta.field);
// A second row means the tag value differs across sub-tables; discard it.
if (i > 0) {
value = null;
break;
}
}
} catch (SQLException e) {
// NOTE(review): failure here is only printed and the tag value stays null;
// consider routing through LOG like the surrounding code does.
e.printStackTrace();
}
colMeta.value = value;
});
LOG.debug("load column metadata of " + table + ": " + Arrays.toString(columnMetaList.toArray()));
ret.put(table, columnMetaList);
}
return ret;
} | @Test
public void loadColumnMetas() {
// given
SchemaManager schemaManager = new SchemaManager(conn);
List<String> tables = Arrays.asList("stb1", "stb2", "tb1", "tb3", "weather");
// when
Map<String, List<ColumnMeta>> columnMetaMap = schemaManager.loadColumnMetas(tables);
// then
List<ColumnMeta> stb1 = columnMetaMap.get("stb1");
Assert.assertEquals(4, stb1.size());
} |
// Builds an unmodifiable name -> Metric map of JVM memory gauges: totals
// (heap + non-heap), per-area init/used/max/committed, usage ratios, and
// per-memory-pool gauges (including used-after-gc when the pool exposes
// collection usage).
@Override
public Map<String, Metric> getMetrics() {
final Map<String, Metric> gauges = new HashMap<>();
gauges.put("total.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit() +
mxBean.getNonHeapMemoryUsage().getInit());
gauges.put("total.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed() +
mxBean.getNonHeapMemoryUsage().getUsed());
// -1 means "undefined max"; propagate it rather than adding it into the sum.
gauges.put("total.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax() == -1 ?
-1 : mxBean.getHeapMemoryUsage().getMax() + mxBean.getNonHeapMemoryUsage().getMax());
gauges.put("total.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted() +
mxBean.getNonHeapMemoryUsage().getCommitted());
gauges.put("heap.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit());
gauges.put("heap.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed());
gauges.put("heap.max", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getMax());
gauges.put("heap.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted());
gauges.put("heap.usage", new RatioGauge() {
@Override
protected Ratio getRatio() {
final MemoryUsage usage = mxBean.getHeapMemoryUsage();
return Ratio.of(usage.getUsed(), usage.getMax());
}
});
gauges.put("non-heap.init", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getInit());
gauges.put("non-heap.used", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getUsed());
gauges.put("non-heap.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax());
gauges.put("non-heap.committed", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getCommitted());
gauges.put("non-heap.usage", new RatioGauge() {
@Override
protected Ratio getRatio() {
final MemoryUsage usage = mxBean.getNonHeapMemoryUsage();
// Fall back to committed when max is undefined (-1) so the ratio stays finite.
return Ratio.of(usage.getUsed(), usage.getMax() == -1 ? usage.getCommitted() : usage.getMax());
}
});
for (final MemoryPoolMXBean pool : memoryPools) {
// Pool names may contain spaces (e.g. "PS Old Gen"); normalize them to dashes.
final String poolName = name("pools", WHITESPACE.matcher(pool.getName()).replaceAll("-"));
gauges.put(name(poolName, "usage"), new RatioGauge() {
@Override
protected Ratio getRatio() {
MemoryUsage usage = pool.getUsage();
return Ratio.of(usage.getUsed(),
usage.getMax() == -1 ? usage.getCommitted() : usage.getMax());
}
});
gauges.put(name(poolName, "max"), (Gauge<Long>) () -> pool.getUsage().getMax());
gauges.put(name(poolName, "used"), (Gauge<Long>) () -> pool.getUsage().getUsed());
gauges.put(name(poolName, "committed"), (Gauge<Long>) () -> pool.getUsage().getCommitted());
// Only register GC usage metrics if the memory pool supports usage statistics.
if (pool.getCollectionUsage() != null) {
gauges.put(name(poolName, "used-after-gc"), (Gauge<Long>) () ->
pool.getCollectionUsage().getUsed());
}
gauges.put(name(poolName, "init"), (Gauge<Long>) () -> pool.getUsage().getInit());
}
return Collections.unmodifiableMap(gauges);
} | @Test
public void hasAGaugeForHeapInit() {
final Gauge gauge = (Gauge) gauges.getMetrics().get("heap.init");
assertThat(gauge.getValue())
.isEqualTo(20L);
} |
// Convenience overload: finds the k largest eigenpairs of A with default
// convergence threshold 1.0E-8 and an iteration cap of 10 * nrow.
public static Matrix.EVD eigen(IMatrix A, int k) {
return eigen(A, k, 1.0E-8, 10 * A.nrow());
} | @Test
public void testLanczos() {
System.out.println("eigen");
Matrix a = Matrix.of(A);
a.uplo(UPLO.LOWER);
Matrix.EVD result = Lanczos.eigen(a, 3);
assertTrue(MathEx.equals(eigenValues, result.wr, 1E-7));
assertEquals(eigenVectors.length, result.Vr.nrow());
assertEquals(eigenVectors[0].length, result.Vr.ncol());
for (int i = 0; i < eigenVectors.length; i++) {
for (int j = 0; j < eigenVectors[i].length; j++) {
assertEquals(Math.abs(eigenVectors[i][j]), Math.abs(result.Vr.get(i, j)), 1E-7);
}
}
} |
// Applies a SET statement on the session: extract variable assignments, validate
// the names, apply any charset change, record the variables on the session, then
// handle global-scope assignments if present.
@Override
public void execute(final ConnectionSession connectionSession) throws SQLException {
Map<String, String> sessionVariables = extractSessionVariables();
validateSessionVariables(sessionVariables.keySet());
new CharsetSetExecutor(databaseType, connectionSession).set(sessionVariables);
new SessionVariableRecordExecutor(databaseType, connectionSession).recordVariable(sessionVariables);
executeSetGlobalVariablesIfPresent(connectionSession);
} | @Test
void assertSetUnknownSystemVariable() {
VariableAssignSegment unknownVariableAssignSegment = new VariableAssignSegment();
unknownVariableAssignSegment.setVariable(new VariableSegment(0, 0, "unknown_variable"));
unknownVariableAssignSegment.setAssignValue("");
SetStatement setStatement = new MySQLSetStatement();
setStatement.getVariableAssigns().add(unknownVariableAssignSegment);
MySQLSetVariableAdminExecutor executor = new MySQLSetVariableAdminExecutor(setStatement);
assertThrows(UnknownSystemVariableException.class, () -> executor.execute(mock(ConnectionSession.class)));
} |
// Runs the consumer without an executor hop when the future is already complete
// (thenAccept executes on the calling thread for a done future); otherwise
// schedules it asynchronously on the given executor.
public static <IN> CompletableFuture<Void> thenAcceptAsyncIfNotDone(
CompletableFuture<IN> completableFuture,
Executor executor,
Consumer<? super IN> consumer) {
return completableFuture.isDone()
? completableFuture.thenAccept(consumer)
: completableFuture.thenAcceptAsync(consumer, executor);
} | @Test
void testThenAcceptAsyncIfNotDone() {
testFutureContinuation(
(CompletableFuture<?> future, Executor executor) ->
FutureUtils.thenAcceptAsyncIfNotDone(future, executor, o -> {}));
} |
// Copies name-server address-resolution settings from the given Properties into
// JVM system properties, falling back to the MixAll defaults. Null input is a no-op.
private static void properties2SystemEnv(Properties properties) {
if (properties == null) {
return;
}
String rmqAddressServerDomain = properties.getProperty("rmqAddressServerDomain", MixAll.WS_DOMAIN_NAME);
String rmqAddressServerSubGroup = properties.getProperty("rmqAddressServerSubGroup", MixAll.WS_DOMAIN_SUBGROUP);
System.setProperty("rocketmq.namesrv.domain", rmqAddressServerDomain);
System.setProperty("rocketmq.namesrv.domain.subgroup", rmqAddressServerSubGroup);
} | @Test
public void testProperties2SystemEnv() throws NoSuchMethodException, InvocationTargetException,
IllegalAccessException {
Properties properties = new Properties();
Class<BrokerStartup> clazz = BrokerStartup.class;
Method method = clazz.getDeclaredMethod("properties2SystemEnv", Properties.class);
method.setAccessible(true);
{
properties.put("rmqAddressServerDomain", "value1");
properties.put("rmqAddressServerSubGroup", "value2");
method.invoke(null, properties);
Assert.assertEquals("value1", System.getProperty("rocketmq.namesrv.domain"));
Assert.assertEquals("value2", System.getProperty("rocketmq.namesrv.domain.subgroup"));
}
{
properties.put("rmqAddressServerDomain", MixAll.WS_DOMAIN_NAME);
properties.put("rmqAddressServerSubGroup", MixAll.WS_DOMAIN_SUBGROUP);
method.invoke(null, properties);
Assert.assertEquals(MixAll.WS_DOMAIN_NAME, System.getProperty("rocketmq.namesrv.domain"));
Assert.assertEquals(MixAll.WS_DOMAIN_SUBGROUP, System.getProperty("rocketmq.namesrv.domain.subgroup"));
}
} |
// Constructs an RPC message with the given transaction id; only RPC_CALL and
// RPC_REPLY are accepted, any other type fails fast with IllegalArgumentException.
RpcMessage(int xid, Type messageType) {
if (messageType != Type.RPC_CALL && messageType != Type.RPC_REPLY) {
throw new IllegalArgumentException("Invalid message type " + messageType);
}
this.xid = xid;
this.messageType = messageType;
} | @Test
public void testRpcMessage() {
RpcMessage msg = getRpcMessage(0, RpcMessage.Type.RPC_CALL);
Assert.assertEquals(0, msg.getXid());
Assert.assertEquals(RpcMessage.Type.RPC_CALL, msg.getMessageType());
} |
// Unblocks failed workflow instances for the given workflow in batches, publishes
// a start-workflow event (flagged with whether anything was unblocked), and
// returns a timeline event summarizing the total count.
public TimelineEvent unblock(String workflowId, User caller) {
TimelineActionEvent.TimelineActionEventBuilder eventBuilder =
TimelineActionEvent.builder().author(caller).reason("Unblock workflow [%s]", workflowId);
int totalUnblocked = 0;
int unblocked = Constants.UNBLOCK_BATCH_SIZE;
// Keep draining while full batches come back; a short batch means no more remain.
while (unblocked == Constants.UNBLOCK_BATCH_SIZE) {
unblocked =
instanceDao.tryUnblockFailedWorkflowInstances(
workflowId, Constants.UNBLOCK_BATCH_SIZE, eventBuilder.build());
totalUnblocked += unblocked;
}
workflowHelper.publishStartWorkflowEvent(workflowId, totalUnblocked > 0);
return eventBuilder
.message("Unblocked [%s] failed workflow instances.", totalUnblocked)
.build();
} | @Test
public void testUnblockNothing() {
when(instanceDao.tryUnblockFailedWorkflowInstances(eq("sample-minimal-wf"), anyInt(), any()))
.thenReturn(0);
TimelineEvent event = actionHandler.unblock("sample-minimal-wf", tester);
assertEquals("Unblocked [0] failed workflow instances.", event.getMessage());
verify(maestroJobEventPublisher, times(0)).publishOrThrow(any(StartWorkflowJobEvent.class));
} |
// Replaces every match of passwordPattern in info with mask; returns info
// unchanged when any argument is null.
// NOTE(review): mask goes through Matcher.appendReplacement, so '$' and '\'
// inside it are treated as group references/escapes — confirm callers' masks.
public static String mask(String info, String passwordPattern, String mask) {
if (null == info || null == passwordPattern || null == mask) {
return info;
}
Pattern p = Pattern.compile(passwordPattern);
Matcher m = p.matcher(info);
StringBuffer sb = new StringBuffer();
while (m.find()) {
m.appendReplacement(sb, mask);
}
m.appendTail(sb);
return sb.toString();
} | @Test
public void testMask() {
String sql = "stock_trade:='connector' ="
+ " 'jdbc',useUnicode=yes&characterEncoding=UTF-8&useSSL=false',\\n"
+ " 'username' = 'trade',\\n"
+ " 'password' = 'c6634672b535f968b'\\n"
+ ";\\n"
+ "tidb_test:='connector' = 'jdbc',\\n"
+ "'url' ="
+ " 'jdbc:mysql://localhost:4000/test?useUnicode=yes&characterEncoding=UTF-8&useSSL=false',\\n"
+ " 'username' = 'root',\\n"
+ " 'password' = 'wwz@test'\\n"
+ ";";
String out = SecurityAspect.mask(sql, SecurityAspect.SENSITIVE, SecurityAspect.MASK);
Assert.assertEquals(out.contains(SecurityAspect.MASK), true);
} |
// Sets the application id (must be non-null) and returns this builder for chaining.
public final InternalTopologyBuilder setApplicationId(final String applicationId) {
Objects.requireNonNull(applicationId, "applicationId can't be null");
this.applicationId = applicationId;
return this;
} | @Test
public void shouldNotSetApplicationIdToNull() {
assertThrows(NullPointerException.class, () -> builder.setApplicationId(null));
} |
// Sends any API request through the underlying client and returns its typed response.
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
} | @Test
public void sendDice() {
SendResponse response = bot.execute(new SendDice(chatId));
Dice dice = response.message().dice();
assertNotNull(dice);
assertTrue(dice.value() >= 1 && dice.value() <= 6);
assertEquals("🎲", dice.emoji());
response = bot.execute(new SendDice(chatId).darts());
dice = response.message().dice();
assertNotNull(dice);
assertTrue(dice.value() >= 1 && dice.value() <= 6);
assertEquals("🎯", dice.emoji());
response = bot.execute(new SendDice(chatId).basketball());
dice = response.message().dice();
assertNotNull(dice);
assertTrue(dice.value() >= 1 && dice.value() <= 5);
assertEquals("🏀", dice.emoji());
response = bot.execute(new SendDice(chatId).football());
dice = response.message().dice();
assertNotNull(dice);
assertTrue(dice.value() >= 1 && dice.value() <= 5);
assertEquals("⚽", dice.emoji());
response = bot.execute(new SendDice(chatId).slotMachine());
dice = response.message().dice();
assertNotNull(dice);
assertTrue(dice.value() >= 1 && dice.value() <= 64);
assertEquals("🎰", dice.emoji());
response = bot.execute(new SendDice(chatId).bowling());
dice = response.message().dice();
assertNotNull(dice);
assertTrue(dice.value() >= 1 && dice.value() <= 6);
assertEquals("🎳", dice.emoji());
} |
// Runs KEYS <pattern> against the specified cluster node and returns the matches.
@Override
public Set<byte[]> keys(RedisClusterNode node, byte[] pattern) {
RedisClient entry = getEntry(node);
RFuture<Collection<byte[]>> f = executorService.readAsync(entry, ByteArrayCodec.INSTANCE, KEYS, pattern);
// Block until the async read completes, then snapshot into a mutable set.
Collection<byte[]> keys = syncFuture(f);
return new HashSet<>(keys);
} | @Test
public void testKeys() {
RedisClusterNode node1 = connection.clusterGetNodeForSlot(1);
for (int i = 0; i < 1000; i++) {
connection.set(("test" + i).getBytes(), ("" + i).getBytes());
}
Set<byte[]> keys = connection.keys(node1, "test*".getBytes(StandardCharsets.UTF_8));
assertThat(keys).hasSize(342);
} |
// Collects the named arguments used by every transformation job entry in the job
// and merges them into this.arguments (first occurrence of a key wins via
// putIfAbsent). Errors loading a transformation are deliberately ignored here.
public void getUsedArguments( JobMeta jobMeta, String[] commandLineArguments, IMetaStore metaStore ) {
for ( JobEntryCopy jobEntryCopy : jobMeta.jobcopies ) {
if ( jobEntryCopy.isTransformation() ) {
JobEntryTrans jobEntryTrans = (JobEntryTrans) jobEntryCopy.getEntry();
try {
TransMeta transMeta = jobEntryTrans.getTransMeta( repository, metaStore, jobMeta );
Map<String, String> map = transMeta.getUsedArguments( commandLineArguments );
for ( Map.Entry<String, String> entry : map.entrySet() ) {
arguments.putIfAbsent( entry.getKey(), entry.getValue() );
}
} catch ( KettleException ke ) {
// suppress exceptions at this time - we will let the runtime report on any errors
}
}
}
} | @Test
public void testGetUsedArguments() throws KettleException {
JobExecutionConfiguration executionConfiguration = new JobExecutionConfiguration();
JobMeta jobMeta = new JobMeta( );
jobMeta.jobcopies = new ArrayList<>( );
String[] commandLineArguments = new String[ 0 ];
IMetaStore metaStore = mock( IMetaStore.class );
JobEntryCopy jobEntryCopy0 = new JobEntryCopy( );
TransMeta transMeta0 = mock( TransMeta.class );
Map<String, String> map0 = new HashMap<>( );
map0.put( "arg0", "argument0" );
when( transMeta0.getUsedArguments( commandLineArguments ) ).thenReturn( map0 );
JobEntryInterface jobEntryInterface0 = mock( JobEntryInterface.class );
when( jobEntryInterface0.isTransformation() ).thenReturn( false );
jobEntryCopy0.setEntry( jobEntryInterface0 );
jobMeta.jobcopies.add( jobEntryCopy0 );
JobEntryCopy jobEntryCopy1 = new JobEntryCopy( );
TransMeta transMeta1 = mock( TransMeta.class );
Map<String, String> map1 = new HashMap<>( );
map1.put( "arg1", "argument1" );
when( transMeta1.getUsedArguments( commandLineArguments ) ).thenReturn( map1 );
JobEntryTrans jobEntryTrans1 = mock( JobEntryTrans.class );
when( jobEntryTrans1.isTransformation() ).thenReturn( true );
when( jobEntryTrans1.getTransMeta( executionConfiguration.getRepository(), metaStore, jobMeta ) ).thenReturn( transMeta1 );
jobEntryCopy1.setEntry( jobEntryTrans1 );
jobMeta.jobcopies.add( jobEntryCopy1 );
JobEntryCopy jobEntryCopy2 = new JobEntryCopy( );
TransMeta transMeta2 = mock( TransMeta.class );
Map<String, String> map2 = new HashMap<>( );
map2.put( "arg1", "argument1" );
map2.put( "arg2", "argument2" );
when( transMeta2.getUsedArguments( commandLineArguments ) ).thenReturn( map2 );
JobEntryTrans jobEntryTrans2 = mock( JobEntryTrans.class );
when( jobEntryTrans2.isTransformation() ).thenReturn( true );
when( jobEntryTrans2.getTransMeta( executionConfiguration.getRepository(), metaStore, jobMeta ) ).thenReturn( transMeta2 );
jobEntryCopy2.setEntry( jobEntryTrans2 );
jobMeta.jobcopies.add( jobEntryCopy2 );
executionConfiguration.getUsedArguments( jobMeta, commandLineArguments, metaStore );
assertEquals( 2, executionConfiguration.getArguments().size() );
} |
// Handles an AM heartbeat: validates the attempt, applies container updates and
// releases, normalizes and records new asks, updates the blacklist, and returns
// the resulting Allocation. Returns EMPTY_ALLOCATION when the attempt is unknown,
// outdated, or already stopped.
@Override
@Lock(Lock.NoLock.class)
public Allocation allocate(ApplicationAttemptId applicationAttemptId,
List<ResourceRequest> ask, List<SchedulingRequest> schedulingRequests,
List<ContainerId> release, List<String> blacklistAdditions,
List<String> blacklistRemovals, ContainerUpdates updateRequests) {
FiCaSchedulerApp application = getApplicationAttempt(applicationAttemptId);
if (application == null) {
LOG.error("Calling allocate on removed or non existent application " +
applicationAttemptId.getApplicationId());
return EMPTY_ALLOCATION;
}
// The allocate may be the leftover from previous attempt, and it will
// impact current attempt, such as confuse the request and allocation for
// current attempt's AM container.
// Note outside precondition check for the attempt id may be
// outdated here, so double check it here is necessary.
if (!application.getApplicationAttemptId().equals(applicationAttemptId)) {
LOG.error("Calling allocate on previous or removed " +
"or non existent application attempt " + applicationAttemptId);
return EMPTY_ALLOCATION;
}
// Handle all container updates
handleContainerUpdates(application, updateRequests);
// Release containers
releaseContainers(release, application);
AbstractLeafQueue updateDemandForQueue = null;
// Sanity check for new allocation requests
normalizeResourceRequests(ask);
// Normalize scheduling requests
normalizeSchedulingRequests(schedulingRequests);
Allocation allocation;
// make sure we aren't stopping/removing the application
// when the allocate comes in
application.getWriteLock().lock();
try {
if (application.isStopped()) {
return EMPTY_ALLOCATION;
}
// Process resource requests
if (!ask.isEmpty() || (schedulingRequests != null && !schedulingRequests
.isEmpty())) {
if (LOG.isDebugEnabled()) {
LOG.debug(
"allocate: pre-update " + applicationAttemptId + " ask size ="
+ ask.size());
application.showRequests();
}
// Update application requests; a change in either kind of request marks
// the app's queue for a demand update.
if (application.updateResourceRequests(ask) || application
.updateSchedulingRequests(schedulingRequests)) {
updateDemandForQueue = (AbstractLeafQueue) application.getQueue();
}
if (LOG.isDebugEnabled()) {
LOG.debug("allocate: post-update");
application.showRequests();
}
}
application.updateBlacklist(blacklistAdditions, blacklistRemovals);
allocation = application.getAllocation(getResourceCalculator(),
getClusterResource(), getMinimumResourceCapability());
} finally {
application.getWriteLock().unlock();
}
// Demand update happens outside the app write lock; skipped while the app is
// still waiting for its AM container — TODO confirm ordering rationale.
if (updateDemandForQueue != null && !application
.isWaitingForAMContainer()) {
updateDemandForQueue.getOrderingPolicy().demandUpdated(application);
}
LOG.debug("Allocation for application {} : {} with cluster resource : {}",
applicationAttemptId, allocation, getClusterResource());
return allocation;
} | @Test
public void testPendingResourceUpdatedAccordingToIncreaseRequestChanges()
throws Exception {
Configuration conf =
TestUtils.getConfigurationWithQueueLabels(new Configuration(false));
conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
mgr.init(conf);
MockRM rm = new MockRM(conf) {
protected RMNodeLabelsManager createNodeLabelManager() {
return mgr;
}
};
rm.start();
MockNM nm1 = // label = ""
new MockNM("h1:1234", 200 * GB, rm.getResourceTrackerService());
nm1.registerNode();
// Launch app1 in queue=a1
MockRMAppSubmissionData data =
MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a1")
.withUnmanagedAM(false)
.build();
RMApp app1 = MockRMAppSubmitter.submit(rm, data);
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
// Allocate two more containers
am1.allocate(
Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1),
"*", Resources.createResource(2 * GB), 2)),
null);
ContainerId containerId1 =
ContainerId.newContainerId(am1.getApplicationAttemptId(), 1);
ContainerId containerId2 =
ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
ContainerId containerId3 =
ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
Assert.assertTrue(rm.waitForState(nm1, containerId3,
RMContainerState.ALLOCATED));
// Acquire them
am1.allocate(null, null);
sentRMContainerLaunched(rm,
ContainerId.newContainerId(am1.getApplicationAttemptId(), 1L));
sentRMContainerLaunched(rm,
ContainerId.newContainerId(am1.getApplicationAttemptId(), 2L));
sentRMContainerLaunched(rm,
ContainerId.newContainerId(am1.getApplicationAttemptId(), 3L));
// am1 asks to change its AM container from 1GB to 3GB
am1.sendContainerResizingRequest(Arrays.asList(
UpdateContainerRequest
.newInstance(0, containerId1,
ContainerUpdateType.INCREASE_RESOURCE,
Resources.createResource(3 * GB), null)));
FiCaSchedulerApp app = getFiCaSchedulerApp(rm, app1.getApplicationId());
Assert.assertEquals(2 * GB,
app.getAppAttemptResourceUsage().getPending().getMemorySize());
checkPendingResource(rm, "a1", 2 * GB, null);
checkPendingResource(rm, "a", 2 * GB, null);
checkPendingResource(rm, "root", 2 * GB, null);
// am1 asks to change containerId2 (2G -> 3G) and containerId3 (2G -> 5G)
am1.sendContainerResizingRequest(Arrays.asList(
UpdateContainerRequest
.newInstance(0, containerId2,
ContainerUpdateType.INCREASE_RESOURCE,
Resources.createResource(3 * GB), null),
UpdateContainerRequest
.newInstance(0, containerId3,
ContainerUpdateType.INCREASE_RESOURCE,
Resources.createResource(5 * GB), null)));
Assert.assertEquals(6 * GB,
app.getAppAttemptResourceUsage().getPending().getMemorySize());
checkPendingResource(rm, "a1", 6 * GB, null);
checkPendingResource(rm, "a", 6 * GB, null);
checkPendingResource(rm, "root", 6 * GB, null);
// am1 asks to change containerId1 (1G->3G), containerId2 (2G -> 4G) and
// containerId3 (2G -> 2G)
am1.sendContainerResizingRequest(Arrays.asList(
UpdateContainerRequest
.newInstance(0, containerId1,
ContainerUpdateType.INCREASE_RESOURCE,
Resources.createResource(3 * GB), null),
UpdateContainerRequest
.newInstance(0, containerId2,
ContainerUpdateType.INCREASE_RESOURCE,
Resources.createResource(4 * GB), null),
UpdateContainerRequest
.newInstance(0, containerId3,
ContainerUpdateType.INCREASE_RESOURCE,
Resources.createResource(2 * GB), null)));
Assert.assertEquals(4 * GB,
app.getAppAttemptResourceUsage().getPending().getMemorySize());
checkPendingResource(rm, "a1", 4 * GB, null);
checkPendingResource(rm, "a", 4 * GB, null);
checkPendingResource(rm, "root", 4 * GB, null);
rm.stop();
} |
// Parses gRPC metadata into a DefaultAuthenticationContext: channel id, rpc
// name, and — when an Authorization header is present — the access key
// (Credential's first segment), the hex-to-Base64-converted signature, and the
// datetime bytes used as the signed content. Any unexpected failure is wrapped
// in AuthenticationException; AuthenticationException itself is rethrown as-is.
@Override
public DefaultAuthenticationContext build(Metadata metadata, GeneratedMessageV3 request) {
try {
DefaultAuthenticationContext context = new DefaultAuthenticationContext();
context.setChannelId(metadata.get(GrpcConstants.CHANNEL_ID));
context.setRpcCode(request.getDescriptorForType().getFullName());
String authorization = metadata.get(GrpcConstants.AUTHORIZATION);
// No Authorization header: return an anonymous context, nothing to parse.
if (StringUtils.isEmpty(authorization)) {
return context;
}
String datetime = metadata.get(GrpcConstants.DATE_TIME);
if (StringUtils.isEmpty(datetime)) {
throw new AuthenticationException("datetime is null.");
}
// Expected shape: "<algo> Credential=<ak>/..., SignedHeaders=..., Signature=<hex>"
String[] result = authorization.split(CommonConstants.SPACE, 2);
if (result.length != 2) {
throw new AuthenticationException("authentication header is incorrect.");
}
String[] keyValues = result[1].split(CommonConstants.COMMA);
for (String keyValue : keyValues) {
String[] kv = keyValue.trim().split(CommonConstants.EQUAL, 2);
int kvLength = kv.length;
if (kv.length != 2) {
throw new AuthenticationException("authentication keyValues length is incorrect, actual length={}.", kvLength);
}
String authItem = kv[0];
if (CREDENTIAL.equals(authItem)) {
// The credential's first slash-separated segment is the username/access key.
String[] credential = kv[1].split(CommonConstants.SLASH);
int credentialActualLength = credential.length;
if (credentialActualLength == 0) {
throw new AuthenticationException("authentication credential length is incorrect, actual length={}.", credentialActualLength);
}
context.setUsername(credential[0]);
continue;
}
if (SIGNATURE.equals(authItem)) {
// Signature arrives hex-encoded; downstream verification expects Base64.
context.setSignature(this.hexToBase64(kv[1]));
}
}
context.setContent(datetime.getBytes(StandardCharsets.UTF_8));
return context;
} catch (AuthenticationException e) {
throw e;
} catch (Throwable e) {
throw new AuthenticationException("create authentication context error.", e);
}
} | @Test
public void build1() {
Resource topic = Resource.newBuilder().setName("topic-test").build();
{
SendMessageRequest request = SendMessageRequest.newBuilder()
.addMessages(Message.newBuilder().setTopic(topic)
.setBody(ByteString.copyFromUtf8("message-body"))
.build())
.build();
Metadata metadata = new Metadata();
metadata.put(GrpcConstants.AUTHORIZATION, "MQv2-HMAC-SHA1 Credential=abc, SignedHeaders=x-mq-date-time, Signature=D18A9CBCDDBA9041D6693268FEF15A989E64430B");
metadata.put(GrpcConstants.DATE_TIME, "20231227T194619Z");
DefaultAuthenticationContext context = builder.build(metadata, request);
Assert.assertNotNull(context);
Assert.assertEquals("abc", context.getUsername());
Assert.assertEquals("0YqcvN26kEHWaTJo/vFamJ5kQws=", context.getSignature());
Assert.assertEquals("20231227T194619Z", new String(context.getContent(), StandardCharsets.UTF_8));
}
} |
// Binary win criterion: returns 1 when the position is profitable, otherwise 0.
@Override
public Num calculate(BarSeries series, Position position) {
return position.hasProfit() ? series.one() : series.zero();
} | @Test
public void calculateWithTwoShortPositions() {
MockBarSeries series = new MockBarSeries(numFunction, 110, 105, 110, 100, 95, 105);
TradingRecord tradingRecord = new BaseTradingRecord(Trade.sellAt(0, series), Trade.buyAt(1, series),
Trade.sellAt(2, series), Trade.buyAt(4, series));
assertNumEquals(2, getCriterion().calculate(series, tradingRecord));
} |
// Restores the latch to its initial count by delegating to the internal sync.
public void reset() {
sync.reset();
} | @Test
public void testReset() throws InterruptedException {
int count = 2;
CountDownLatch2 latch = new CountDownLatch2(count);
latch.countDown();
assertEquals("Expected equal", count - 1, latch.getCount());
latch.reset();
assertEquals("Expected equal", count, latch.getCount());
latch.countDown();
latch.countDown();
latch.await();
assertEquals("Expected equal", 0, latch.getCount());
// coverage Sync#tryReleaseShared, c==0
latch.countDown();
assertEquals("Expected equal", 0, latch.getCount());
} |
// Infers whether a FunctionDetails describes a SOURCE, SINK, or FUNCTION when
// the component type was not set explicitly; an explicit type always wins.
public Function.FunctionDetails.ComponentType calculateSubjectType(Function.FunctionDetails functionDetails) {
if (functionDetails.getComponentType() != Function.FunctionDetails.ComponentType.UNKNOWN) {
return functionDetails.getComponentType();
}
Function.SourceSpec sourceSpec = functionDetails.getSource();
Function.SinkSpec sinkSpec = functionDetails.getSink();
// No input topics means the component only produces data — it is a source.
if (sourceSpec.getInputSpecsCount() == 0) {
return Function.FunctionDetails.ComponentType.SOURCE;
}
// Now its between sink and function
if (!isEmpty(sinkSpec.getBuiltin())) {
// if its built in, its a sink
return Function.FunctionDetails.ComponentType.SINK;
}
// The default PulsarSink (or no sink class at all) means plain function code.
if (isEmpty(sinkSpec.getClassName()) || sinkSpec.getClassName().equals(PulsarSink.class.getName())) {
return Function.FunctionDetails.ComponentType.FUNCTION;
}
return Function.FunctionDetails.ComponentType.SINK;
} | @Test
public void testCalculateSubjectTypeForFunction() {
FunctionDetails.Builder builder = FunctionDetails.newBuilder();
// an input but no sink classname is a function
builder.setSource(
Function.SourceSpec.newBuilder().putInputSpecs("topic", Function.ConsumerSpec.newBuilder().build())
.build());
assertEquals(InstanceUtils.calculateSubjectType(builder.build()), FunctionDetails.ComponentType.FUNCTION);
// make sure that if the componenttype is set, that gets precedence.
builder.setComponentType(FunctionDetails.ComponentType.SOURCE);
assertEquals(InstanceUtils.calculateSubjectType(builder.build()), FunctionDetails.ComponentType.SOURCE);
builder.setComponentType(FunctionDetails.ComponentType.SINK);
assertEquals(InstanceUtils.calculateSubjectType(builder.build()), FunctionDetails.ComponentType.SINK);
} |
// Resolves the SMPP command type from the exchange and builds the matching
// command bound to the given session and this binding's configuration.
public SmppCommand createSmppCommand(SMPPSession session, Exchange exchange) {
SmppCommandType commandType = SmppCommandType.fromExchange(exchange);
return commandType.createCommand(session, configuration);
} | @Test
public void createSmppSubmitSmCommand() {
SMPPSession session = new SMPPSession();
Exchange exchange = new DefaultExchange(new DefaultCamelContext());
SmppCommand command = binding.createSmppCommand(session, exchange);
assertTrue(command instanceof SmppSubmitSmCommand);
} |
// Returns the JVM type descriptor for a class, e.g. boolean -> "Z",
// int[][] -> "[[I", Object -> "Ljava/lang/Object;".
public static String getDesc(Class<?> c) {
StringBuilder ret = new StringBuilder();
// One '[' per array dimension, then the component type's descriptor.
while (c.isArray()) {
ret.append('[');
c = c.getComponentType();
}
if (c.isPrimitive()) {
// Map each primitive name to its single-letter JVM descriptor constant.
String t = c.getName();
if ("void".equals(t)) {
ret.append(JVM_VOID);
} else if ("boolean".equals(t)) {
ret.append(JVM_BOOLEAN);
} else if ("byte".equals(t)) {
ret.append(JVM_BYTE);
} else if ("char".equals(t)) {
ret.append(JVM_CHAR);
} else if ("double".equals(t)) {
ret.append(JVM_DOUBLE);
} else if ("float".equals(t)) {
ret.append(JVM_FLOAT);
} else if ("int".equals(t)) {
ret.append(JVM_INT);
} else if ("long".equals(t)) {
ret.append(JVM_LONG);
} else if ("short".equals(t)) {
ret.append(JVM_SHORT);
}
} else {
// Reference type: "L" + internal name (dots replaced by slashes) + ";".
ret.append('L');
ret.append(c.getName().replace('.', '/'));
ret.append(';');
}
return ret.toString();
} | @Test
void testGetDesc() {
// getDesc
assertEquals("Z", ReflectUtils.getDesc(boolean.class));
assertEquals("[[[I", ReflectUtils.getDesc(int[][][].class));
assertEquals("[[Ljava/lang/Object;", ReflectUtils.getDesc(Object[][].class));
} |
// Builds a runtime PMMLStep annotated with the model name, request correlation
// id, requested model name, and every request parameter as a name/value entry.
static PMMLStep getStep(final PMML_STEP pmmlStep, final KiePMMLModel model, final PMMLRequestData requestData) {
final PMMLStep toReturn = new PMMLRuntimeStep(pmmlStep);
toReturn.addInfo("MODEL", model.getName());
toReturn.addInfo("CORRELATION ID", requestData.getCorrelationId());
toReturn.addInfo("REQUEST MODEL", requestData.getModelName());
requestData.getRequestParams()
.forEach(requestParam -> toReturn.addInfo(requestParam.getName(), requestParam.getValue()));
return toReturn;
} | @Test
public void getStep() {
final PMMLRequestData requestData = getPMMLRequestData(MODEL_NAME, FILE_NAME);
Arrays.stream(PMML_STEP.values()).forEach(pmml_step -> {
PMMLStep retrieved = PMMLRuntimeHelper.getStep(pmml_step, modelMock, requestData);
assertThat(retrieved).isNotNull();
commonValuateStep(retrieved, pmml_step, modelMock, requestData);
});
} |
// Writing int2-array binary protocol values is not supported; always throws.
@Override
public void write(final PostgreSQLPacketPayload payload, final Object value) {
throw new UnsupportedSQLOperationException("PostgreSQLInt2ArrayBinaryProtocolValue.write()");
} | @Test
void assertWrite() {
assertThrows(UnsupportedSQLOperationException.class, () -> newInstance().write(new PostgreSQLPacketPayload(null, StandardCharsets.UTF_8), "val"));
} |
// Formats a SearchRequest for logging, appending the target indices when present.
static String computeDetailsAsString(SearchRequest searchRequest) {
StringBuilder message = new StringBuilder();
message.append(String.format("ES search request '%s'", searchRequest));
if (searchRequest.indices().length > 0) {
message.append(String.format(ON_INDICES_MESSAGE, Arrays.toString(searchRequest.indices())));
}
return message.toString();
} | @Test
public void should_format_ClearIndicesCacheRequest() {
ClearIndicesCacheRequest clearIndicesCacheRequest = new ClearIndicesCacheRequest()
.indices("index-1")
.fields("field-1")
.queryCache(true)
.fieldDataCache(true)
.requestCache(true);
assertThat(EsRequestDetails.computeDetailsAsString(clearIndicesCacheRequest))
.isEqualTo("ES clear cache request on indices 'index-1' on fields 'field-1' with filter cache with field data cache with request cache");
} |
// Returns the initially-received push notification held by this holder,
// or null when none has been recorded yet.
public PushNotificationProps get() {
    return mNotification;
} | @Test
public void initialState() throws Exception {
    final InitialNotificationHolder uut = createUUT();
    assertNull(uut.get());
}
/**
 * Splits a table key range [min, max] into evenly-sized chunks of {@code dynamicChunkSize}.
 * Returns a single full-table chunk when the approximate row count does not exceed
 * {@code chunkSize}; otherwise produces open-ended first and last chunks
 * (null start / null end) with fixed-width chunks in between.
 */
@VisibleForTesting
public List<ChunkRange> splitEvenlySizedChunks(
        TableId tableId,
        Object min,
        Object max,
        long approximateRowCnt,
        int chunkSize,
        int dynamicChunkSize) {
    LOG.info(
            "Use evenly-sized chunk optimization for table {}, the approximate row count is {}, the chunk size is {}, the dynamic chunk size is {}",
            tableId,
            approximateRowCnt,
            chunkSize,
            dynamicChunkSize);
    if (approximateRowCnt <= chunkSize) {
        // there is no more than one chunk, return full table as a chunk
        return Collections.singletonList(ChunkRange.all());
    }
    final List<ChunkRange> splits = new ArrayList<>();
    // First chunk starts unbounded (null) so rows below min are still captured.
    Object chunkStart = null;
    Object chunkEnd = ObjectUtils.plus(min, dynamicChunkSize);
    while (ObjectUtils.compare(chunkEnd, max) <= 0) {
        splits.add(ChunkRange.of(chunkStart, chunkEnd));
        chunkStart = chunkEnd;
        try {
            chunkEnd = ObjectUtils.plus(chunkEnd, dynamicChunkSize);
        } catch (ArithmeticException e) {
            // Stop chunk split to avoid dead loop when number overflows.
            break;
        }
    }
    // add the ending split (unbounded end so rows above max are still captured)
    splits.add(ChunkRange.of(chunkStart, null));
    return splits;
} | @Test
public void testSplitEvenlySizedChunksNormal() {
    MySqlChunkSplitter splitter = new MySqlChunkSplitter(null, null);
    List<ChunkRange> res =
            splitter.splitEvenlySizedChunks(
                    new TableId("catalog", "db", "tab"),
                    Integer.MAX_VALUE - 20,
                    Integer.MAX_VALUE,
                    20,
                    10,
                    10);
    assertEquals(3, res.size());
    assertEquals(ChunkRange.of(null, 2147483637), res.get(0));
    assertEquals(ChunkRange.of(2147483637, 2147483647), res.get(1));
    assertEquals(ChunkRange.of(2147483647, null), res.get(2));
}
/**
 * Deserializes a delimited (CSV) payload into a list of column values.
 * Only the first CSV record in the payload is used; each field is parsed by the
 * positional parser configured for that column, with empty fields mapped to null.
 * Returns null for a null payload (Kafka tombstone convention).
 */
@Override
public List<?> deserialize(final String topic, final byte[] bytes) {
    if (bytes == null) {
        return null;
    }
    try {
        final String recordCsvString = new String(bytes, StandardCharsets.UTF_8);
        final List<CSVRecord> csvRecords = CSVParser.parse(recordCsvString, csvFormat)
            .getRecords();
        if (csvRecords.isEmpty()) {
            // NOTE(review): message here lacks the trailing period used below — consider unifying.
            throw new SerializationException("No fields in record");
        }
        final CSVRecord csvRecord = csvRecords.get(0);
        if (csvRecord == null || csvRecord.size() == 0) {
            throw new SerializationException("No fields in record.");
        }
        // Column count must match the schema exactly before positional parsing.
        SerdeUtils.throwOnColumnCountMismatch(parsers.size(), csvRecord.size(), false, topic);
        final List<Object> values = new ArrayList<>(parsers.size());
        final Iterator<Parser> pIt = parsers.iterator();
        for (int i = 0; i < csvRecord.size(); i++) {
            final String value = csvRecord.get(i);
            final Parser parser = pIt.next();
            final Object parsed = value == null || value.isEmpty()
                ? null
                : parser.parse(value);
            values.add(parsed);
        }
        return values;
    } catch (final Exception e) {
        // Wrap every failure (parse errors included) with the original as cause.
        throw new SerializationException("Error deserializing delimited", e);
    }
} | @Test
public void shouldThrowOnOverflowTime() {
    // Given:
    KsqlDelimitedDeserializer deserializer = createDeserializer(persistenceSchema(
        column(
            "ids",
            SqlTypes.TIME
        )
    ));
    final byte[] bytes = "3000000000".getBytes(StandardCharsets.UTF_8);
    // When:
    final Exception e = assertThrows(
        SerializationException.class,
        () -> deserializer.deserialize("", bytes)
    );
    // Then:
    assertThat(e.getCause().getMessage(),
        containsString("Time values must use number of milliseconds greater than 0 and less than 86400000."));
}
/**
 * Validates the record batch and assigns offsets, choosing the cheapest path:
 * in-place validation when both source and target are uncompressed and the magic
 * already matches, format conversion when the magic differs, and the full
 * decompress/revalidate/recompress path otherwise.
 */
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter,
                                                         MetricsRecorder metricsRecorder,
                                                         BufferSupplier bufferSupplier) {
    if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) {
        // check the magic value
        if (!records.hasMatchingMagic(toMagic))
            return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder);
        else
            // Do in-place validation, offset assignment and maybe set timestamp
            return assignOffsetsNonCompressed(offsetCounter, metricsRecorder);
    } else
        return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier);
} | @Test
public void testOffsetAssignmentAfterUpConversionV0ToV2Compressed() {
    Compression compression = Compression.gzip().build();
    MemoryRecords records = createRecords(RecordBatch.MAGIC_VALUE_V0, RecordBatch.NO_TIMESTAMP, compression);
    checkOffsets(records, 0);
    long offset = 1234567;
    LogValidator.ValidationResult validatedResults = new LogValidator(
        records,
        new TopicPartition("topic", 0),
        time,
        CompressionType.GZIP,
        compression,
        false,
        RecordBatch.MAGIC_VALUE_V2,
        TimestampType.LOG_APPEND_TIME,
        1000L,
        1000L,
        RecordBatch.NO_PARTITION_LEADER_EPOCH,
        AppendOrigin.CLIENT,
        MetadataVersion.latestTesting()
    ).validateMessagesAndAssignOffsets(
        PrimitiveRef.ofLong(offset),
        metricsRecorder,
        RequestLocal.withThreadConfinedCaching().bufferSupplier()
    );
    checkOffsets(validatedResults.validatedRecords, offset);
    verifyRecordValidationStats(
        validatedResults.recordValidationStats,
        3, // numConvertedRecords
        records,
        true // compressed
    );
}
// Returns only the plugins currently in the STARTED state.
@Override
public List<PluginWrapper> getStartedPlugins() {
    return getPlugins(PluginState.STARTED);
} | @Test
public void getStartedPlugins() {
    pluginManager.loadPlugins();
    pluginManager.startPlugins();
    assertEquals(2, pluginManager.getStartedPlugins().size());
    assertEquals(1, wrappedPluginManager.getStartedPlugins().size());
}
// Returns the configured Splunk event source (defaults to "camel" per the test below).
public String getSource() {
    return source;
} | @Test
public void testDefaultSource() {
    SplunkHECConfiguration config = new SplunkHECConfiguration();
    assertEquals("camel", config.getSource());
}
/**
 * Synchronously commits the given offsets, retrying with backoff until the timer
 * expires. Returns true on success, false on timeout; rethrows non-retriable
 * failures. Pending async commit callbacks are flushed so callback ordering is
 * preserved relative to this sync commit.
 */
public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, Timer timer) {
    invokeCompletedOffsetCommitCallbacks();
    if (offsets.isEmpty()) {
        // We guarantee that the callbacks for all commitAsync() will be invoked when
        // commitSync() completes, even if the user tries to commit empty offsets.
        return invokePendingAsyncCommits(timer);
    }
    long attempts = 0L;
    do {
        if (coordinatorUnknownAndUnreadySync(timer)) {
            return false;
        }
        RequestFuture<Void> future = sendOffsetCommitRequest(offsets);
        client.poll(future, timer);
        // We may have had in-flight offset commits when the synchronous commit began. If so, ensure that
        // the corresponding callbacks are invoked prior to returning in order to preserve the order that
        // the offset commits were applied.
        invokeCompletedOffsetCommitCallbacks();
        if (future.succeeded()) {
            if (interceptors != null)
                interceptors.onCommit(offsets);
            return true;
        }
        if (future.failed() && !future.isRetriable())
            throw future.exception();
        // Retriable failure: back off (exponentially, by attempt count) and try again.
        timer.sleep(retryBackoff.backoff(attempts++));
    } while (timer.notExpired());
    return false;
} | @Test
public void testCommitOffsetSyncCoordinatorNotAvailable() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // sync commit with coordinator disconnected (should connect, get metadata, and then submit the commit request)
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.COORDINATOR_NOT_AVAILABLE);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
    coordinator.commitOffsetsSync(singletonMap(t1p, new OffsetAndMetadata(100L)), time.timer(Long.MAX_VALUE));
}
/**
 * Parses a predefined CMap by name, resolving it via the external CMap lookup.
 * Strict parsing is disabled because predefined CMaps are trusted resources.
 * The underlying stream is closed via try-with-resources.
 */
public CMap parsePredefined(String name) throws IOException
{
    try (RandomAccessRead randomAccessRead = getExternalCMap(name))
    {
        // deactivate strict mode
        strictMode = false;
        return parse(randomAccessRead);
    }
} | @Test
void testPredefinedMap() throws IOException
{
    CMap cMap = new CMapParser().parsePredefined("Adobe-Korea1-UCS2");
    assertNotNull(cMap, "Failed to parse predefined CMap Adobe-Korea1-UCS2");
    assertEquals("Adobe-Korea1-UCS2", cMap.getName(), "wrong CMap name");
    assertEquals(0, cMap.getWMode(), "wrong WMode");
    assertFalse(cMap.hasCIDMappings());
    assertTrue(cMap.hasUnicodeMappings());
    cMap = new CMapParser().parsePredefined("Identity-V");
    assertNotNull(cMap, "Failed to parse predefined CMap Identity-V");
}
/**
 * Builds the suppressed stream for this node. SUPPRESS is only valid on tables,
 * so a non-table source is rejected with a KsqlException.
 */
@Override
public SchemaKStream<?> buildStream(final PlanBuildContext buildContext) {
    final QueryContext.Stacker contextStacker = buildContext.buildNodeContext(getId().toString());
    final SchemaKStream<?> schemaKStream = getSource().buildStream(buildContext);
    if (!(schemaKStream instanceof SchemaKTable)) {
        throw new KsqlException("Failed in suppress node. Expected to find a Table, but "
            + "found a stream instead.");
    }
    return (((SchemaKTable<?>) schemaKStream)
        .suppress(
            refinementInfo,
            valueFormat.getFormatInfo(),
            contextStacker
        ));
} | @Test
public void shouldThrowOnSuppressOnStream() {
    // Given:
    when(sourceNode.getNodeOutputType()).thenReturn(DataSourceType.KSTREAM);
    node = new SuppressNode(NODE_ID, sourceNode, refinementInfo);
    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> node.buildStream(planBuildContext)
    );
    // Then
    assertThat(e.getMessage(), containsString("Failed in suppress node. Expected to find a Table, but found a stream instead"));
}
/**
 * Rewrites route units that target a readwrite-splitting logic data source,
 * replacing each with a unit mapped to the concrete data source chosen by the
 * router. Units without a matching data source group rule are left untouched.
 * Removal/addition is deferred to avoid mutating the collection while iterating.
 */
@Override
public void decorateRouteContext(final RouteContext routeContext, final QueryContext queryContext, final ShardingSphereDatabase database,
                                 final ReadwriteSplittingRule rule, final ConfigurationProperties props, final ConnectionContext connectionContext) {
    Collection<RouteUnit> toBeRemoved = new LinkedList<>();
    Collection<RouteUnit> toBeAdded = new LinkedList<>();
    for (RouteUnit each : routeContext.getRouteUnits()) {
        String logicDataSourceName = each.getDataSourceMapper().getActualName();
        rule.findDataSourceGroupRule(logicDataSourceName).ifPresent(optional -> {
            toBeRemoved.add(each);
            String actualDataSourceName = new ReadwriteSplittingDataSourceRouter(optional, connectionContext).route(queryContext.getSqlStatementContext(), queryContext.getHintValueContext());
            toBeAdded.add(new RouteUnit(new RouteMapper(logicDataSourceName, actualDataSourceName), each.getTableMappers()));
        });
    }
    routeContext.getRouteUnits().removeAll(toBeRemoved);
    routeContext.getRouteUnits().addAll(toBeAdded);
} | @Test
void assertDecorateRouteContextToPrimaryDataSource() {
    RouteContext actual = mockRouteContext();
    QueryContext queryContext =
        new QueryContext(mock(SQLStatementContext.class), "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class));
    RuleMetaData ruleMetaData = new RuleMetaData(Collections.singleton(staticRule));
    ShardingSphereDatabase database = new ShardingSphereDatabase(DefaultDatabase.LOGIC_NAME,
        mock(DatabaseType.class), mock(ResourceMetaData.class, RETURNS_DEEP_STUBS), ruleMetaData, Collections.emptyMap());
    sqlRouter.decorateRouteContext(actual, queryContext, database, staticRule, new ConfigurationProperties(new Properties()), new ConnectionContext(Collections::emptySet));
    Iterator<String> routedDataSourceNames = actual.getActualDataSourceNames().iterator();
    assertThat(routedDataSourceNames.next(), is(NONE_READWRITE_SPLITTING_DATASOURCE_NAME));
    assertThat(routedDataSourceNames.next(), is(WRITE_DATASOURCE));
}
/**
 * Parses a SecretIdentifier from its external URN form. The URN must match the
 * expected pattern; the key is the fifth colon-delimited segment (split limit 5
 * keeps any further colons inside the key segment).
 *
 * @throws IllegalArgumentException if the URN is null or malformed
 */
public static SecretIdentifier fromExternalForm(String urn) {
    if (urn == null || !urnPattern.matcher(urn).matches()) {
        throw new IllegalArgumentException("Invalid external form " + urn);
    }
    String[] parts = colonPattern.split(urn, 5);
    return new SecretIdentifier(validateWithTransform(parts[4], "key"));
} | @Test(expected = IllegalArgumentException.class)
public void testFromExternalInvalid() {
    SecretIdentifier.fromExternalForm("urn:logstash:secret:invalid:foo");
}
// Factory for a singleton side-input view transform; the resulting view expects
// exactly one element in the input PCollection (see the empty-input test below).
public static <T> AsSingleton<T> asSingleton() {
    return new AsSingleton<>();
} | @Test
@Category(ValidatesRunner.class)
public void testEmptySingletonSideInput() throws Exception {
    final PCollectionView<Integer> view =
        pipeline
            .apply("CreateEmptyIntegers", Create.empty(VarIntCoder.of()))
            .apply(View.asSingleton());
    pipeline
        .apply("Create123", Create.of(1, 2, 3))
        .apply(
            "OutputSideInputs",
            ParDo.of(
                new DoFn<Integer, Integer>() {
                    @ProcessElement
                    public void processElement(ProcessContext c) {
                        c.output(c.sideInput(view));
                    }
                })
                .withSideInputs(view));
    // As long as we get an error, be flexible with how a runner surfaces it
    thrown.expect(Exception.class);
    pipeline.run();
}
// Abbreviates a revision hash git-style: the first 7 characters, or the input
// unchanged when it is null or already shorter than 7 characters.
@Override
public String getShortRevision(String revision) {
    if (revision == null) {
        return null;
    }
    return revision.length() < 7 ? revision : revision.substring(0, 7);
} | @Test
void shouldTruncateHashTo7CharsForAShortRevision() {
    Material git = new GitMaterial("file:///foo");
    assertThat(git.getShortRevision("dc3d7e656831d1b203d8b7a63c4de82e26604e52")).isEqualTo("dc3d7e6");
    assertThat(git.getShortRevision("24")).isEqualTo("24");
    assertThat(git.getShortRevision(null)).isNull();
}
/**
 * One-shot migration: assigns the default index set to every stream that has no
 * index set id yet. Streams that fail validation on save are recorded as failed
 * rather than aborting the migration. Completion (with per-stream results) is
 * persisted in cluster config so the migration never runs twice.
 */
@Override
public void upgrade() {
    // Only run this migration once.
    if (clusterConfigService.get(MigrationCompleted.class) != null) {
        LOG.debug("Migration already completed.");
        return;
    }
    final IndexSetConfig indexSetConfig = findDefaultIndexSet();
    final ImmutableSet.Builder<String> completedStreamIds = ImmutableSet.builder();
    final ImmutableSet.Builder<String> failedStreamIds = ImmutableSet.builder();
    // Assign the "default index set" to all existing streams. Until now, there was no way to manually create
    // index sets, so the only one that exists is the "default" one created by an earlier migration.
    for (Stream stream : streamService.loadAll()) {
        if (isNullOrEmpty(stream.getIndexSetId())) {
            LOG.info("Assigning index set <{}> ({}) to stream <{}> ({})", indexSetConfig.id(),
                indexSetConfig.title(), stream.getId(), stream.getTitle());
            stream.setIndexSetId(indexSetConfig.id());
            try {
                streamService.save(stream);
                completedStreamIds.add(stream.getId());
            } catch (ValidationException e) {
                // Record the failure and continue with the remaining streams.
                LOG.error("Unable to save stream <{}>", stream.getId(), e);
                failedStreamIds.add(stream.getId());
            }
        }
    }
    // Mark this migration as done.
    clusterConfigService.write(MigrationCompleted.create(indexSetConfig.id(), completedStreamIds.build(), failedStreamIds.build()));
} | @Test
public void upgradeWithAlreadyAssignedIndexSet() throws Exception {
    final Stream stream1 = mock(Stream.class);
    final Stream stream2 = mock(Stream.class);
    final IndexSetConfig indexSetConfig = mock(IndexSetConfig.class);
    when(indexSetService.findAll()).thenReturn(Collections.singletonList(indexSetConfig));
    when(indexSetConfig.id()).thenReturn("abc123");
    when(stream1.getId()).thenReturn("stream1");
    when(stream2.getId()).thenReturn("stream2");
    when(streamService.loadAll()).thenReturn(Lists.newArrayList(stream1, stream2));
    when(stream2.getIndexSetId()).thenReturn("abc123");
    migration.upgrade();
    verify(stream1).setIndexSetId(indexSetConfig.id());
    verify(stream2, never()).setIndexSetId(indexSetConfig.id());
    verify(streamService, times(1)).save(stream1);
    verify(streamService, never()).save(stream2);
    verify(clusterConfigService, times(1)).write(
        V20161122174500_AssignIndexSetsToStreamsMigration.MigrationCompleted.create(
            indexSetConfig.id(), Sets.newHashSet("stream1"), Collections.emptySet()));
}
/**
 * Set-contract equality: a RangeSet equals any Set containing exactly the
 * integers in [from, to).
 */
@Override
public boolean equals(Object o) {
    if (this == o) return true;
    if (!(o instanceof Set)) return false;
    if (o instanceof RangeSet) {
        RangeSet other = (RangeSet) o;
        // Compare contents rather than raw bounds: two empty ranges with different
        // bounds (e.g. [5,5) and [9,9)) hold the same (no) elements and must be
        // equal under the java.util.Set contract, which the previous plain
        // from/to comparison violated.
        if (this.size() != other.size()) return false;
        return this.size() == 0 || this.from == other.from;
    }
    Set<?> otherSet = (Set<?>) o;
    if (otherSet.size() != this.size()) return false;
    for (int i = from; i < to; i++) {
        if (!otherSet.contains(i)) return false;
    }
    return true;
} | @Test
void testEquals() {
    RangeSet rangeSet1 = new RangeSet(5, 10);
    RangeSet rangeSet2 = new RangeSet(5, 10);
    RangeSet rangeSet3 = new RangeSet(6, 10);
    Set<Integer> set = mkSet(5, 6, 7, 8, 9);
    HashSet<Integer> hashSet = new HashSet<>(mkSet(6, 7, 8, 9));
    assertEquals(rangeSet1, rangeSet2);
    assertNotEquals(rangeSet1, rangeSet3);
    assertEquals(rangeSet1, set);
    assertEquals(rangeSet3, hashSet);
    assertNotEquals(rangeSet1, new Object());
}
/**
 * Spring bean wiring for the Resilience4j TimerRegistry: creates the registry
 * from configuration properties, attaches event consumers, and eagerly
 * initializes configured timer instances. MeterRegistry is optional.
 */
@Bean
public TimerRegistry timerRegistry(
    TimerConfigurationProperties timerConfigurationProperties,
    EventConsumerRegistry<TimerEvent> timerEventConsumerRegistry,
    RegistryEventConsumer<Timer> timerRegistryEventConsumer,
    @Qualifier("compositeTimerCustomizer") CompositeCustomizer<TimerConfigCustomizer> compositeTimerCustomizer,
    @Autowired(required = false) MeterRegistry registry
) {
    TimerRegistry timerRegistry = createTimerRegistry(timerConfigurationProperties, timerRegistryEventConsumer, compositeTimerCustomizer, registry);
    registerEventConsumer(timerRegistry, timerEventConsumerRegistry, timerConfigurationProperties);
    initTimerRegistry(timerRegistry, timerConfigurationProperties, compositeTimerCustomizer);
    return timerRegistry;
} | @Test
public void shouldNotConfigureInstanceUsingUnknownSharedConfig() {
    InstanceProperties instanceProperties = new InstanceProperties()
        .setBaseConfig("unknown");
    TimerConfigurationProperties configurationProperties = new TimerConfigurationProperties();
    configurationProperties.getInstances().put("backend", instanceProperties);
    TimerConfiguration configuration = new TimerConfiguration();
    assertThatThrownBy(() -> configuration.timerRegistry(
        configurationProperties, new DefaultEventConsumerRegistry<>(), new CompositeRegistryEventConsumer<>(emptyList()), new CompositeCustomizer<>(emptyList()), new SimpleMeterRegistry()
    ))
        .isInstanceOf(ConfigurationNotFoundException.class)
        .hasMessage("Configuration with name 'unknown' does not exist");
}
/**
 * Populates the given method declaration with a generated body that declares one
 * variable per mining field and returns them as Arrays.asList(...). Variable
 * names are sanitized and lower-cased to be valid Java identifiers.
 */
static void commonPopulateGetCreatedKiePMMLMiningFieldsMethod(final MethodDeclaration methodDeclaration,
                                                              final List<org.dmg.pmml.MiningField> miningFields,
                                                              final List<org.dmg.pmml.Field<?>> fields) {
    BlockStmt body = new BlockStmt();
    NodeList<Expression> arguments = new NodeList<>();
    for (org.dmg.pmml.MiningField miningField : miningFields) {
        String miningFieldVariableName = getSanitizedVariableName(miningField.getName()).toLowerCase();
        BlockStmt toAdd = getMiningFieldVariableDeclaration(miningFieldVariableName, miningField, fields);
        toAdd.getStatements().forEach(body::addStatement);
        arguments.add(new NameExpr(miningFieldVariableName));
    }
    // return Arrays.asList(var1, var2, ...);
    MethodCallExpr methodCallExpr = new MethodCallExpr();
    methodCallExpr.setScope(new NameExpr(Arrays.class.getSimpleName()));
    methodCallExpr.setName("asList");
    methodCallExpr.setArguments(arguments);
    ReturnStmt returnStmt = new ReturnStmt();
    returnStmt.setExpression(methodCallExpr);
    body.addStatement(returnStmt);
    methodDeclaration.setBody(body);
} | @Test
void commonPopulateGetCreatedKiePMMLMiningFieldsMethod() throws IOException {
    final CompilationDTO compilationDTO = CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME,
        pmmlModel,
        model,
        new PMMLCompilationContextMock(), SOURCE_BASE);
    final MethodDeclaration methodDeclaration = new MethodDeclaration();
    org.kie.pmml.compiler.commons.codegenfactories.KiePMMLModelFactoryUtils.commonPopulateGetCreatedKiePMMLMiningFieldsMethod(methodDeclaration,
        compilationDTO.getMiningSchema().getMiningFields(), compilationDTO.getFields());
    String text = getFileContent(TEST_06_SOURCE);
    MethodDeclaration expected = JavaParserUtils.parseMethod(text);
    assertThat(JavaParserUtils.equalsNode(expected, methodDeclaration)).isTrue();
}
/**
 * Builds a ListTransactions request for one broker, carrying the configured
 * producer-id, state, and duration filters from the options.
 */
@Override
public ListTransactionsRequest.Builder buildBatchedRequest(
    int brokerId,
    Set<AllBrokersStrategy.BrokerKey> keys
) {
    ListTransactionsRequestData request = new ListTransactionsRequestData();
    request.setProducerIdFilters(new ArrayList<>(options.filteredProducerIds()));
    request.setStateFilters(options.filteredStates().stream()
        .map(TransactionState::toString)
        .collect(Collectors.toList()));
    request.setDurationFilter(options.filteredDuration());
    return new ListTransactionsRequest.Builder(request);
} | @Test
public void testBuildRequestWithFilteredProducerId() {
    int brokerId = 1;
    BrokerKey brokerKey = new BrokerKey(OptionalInt.of(brokerId));
    long filteredProducerId = 23423L;
    ListTransactionsOptions options = new ListTransactionsOptions()
        .filterProducerIds(singleton(filteredProducerId));
    ListTransactionsHandler handler = new ListTransactionsHandler(options, logContext);
    ListTransactionsRequest request = handler.buildBatchedRequest(brokerId, singleton(brokerKey)).build();
    assertEquals(Collections.singletonList(filteredProducerId), request.data().producerIdFilters());
    assertEquals(Collections.emptyList(), request.data().stateFilters());
}
/**
 * Resolves the HBase configuration for the timeline service. When an explicit
 * HBase config file path is set in the YARN conf, it is loaded from any Hadoop
 * FileSystem and merged onto a clone of the input conf; otherwise the classpath
 * defaults are used. The input conf is never mutated.
 *
 * @throws NullPointerException if conf is null
 */
public static Configuration getTimelineServiceHBaseConf(Configuration conf)
    throws IOException {
    if (conf == null) {
        throw new NullPointerException();
    }
    Configuration hbaseConf;
    String timelineServiceHBaseConfFilePath =
        conf.get(YarnConfiguration.TIMELINE_SERVICE_HBASE_CONFIGURATION_FILE);
    if (timelineServiceHBaseConfFilePath != null
        && timelineServiceHBaseConfFilePath.length() > 0) {
        LOG.info("Using hbase configuration at " +
            timelineServiceHBaseConfFilePath);
        // create a clone so that we don't mess with out input one
        hbaseConf = new Configuration(conf);
        Configuration plainHBaseConf = new Configuration(false);
        Path hbaseConfigPath = new Path(timelineServiceHBaseConfFilePath);
        try (FileSystem fs =
                 FileSystem.newInstance(hbaseConfigPath.toUri(), conf);
             FSDataInputStream in = fs.open(hbaseConfigPath)) {
            plainHBaseConf.addResource(in);
            HBaseConfiguration.merge(hbaseConf, plainHBaseConf);
        }
    } else {
        // default to what is on the classpath
        hbaseConf = HBaseConfiguration.create(conf);
    }
    return hbaseConf;
} | @Test
void testWithHbaseConfAtLocalFileSystem() throws IOException {
    // Verifying With Hbase Conf from Local FileSystem
    Configuration conf = new Configuration();
    conf.set(YarnConfiguration.TIMELINE_SERVICE_HBASE_CONFIGURATION_FILE,
        hbaseConfigPath);
    Configuration hbaseConfFromLocal =
        HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(conf);
    assertEquals("test", hbaseConfFromLocal.get("input"),
        "Failed to read hbase config from Local FileSystem");
}
/**
 * Looks up service instances via Eureka. Returns an empty list (and traces a
 * "NotFound" event) when the application is unknown or has no instances;
 * otherwise maps each InstanceInfo to a ServiceDTO.
 */
@Override
public List<ServiceDTO> getServiceInstances(String serviceId) {
    Application application = eurekaClient.getApplication(serviceId);
    if (application == null || CollectionUtils.isEmpty(application.getInstances())) {
        Tracer.logEvent("Apollo.Discovery.NotFound", serviceId);
        return Collections.emptyList();
    }
    return application.getInstances().stream().map(instanceInfoToServiceDTOFunc)
        .collect(Collectors.toList());
} | @Test
public void testGetServiceInstancesWithEmptyInstances() {
    when(eurekaClient.getApplication(someServiceId)).thenReturn(someApplication);
    when(someApplication.getInstances()).thenReturn(new ArrayList<>());
    assertTrue(defaultDiscoveryService.getServiceInstances(someServiceId).isEmpty());
}
/**
 * Returns a retriever that resolves Google Application Default Credentials for
 * Google registries (gcr.io / Artifact Registry docker.pkg.dev), exchanging them
 * for a short-lived OAuth2 access token usable as a docker credential
 * ("oauth2accesstoken" username). Yields empty for non-Google registries or when
 * ADC is unavailable.
 */
public CredentialRetriever googleApplicationDefaultCredentials() {
    return () -> {
        try {
            // NOTE(review): endsWith("gcr.io") also matches hosts like "foogcr.io";
            // presumably ".gcr.io"/exact-match is intended — confirm against callers.
            if (imageReference.getRegistry().endsWith("gcr.io")
                || imageReference.getRegistry().endsWith("docker.pkg.dev")) {
                GoogleCredentials googleCredentials = googleCredentialsProvider.get();
                logger.accept(LogEvent.info("Google ADC found"));
                if (googleCredentials.createScopedRequired()) { // not scoped if service account
                    // The short-lived OAuth2 access token to be generated from the service account with
                    // refreshIfExpired() below will have one-hour expiry (as of Aug 2019). Instead of using
                    // an access token, it is technically possible to use the service account private key to
                    // auth with GCR, but it does not worth writing complex code to achieve that.
                    logger.accept(LogEvent.info("ADC is a service account. Setting GCS read-write scope"));
                    List<String> scope = Collections.singletonList(OAUTH_SCOPE_STORAGE_READ_WRITE);
                    googleCredentials = googleCredentials.createScoped(scope);
                }
                googleCredentials.refreshIfExpired();
                logGotCredentialsFrom("Google Application Default Credentials");
                AccessToken accessToken = googleCredentials.getAccessToken();
                // https://cloud.google.com/container-registry/docs/advanced-authentication#access_token
                return Optional.of(Credential.from("oauth2accesstoken", accessToken.getTokenValue()));
            }
        } catch (IOException ex) { // Includes the case where ADC is simply not available.
            logger.accept(
                LogEvent.info("ADC not present or error fetching access token: " + ex.getMessage()));
        }
        return Optional.empty();
    };
} | @Test
public void testGoogleApplicationDefaultCredentials_notGoogleContainerRegistry()
    throws CredentialRetrievalException {
    CredentialRetrieverFactory credentialRetrieverFactory =
        createCredentialRetrieverFactory("non.gcr.registry", "repository");
    Assert.assertFalse(
        credentialRetrieverFactory.googleApplicationDefaultCredentials().retrieve().isPresent());
    Mockito.verifyNoInteractions(mockLogger);
}
/**
 * Borrows a session from either the source or destination pool, wiring the pool's
 * cancel check to this worker's cancellation state. Returns null for an
 * unrecognized connection type (switch has no default).
 */
@Override
protected Session<?> borrow(final Connection type) throws BackgroundException {
    switch(type) {
        case source:
            return source.borrow(new BackgroundActionState() {
                @Override
                public boolean isCanceled() {
                    return ConcurrentTransferWorker.this.isCanceled();
                }
                @Override
                public boolean isRunning() {
                    return true;
                }
            });
        case destination:
            return destination.borrow(new BackgroundActionState() {
                @Override
                public boolean isCanceled() {
                    return ConcurrentTransferWorker.this.isCanceled();
                }
                @Override
                public boolean isRunning() {
                    return true;
                }
            });
    }
    return null;
} | @Test
public void testBorrow() throws Exception {
    final Host host = new Host(new TestProtocol(), "test.cyberduck.ch");
    final Transfer t = new UploadTransfer(host,
        new Path("/t", EnumSet.of(Path.Type.directory)),
        new NullLocal("l"));
    final LoginConnectionService connection = new TestLoginConnectionService();
    final DefaultSessionPool pool = new DefaultSessionPool(connection, new DisabledX509TrustManager(), new DefaultX509KeyManager(),
        new DefaultVaultRegistry(new DisabledPasswordCallback()),
        new DisabledTranscriptListener(), host);
    final ConcurrentTransferWorker worker = new ConcurrentTransferWorker(
        pool, SessionPool.DISCONNECTED, t, new TransferOptions(), new TransferSpeedometer(t),
        new DisabledTransferPrompt(), new DisabledTransferErrorCallback(),
        new DisabledLoginCallback(), new DisabledProgressListener(), new DisabledStreamListener(), new DisabledNotificationService()
    );
    assertNotSame(worker.borrow(ConcurrentTransferWorker.Connection.source), worker.borrow(ConcurrentTransferWorker.Connection.source));
    worker.cleanup(true);
}
// Exposes the backing metric registry's metrics map directly.
@Override
public Map<String, Metric> getMetrics() {
    return metricRegistry.getMetrics();
} | @Test
public void shouldReturnTotalNumberOfRequestsAs1ForSuccessAsync() {
    AsyncHelloWorldService helloWorldService = mock(AsyncHelloWorldService.class);
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    given(helloWorldService.returnHelloWorld())
        .willReturn(completedFuture("Success"));
    Retry retry = Retry.of("metrics", RetryConfig.<String>custom()
        .retryExceptions(Exception.class)
        .maxAttempts(5)
        .build());
    Supplier<CompletionStage<String>> supplier = Retry.decorateCompletionStage(retry, scheduler, helloWorldService::returnHelloWorld);
    String result = awaitResult(supplier.get(), 5);
    assertThat(retry.getMetrics().getNumberOfTotalCalls()).isEqualTo(1);
    assertThat(result).isEqualTo("Success");
}
// Base-class default: boolean stringification is unsupported; concrete
// stringifiers override the overloads they actually handle.
public String stringify(boolean value) {
    throw new UnsupportedOperationException(
        "stringify(boolean) was called on a non-boolean stringifier: " + toString());
} | @Test
public void testUnsignedStringifier() {
    PrimitiveStringifier stringifier = UNSIGNED_STRINGIFIER;
    assertEquals("0", stringifier.stringify(0));
    assertEquals("2147483647", stringifier.stringify(2147483647));
    assertEquals("4294967295", stringifier.stringify(0xFFFFFFFF));
    assertEquals("0", stringifier.stringify(0l));
    assertEquals("9223372036854775807", stringifier.stringify(9223372036854775807l));
    assertEquals("18446744073709551615", stringifier.stringify(0xFFFFFFFFFFFFFFFFl));
    checkThrowingUnsupportedException(stringifier, Integer.TYPE, Long.TYPE);
}
// Periodic task body: refresh Elasticsearch health status, then file-system metrics.
@Override
public void run() {
    updateElasticSearchHealthStatus();
    updateFileSystemMetrics();
} | @Test
public void elasticsearch_free_disk_space_is_updated() throws IOException {
    URL esNodeResponseUrl = getClass().getResource("es-node-response.json");
    String jsonPayload = StringUtils.trim(IOUtils.toString(esNodeResponseUrl, StandardCharsets.UTF_8));
    JsonObject jsonObject = new Gson().fromJson(jsonPayload, JsonObject.class);
    NodeStatsResponse nodeStats = NodeStatsResponse.toNodeStatsResponse(jsonObject);
    when(esClient.nodesStats()).thenReturn(nodeStats);
    underTest.run();
    String nodeName = nodeStats.getNodeStats().get(0).getName();
    verify(serverMonitoringMetrics, times(1)).setElasticSearchDiskSpaceFreeBytes(nodeName, 136144027648L);
    verify(serverMonitoringMetrics, times(1)).setElasticSearchDiskSpaceTotalBytes(nodeName, 250685575168L);
    // elasticsearch health status is not mocked in this test, so this part raise an exception
    assertThat(logTester.logs()).hasSize(1);
    assertThat(logTester.logs(Level.ERROR)).containsOnly("Failed to query ES status");
}
// Computation step: walks the component tree with a path-aware crawler, executing
// the duplication formula on every component and aggregating measures upward.
@Override
public void execute(ComputationStep.Context context) {
    new PathAwareCrawler<>(
        FormulaExecutorComponentVisitor.newBuilder(metricRepository, measureRepository)
            .buildFor(List.of(duplicationFormula)))
                .visit(treeRootHolder.getRoot());
} | @Test
public void compute_and_aggregate_duplicated_blocks_from_single_duplication() {
    addDuplicatedBlock(FILE_1_REF, 11);
    addDuplicatedBlock(FILE_2_REF, 2);
    addDuplicatedBlock(FILE_4_REF, 7);
    setNewLines(FILE_1, FILE_2, FILE_3, FILE_4);
    underTest.execute(new TestComputationStepContext());
    assertRawMeasureValue(FILE_1_REF, NEW_BLOCKS_DUPLICATED_KEY, 10);
    assertRawMeasureValue(FILE_2_REF, NEW_BLOCKS_DUPLICATED_KEY, 2);
    assertRawMeasureValue(FILE_3_REF, NEW_BLOCKS_DUPLICATED_KEY, 0);
    assertRawMeasureValue(FILE_4_REF, NEW_BLOCKS_DUPLICATED_KEY, 6);
    assertRawMeasureValue(DIRECTORY_REF, NEW_BLOCKS_DUPLICATED_KEY, 12);
    assertRawMeasureValue(ROOT_REF, NEW_BLOCKS_DUPLICATED_KEY, 18);
}
/**
 * Converts a DescribeCluster response into a Cluster view. Throws the mapped API
 * exception on an error response and MismatchedEndpointTypeException when the
 * response did not come from a CONTROLLER endpoint. The controller node is the
 * broker whose id matches controllerId (null when absent from the broker list).
 */
static Cluster parseDescribeClusterResponse(DescribeClusterResponseData response) {
    ApiError apiError = new ApiError(response.errorCode(), response.errorMessage());
    if (apiError.isFailure()) {
        throw apiError.exception();
    }
    if (response.endpointType() != EndpointType.CONTROLLER.id()) {
        throw new MismatchedEndpointTypeException("Expected response from CONTROLLER " +
            "endpoint, but got response from endpoint type " + (int) response.endpointType());
    }
    List<Node> nodes = new ArrayList<>();
    Node controllerNode = null;
    for (DescribeClusterResponseData.DescribeClusterBroker node : response.brokers()) {
        Node newNode = new Node(node.brokerId(), node.host(), node.port(), node.rack());
        nodes.add(newNode);
        if (node.brokerId() == response.controllerId()) {
            controllerNode = newNode;
        }
    }
    return new Cluster(response.clusterId(),
        nodes,
        Collections.emptyList(),
        Collections.emptySet(),
        Collections.emptySet(),
        controllerNode);
} | @Test
public void testParseDescribeClusterResponseWithUnexpectedEndpointType() {
    assertThrows(MismatchedEndpointTypeException.class,
        () -> KafkaAdminClient.parseDescribeClusterResponse(new DescribeClusterResponseData().
            setEndpointType(EndpointType.BROKER.id())));
}
/**
 * Serializes the full data set as JSON under a single version-tag root object:
 * one array per entity type (clients, grants, whitelisted/blacklisted sites,
 * authentication holders, access/refresh tokens, system scopes), followed by
 * data from the first registered extension supporting this version. The caller
 * owns the surrounding beginObject/endObject of the writer's root.
 */
@Override
public void exportData(JsonWriter writer) throws IOException {
    // version tag at the root
    writer.name(THIS_VERSION);
    writer.beginObject();
    // clients list
    writer.name(CLIENTS);
    writer.beginArray();
    writeClients(writer);
    writer.endArray();
    writer.name(GRANTS);
    writer.beginArray();
    writeGrants(writer);
    writer.endArray();
    writer.name(WHITELISTEDSITES);
    writer.beginArray();
    writeWhitelistedSites(writer);
    writer.endArray();
    writer.name(BLACKLISTEDSITES);
    writer.beginArray();
    writeBlacklistedSites(writer);
    writer.endArray();
    writer.name(AUTHENTICATIONHOLDERS);
    writer.beginArray();
    writeAuthenticationHolders(writer);
    writer.endArray();
    writer.name(ACCESSTOKENS);
    writer.beginArray();
    writeAccessTokens(writer);
    writer.endArray();
    writer.name(REFRESHTOKENS);
    writer.beginArray();
    writeRefreshTokens(writer);
    writer.endArray();
    writer.name(SYSTEMSCOPES);
    writer.beginArray();
    writeSystemScopes(writer);
    writer.endArray();
    // Only the first extension supporting this version contributes data.
    for (MITREidDataServiceExtension extension : extensions) {
        if (extension.supportsVersion(THIS_VERSION)) {
            extension.exportExtensionData(writer);
            break;
        }
    }
    writer.endObject(); // end mitreid-connect-1.3
} | @Test
// Verifies that exportData() emits every top-level array of the 1.3 export format
// and serializes each blacklisted site (id + uri) exactly once, while every other
// repository is stubbed to be empty.
public void testExportBlacklistedSites() throws IOException {
// fixture: three blacklisted sites with distinct ids/uris
BlacklistedSite site1 = new BlacklistedSite();
site1.setId(1L);
site1.setUri("http://foo.com");
BlacklistedSite site2 = new BlacklistedSite();
site2.setId(2L);
site2.setUri("http://bar.com");
BlacklistedSite site3 = new BlacklistedSite();
site3.setId(3L);
site3.setUri("http://baz.com");
Set<BlacklistedSite> allBlacklistedSites = ImmutableSet.of(site1, site2, site3);
// all other repositories return empty collections so only blacklisted sites appear
Mockito.when(clientRepository.getAllClients()).thenReturn(new HashSet<ClientDetailsEntity>());
Mockito.when(approvedSiteRepository.getAll()).thenReturn(new HashSet<ApprovedSite>());
Mockito.when(wlSiteRepository.getAll()).thenReturn(new HashSet<WhitelistedSite>());
Mockito.when(blSiteRepository.getAll()).thenReturn(allBlacklistedSites);
Mockito.when(authHolderRepository.getAll()).thenReturn(new ArrayList<AuthenticationHolderEntity>());
Mockito.when(tokenRepository.getAllAccessTokens()).thenReturn(new HashSet<OAuth2AccessTokenEntity>());
Mockito.when(tokenRepository.getAllRefreshTokens()).thenReturn(new HashSet<OAuth2RefreshTokenEntity>());
Mockito.when(sysScopeRepository.getAll()).thenReturn(new HashSet<SystemScope>());
// do the data export
StringWriter stringWriter = new StringWriter();
JsonWriter writer = new JsonWriter(stringWriter);
writer.beginObject();
dataService.exportData(writer);
writer.endObject();
writer.close();
// parse the output as a JSON object for testing
JsonElement elem = new JsonParser().parse(stringWriter.toString());
JsonObject root = elem.getAsJsonObject();
// make sure the root is there
assertThat(root.has(MITREidDataService.MITREID_CONNECT_1_3), is(true));
JsonObject config = root.get(MITREidDataService.MITREID_CONNECT_1_3).getAsJsonObject();
// make sure all the root elements are there
assertThat(config.has(MITREidDataService.CLIENTS), is(true));
assertThat(config.has(MITREidDataService.GRANTS), is(true));
assertThat(config.has(MITREidDataService.WHITELISTEDSITES), is(true));
assertThat(config.has(MITREidDataService.BLACKLISTEDSITES), is(true));
assertThat(config.has(MITREidDataService.REFRESHTOKENS), is(true));
assertThat(config.has(MITREidDataService.ACCESSTOKENS), is(true));
assertThat(config.has(MITREidDataService.SYSTEMSCOPES), is(true));
assertThat(config.has(MITREidDataService.AUTHENTICATIONHOLDERS), is(true));
// make sure the root elements are all arrays
assertThat(config.get(MITREidDataService.CLIENTS).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.GRANTS).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.WHITELISTEDSITES).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.BLACKLISTEDSITES).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.REFRESHTOKENS).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.ACCESSTOKENS).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.SYSTEMSCOPES).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.AUTHENTICATIONHOLDERS).isJsonArray(), is(true));
// check our scope list (this test)
JsonArray sites = config.get(MITREidDataService.BLACKLISTEDSITES).getAsJsonArray();
assertThat(sites.size(), is(3));
// check for both of our sites in turn
Set<BlacklistedSite> checked = new HashSet<>();
for (JsonElement e : sites) {
assertThat(e.isJsonObject(), is(true));
JsonObject site = e.getAsJsonObject();
BlacklistedSite compare = null;
if (site.get("id").getAsLong() == site1.getId().longValue()) {
compare = site1;
} else if (site.get("id").getAsLong() == site2.getId().longValue()) {
compare = site2;
} else if (site.get("id").getAsLong() == site3.getId().longValue()) {
compare = site3;
}
if (compare == null) {
fail("Could not find matching blacklisted site id: " + site.get("id").getAsString());
} else {
assertThat(site.get("uri").getAsString(), equalTo(compare.getUri()));
checked.add(compare);
}
}
// make sure all of our clients were found
assertThat(checked.containsAll(allBlacklistedSites), is(true));
} |
/**
 * Matches this magic pattern against the stream content anywhere in the
 * window [offsetRangeBegin, offsetRangeEnd]. Returns {@code type} on the
 * first match, otherwise MediaType.OCTET_STREAM. The stream position is
 * preserved via mark()/reset().
 *
 * NOTE(review): assumes the input stream supports mark/reset — confirm
 * callers always pass a buffered stream.
 */
public MediaType detect(InputStream input, Metadata metadata) throws IOException {
if (input == null) {
return MediaType.OCTET_STREAM;
}
// mark enough bytes to cover the whole comparison window plus the pattern length
input.mark(offsetRangeEnd + length);
try {
int offset = 0;
// Skip bytes at the beginning, using skip() or read()
while (offset < offsetRangeBegin) {
long n = input.skip(offsetRangeBegin - offset);
if (n > 0) {
offset += n;
} else if (input.read() != -1) {
// skip() may return 0 without EOF; fall back to single-byte reads
offset += 1;
} else {
// stream ended before the window starts: cannot match
return MediaType.OCTET_STREAM;
}
}
// Fill in the comparison window
byte[] buffer = new byte[length + (offsetRangeEnd - offsetRangeBegin)];
int n = input.read(buffer);
if (n > 0) {
offset += n;
}
while (n != -1 && offset < offsetRangeEnd + length) {
int bufferOffset = offset - offsetRangeBegin;
n = input.read(buffer, bufferOffset, buffer.length - bufferOffset);
// increment offset - in case not all read (see testDetectStreamReadProblems)
if (n > 0) {
offset += n;
}
}
if (this.isRegex) {
int flags = 0;
if (this.isStringIgnoreCase) {
flags = Pattern.CASE_INSENSITIVE;
}
// pattern text is stored as UTF-8; content is decoded with ISO-8859-1,
// which maps bytes 1:1 to chars and so is byte-transparent
Pattern p = Pattern.compile(new String(this.pattern, UTF_8), flags);
ByteBuffer bb = ByteBuffer.wrap(buffer);
CharBuffer result = ISO_8859_1.decode(bb);
Matcher m = p.matcher(result);
boolean match = false;
// Loop until we've covered the entire offset range
for (int i = 0; i <= offsetRangeEnd - offsetRangeBegin; i++) {
m.region(i, length + i);
match = m.lookingAt(); // match regex from start of region
if (match) {
return type;
}
}
} else {
// too few bytes were available to compare the full pattern anywhere
if (offset < offsetRangeBegin + length) {
return MediaType.OCTET_STREAM;
}
// Loop until we've covered the entire offset range
for (int i = 0; i <= offsetRangeEnd - offsetRangeBegin; i++) {
boolean match = true;
int masked;
for (int j = 0; match && j < length; j++) {
// apply the per-byte mask before comparing against the pattern
masked = (buffer[i + j] & mask[j]);
if (this.isStringIgnoreCase) {
masked = Character.toLowerCase(masked);
}
match = (masked == pattern[j]);
}
if (match) {
return type;
}
}
}
return MediaType.OCTET_STREAM;
} finally {
// always restore the caller's stream position
input.reset();
}
} | @Test
// Each of these fixtures must be detected as bzip2 by the default Tika detector.
public void testBZ2Detection() throws Exception {
Detector detector = new TikaConfig().getDetector();
for (String bz2 : new String[]{"bzip2-8-file.txt.bz2",
"empty-file.txt.bz2", "lbzip2-8-file.txt.bz2",
"small-file.txt.bz2", "test-file-1.csv.bz2",
"test-file-2.csv.bz2"}) {
assertEquals("application/x-bzip2", detect(detector, bz2));
}
} |
@Override
public Boolean isUsedInFetchArtifact(PipelineConfig pipelineConfig) {
    // True when any fetch task's direct parent in the ancestor path is this pipeline.
    return pipelineConfig.getFetchTasks().stream()
            .anyMatch(task -> pipelineName.equals(task.getDirectParentInAncestorPath()));
} | @Test
// Neither fetch task names "pipeline-foo" as its direct ancestor-path parent,
// so the material must not be reported as used in fetch artifacts.
void shouldDetectDependencyMaterialNotUsedInFetchArtifact() {
DependencyMaterial material = new DependencyMaterial(new CaseInsensitiveString("pipeline-foo"), new CaseInsensitiveString("stage-bar"));
PipelineConfig pipelineConfig = mock(PipelineConfig.class);
ArrayList<FetchTask> fetchTasks = new ArrayList<>();
fetchTasks.add(new FetchTask(new CaseInsensitiveString("something"), new CaseInsensitiveString("new"), "src", "dest"));
fetchTasks.add(new FetchTask(new CaseInsensitiveString("another"), new CaseInsensitiveString("boo"), new CaseInsensitiveString("foo"), "src", "dest"));
when(pipelineConfig.getFetchTasks()).thenReturn(fetchTasks);
assertThat(material.isUsedInFetchArtifact(pipelineConfig)).isFalse();
} |
/**
 * Loads and initializes the class with the given fully-qualified name using
 * the current thread's context class loader.
 *
 * @param className fully-qualified class name, e.g. "java.lang.String"
 * @return the resolved Class object
 * @throws ClassNotFoundException if the class cannot be found by that loader
 */
public static Class<?> getClassByName(String className) throws ClassNotFoundException {
return Class.forName(className, true, Thread.currentThread().getContextClassLoader());
} | @Test
// A JDK class name must resolve to the identical Class object.
public void testGetClassByName() throws ClassNotFoundException {
Assertions.assertEquals(String.class,
ReflectionUtil.getClassByName("java.lang.String"));
} |
// Convenience overload: delegates to the materialized variant using the
// group's key/value serdes for the state store.
@Override
public KTable<K, V> reduce(final Reducer<V> reducer) {
return reduce(reducer, Materialized.with(keySerde, valueSerde));
} | @Test
// An invalid store name must be rejected at topology-building time.
public void shouldNotHaveInvalidStoreNameOnReduce() {
assertThrows(TopologyException.class, () -> groupedStream.reduce(MockReducer.STRING_ADDER, Materialized.as(INVALID_STORE_NAME)));
} |
/**
 * Builds a filter top-aggregation named {@code topAggregationName}, seeded with
 * the filter computed for {@code topAggregation} (or an empty bool query),
 * optionally extended by {@code extraFilters}, and populated with
 * {@code subAggregations}. Fails fast if no sub-aggregation was added, since an
 * empty top-aggregation is always a programming error.
 */
public FilterAggregationBuilder buildTopAggregation(String topAggregationName, TopAggregationDefinition<?> topAggregation,
Consumer<BoolQueryBuilder> extraFilters, Consumer<FilterAggregationBuilder> subAggregations) {
BoolQueryBuilder filter = filterComputer.getTopAggregationFilter(topAggregation)
.orElseGet(QueryBuilders::boolQuery);
// optionally add extra filter(s)
extraFilters.accept(filter);
FilterAggregationBuilder res = AggregationBuilders.filter(topAggregationName, filter);
subAggregations.accept(res);
checkState(
!res.getSubAggregations().isEmpty(),
"no sub-aggregation has been added to top-aggregation %s", topAggregationName);
return res;
} | @Test
// The aggregation must carry the computer-provided filter, with the extra
// filter appended as its only must clause.
public void buildTopAggregation_adds_filter_from_FiltersComputer_for_TopAggregation_and_extra_one() {
String topAggregationName = randomAlphabetic(10);
SimpleFieldTopAggregationDefinition topAggregation = new SimpleFieldTopAggregationDefinition("bar", false);
SimpleFieldTopAggregationDefinition otherTopAggregation = new SimpleFieldTopAggregationDefinition("acme", false);
BoolQueryBuilder computerFilter = boolQuery();
BoolQueryBuilder otherFilter = boolQuery();
BoolQueryBuilder extraFilter = boolQuery();
when(filtersComputer.getTopAggregationFilter(topAggregation)).thenReturn(Optional.of(computerFilter));
when(filtersComputer.getTopAggregationFilter(otherTopAggregation)).thenReturn(Optional.of(otherFilter));
MinAggregationBuilder subAggregation = AggregationBuilders.min("donut");
FilterAggregationBuilder aggregationBuilder = underTest.buildTopAggregation(topAggregationName, topAggregation,
t -> t.must(extraFilter), t -> t.subAggregation(subAggregation));
assertThat(aggregationBuilder.getName()).isEqualTo(topAggregationName);
assertThat(aggregationBuilder.getFilter()).isEqualTo(computerFilter);
assertThat(((BoolQueryBuilder) aggregationBuilder.getFilter()).must()).containsExactly(extraFilter);
} |
// Renders all routing-header fields; byte fields are printed as signed
// decimal via Byte.toString.
@Override
public String toString() {
return toStringHelper(getClass())
.add("nextHeader", Byte.toString(nextHeader))
.add("headerExtLength", Byte.toString(headerExtLength))
.add("routingType", Byte.toString(routingType))
.add("segmentsLeft", Byte.toString(segmentsLeft))
.add("routingData", Arrays.toString(routingData))
.toString();
} | @Test
// toString() of a deserialized Routing header must include every field value.
public void testToStringRouting() throws Exception {
Routing routing = deserializer.deserialize(bytePacket, 0, bytePacket.length);
String str = routing.toString();
assertTrue(StringUtils.contains(str, "nextHeader=" + (byte) 0x11));
assertTrue(StringUtils.contains(str, "headerExtLength=" + (byte) 0x02));
assertTrue(StringUtils.contains(str, "routingType=" + (byte) 0x00));
assertTrue(StringUtils.contains(str, "segmentsLeft=" + (byte) 0x03));
assertTrue(StringUtils.contains(str, "routingData=" + Arrays.toString(routingData)));
} |
/**
 * Decodes a hex string into a COSString. An odd number of digits is tolerated:
 * the final digit is treated as if followed by '0'. Malformed digit pairs
 * either raise an IOException or, when FORCE_PARSING is set, are replaced
 * with '?' after logging a warning.
 */
public static COSString parseHex(String hex) throws IOException
{
    String trimmed = hex.trim();
    // if odd number then the last hex digit is assumed to be 0
    String padded = (trimmed.length() % 2 == 0) ? trimmed : trimmed + "0";
    ByteArrayOutputStream decoded = new ByteArrayOutputStream();
    for (int pos = 0; pos < padded.length(); pos += 2)
    {
        try
        {
            decoded.write(Integer.parseInt(padded.substring(pos, pos + 2), 16));
        }
        catch (NumberFormatException e)
        {
            if (FORCE_PARSING)
            {
                LOG.warn("Encountered a malformed hex string");
                decoded.write('?'); // todo: what does Acrobat do? Any example PDFs?
            }
            else
            {
                throw new IOException("Invalid hex string: " + hex, e);
            }
        }
    }
    return new COSString(decoded.toByteArray());
} | @Test
// Round-trips plain and escape-character strings through parseHex, and checks
// that trailing non-hex garbage raises an IOException.
void testFromHex()
{
String expected = "Quick and simple test";
String hexForm = createHex(expected);
try
{
COSString test1 = COSString.parseHex(hexForm);
writePDFTests("(" + expected + ")", test1);
COSString test2 = COSString.parseHex(createHex(ESC_CHAR_STRING));
writePDFTests("(" + ESC_CHAR_STRING_PDF_FORMAT + ")", test2);
}
catch (IOException e)
{
fail("IOException thrown: " + e.getMessage());
}
assertThrows(IOException.class, () -> COSString.parseHex(hexForm + "xx"),
"Should have thrown an IOException here");
} |
// Exposes JVM thread gauges: a per-Thread.State count, overall/daemon/peak/
// total-started counts, plus deadlock count and the deadlocked-thread set.
// The returned map is unmodifiable; the gauges themselves read live values.
@Override
public Map<String, Metric> getMetrics() {
final Map<String, Metric> gauges = new HashMap<>();
for (final Thread.State state : Thread.State.values()) {
gauges.put(name(state.toString().toLowerCase(), "count"),
(Gauge<Object>) () -> getThreadCount(state));
}
gauges.put("count", (Gauge<Integer>) threads::getThreadCount);
gauges.put("daemon.count", (Gauge<Integer>) threads::getDaemonThreadCount);
gauges.put("peak.count", (Gauge<Integer>) threads::getPeakThreadCount);
gauges.put("total_started.count", (Gauge<Long>) threads::getTotalStartedThreadCount);
gauges.put("deadlock.count", (Gauge<Integer>) () -> deadlockDetector.getDeadlockedThreads().size());
gauges.put("deadlocks", (Gauge<Set<String>>) deadlockDetector::getDeadlockedThreads);
return Collections.unmodifiableMap(gauges);
} | @Test
// The "count" gauge must report the stubbed total thread count.
public void hasAGaugeForTheNumberOfThreads() {
assertThat(((Gauge<?>) gauges.getMetrics().get("count")).getValue())
.isEqualTo(12);
} |
// Delegates to the local cache view's snapshot of cached entries.
@Override
public Map<K, V> getCachedMap() {
return localCacheView.getCachedMap();
} | @Test
// fastPutIfAbsent must not overwrite an existing key, and a new key must
// appear in the local cache (after a short propagation delay).
public void testFastPutIfAbsent() throws Exception {
RLocalCachedMap<SimpleKey, SimpleValue> map = redisson.getLocalCachedMap(LocalCachedMapOptions.name("test"));
Map<SimpleKey, SimpleValue> cache = map.getCachedMap();
SimpleKey key = new SimpleKey("1");
SimpleValue value = new SimpleValue("2");
map.put(key, value);
assertThat(map.fastPutIfAbsent(key, new SimpleValue("3"))).isFalse();
assertThat(cache.size()).isEqualTo(1);
assertThat(map.get(key)).isEqualTo(value);
SimpleKey key1 = new SimpleKey("2");
SimpleValue value1 = new SimpleValue("4");
assertThat(map.fastPutIfAbsent(key1, value1)).isTrue();
Thread.sleep(50);
assertThat(cache.size()).isEqualTo(2);
assertThat(map.get(key1)).isEqualTo(value1);
} |
/**
 * Routes each incoming record into the window manager. With event time, the
 * record's extracted timestamp is checked against the watermark tracker; late
 * records are redirected to the configured late-data topic (or just logged).
 * With processing time, records are added at the current wall clock.
 * Always returns null: window results are emitted by the window manager, not
 * from this call.
 */
@Override
public X process(T input, Context context) throws Exception {
if (!this.initialized) {
initialize(context);
}
// record must be PulsarFunctionRecord.
Record<T> record = (Record<T>) context.getCurrentRecord();
// windows function processing semantics requires separate processing
if (windowConfig.getProcessingGuarantees() == WindowConfig.ProcessingGuarantees.ATMOST_ONCE) {
// at-most-once: ack before processing so failures never redeliver
record.ack();
}
if (isEventTime()) {
long ts = this.timestampExtractor.extractTimestamp(record.getValue());
// NOTE(review): getTopicName().get() assumes the Optional is always
// present for event-time records — confirm upstream guarantees this.
if (this.waterMarkEventGenerator.track(record.getTopicName().get(), ts)) {
this.windowManager.add(record, ts, record);
} else {
// record is behind the watermark: divert or drop
if (this.windowConfig.getLateDataTopic() != null) {
context.newOutputMessage(this.windowConfig.getLateDataTopic(), null).value(input).sendAsync();
} else {
log.info(String.format(
"Received a late tuple %s with ts %d. This will not be " + "processed"
+ ".", input, ts));
}
}
} else {
this.windowManager.add(record, System.currentTimeMillis(), record);
}
return null;
} | @Test
// Feeds timestamps where the final one (600) is behind the advanced watermark,
// exercising the late-data-topic redirection path.
public void testExecuteWithLateTupleStream() throws Exception {
windowConfig.setLateDataTopic("$late");
doReturn(Optional.of(new Gson().fromJson(new Gson().toJson(windowConfig), Map.class)))
.when(context).getUserConfigValue(WindowConfig.WINDOW_CONFIG_KEY);
TypedMessageBuilder typedMessageBuilder = mock(TypedMessageBuilder.class);
when(typedMessageBuilder.value(any())).thenReturn(typedMessageBuilder);
when(typedMessageBuilder.sendAsync()).thenReturn(CompletableFuture.anyOf());
when(context.newOutputMessage(anyString(), any())).thenReturn(typedMessageBuilder);
long[] timestamps = {603, 605, 607, 618, 626, 636, 600};
List<Long> events = new ArrayList<>(timestamps.length);
for (long ts : timestamps) {
events.add(ts);
Record<?> record = mock(Record.class);
doReturn(Optional.of("test-topic")).when(record).getTopicName();
doReturn(record).when(context).getCurrentRecord();
doReturn(ts).when(record).getValue();
testWindowedPulsarFunction.process(ts, context);
//Update the watermark to this timestamp
testWindowedPulsarFunction.waterMarkEventGenerator.run();
}
System.out.println(testWindowedPulsarFunction.windows);
long event = events.get(events.size() - 1);
} |
/**
 * Synchronously asks the broker at {@code addr} to delete the subscription
 * group, optionally removing its consume offsets. Any response code other
 * than SUCCESS is rethrown as an MQClientException.
 */
public void deleteSubscriptionGroup(final String addr, final String groupName, final boolean removeOffset,
    final long timeoutMillis)
    throws RemotingException, InterruptedException, MQClientException {
    DeleteSubscriptionGroupRequestHeader requestHeader = new DeleteSubscriptionGroupRequestHeader();
    requestHeader.setGroupName(groupName);
    requestHeader.setCleanOffset(removeOffset);
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.DELETE_SUBSCRIPTIONGROUP, requestHeader);
    // route through the VIP channel when enabled on the client config
    String brokerAddr = MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr);
    RemotingCommand response = this.remotingClient.invokeSync(brokerAddr, request, timeoutMillis);
    assert response != null;
    if (response.getCode() == ResponseCode.SUCCESS) {
        return;
    }
    throw new MQClientException(response.getCode(), response.getRemark());
} | @Test
// Smoke test: a mocked SUCCESS response must complete without throwing.
public void testDeleteSubscriptionGroup() throws RemotingException, InterruptedException, MQClientException {
mockInvokeSync();
mqClientAPI.deleteSubscriptionGroup(defaultBrokerAddr, "", true, defaultTimeout);
} |
// Executes an eth_call: builds the transaction from message headers (falling
// back to endpoint configuration), sends it at the requested block, and puts
// the call result into the message body unless an error was flagged.
@InvokeOnHeader(Web3jConstants.ETH_CALL)
void ethCall(Message message) throws IOException {
String fromAddress = message.getHeader(Web3jConstants.FROM_ADDRESS, configuration::getFromAddress, String.class);
String toAddress = message.getHeader(Web3jConstants.TO_ADDRESS, configuration::getToAddress, String.class);
BigInteger nonce = message.getHeader(Web3jConstants.NONCE, configuration::getNonce, BigInteger.class);
BigInteger gasPrice = message.getHeader(Web3jConstants.GAS_PRICE, configuration::getGasPrice, BigInteger.class);
BigInteger gasLimit = message.getHeader(Web3jConstants.GAS_LIMIT, configuration::getGasLimit, BigInteger.class);
BigInteger value = message.getHeader(Web3jConstants.VALUE, configuration::getValue, BigInteger.class);
String data = message.getHeader(Web3jConstants.DATA, configuration::getData, String.class);
DefaultBlockParameter atBlock
= toDefaultBlockParameter(message.getHeader(Web3jConstants.AT_BLOCK, configuration::getAtBlock, String.class));
org.web3j.protocol.core.methods.request.Transaction transaction
= new org.web3j.protocol.core.methods.request.Transaction(
fromAddress, nonce, gasPrice, gasLimit, toAddress, value, data);
Request<?, EthCall> request = web3j.ethCall(transaction, atBlock);
setRequestId(message, request);
EthCall response = request.send();
// only populate the body when the response carried no error
boolean hasError = checkForError(message, response);
if (!hasError) {
message.setBody(response.getValue());
}
} | @Test
// A mocked successful eth_call must place the response value in the body.
public void ethCallTest() throws Exception {
EthCall response = Mockito.mock(EthCall.class);
Mockito.when(mockWeb3j.ethCall(any(), any())).thenReturn(request);
Mockito.when(request.send()).thenReturn(response);
Mockito.when(response.getValue()).thenReturn("test");
Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_CALL);
template.send(exchange);
String body = exchange.getIn().getBody(String.class);
assertEquals("test", body);
} |
/**
 * Deep-copies the supported container and Ruby types. Strings are returned
 * as-is (immutable); Maps/Lists whose first non-null element/entry is not
 * Serializable are round-tripped through the object mapper; RubyString is
 * dup'd (a cheap copy that shares its ByteList until either side mutates —
 * see the companion memory-optimization test). Any other Collection type is
 * rejected, and anything else falls through to the code after this block.
 */
@SuppressWarnings("unchecked")
public static <T> T deep(final T input) {
if (input instanceof Map<?, ?>) {
return (T) deepMap((Map<?, ?>) input);
} else if (input instanceof List<?>) {
return (T) deepList((List<?>) input);
} else if (input instanceof RubyString) {
// new instance but sharing ByteList (until either String is modified)
return (T) ((RubyString) input).dup();
} else if (input instanceof Collection<?>) {
// only List is supported among Collections; fail loudly rather than shallow-copy
throw new ClassCastException("unexpected Collection type " + input.getClass());
}
return input;
} | @Test // @Tag("Performance Optimization")
// Documents the copy-on-write behavior of RubyString dup: the clone shares the
// ByteList until it is mutated, then gets its own copy.
public void testRubyStringCloningMemoryOptimization() {
ByteList bytes = ByteList.create("0123456789");
RubyString original = RubyString.newString(RubyUtil.RUBY, bytes);
RubyString result = Cloner.deep(original);
assertNotSame(original, result);
assertSame(bytes, original.getByteList());
// NOTE: this is an implementation detail or the underlying sharing :
assertSame(bytes, result.getByteList()); // bytes-list shared
// but when string is modified it will stop using the same byte container
result.concat(RubyUtil.RUBY.getCurrentContext(), RubyUtil.RUBY.newString(" "));
assertNotSame(bytes, result.getByteList()); // byte-list copied on write
} |
/**
 * Builds a resource id from {@code baseString}: lower-cases it, replaces every
 * character matching {@code illegalChars} with {@code replaceChar}, then
 * appends the current time (formatted with {@code timeFormat}) so the whole
 * id fits within {@code targetLength}.
 *
 * NOTE(review): if targetLength is smaller than the formatted time plus one,
 * the substring bound goes negative and subSequence throws — confirm callers
 * only pass generous length limits.
 *
 * @throws IllegalArgumentException if baseString is empty
 */
public static String generateResourceId(
String baseString,
Pattern illegalChars,
String replaceChar,
int targetLength,
DateTimeFormatter timeFormat) {
// first, make sure the baseString, typically the test ID, is not empty
checkArgument(baseString.length() != 0, "baseString cannot be empty.");
// next, replace all illegal characters from given string with given replacement character
String illegalCharsRemoved =
illegalChars.matcher(baseString.toLowerCase()).replaceAll(replaceChar);
// finally, append the date/time and return the substring that does not exceed the length limit
LocalDateTime localDateTime = LocalDateTime.now(ZoneId.of(TIME_ZONE));
String timeAddOn = localDateTime.format(timeFormat);
return illegalCharsRemoved.subSequence(
0, min(targetLength - timeAddOn.length() - 1, illegalCharsRemoved.length()))
+ replaceChar
+ localDateTime.format(timeFormat);
} | @Test
// An empty base string must be rejected with IllegalArgumentException.
public void testGenerateResourceIdShouldThrowErrorWithSingleLetterInput() {
String testBaseString = "";
assertThrows(
IllegalArgumentException.class,
() ->
generateResourceId(
testBaseString,
ILLEGAL_INSTANCE_CHARS,
REPLACE_INSTANCE_CHAR,
MAX_INSTANCE_ID_LENGTH,
TIME_FORMAT));
} |
// Builds one shard iterator per checkpointed shard, sizes the shared records
// queue proportionally to the shard count, and starts the reader threads.
// An empty checkpoint (last shard was closed) still gets a minimal queue so
// consumers don't block on a null reference.
void start() throws TransientKinesisException {
ImmutableMap.Builder<String, ShardRecordsIterator> shardsMap = ImmutableMap.builder();
for (ShardCheckpoint checkpoint : initialCheckpoint) {
shardsMap.put(checkpoint.getShardId(), createShardIterator(kinesis, checkpoint));
}
shardIteratorsMap.set(shardsMap.build());
if (!shardIteratorsMap.get().isEmpty()) {
int capacityPerShard =
read.getMaxCapacityPerShard() != null
? read.getMaxCapacityPerShard()
: DEFAULT_CAPACITY_PER_SHARD;
recordsQueue = new ArrayBlockingQueue<>(capacityPerShard * shardIteratorsMap.get().size());
String streamName = initialCheckpoint.getStreamName();
startReadingShards(shardIteratorsMap.get().values(), streamName);
} else {
// There are no shards to handle when restoring from an empty checkpoint. Empty checkpoints
// are generated when the last shard handled by this pool was closed
recordsQueue = new ArrayBlockingQueue<>(1);
}
} | @Test
// A closed shard with no successors must stop after exactly one read attempt.
public void shouldStopReadersPoolWhenLastShardReaderStopped() throws Exception {
when(firstIterator.readNextBatch()).thenThrow(KinesisShardClosedException.class);
when(firstIterator.findSuccessiveShardRecordIterators()).thenReturn(Collections.emptyList());
shardReadersPool.start();
verify(firstIterator, timeout(TIMEOUT_IN_MILLIS).times(1)).readNextBatch();
} |
// Entry point of the proxy-provider builder chain; returns the first
// (type-selection) step of the fluent spec.
public static ProxyProvider.TypeSpec builder() {
return new ProxyProvider.Build();
} | @Test
// When no connect timeout is configured, the provider defaults to 10 seconds.
void connectTimeoutWithDefault() {
ProxyProvider provider = ProxyProvider.builder()
.type(ProxyProvider.Proxy.SOCKS5)
.socketAddress(ADDRESS_1)
.build();
assertThat(provider.connectTimeoutMillis).isEqualTo(10000);
} |
@Override
public String put(String key, String value) {
    // Fail fast: the environment cannot hold a null value for a variable.
    if (value == null) {
        throw new IllegalArgumentException("Null value not allowed as an environment variable: " + key);
    }
    return super.put(key, value);
} | @Test
// Variables referencing others (${A}, ${B}) must be ordered after the
// variables they reference; "A+B" style keys come last.
public void overrideOrderCalculatorSimple() {
EnvVars env = new EnvVars();
EnvVars overrides = new EnvVars();
overrides.put("A", "NoReference");
overrides.put("A+B", "NoReference");
overrides.put("B", "Refer1${A}");
overrides.put("C", "Refer2${B}");
overrides.put("D", "Refer3${B}${Nosuch}");
OverrideOrderCalculator calc = new OverrideOrderCalculator(env, overrides);
List<String> order = calc.getOrderedVariableNames();
assertEquals(Arrays.asList("A", "B", "C", "D", "A+B"), order);
} |
// Intentional no-op: this implementation ignores view fragment names
// (verified by the companion test, which expects no tag to be set).
@Override
public void setViewFragmentName(View view, String fragmentName) {
} | @Test
// The no-op implementation must leave the fragment-name view tag unset.
public void setViewFragmentName() {
View view = new View(mApplication);
mSensorsAPI.setViewFragmentName(view, "com.sensorsdata.fragment");
Object tag = view.getTag(R.id.sensors_analytics_tag_view_fragment_name2);
Assert.assertNull(tag);
} |
// Protocol handler for a method-call command: reads the target object id,
// the method name and the argument list from the wire, invokes the method,
// and writes the encoded return value back to the client.
@Override
public void execute(String commandName, BufferedReader reader, BufferedWriter writer)
throws Py4JException, IOException {
String targetObjectId = reader.readLine();
String methodName = reader.readLine();
List<Object> arguments = getArguments(reader);
ReturnObject returnObject = invokeMethod(methodName, targetObjectId, arguments);
String returnCommand = Protocol.getOutputCommand(returnObject);
logger.finest("Returning command: " + returnCommand);
writer.write(returnCommand);
// flush so the client sees the response immediately
writer.flush();
} | @Test
// Static dispatch: calling String.valueOf(123) via the protocol must return
// the encoded string "123".
public void testStatic() {
String inputCommand = "z:java.lang.String\nvalueOf\ni123\ne\n";
try {
command.execute("c", new BufferedReader(new StringReader(inputCommand)), writer);
assertEquals("!ys123\n", sWriter.toString());
} catch (Exception e) {
e.printStackTrace();
fail();
}
} |
@Udf(description = "Converts a string representation of a time in the given format"
    + " into the TIME value.")
public Time parseTime(
    @UdfParameter(
        description = "The string representation of a time.") final String formattedTime,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.time.format.DateTimeFormatter.") final String formatPattern) {
  // Null in, null out for either argument.
  // Fix: the original used the bitwise/non-short-circuit '|'; '||' is the
  // intended logical OR for these null checks.
  if (formattedTime == null || formatPattern == null) {
    return null;
  }
  try {
    // Formatters are cached per pattern; loading may throw ExecutionException,
    // parsing may throw RuntimeException — both are handled below.
    final TemporalAccessor ta = formatters.get(formatPattern).parse(formattedTime);
    // A TIME must not carry date information; reject patterns such as 'yyyy'.
    final boolean hasDateField = Arrays.stream(ChronoField.values())
        .filter(ChronoField::isDateBased)
        .anyMatch(ta::isSupported);
    if (hasDateField) {
      throw new KsqlFunctionException("Time format contains date field.");
    }
    // SQL TIME is millisecond precision: convert nanos-of-day to millis.
    return new Time(TimeUnit.NANOSECONDS.toMillis(LocalTime.from(ta).toNanoOfDay()));
  } catch (ExecutionException | RuntimeException e) {
    throw new KsqlFunctionException("Failed to parse time '" + formattedTime
        + "' with formatter '" + formatPattern
        + "': " + e.getMessage(), e);
  }
} | @Test
// Unparseable input must surface as KsqlFunctionException with a message
// naming both the input and the formatter.
public void shouldThrowIfParseFails() {
// When:
final Exception e = assertThrows(
KsqlFunctionException.class,
() -> udf.parseTime("invalid", "HHmmss")
);
// Then:
assertThat(e.getMessage(), containsString("Failed to parse time 'invalid' with formatter 'HHmmss'"));
} |
// Queues a DeleteStatusOverride replication task on the batching dispatcher;
// the task expires after maxProcessingDelayMs if it cannot be processed.
public void deleteStatusOverride(final String appName, final String id, final InstanceInfo info) {
long expiryTime = System.currentTimeMillis() + maxProcessingDelayMs;
batchingDispatcher.process(
taskId("deleteStatusOverride", appName, id),
new InstanceReplicationTask(targetHost, Action.DeleteStatusOverride, info, null, false) {
@Override
public EurekaHttpResponse<Void> execute() {
return replicationClient.deleteStatusOverride(appName, id, info);
}
},
expiryTime);
} | @Test
// The dispatched batch must contain a single DeleteStatusOverride action.
public void testDeleteStatusOverrideBatchReplication() throws Throwable {
createPeerEurekaNode().deleteStatusOverride(instanceInfo.getAppName(), instanceInfo.getId(), instanceInfo);
ReplicationInstance replicationInstance = expectSingleBatchRequest();
assertThat(replicationInstance.getAction(), is(equalTo(Action.DeleteStatusOverride)));
} |
/**
 * Translates a user-facing SourceConfig (plus extracted class/type details)
 * into the internal FunctionDetails protobuf. Sets identity/runtime fields,
 * builds the source spec (with batch-source wrapping when configured), an
 * implicit pulsar sink spec, resources, and validates the result.
 */
public static FunctionDetails convert(SourceConfig sourceConfig, ExtractedSourceDetails sourceDetails)
throws IllegalArgumentException {
FunctionDetails.Builder functionDetailsBuilder = FunctionDetails.newBuilder();
// "builtin://name" archives refer to connectors shipped with the broker
boolean isBuiltin = !StringUtils.isEmpty(sourceConfig.getArchive()) && sourceConfig.getArchive()
.startsWith(org.apache.pulsar.common.functions.Utils.BUILTIN);
if (sourceConfig.getTenant() != null) {
functionDetailsBuilder.setTenant(sourceConfig.getTenant());
}
if (sourceConfig.getNamespace() != null) {
functionDetailsBuilder.setNamespace(sourceConfig.getNamespace());
}
if (sourceConfig.getName() != null) {
functionDetailsBuilder.setName(sourceConfig.getName());
}
if (sourceConfig.getLogTopic() != null) {
functionDetailsBuilder.setLogTopic(sourceConfig.getLogTopic());
}
functionDetailsBuilder.setRuntime(FunctionDetails.Runtime.JAVA)
if (sourceConfig.getParallelism() != null) {
functionDetailsBuilder.setParallelism(sourceConfig.getParallelism());
} else {
// default to a single instance when parallelism is unset
functionDetailsBuilder.setParallelism(1);
}
// sources use the identity function: records pass through unchanged
functionDetailsBuilder.setClassName(IdentityFunction.class.getName());
functionDetailsBuilder.setAutoAck(true);
if (sourceConfig.getProcessingGuarantees() != null) {
functionDetailsBuilder.setProcessingGuarantees(
convertProcessingGuarantee(sourceConfig.getProcessingGuarantees()));
}
// set source spec
Function.SourceSpec.Builder sourceSpecBuilder = Function.SourceSpec.newBuilder();
if (sourceDetails.getSourceClassName() != null) {
sourceSpecBuilder.setClassName(sourceDetails.getSourceClassName());
}
if (isBuiltin) {
String builtin = sourceConfig.getArchive().replaceFirst("^builtin://", "");
sourceSpecBuilder.setBuiltin(builtin);
}
Map<String, Object> configs = new HashMap<>();
if (sourceConfig.getConfigs() != null) {
configs.putAll(sourceConfig.getConfigs());
}
// Batch source handling: the real source class is tunneled through the
// config map and replaced by the BatchSourceExecutor wrapper
if (sourceConfig.getBatchSourceConfig() != null) {
configs.put(BatchSourceConfig.BATCHSOURCE_CONFIG_KEY,
new Gson().toJson(sourceConfig.getBatchSourceConfig()));
configs.put(BatchSourceConfig.BATCHSOURCE_CLASSNAME_KEY, sourceSpecBuilder.getClassName());
sourceSpecBuilder.setClassName("org.apache.pulsar.functions.source.batch.BatchSourceExecutor");
}
sourceSpecBuilder.setConfigs(new Gson().toJson(configs));
if (sourceConfig.getSecrets() != null && !sourceConfig.getSecrets().isEmpty()) {
functionDetailsBuilder.setSecretsMap(new Gson().toJson(sourceConfig.getSecrets()));
}
if (sourceDetails.getTypeArg() != null) {
sourceSpecBuilder.setTypeClassName(sourceDetails.getTypeArg());
}
functionDetailsBuilder.setSource(sourceSpecBuilder);
// set up sink spec.
// Sink spec classname should be empty so that the default pulsar sink will be used
Function.SinkSpec.Builder sinkSpecBuilder = Function.SinkSpec.newBuilder();
if (!org.apache.commons.lang3.StringUtils.isEmpty(sourceConfig.getSchemaType())) {
sinkSpecBuilder.setSchemaType(sourceConfig.getSchemaType());
}
if (!org.apache.commons.lang3.StringUtils.isEmpty(sourceConfig.getSerdeClassName())) {
sinkSpecBuilder.setSerDeClassName(sourceConfig.getSerdeClassName());
}
if (!isEmpty(sourceConfig.getTopicName())) {
sinkSpecBuilder.setTopic(sourceConfig.getTopicName());
}
if (sourceDetails.getTypeArg() != null) {
sinkSpecBuilder.setTypeClassName(sourceDetails.getTypeArg());
}
if (sourceConfig.getProducerConfig() != null) {
sinkSpecBuilder.setProducerSpec(convertProducerConfigToProducerSpec(sourceConfig.getProducerConfig()));
}
// a top-level batchBuilder overrides the one in producerConfig
// NOTE(review): protobuf getProducerSpec() never returns null (it returns
// the default instance), so this != null check is always true — consider
// hasProducerSpec(); behavior is unchanged either way.
if (sourceConfig.getBatchBuilder() != null) {
Function.ProducerSpec.Builder builder = sinkSpecBuilder.getProducerSpec() != null
? sinkSpecBuilder.getProducerSpec().toBuilder()
: Function.ProducerSpec.newBuilder();
sinkSpecBuilder.setProducerSpec(builder.setBatchBuilder(sourceConfig.getBatchBuilder()).build());
}
sinkSpecBuilder.setForwardSourceMessageProperty(true);
functionDetailsBuilder.setSink(sinkSpecBuilder);
// use default resources if resources not set
Resources resources = Resources.mergeWithDefault(sourceConfig.getResources());
Function.Resources.Builder bldr = Function.Resources.newBuilder();
bldr.setCpu(resources.getCpu());
bldr.setRam(resources.getRam());
bldr.setDisk(resources.getDisk());
functionDetailsBuilder.setResources(bldr);
if (!org.apache.commons.lang3.StringUtils.isEmpty(sourceConfig.getRuntimeFlags())) {
functionDetailsBuilder.setRuntimeFlags(sourceConfig.getRuntimeFlags());
}
functionDetailsBuilder.setComponentType(FunctionDetails.ComponentType.SOURCE);
if (!StringUtils.isEmpty(sourceConfig.getCustomRuntimeOptions())) {
functionDetailsBuilder.setCustomRuntimeOptions(sourceConfig.getCustomRuntimeOptions());
}
// final validation happens in FunctionConfigUtils
return FunctionConfigUtils.validateFunctionDetails(functionDetailsBuilder.build());
} | @Test
// With no top-level batchBuilder, the producer-config one must survive conversion.
public void testSupportsBatchBuilderDefinedInProducerConfigWhenTopLevelBatchBuilderIsUndefined() {
SourceConfig sourceConfig = createSourceConfig();
sourceConfig.setBatchBuilder(null);
sourceConfig.getProducerConfig().setBatchBuilder("KEY_BASED");
Function.FunctionDetails functionDetails =
SourceConfigUtils.convert(sourceConfig, new SourceConfigUtils.ExtractedSourceDetails(null, null));
assertEquals(functionDetails.getSink().getProducerSpec().getBatchBuilder(), "KEY_BASED");
} |
// Name comparison; case-insensitivity comes from CaseInsensitiveString.equals,
// not from this method itself.
public boolean matches(CaseInsensitiveString templateName) {
return this.name.equals(templateName);
} | @Test
// Matching must be case-insensitive for equal names and false otherwise.
public void shouldIgnoreCaseWhileMatchingATemplateWithName() {
assertThat(new PipelineTemplateConfig(new CaseInsensitiveString("FOO")).matches(new CaseInsensitiveString("foo")), is(true));
assertThat(new PipelineTemplateConfig(new CaseInsensitiveString("FOO")).matches(new CaseInsensitiveString("FOO")), is(true));
assertThat(new PipelineTemplateConfig(new CaseInsensitiveString("FOO")).matches(new CaseInsensitiveString("bar")), is(false));
} |
/**
 * Maps a local path onto a remote target for upload. Directory-to-directory
 * with append nests the local folder name under the remote folder;
 * file-to-directory appends the local file name; all other combinations keep
 * the remote path as given.
 */
protected static TransferItem resolve(final Path remote, final Local local, final boolean append) {
if(local.isDirectory()) {
// Local path resolves to folder
if(remote.isDirectory()) {
if(append) {
return new TransferItem(new Path(remote, local.getName(), EnumSet.of(Path.Type.directory)), local);
}
}
// Without append (or when remote is not a directory) keep the remote target as given
return new TransferItem(remote, local);
}
// Local path resolves to file
if(remote.isDirectory()) {
// Append local name to remote target
return new TransferItem(new Path(remote, local.getName(), EnumSet.of(Path.Type.file)), local);
}
// Keep from input for file transfer
return new TransferItem(remote, local);
} | @Test
public void testResolveFileToFolder() {
final Local temp = new FlatTemporaryFileService().create(new AlphanumericRandomStringService().random());
final Path folder = new Path("/d", EnumSet.of(Path.Type.directory));
final TransferItem item = UploadTransferItemFinder.resolve(folder, temp, false);
assertEquals(new Path("/d/" + temp.getName(), EnumSet.of(Path.Type.file)), item.remote);
assertEquals(temp, item.local);
} |
/**
 * Builds a CSV reader format for the given POJO class, deriving the CSV schema
 * (without quote characters) and the produced type information from the class.
 */
public static <T> CsvReaderFormat<T> forPojo(Class<T> pojoType) {
    final TypeInformation<T> producedType = TypeInformation.of(pojoType);
    return forSchema(
            JacksonMapperFactory::createCsvMapper,
            mapper -> mapper.schemaFor(pojoType).withoutQuoteChar(),
            producedType);
} | @Test
void testForPojoSerializability() throws IOException, ClassNotFoundException {
final CsvReaderFormat<Pojo> format = CsvReaderFormat.forPojo(Pojo.class);
final byte[] bytes = InstantiationUtil.serializeObject(format);
InstantiationUtil.deserializeObject(bytes, CsvReaderFormatTest.class.getClassLoader());
} |
/**
 * Creates a deep copy of the given object, choosing a strategy by type:
 * immutable Strings are returned as-is; collections and maps whose elements
 * are not Serializable are round-tripped through the JSON mapper with an
 * explicit parametric type; JsonNode uses Jackson's own deepCopy(); any
 * Serializable object is cloned via Java serialization, falling back to JSON
 * cloning when the underlying graph turns out not to be serializable; anything
 * else falls back to jsonClone().
 */
@Override
public <T> T clone(T object) {
if (object instanceof String) {
// Strings are immutable; safe to return the same instance.
return object;
} else if (object instanceof Collection) {
// Probe the first non-null element to decide if Java serialization would fail.
Object firstElement = findFirstNonNullElement((Collection) object);
if (firstElement != null && !(firstElement instanceof Serializable)) {
// Rebuild the parametric type so Jackson restores the element class on read.
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), firstElement.getClass());
return objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
} else if (object instanceof Map) {
// Probe one entry; either a non-serializable key or value forces the JSON route.
Map.Entry firstEntry = this.findFirstNonNullEntry((Map) object);
if (firstEntry != null) {
Object key = firstEntry.getKey();
Object value = firstEntry.getValue();
if (!(key instanceof Serializable) || !(value instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), key.getClass(), value.getClass());
return (T) objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
}
} else if (object instanceof JsonNode) {
// Jackson trees provide their own deep-copy mechanism.
return (T) ((JsonNode) object).deepCopy();
}
if (object instanceof Serializable) {
try {
return (T) SerializationHelper.clone((Serializable) object);
} catch (SerializationException e) {
//it is possible that object itself implements java.io.Serializable, but underlying structure does not
//in this case we switch to the other JSON marshaling strategy which doesn't use the Java serialization
}
}
return jsonClone(object);
} | @Test
public void should_clone_map_of_serializable_key_and_value() {
Map<String, SerializableObject> original = new HashMap<>();
original.put("key", new SerializableObject("value"));
Object cloned = serializer.clone(original);
assertEquals(original, cloned);
assertNotSame(original, cloned);
} |
/**
 * Checks whether the subscriber identified by service, group and cluster has
 * completed registration according to the cached redo data.
 */
public boolean isSubscriberRegistered(String serviceName, String groupName, String cluster) {
    final String key = ServiceInfo.getKey(NamingUtils.getGroupedName(serviceName, groupName), cluster);
    synchronized (subscribes) {
        final SubscriberRedoData redoData = subscribes.get(key);
        if (redoData == null) {
            // Never cached: treat as not registered.
            return false;
        }
        return redoData.isRegistered();
    }
} | @Test
void testIsSubscriberRegistered() {
assertFalse(redoService.isSubscriberRegistered(SERVICE, GROUP, CLUSTER));
redoService.cacheSubscriberForRedo(SERVICE, GROUP, CLUSTER);
redoService.subscriberRegistered(SERVICE, GROUP, CLUSTER);
assertTrue(redoService.isSubscriberRegistered(SERVICE, GROUP, CLUSTER));
} |
/**
 * Converts a selector criteria into a predicate over extensions.
 * Only the "name" metadata field is currently supported; any other key yields
 * a predicate that never matches.
 */
@Override
@NonNull
public Predicate<E> convert(SelectorCriteria criteria) {
    return ext -> {
        if (!"name".equals(criteria.key())) {
            // Unsupported field: always false.
            return false;
        }
        var name = ext.getMetadata().getName();
        if (name == null) {
            return false;
        }
        return switch (criteria.operator()) {
            case Equals, IN -> criteria.values().contains(name);
            case NotEquals -> !criteria.values().contains(name);
            default -> false;
        };
    };
} | @Test
void shouldReturnAlwaysFalseIfCriteriaKeyNotSupported() {
var criteria =
new SelectorCriteria("unsupported-field", Operator.Equals, Set.of("value1", "value2"));
var predicate = converter.convert(criteria);
assertNotNull(predicate);
assertFalse(predicate.test(mock(Extension.class)));
} |
/**
 * Creates and configures the default key serde from the streams configuration.
 *
 * @throws ConfigException if no default key serde class is configured
 * @throws StreamsException if the configured serde cannot be instantiated or configured
 */
@SuppressWarnings("WeakerAccess")
public Serde<?> defaultKeySerde() {
    final Object configuredSerde = get(DEFAULT_KEY_SERDE_CLASS_CONFIG);
    if (configuredSerde == null) {
        throw new ConfigException("Please specify a key serde or set one through StreamsConfig#DEFAULT_KEY_SERDE_CLASS_CONFIG");
    }
    try {
        final Serde<?> keySerde = getConfiguredInstance(DEFAULT_KEY_SERDE_CLASS_CONFIG, Serde.class);
        // true marks this serde as being used for record keys.
        keySerde.configure(originals(), true);
        return keySerde;
    } catch (final Exception e) {
        throw new StreamsException(
            String.format("Failed to configure key serde %s", configuredSerde), e);
    }
} | @Test
public void shouldSpecifyCorrectKeySerdeClassOnError() {
final Properties props = getStreamsConfig();
props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, MisconfiguredSerde.class);
final StreamsConfig config = new StreamsConfig(props);
try {
config.defaultKeySerde();
fail("Test should throw a StreamsException");
} catch (final StreamsException e) {
assertEquals(
"Failed to configure key serde class org.apache.kafka.streams.StreamsConfigTest$MisconfiguredSerde",
e.getMessage()
);
}
} |
/**
 * Returns the raw measure for the given component and metric, if any.
 *
 * @throws NullPointerException if component or metric is null; checked before
 *         any lookup so callers fail fast without touching the batch report
 */
@Override
public Optional<Measure> getRawMeasure(final Component component, final Metric metric) {
// fail fast
requireNonNull(component);
requireNonNull(metric);
return find(component, metric);
} | @Test
public void getRawMeasure_throws_NPE_without_reading_batch_report_if_component_arg_is_null() {
try {
underTestWithMock.getRawMeasure(null, metric1);
fail("an NPE should have been raised");
} catch (NullPointerException e) {
verifyNoMoreInteractions(mockBatchReportReader);
}
} |
/**
 * Encodes the given criterion to JSON by delegating to the type-aware
 * encoding helper.
 */
@Override
public ObjectNode encode(Criterion criterion, CodecContext context) {
    return new EncodeCriterionCodecHelper(criterion, context).encode();
} | @Test
public void matchEthDstMaskTest() {
Criterion criterion = Criteria.matchEthDstMasked(mcastMac, mcastMacMask);
ObjectNode result = criterionCodec.encode(criterion, context);
assertThat(result, matchesCriterion(criterion));
} |
/**
 * Static factory creating a DMN context backed by the given map.
 * NOTE(review): the map appears to be passed straight to the constructor
 * rather than copied — confirm whether callers expect mutations to be shared.
 */
public static MapBackedDMNContext of(Map<String, Object> ctx) {
return new MapBackedDMNContext(ctx);
} | @Test
void emptyContext() {
MapBackedDMNContext ctx1 = MapBackedDMNContext.of(new HashMap<>(Collections.emptyMap()));
testCloneAndAlter(ctx1, Collections.emptyMap(), Collections.emptyMap());
MapBackedDMNContext ctx2 = MapBackedDMNContext.of(new HashMap<>(Collections.emptyMap()));
testPushAndPopScope(ctx2, Collections.emptyMap(), Collections.emptyMap());
} |
/**
 * Queries Nacos for healthy instances of the configured service and converts
 * them into remote instances, flagging the entry that equals this node's
 * address as self. Cluster health is evaluated and reported to the health
 * checker on every call.
 *
 * @throws ServiceQueryException if the Nacos query or health evaluation fails
 */
@Override
public List<RemoteInstance> queryRemoteNodes() {
List<RemoteInstance> remoteInstances = new ArrayList<>();
try {
// true: only healthy instances are selected.
List<Instance> instances = namingService.selectInstances(config.getServiceName(), true);
if (CollectionUtils.isNotEmpty(instances)) {
instances.forEach(instance -> {
Address address = new Address(instance.getIp(), instance.getPort(), false);
if (address.equals(selfAddress)) {
address.setSelf(true);
}
remoteInstances.add(new RemoteInstance(address));
});
}
ClusterHealthStatus healthStatus = OAPNodeChecker.isHealth(remoteInstances);
if (healthStatus.isHealth()) {
this.healthChecker.health();
} else {
this.healthChecker.unHealth(healthStatus.getReason());
}
} catch (Throwable e) {
healthChecker.unHealth(e);
// NOTE(review): only the message is propagated; the original exception is not kept as the cause.
throw new ServiceQueryException(e.getMessage());
}
if (log.isDebugEnabled()) {
log.debug("Nacos cluster instances:{}", remoteInstances);
}
return remoteInstances;
} | @Test
@SuppressWarnings("unchecked")
public void queryRemoteNodesWithNonOrEmpty() throws NacosException {
when(namingService.selectInstances(anyString(), anyBoolean())).thenReturn(null, Collections.emptyList());
assertEquals(0, coordinator.queryRemoteNodes().size());
} |
/**
 * Registers a status future for the command id and appends the command to the
 * command topic via the given transactional producer, blocking until the send
 * completes. On send failure the registration is rolled back so the id can be
 * retried.
 *
 * @throws IllegalStateException if a command with the same id is already in flight
 * @throws KsqlStatementException if the record cannot be written to the command topic
 */
@Override
public QueuedCommandStatus enqueueCommand(
final CommandId commandId,
final Command command,
final Producer<CommandId, Command> transactionalProducer
) {
final CommandStatusFuture statusFuture = commandStatusMap.compute(
commandId,
(k, v) -> {
if (v == null) {
return new CommandStatusFuture(commandId);
}
// We should fail registration if a future is already registered, to prevent
// a caller from receiving a future for a different statement.
throw new IllegalStateException(
String.format(
"Another command with the same id (%s) is being executed.",
commandId)
);
}
);
try {
final ProducerRecord<CommandId, Command> producerRecord = new ProducerRecord<>(
commandTopicName,
COMMAND_TOPIC_PARTITION,
commandId,
command);
// Blocking send: the offset of the written record seeds the queued status.
final RecordMetadata recordMetadata =
transactionalProducer.send(producerRecord).get();
return new QueuedCommandStatus(recordMetadata.offset(), statusFuture);
} catch (final Exception e) {
// Roll back the registration so a retry of this command id is possible.
commandStatusMap.remove(commandId);
throw new KsqlStatementException(
"Could not write the statement into the command topic.",
String.format(
"Could not write the statement '%s' into the command topic.",
QueryMask.getMaskedStatement(command.getStatement())
),
QueryMask.getMaskedStatement(command.getStatement()),
KsqlStatementException.Problem.OTHER,
e
);
}
} | @Test
public void shouldDistributeCommand() {
when(transactionalProducer.send(any(ProducerRecord.class))).thenReturn(testFuture);
// When:
commandStore.enqueueCommand(commandId, command, transactionalProducer);
// Then:
verify(transactionalProducer).send(new ProducerRecord<>(
COMMAND_TOPIC_NAME,
COMMAND_TOPIC_PARTITION.partition(),
commandId,
command
));
} |
/**
 * Collector indexing elements into an ImmutableSetMultimap keyed by
 * {@code keyFunction}, keeping the elements themselves as values
 * (identity value function).
 */
public static <K, E> Collector<E, ImmutableSetMultimap.Builder<K, E>, ImmutableSetMultimap<K, E>> unorderedIndex(Function<? super E, K> keyFunction) {
return unorderedIndex(keyFunction, Function.identity());
} | @Test
public void unorderedIndex_with_valueFunction_fails_if_key_function_is_null() {
assertThatThrownBy(() -> unorderedIndex(null, MyObj::getText))
.isInstanceOf(NullPointerException.class)
.hasMessage("Key function can't be null");
} |
/**
 * Validates that the command runner reported no warning, injects the statement
 * (with side effects) and executes it, reverting the injector's side effects
 * if execution fails.
 *
 * @throws KsqlServerException if the command runner reported a warning
 */
public StatementExecutorResponse execute(
    final ConfiguredStatement<? extends Statement> statement,
    final KsqlExecutionContext executionContext,
    final KsqlSecurityContext securityContext
) {
    final String commandRunnerWarningString = commandRunnerWarning.get();
    if (!commandRunnerWarningString.isEmpty()) {
        throw new KsqlServerException("Failed to handle Ksql Statement."
            + System.lineSeparator()
            + commandRunnerWarningString);
    }
    final InjectorWithSideEffects injector = InjectorWithSideEffects.wrap(
        injectorFactory.apply(executionContext, securityContext.getServiceContext()));
    final ConfiguredStatementWithSideEffects<?> injected =
        injector.injectWithSideEffects(statement);
    try {
        return executeInjected(
            injected.getStatement(),
            statement,
            executionContext,
            securityContext);
    } catch (Exception e) {
        // Undo whatever the injector created before propagating the failure.
        injector.revertSideEffects(injected);
        throw e;
    }
} | @Test
public void shouldNotEnqueueRedundantIfNotExists() {
// Given:
final PreparedStatement<Statement> preparedStatement =
PreparedStatement.of("", new CreateStream(
SourceName.of("TEST"),
TableElements.of(),
false,
true,
CreateSourceProperties.from(ImmutableMap.of(
CommonCreateConfigs.KAFKA_TOPIC_NAME_PROPERTY, new StringLiteral("topic"),
CommonCreateConfigs.VALUE_FORMAT_PROPERTY, new StringLiteral("json")
)),
false
));
final ConfiguredStatement<Statement> configured =
ConfiguredStatement.of(preparedStatement, SessionConfig.of(KSQL_CONFIG, ImmutableMap.of())
);
final DataSource dataSource = mock(DataSource.class);
doReturn(dataSource).when(metaStore).getSource(SourceName.of("TEST"));
// When:
final StatementExecutorResponse response = distributor.execute(configured, executionContext, securityContext);
// Then:
assertThat("Should be present", response.getEntity().isPresent());
assertThat(((WarningEntity) response.getEntity().get()).getMessage(), containsString(""));
} |
/**
 * Returns the weaker of the two processing guarantees, i.e. the constant
 * declared earlier in the enum.
 */
public static ProcessingGuarantee min(ProcessingGuarantee g1, ProcessingGuarantee g2) {
    if (g2.ordinal() <= g1.ordinal()) {
        return g2;
    }
    return g1;
} | @Test
public void test_minGuarantee() {
assertEquals(NONE, Util.min(NONE, AT_LEAST_ONCE));
assertEquals(AT_LEAST_ONCE, Util.min(AT_LEAST_ONCE, EXACTLY_ONCE));
assertEquals(NONE, Util.min(NONE, EXACTLY_ONCE));
assertEquals(NONE, Util.min(NONE, NONE));
} |
/**
 * Deletes the coupon template with the given id.
 *
 * @throws com.baomidou exception via validateCouponTemplateExists if the id is unknown — see paired test expecting COUPON_TEMPLATE_NOT_EXISTS
 */
@Override
public void deleteCouponTemplate(Long id) {
// Validate that the template exists; raises a service exception otherwise
validateCouponTemplateExists(id);
// Delete the record by primary key
couponTemplateMapper.deleteById(id);
} | @Test
public void testDeleteCouponTemplate_notExists() {
// 准备参数
Long id = randomLongId();
// 调用, 并断言异常
assertServiceException(() -> couponTemplateService.deleteCouponTemplate(id), COUPON_TEMPLATE_NOT_EXISTS);
} |
/**
 * Stores the entry with the given time-to-live, blocking until the
 * asynchronous put completes and returning its result.
 */
@Override
public V put(K key, V value, Duration ttl) {
return get(putAsync(key, value, ttl));
} | @Test
public void testExpireAt() throws InterruptedException {
RMapCacheNative<String, String> cache = redisson.getMapCacheNative("simple");
cache.put("0", "8", Duration.ofSeconds(1));
cache.expireAt(System.currentTimeMillis() + 100);
Thread.sleep(500);
Assertions.assertEquals(0, cache.size());
cache.destroy();
} |
/**
 * Drives MySQL authentication for the current connection phase. The fast path
 * handles the initial handshake response (returning early while unfinished,
 * e.g. awaiting an auth-method switch); the mismatch phase re-reads the
 * response after an auth-method switch. On success an OK packet is written and
 * a finished result is returned.
 *
 * @throws AccessDeniedException if login fails for the resolved grantee
 * @throws DatabaseAccessDeniedException if the user may not access the requested database
 */
@Override
public AuthenticationResult authenticate(final ChannelHandlerContext context, final PacketPayload payload) {
AuthorityRule rule = ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData().getGlobalRuleMetaData().getSingleRule(AuthorityRule.class);
if (MySQLConnectionPhase.AUTH_PHASE_FAST_PATH == connectionPhase) {
currentAuthResult = authenticatePhaseFastPath(context, payload, rule);
// Not finished: more client packets are required before login can proceed.
if (!currentAuthResult.isFinished()) {
return currentAuthResult;
}
} else if (MySQLConnectionPhase.AUTHENTICATION_METHOD_MISMATCH == connectionPhase) {
authenticateMismatchedMethod((MySQLPacketPayload) payload);
}
Grantee grantee = new Grantee(currentAuthResult.getUsername(), getHostAddress(context));
if (!login(rule, grantee, authResponse)) {
// Third argument reports whether a password was supplied at all.
throw new AccessDeniedException(currentAuthResult.getUsername(), grantee.getHostname(), 0 != authResponse.length);
}
if (!authorizeDatabase(rule, grantee, currentAuthResult.getDatabase())) {
throw new DatabaseAccessDeniedException(currentAuthResult.getUsername(), grantee.getHostname(), currentAuthResult.getDatabase());
}
writeOKPacket(context);
return AuthenticationResultBuilder.finished(grantee.getUsername(), grantee.getHostname(), currentAuthResult.getDatabase());
} | @Test
void assertAuthenticateFailedWithInvalidDatabase() {
AuthorityRule rule = mock(AuthorityRule.class);
when(rule.getAuthenticatorType(any())).thenReturn("");
setConnectionPhase(MySQLConnectionPhase.AUTH_PHASE_FAST_PATH);
ChannelHandlerContext context = mockChannelHandlerContext();
ContextManager contextManager = mockContextManager(rule);
when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
try (MockedConstruction<MySQLErrPacket> ignored = mockConstruction(MySQLErrPacket.class, (mock, mockContext) -> assertInvalidDatabaseErrorPacket(mockContext.arguments()))) {
assertThrows(UnknownDatabaseException.class, () -> authenticationEngine.authenticate(context, getPayload("root", "invalid_db", authResponse)));
}
} |
/**
 * One iteration of the sender loop: resolves transactional/idempotent state
 * first (possibly aborting batches or sending a transactional request and
 * returning early), then drains accumulated records and polls the network
 * client.
 */
void runOnce() {
if (transactionManager != null) {
try {
transactionManager.maybeResolveSequences();
RuntimeException lastError = transactionManager.lastError();
// do not continue sending if the transaction manager is in a failed state
if (transactionManager.hasFatalError()) {
if (lastError != null)
maybeAbortBatches(lastError);
client.poll(retryBackoffMs, time.milliseconds());
return;
}
if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) {
return;
}
// Check whether we need a new producerId. If so, we will enqueue an InitProducerId
// request which will be sent below
transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();
if (maybeSendAndPollTransactionalRequest()) {
// A transactional request was sent/polled; skip draining records this iteration.
return;
}
} catch (AuthenticationException e) {
// This is already logged as error, but propagated here to perform any clean ups.
log.trace("Authentication exception while processing transactional request", e);
transactionManager.authenticationFailed(e);
}
}
long currentTimeMs = time.milliseconds();
long pollTimeout = sendProducerData(currentTimeMs);
client.poll(pollTimeout, currentTimeMs);
} | @Test
public void testRecordsFlushedImmediatelyOnTransactionCompletion() throws Exception {
try (Metrics m = new Metrics()) {
int lingerMs = 50;
SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m);
TransactionManager txnManager = new TransactionManager(logContext, "txnId", 6000, 100, apiVersions);
setupWithTransactionState(txnManager, lingerMs);
Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL,
1, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager, apiVersions);
// Begin a transaction and successfully add one partition to it.
ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
doInitTransactions(txnManager, producerIdAndEpoch);
txnManager.beginTransaction();
addPartitionToTxn(sender, txnManager, tp0);
// Send a couple records and assert that they are not sent immediately (due to linger).
appendToAccumulator(tp0);
appendToAccumulator(tp0);
sender.runOnce();
assertFalse(client.hasInFlightRequests());
// Now begin the commit and assert that the Produce request is sent immediately
// without waiting for the linger.
TransactionalRequestResult commitResult = txnManager.beginCommit();
runUntil(sender, client::hasInFlightRequests);
// Respond to the produce request and wait for the EndTxn request to be sent.
respondToProduce(tp0, Errors.NONE, 1L);
runUntil(sender, txnManager::hasInFlightRequest);
// Respond to the expected EndTxn request.
respondToEndTxn(Errors.NONE);
runUntil(sender, txnManager::isReady);
assertTrue(commitResult.isSuccessful());
commitResult.await();
// Finally, we want to assert that the linger time is still effective
// when the new transaction begins.
txnManager.beginTransaction();
addPartitionToTxn(sender, txnManager, tp0);
appendToAccumulator(tp0);
appendToAccumulator(tp0);
time.sleep(lingerMs - 1);
sender.runOnce();
assertFalse(client.hasInFlightRequests());
assertTrue(accumulator.hasUndrained());
time.sleep(1);
runUntil(sender, client::hasInFlightRequests);
assertFalse(accumulator.hasUndrained());
}
} |
/**
 * Decides what to do with connectors/tasks missing from the previous
 * assignment. If no worker departed, lost assignments are reassigned
 * immediately as new tasks; if the scheduled rebalance delay is disabled
 * (maxDelay == 0) they are also reassigned immediately. Otherwise a delayed
 * rebalance is scheduled (or extended), and once the delay expires the lost
 * assignments are handed to candidate workers that joined during the delay,
 * falling back to treating them as new tasks.
 */
protected void handleLostAssignments(ConnectorsAndTasks lostAssignments,
ConnectorsAndTasks.Builder lostAssignmentsToReassign,
List<WorkerLoad> completeWorkerAssignment) {
// There are no lost assignments and there have been no successive revoking rebalances
if (lostAssignments.isEmpty() && !revokedInPrevious) {
resetDelay();
return;
}
final long now = time.milliseconds();
log.debug("Found the following connectors and tasks missing from previous assignments: "
+ lostAssignments);
Set<String> activeMembers = completeWorkerAssignment.stream()
.map(WorkerLoad::worker)
.collect(Collectors.toSet());
if (scheduledRebalance <= 0 && activeMembers.containsAll(previousMembers)) {
log.debug("No worker seems to have departed the group during the rebalance. The "
+ "missing assignments that the leader is detecting are probably due to some "
+ "workers failing to receive the new assignments in the previous rebalance. "
+ "Will reassign missing tasks as new tasks");
lostAssignmentsToReassign.addAll(lostAssignments);
return;
} else if (maxDelay == 0) {
log.debug("Scheduled rebalance delays are disabled ({} = 0); "
+ "reassigning all lost connectors and tasks immediately",
SCHEDULED_REBALANCE_MAX_DELAY_MS_CONFIG
);
lostAssignmentsToReassign.addAll(lostAssignments);
return;
}
if (scheduledRebalance > 0 && now >= scheduledRebalance) {
// delayed rebalance expired and it's time to assign resources
log.debug("Delayed rebalance expired. Reassigning lost tasks");
List<WorkerLoad> candidateWorkerLoad = Collections.emptyList();
if (!candidateWorkersForReassignment.isEmpty()) {
candidateWorkerLoad = pickCandidateWorkerForReassignment(completeWorkerAssignment);
}
if (!candidateWorkerLoad.isEmpty()) {
log.debug("Assigning lost tasks to {} candidate workers: {}",
candidateWorkerLoad.size(),
candidateWorkerLoad.stream().map(WorkerLoad::worker).collect(Collectors.joining(",")));
// Round-robin the lost connectors, then the lost tasks, over the candidates.
Iterator<WorkerLoad> candidateWorkerIterator = candidateWorkerLoad.iterator();
for (String connector : lostAssignments.connectors()) {
// Loop over the candidate workers as many times as it takes
if (!candidateWorkerIterator.hasNext()) {
candidateWorkerIterator = candidateWorkerLoad.iterator();
}
WorkerLoad worker = candidateWorkerIterator.next();
log.debug("Assigning connector id {} to member {}", connector, worker.worker());
worker.assign(connector);
}
candidateWorkerIterator = candidateWorkerLoad.iterator();
for (ConnectorTaskId task : lostAssignments.tasks()) {
if (!candidateWorkerIterator.hasNext()) {
candidateWorkerIterator = candidateWorkerLoad.iterator();
}
WorkerLoad worker = candidateWorkerIterator.next();
log.debug("Assigning task id {} to member {}", task, worker.worker());
worker.assign(task);
}
} else {
log.debug("No single candidate worker was found to assign lost tasks. Treating lost tasks as new tasks");
lostAssignmentsToReassign.addAll(lostAssignments);
}
resetDelay();
// Resetting the flag as now we can permit successive revoking rebalances.
// since we have gone through the full rebalance delay
revokedInPrevious = false;
} else {
candidateWorkersForReassignment
.addAll(candidateWorkersForReassignment(completeWorkerAssignment));
if (now < scheduledRebalance) {
// a delayed rebalance is in progress, but it's not yet time to reassign
// unaccounted resources
delay = calculateDelay(now);
log.debug("Delayed rebalance in progress. Task reassignment is postponed. New computed rebalance delay: {}", delay);
} else {
// This means scheduledRebalance == 0
// We could also extract the current minimum delay from the group, to make
// independent of consecutive leader failures, but this optimization is skipped
// at the moment
delay = maxDelay;
log.debug("Resetting rebalance delay to the max: {}. scheduledRebalance: {} now: {} diff scheduledRebalance - now: {}",
delay, scheduledRebalance, now, scheduledRebalance - now);
}
scheduledRebalance = now + delay;
}
} | @Test
public void testLostAssignmentHandlingWhenWorkerBouncesBackButFinallyLeaves() {
// Customize assignor for this test case
time = new MockTime();
initAssignor();
assertTrue(assignor.candidateWorkersForReassignment.isEmpty());
assertEquals(0, assignor.scheduledRebalance);
assertEquals(0, assignor.delay);
Map<String, WorkerLoad> configuredAssignment = new HashMap<>();
configuredAssignment.put("worker0", workerLoad("worker0", 0, 2, 0, 4));
configuredAssignment.put("worker1", workerLoad("worker1", 2, 2, 4, 4));
configuredAssignment.put("worker2", workerLoad("worker2", 4, 2, 8, 4));
// No lost assignments
assignor.handleLostAssignments(new ConnectorsAndTasks.Builder().build(),
new ConnectorsAndTasks.Builder(),
new ArrayList<>(configuredAssignment.values()));
assertEquals(Collections.emptySet(),
assignor.candidateWorkersForReassignment,
"Wrong set of workers for reassignments");
assertEquals(0, assignor.scheduledRebalance);
assertEquals(0, assignor.delay);
assignor.previousMembers = new HashSet<>(configuredAssignment.keySet());
String veryFlakyWorker = "worker1";
WorkerLoad lostLoad = configuredAssignment.remove(veryFlakyWorker);
ConnectorsAndTasks lostAssignments = new ConnectorsAndTasks.Builder()
.with(lostLoad.connectors(), lostLoad.tasks()).build();
// Lost assignments detected - No candidate worker has appeared yet (worker with no assignments)
assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(),
new ArrayList<>(configuredAssignment.values()));
assertEquals(Collections.emptySet(),
assignor.candidateWorkersForReassignment,
"Wrong set of workers for reassignments");
assertEquals(time.milliseconds() + rebalanceDelay, assignor.scheduledRebalance);
assertEquals(rebalanceDelay, assignor.delay);
assignor.previousMembers = new HashSet<>(configuredAssignment.keySet());
time.sleep(rebalanceDelay / 2);
rebalanceDelay /= 2;
// A new worker (probably returning worker) has joined
configuredAssignment.put(veryFlakyWorker, new WorkerLoad.Builder(veryFlakyWorker).build());
assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(),
new ArrayList<>(configuredAssignment.values()));
assertEquals(Collections.singleton(veryFlakyWorker),
assignor.candidateWorkersForReassignment,
"Wrong set of workers for reassignments");
assertEquals(time.milliseconds() + rebalanceDelay, assignor.scheduledRebalance);
assertEquals(rebalanceDelay, assignor.delay);
assignor.previousMembers = new HashSet<>(configuredAssignment.keySet());
time.sleep(rebalanceDelay);
// The returning worker leaves permanently after joining briefly during the delay
configuredAssignment.remove(veryFlakyWorker);
ConnectorsAndTasks.Builder lostAssignmentsToReassign = new ConnectorsAndTasks.Builder();
assignor.handleLostAssignments(lostAssignments, lostAssignmentsToReassign,
new ArrayList<>(configuredAssignment.values()));
assertTrue(lostAssignmentsToReassign.build().connectors().containsAll(lostAssignments.connectors()),
"Wrong assignment of lost connectors");
assertTrue(lostAssignmentsToReassign.build().tasks().containsAll(lostAssignments.tasks()),
"Wrong assignment of lost tasks");
assertEquals(Collections.emptySet(),
assignor.candidateWorkersForReassignment,
"Wrong set of workers for reassignments");
assertEquals(0, assignor.scheduledRebalance);
assertEquals(0, assignor.delay);
} |
/**
 * Lists the children of the given directory on the local filesystem. Entries
 * whose attributes cannot be read are skipped with a warning; the listener is
 * notified after each accepted entry.
 *
 * @throws BackgroundException if the directory does not exist or cannot be read
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
final AttributedList<ch.cyberduck.core.Path> paths = new AttributedList<>();
final java.nio.file.Path p = session.toPath(directory);
if(!Files.exists(p)) {
throw new LocalExceptionMappingService().map("Listing directory {0} failed",
new NoSuchFileException(directory.getAbsolute()), directory);
}
try (DirectoryStream<java.nio.file.Path> stream = Files.newDirectoryStream(p)) {
for(java.nio.file.Path n : stream) {
// Root paths have no file name component; skip them.
if(null == n.getFileName()) {
continue;
}
try {
final PathAttributes attributes = feature.toAttributes(n);
final EnumSet<Path.Type> type = EnumSet.noneOf(Path.Type.class);
if(Files.isDirectory(n)) {
type.add(Path.Type.directory);
}
else {
type.add(Path.Type.file);
}
final Path file = new Path(directory, n.getFileName().toString(), type, attributes);
// post() may filter the entry; only notify the listener for accepted files.
if(this.post(n, file)) {
paths.add(file);
listener.chunk(directory, paths);
}
}
catch(IOException e) {
// Best effort: unreadable entries are omitted from the listing.
log.warn(String.format("Failure reading attributes for %s", n));
}
}
}
catch(IOException ex) {
throw new LocalExceptionMappingService().map("Listing directory {0} failed", ex, directory);
}
return paths;
} | @Test
public void testListJunction() throws Exception {
final LocalSession session = new LocalSession(new Host(new LocalProtocol(), new LocalProtocol().getDefaultHostname()));
assumeTrue(Factory.Platform.getDefault().equals(Factory.Platform.Name.windows));
assertNotNull(session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()));
assertTrue(session.isConnected());
assertNotNull(session.getClient());
session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
final Path home = new LocalHomeFinderFeature().find();
final AttributedList<Path> list = new LocalListService(session).list(home, new DisabledListProgressListener());
assertTrue(list.contains(new Path(home, "Recent", EnumSet.of(Path.Type.directory))));
final Path recent = list.get(new Path(home, "Recent", EnumSet.of(Path.Type.directory)));
assertFalse(recent.attributes().getPermission().isReadable());
assertTrue(recent.attributes().getPermission().isExecutable());
try {
new LocalListService(session).list(recent, new DisabledListProgressListener());
fail();
}
catch(AccessDeniedException | NotfoundException e) {
//
}
session.close();
} |
/**
 * Parses an IP address string into its packed 32-bit integer representation.
 *
 * @param ip textual IP address
 * @return the address packed into an int
 * @throws IllegalArgumentException if the string is not a valid IP address
 */
public static int ipToInt(String ip) {
    try {
        return bytesToInt(ipToBytesByInet(ip));
    } catch (Exception e) {
        // Keep the original failure as the cause instead of discarding it.
        throw new IllegalArgumentException(ip + " is invalid IP", e);
    }
} | @Test
void testIllegalIpToInt() {
assertThrows(IllegalArgumentException.class, () -> {
InternetAddressUtil.ipToInt("127.0.0.256");
});
} |
/**
 * Detects the current commit revision from Jenkins environment variables,
 * preferring (in order) the GitHub PR builder plugin commit, the Git commit
 * (with a PR-specific SHA1 when CHANGE_ID is set), and finally the SVN commit.
 */
@Override
public CiConfiguration loadConfiguration() {
    // https://wiki.jenkins-ci.org/display/JENKINS/GitHub+pull+request+builder+plugin#GitHubpullrequestbuilderplugin-EnvironmentVariables
    // https://wiki.jenkins-ci.org/display/JENKINS/Building+a+software+project
    final String prBuilderCommit = system.envVariable("ghprbActualCommit");
    if (StringUtils.isNotBlank(prBuilderCommit)) {
        return new CiConfigurationImpl(prBuilderCommit, getName());
    }
    final String gitCommit = system.envVariable("GIT_COMMIT");
    if (StringUtils.isNotBlank(gitCommit)) {
        if (StringUtils.isNotBlank(system.envVariable("CHANGE_ID"))) {
            // Multibranch pipeline PR build: prefer the PR head SHA1 when resolvable.
            final String prSha1 = getJenkinsGitPrSha1();
            if (StringUtils.isNotBlank(prSha1)) {
                return new CiConfigurationImpl(prSha1, getName());
            }
        }
        return new CiConfigurationImpl(gitCommit, getName());
    }
    return new CiConfigurationImpl(system.envVariable("SVN_COMMIT"), getName());
} | @Test
public void loadConfiguration_with_deprecated_pull_request_plugin() {
setEnvVariable("ghprbActualCommit", "abd12fc");
assertThat(underTest.loadConfiguration().getScmRevision()).hasValue("abd12fc");
} |
/**
 * Sorts partition statistics for compaction scheduling: entries without a
 * compaction score are dropped, the rest are ordered by priority (highest
 * first) and then by compaction score (highest first).
 */
@Override
@NotNull
public List<PartitionStatistics> sort(@NotNull List<PartitionStatistics> partitionStatistics) {
    final Comparator<PartitionStatistics> byPriorityDesc =
            Comparator.comparingInt((PartitionStatistics stats) -> stats.getPriority().getValue()).reversed();
    final Comparator<PartitionStatistics> byScoreDesc =
            Comparator.comparing(PartitionStatistics::getCompactionScore, Comparator.reverseOrder());
    return partitionStatistics.stream()
            .filter(p -> p.getCompactionScore() != null)
            .sorted(byPriorityDesc.thenComparing(byScoreDesc))
            .collect(Collectors.toList());
} | @Test
public void test() {
List<PartitionStatistics> statisticsList = new ArrayList<>();
PartitionStatistics statistics = new PartitionStatistics(new PartitionIdentifier(1, 2, 3));
statistics.setCompactionScore(Quantiles.compute(Arrays.asList(0.0, 0.0, 0.0)));
statisticsList.add(statistics);
statistics = new PartitionStatistics(new PartitionIdentifier(1, 2, 6));
statistics.setCompactionScore(Quantiles.compute(Arrays.asList(1.1, 1.1, 1.2)));
statisticsList.add(statistics);
statistics = new PartitionStatistics(new PartitionIdentifier(1, 2, 4));
statistics.setCompactionScore(Quantiles.compute(Arrays.asList(0.99, 0.99, 0.99)));
statisticsList.add(statistics);
statistics = new PartitionStatistics(new PartitionIdentifier(1, 2, 5));
statistics.setCompactionScore(Quantiles.compute(Arrays.asList(1.0, 1.0)));
statisticsList.add(statistics);
ScoreSorter sorter = new ScoreSorter();
List<PartitionStatistics> sortedList = sorter.sort(statisticsList);
Assert.assertEquals(4, sortedList.size());
Assert.assertEquals(6, sortedList.get(0).getPartition().getPartitionId());
Assert.assertEquals(5, sortedList.get(1).getPartition().getPartitionId());
Assert.assertEquals(4, sortedList.get(2).getPartition().getPartitionId());
Assert.assertEquals(3, sortedList.get(3).getPartition().getPartitionId());
} |
/**
 * Criterion value: the net profit of the given position.
 * Delegates entirely to Position#getProfit(); the series argument is unused.
 */
@Override
public Num calculate(BarSeries series, Position position) {
return position.getProfit();
} | @Test
public void calculateOnlyWithProfitShortPositions() {
MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105);
TradingRecord tradingRecord = new BaseTradingRecord(Trade.sellAt(0, series, series.numOf(50)),
Trade.buyAt(2, series, series.numOf(50)), Trade.sellAt(3, series, series.numOf(50)),
Trade.buyAt(5, series, series.numOf(50)));
AnalysisCriterion profit = getCriterion();
assertNumEquals(-(500 + 250), profit.calculate(series, tradingRecord));
} |
/**
 * Returns the label-to-nodes mapping from the node labels manager. When the
 * request carries no label filter the full mapping is returned; otherwise the
 * mapping is restricted to the requested labels.
 */
@Override
public GetLabelsToNodesResponse getLabelsToNodes(
    GetLabelsToNodesRequest request) throws YarnException, IOException {
    final RMNodeLabelsManager labelsMgr = rmContext.getNodeLabelManager();
    final boolean noFilter =
        request.getNodeLabels() == null || request.getNodeLabels().isEmpty();
    return noFilter
        ? GetLabelsToNodesResponse.newInstance(labelsMgr.getLabelsToNodes())
        : GetLabelsToNodesResponse.newInstance(
            labelsMgr.getLabelsToNodes(request.getNodeLabels()));
} | @Test
// End-to-end check of ClientRMService#getLabelsToNodes over the client RPC
// proxy: registers three labels (x, y, z) across five nodes, then queries
// the label -> nodes mapping both unfiltered and filtered to {x, z}.
public void testGetLabelsToNodes() throws Exception {
MockRM rm = new MockRM() {
protected ClientRMService createClientRMService() {
return new ClientRMService(this.rmContext, scheduler,
this.rmAppManager, this.applicationACLsManager,
this.queueACLsManager, this.getRMContext()
.getRMDelegationTokenSecretManager());
};
};
resourceManager = rm;
rm.start();
// Register three non-exclusive cluster node labels.
// (addToCluserNodeLabels is the real, historically misspelled API name.)
NodeLabel labelX = NodeLabel.newInstance("x", false);
NodeLabel labelY = NodeLabel.newInstance("y", false);
NodeLabel labelZ = NodeLabel.newInstance("z", false);
RMNodeLabelsManager labelsMgr = rm.getRMContext().getNodeLabelManager();
labelsMgr.addToCluserNodeLabels(ImmutableSet.of(labelX, labelY, labelZ));
// Five nodes on three hosts, each carrying exactly one label:
// x -> {node1A}, y -> {node2A, node3A}, z -> {node1B, node3B}.
NodeId node1A = NodeId.newInstance("host1", 1234);
NodeId node1B = NodeId.newInstance("host1", 5678);
NodeId node2A = NodeId.newInstance("host2", 1234);
NodeId node3A = NodeId.newInstance("host3", 1234);
NodeId node3B = NodeId.newInstance("host3", 5678);
Map<NodeId, Set<String>> map = new HashMap<NodeId, Set<String>>();
map.put(node1A, ImmutableSet.of("x"));
map.put(node1B, ImmutableSet.of("z"));
map.put(node2A, ImmutableSet.of("y"));
map.put(node3A, ImmutableSet.of("y"));
map.put(node3B, ImmutableSet.of("z"));
labelsMgr.replaceLabelsOnNode(map);
// Create a client.
conf = new Configuration();
rpc = YarnRPC.create(conf);
InetSocketAddress rmAddress = rm.getClientRMService().getBindAddress();
LOG.info("Connecting to ResourceManager at " + rmAddress);
client = (ApplicationClientProtocol) rpc.getProxy(
ApplicationClientProtocol.class, rmAddress, conf);
// Get node labels collection
GetClusterNodeLabelsResponse response = client
.getClusterNodeLabels(GetClusterNodeLabelsRequest.newInstance());
Assert.assertTrue(response.getNodeLabelList().containsAll(
Arrays.asList(labelX, labelY, labelZ)));
// Get labels to nodes mapping (no filter: all three labels expected)
GetLabelsToNodesResponse response1 = client
.getLabelsToNodes(GetLabelsToNodesRequest.newInstance());
Map<String, Set<NodeId>> labelsToNodes = response1.getLabelsToNodes();
Assert.assertTrue(labelsToNodes.keySet().containsAll(
Arrays.asList(labelX.getName(), labelY.getName(), labelZ.getName())));
Assert.assertTrue(labelsToNodes.get(labelX.getName()).containsAll(
Arrays.asList(node1A)));
Assert.assertTrue(labelsToNodes.get(labelY.getName()).containsAll(
Arrays.asList(node2A, node3A)));
Assert.assertTrue(labelsToNodes.get(labelZ.getName()).containsAll(
Arrays.asList(node1B, node3B)));
// Get labels to nodes mapping for specific labels {x, z}; the filtered
// response must omit label y entirely.
Set<String> setlabels = new HashSet<String>(Arrays.asList(new String[]{"x",
"z"}));
GetLabelsToNodesResponse response2 = client
.getLabelsToNodes(GetLabelsToNodesRequest.newInstance(setlabels));
labelsToNodes = response2.getLabelsToNodes();
Assert.assertTrue(labelsToNodes.keySet().containsAll(
Arrays.asList(labelX.getName(), labelZ.getName())));
Assert.assertTrue(labelsToNodes.get(labelX.getName()).containsAll(
Arrays.asList(node1A)));
Assert.assertTrue(labelsToNodes.get(labelZ.getName()).containsAll(
Arrays.asList(node1B, node3B)));
assertThat(labelsToNodes.get(labelY.getName())).isNull();
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.