| focal_method (string, 13–60.9k chars) | test_case (string, 25–109k chars) |
|---|---|
// Blocking counter read: waits on the async map's future and unwraps its value.
@Override
public long get(K key) {
    return complete(asyncCounterMap.get(key));
} | @Test(expected = ConsistentMapException.class)
// An EXECUTION_EXCEPTION from the backing async map must surface as ConsistentMapException.
public void testExecutionError() {
    AtomicCounterMapWithErrors<String> atomicCounterMap =
            new AtomicCounterMapWithErrors<>();
    atomicCounterMap.setErrorState(TestingCompletableFutures.ErrorState.EXECUTION_EXCEPTION);
    DefaultAtomicCounterMap<String> map =
            new DefaultAtomicCounterMap<>(atomicCounterMap, 1000);
    map.get(KEY1);
} |
// Derives the prefix length (0-32) of a CIDR string such as "10.0.0.0/24".
// With inclusive host count the address count is exactly 2^(32 - prefixLength),
// so prefixLength = 32 - log2(count) = 32 - (63 - leadingZeros(count)) = leadingZeros(count) - 31.
public static int getCidrPrefixLength(String cidr) {
    SubnetUtils subnetUtils = new SubnetUtils(cidr);
    // Inclusive host count makes /31 and /32 report 2 and 1 addresses respectively,
    // keeping the power-of-two relationship above valid for every prefix length.
    subnetUtils.setInclusiveHostCount(true);
    return Long.numberOfLeadingZeros(subnetUtils.getInfo().getAddressCountLong()) - 31;
} | @Test
// Round-trips every prefix length 0..32 through the conversion.
public void testGetCidrPrefixLength() {
    for (int i = 0; i <= 32; i++) {
        String addr = "192.168.0.1/" + i;
        assertThat(NetUtils.getCidrPrefixLength(addr)).isEqualTo(i);
    }
} |
// Thin facade: forwards all key-serde creation arguments to createInner unchanged.
@Override
public Serde<GenericKey> create(
        final FormatInfo format,
        final PersistenceSchema schema,
        final KsqlConfig ksqlConfig,
        final Supplier<SchemaRegistryClient> schemaRegistryClientFactory,
        final String loggerNamePrefix,
        final ProcessingLogContext processingLogContext,
        final Optional<TrackedCallback> tracker
) {
    return createInner(
        format,
        schema,
        ksqlConfig,
        schemaRegistryClientFactory,
        loggerNamePrefix,
        processingLogContext,
        tracker
    );
} | @Test
public void shouldConfigureLoggingSerdeNonWindowed() {
    // When:
    factory.create(format, schema, config, srClientFactory, LOGGER_PREFIX, processingLogCxt,
        Optional.empty());
    // Then:
    // NOTE(review): the 'true' flag presumably marks key-serde configuration — confirm.
    verify(loggingSerde).configure(ImmutableMap.of(), true);
} |
// Fluent setter bound to the "discovery-server.enabled" config property.
@Config("discovery-server.enabled")
public EmbeddedDiscoveryConfig setEnabled(boolean enabled)
{
    this.enabled = enabled;
    return this;
} | @Test
// Verifies the property string maps onto the expected config bean state.
public void testExplicitPropertyMappings()
{
    Map<String, String> properties = new ImmutableMap.Builder<String, String>()
            .put("discovery-server.enabled", "true")
            .build();
    EmbeddedDiscoveryConfig expected = new EmbeddedDiscoveryConfig()
            .setEnabled(true);
    assertFullMapping(properties, expected);
} |
// Maps a raw FUSE open(2) flag to an access-mode OpenAction based on its
// O_ACCMODE bits (O_RDONLY=0, O_WRONLY=1, O_RDWR=2).
// NOTE(review): the previous comment claimed O_RDWR is coerced here to
// read-only (file exists, no O_TRUNC) or write-only otherwise, but this method
// returns READ_WRITE for O_RDWR — presumably that coercion happens in the
// caller; confirm.
public static OpenAction getOpenAction(int flag) {
    // O_ACCMODE masks out everything except the two access-mode bits.
    switch (OpenFlags.valueOf(flag & O_ACCMODE.intValue())) {
        case O_RDONLY:
            return OpenAction.READ_ONLY;
        case O_WRONLY:
            return OpenAction.WRITE_ONLY;
        case O_RDWR:
            return OpenAction.READ_WRITE;
        default:
            // Should not fall here for valid flags (the mask could still yield 3,
            // an invalid combination of both access-mode bits).
            return OpenAction.NOT_SUPPORTED;
    }
} | @Test
// Flags whose access-mode bits are 00 (read-only) map to READ_ONLY.
public void readOnly() {
    int[] readFlags = new int[]{0x8000, 0x9000};
    for (int readFlag : readFlags) {
        Assert.assertEquals(AlluxioFuseOpenUtils.OpenAction.READ_ONLY,
                AlluxioFuseOpenUtils.getOpenAction(readFlag));
    }
} |
// Assembles the Kafka Streams stream-stream join for this plan node: resolves
// per-side schemas/formats, builds value and key serdes, constructs the
// JoinWindows (with optional grace period), and wires the requested join type.
@SuppressWarnings("deprecation")
public static <K> KStreamHolder<K> build(
        final KStreamHolder<K> left,
        final KStreamHolder<K> right,
        final StreamStreamJoin<K> join,
        final RuntimeBuildContext buildContext,
        final StreamJoinedFactory streamJoinedFactory) {
    final QueryContext queryContext = join.getProperties().getQueryContext();
    final QueryContext.Stacker stacker = QueryContext.Stacker.of(queryContext);
    final LogicalSchema leftSchema;
    final LogicalSchema rightSchema;
    final Formats rightFormats;
    final Formats leftFormats;
    // RIGHT joins are executed as a flipped LEFT join (see the switch below), so
    // the sides' schemas and internal formats are swapped up front.
    if (join.getJoinType().equals(RIGHT)) {
        leftFormats = join.getRightInternalFormats();
        rightFormats = join.getLeftInternalFormats();
        leftSchema = right.getSchema();
        rightSchema = left.getSchema();
    } else {
        leftFormats = join.getLeftInternalFormats();
        rightFormats = join.getRightInternalFormats();
        leftSchema = left.getSchema();
        rightSchema = right.getSchema();
    }
    final PhysicalSchema leftPhysicalSchema = PhysicalSchema.from(
        leftSchema,
        leftFormats.getKeyFeatures(),
        leftFormats.getValueFeatures()
    );
    final Serde<GenericRow> leftSerde = buildContext.buildValueSerde(
        leftFormats.getValueFormat(),
        leftPhysicalSchema,
        stacker.push(LEFT_SERDE_CTX).getQueryContext()
    );
    final PhysicalSchema rightPhysicalSchema = PhysicalSchema.from(
        rightSchema,
        rightFormats.getKeyFeatures(),
        rightFormats.getValueFeatures()
    );
    final Serde<GenericRow> rightSerde = buildContext.buildValueSerde(
        rightFormats.getValueFormat(),
        rightPhysicalSchema,
        stacker.push(RIGHT_SERDE_CTX).getQueryContext()
    );
    // Key serde is derived from the (possibly swapped) left side's key format.
    final Serde<K> keySerde = left.getExecutionKeyFactory().buildKeySerde(
        leftFormats.getKeyFormat(),
        leftPhysicalSchema,
        queryContext
    );
    final StreamJoined<K, GenericRow, GenericRow> joined = streamJoinedFactory.create(
        keySerde,
        leftSerde,
        rightSerde,
        StreamsUtil.buildOpName(queryContext),
        StreamsUtil.buildOpName(queryContext)
    );
    final JoinParams joinParams = JoinParamsFactory
        .create(join.getKeyColName(), leftSchema, rightSchema);
    JoinWindows joinWindows;
    // Grace, as optional, helps to identify if a user specified the GRACE PERIOD syntax in the
    // join window. If specified, then we'll call the new KStreams API ofTimeDifferenceAndGrace()
    // which enables the "spurious" results bugfix with left/outer joins (see KAFKA-10847).
    if (join.getGraceMillis().isPresent()) {
        joinWindows = JoinWindows.ofTimeDifferenceAndGrace(
            join.getBeforeMillis(),
            join.getGraceMillis().get());
    } else {
        joinWindows = JoinWindows.of(join.getBeforeMillis());
    }
    joinWindows = joinWindows.after(join.getAfterMillis());
    final KStream<K, GenericRow> result;
    switch (join.getJoinType()) {
        case LEFT:
            result = left.getStream().leftJoin(
                right.getStream(), joinParams.getJoiner(), joinWindows, joined);
            break;
        case RIGHT:
            // Flipped leftJoin: the Streams DSL has no rightJoin primitive.
            result = right.getStream().leftJoin(
                left.getStream(), joinParams.getJoiner(), joinWindows, joined);
            break;
        case OUTER:
            result = left.getStream().outerJoin(
                right.getStream(), joinParams.getJoiner(), joinWindows, joined);
            break;
        case INNER:
            result = left.getStream().join(
                right.getStream(), joinParams.getJoiner(), joinWindows, joined);
            break;
        default:
            throw new IllegalStateException("invalid join type");
    }
    return left.withStream(result, joinParams.getSchema());
} | @Test
// OUTER joins should delegate to KStream.outerJoin with the no-grace windows.
public void shouldDoOuterJoin() {
    // Given:
    givenOuterJoin();
    // When:
    final KStreamHolder<Struct> result = join.build(planBuilder, planInfo);
    // Then:
    verify(leftKStream).outerJoin(
        same(rightKStream),
        eq(new KsqlValueJoiner(LEFT_SCHEMA.value().size(), RIGHT_SCHEMA.value().size(), 1)),
        eq(WINDOWS_NO_GRACE),
        same(joined)
    );
    verifyNoMoreInteractions(leftKStream, rightKStream, resultKStream);
    assertThat(result.getStream(), is(resultKStream));
    assertThat(result.getExecutionKeyFactory(), is(executionKeyFactory));
} |
// Decides whether a reference URL should be served by the local (injvm) protocol.
// Precedence: explicit local scope/flag > explicit remote scope > generic
// invocation > presence of a locally exported (non-broadcast) service.
public boolean isInjvmRefer(URL url) {
    String scope = url.getParameter(SCOPE_KEY);
    // Since injvm protocol is configured explicitly, we don't need to set any extra flag, use normal refer process.
    if (SCOPE_LOCAL.equals(scope) || (url.getParameter(LOCAL_PROTOCOL, false))) {
        // if it's declared as local reference
        // 'scope=local' is equivalent to 'injvm=true', injvm will be deprecated in the future release
        return true;
    } else if (SCOPE_REMOTE.equals(scope)) {
        // it's declared as remote reference
        return false;
    } else if (url.getParameter(GENERIC_KEY, false)) {
        // generic invocation is not local reference
        return false;
    } else if (getExporter(exporterMap, url) != null) {
        // Broadcast cluster means that multiple machines will be called,
        // which is not converted to injvm protocol at this time.
        if (BROADCAST_CLUSTER.equalsIgnoreCase(url.getParameter(CLUSTER_KEY))) {
            return false;
        }
        // by default, go through local reference if there's the service exposed locally
        return true;
    } else {
        return false;
    }
} | @Test
void testIsInjvmRefer() {
    DemoService service = new DemoServiceImpl();
    URL url = URL.valueOf("injvm://127.0.0.1/TestService")
            .addParameter(INTERFACE_KEY, DemoService.class.getName())
            .setScopeModel(ApplicationModel.defaultModel().getDefaultModule());
    Exporter<?> exporter = protocol.export(proxy.getInvoker(service, DemoService.class, url));
    exporters.add(exporter);
    // Exported locally -> injvm refer.
    url = url.setProtocol("dubbo");
    assertTrue(InjvmProtocol.getInjvmProtocol(FrameworkModel.defaultModel()).isInjvmRefer(url));
    url = url.addParameter(GROUP_KEY, "*").addParameter(VERSION_KEY, "*");
    assertTrue(InjvmProtocol.getInjvmProtocol(FrameworkModel.defaultModel()).isInjvmRefer(url));
    // Explicit local scope / local-protocol flag -> injvm refer regardless of export.
    url = URL.valueOf("fake://127.0.0.1/TestService").addParameter(SCOPE_KEY, SCOPE_LOCAL);
    assertTrue(InjvmProtocol.getInjvmProtocol(FrameworkModel.defaultModel()).isInjvmRefer(url));
    url = URL.valueOf("fake://127.0.0.1/TestService").addParameter(LOCAL_PROTOCOL, true);
    assertTrue(InjvmProtocol.getInjvmProtocol(FrameworkModel.defaultModel()).isInjvmRefer(url));
    // Remote scope, generic invocation, or broadcast cluster -> not injvm.
    url = URL.valueOf("fake://127.0.0.1/TestService").addParameter(SCOPE_KEY, SCOPE_REMOTE);
    assertFalse(
            InjvmProtocol.getInjvmProtocol(FrameworkModel.defaultModel()).isInjvmRefer(url));
    url = URL.valueOf("fake://127.0.0.1/TestService").addParameter(GENERIC_KEY, true);
    assertFalse(
            InjvmProtocol.getInjvmProtocol(FrameworkModel.defaultModel()).isInjvmRefer(url));
    url = URL.valueOf("fake://127.0.0.1/TestService").addParameter("cluster", "broadcast");
    assertFalse(
            InjvmProtocol.getInjvmProtocol(FrameworkModel.defaultModel()).isInjvmRefer(url));
} |
// Decodes one change-stream result row into ChangeStreamRecord objects.
// The PostgreSQL dialect returns a single JSONB column; GoogleSQL returns an
// array of structs, each of which may expand into multiple records.
public List<ChangeStreamRecord> toChangeStreamRecords(
        PartitionMetadata partition,
        ChangeStreamResultSet resultSet,
        ChangeStreamResultSetMetadata resultSetMetadata) {
    if (this.isPostgres()) {
        // In PostgresQL, change stream records are returned as JsonB.
        return Collections.singletonList(
            toChangeStreamRecordJson(partition, resultSet.getPgJsonb(0), resultSetMetadata));
    }
    // In GoogleSQL, change stream records are returned as an array of structs.
    return resultSet.getCurrentRowAsStruct().getStructList(0).stream()
        .flatMap(struct -> toChangeStreamRecord(partition, struct, resultSetMetadata))
        .collect(Collectors.toList());
} | @Test
// Round-trips a NEW_ROW update DataChangeRecord through its JSON form (PG dialect).
public void testMappingUpdateJsonRowNewRowToDataChangeRecord() {
    final DataChangeRecord dataChangeRecord =
        new DataChangeRecord(
            "partitionToken",
            Timestamp.ofTimeSecondsAndNanos(10L, 20),
            "serverTransactionId",
            true,
            "1",
            "tableName",
            Arrays.asList(
                new ColumnType("column1", new TypeCode("{\"code\":\"INT64\"}"), true, 1L),
                new ColumnType("column2", new TypeCode("{\"code\":\"BYTES\"}"), false, 2L)),
            Collections.singletonList(
                new Mod("{\"column1\":\"value1\"}", null, "{\"column2\":\"newValue2\"}")),
            ModType.UPDATE,
            ValueCaptureType.NEW_ROW,
            10L,
            2L,
            "transactionTag",
            true,
            null);
    final String jsonString = recordToJson(dataChangeRecord, false, false);
    assertNotNull(jsonString);
    ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class);
    when(resultSet.getPgJsonb(0)).thenReturn(jsonString);
    assertEquals(
        Collections.singletonList(dataChangeRecord),
        mapperPostgres.toChangeStreamRecords(partition, resultSet, resultSetMetadata));
} |
// Convenience overload: delegates to the full create(...) with one extra
// boolean fixed to false. NOTE(review): the meaning of that hard-coded flag is
// not visible in this snippet — confirm against the primary overload.
public AggregateParams create(
        final LogicalSchema schema,
        final List<ColumnName> nonAggregateColumns,
        final FunctionRegistry functionRegistry,
        final List<FunctionCall> functionList,
        final boolean windowedAggregation,
        final KsqlConfig config
) {
    return create(
        schema,
        nonAggregateColumns,
        functionRegistry,
        functionList,
        false,
        windowedAggregation,
        config
    );
} | @SuppressWarnings("unchecked")
@Test
public void shouldCreateAggregatorWithCorrectParams() {
    verify(udafFactory).create(2, ImmutableList.of(agg0, agg1));
} |
// Runs all provider tasks for the request (optionally on the common pool) and
// returns the results accumulated in the session, restricted to one result type.
@Override
public <T> List<SearchResult<T>> search(SearchRequest request, Class<T> typeFilter) {
    SearchSession<T> session = new SearchSession<>(request, Collections.singleton(typeFilter));
    if (request.inParallel()) {
        ForkJoinPool commonPool = ForkJoinPool.commonPool();
        // Submit ALL tasks eagerly before joining any of them. The previous
        // stream().map(commonPool::submit).forEach(ForkJoinTask::join) pipeline
        // was lazy: each task was submitted and immediately joined, so the
        // provider tasks effectively ran one after another instead of in parallel.
        ForkJoinTask<?>[] submitted =
            getProviderTasks(request, session).stream()
                .map(commonPool::submit)
                .toArray(ForkJoinTask<?>[]::new);
    for (ForkJoinTask<?> task : submitted) {
            task.join();
        }
    } else {
        // Sequential mode: run each provider task on the calling thread.
        getProviderTasks(request, session).forEach(Runnable::run);
    }
    return session.getResults();
} | @Test
public void testAsyncCancel() {
    MockServices.setServices(SleepProvider.class);
    GraphGenerator generator = GraphGenerator.build();
    SearchRequest request1 = buildRequest("sleep", generator);
    SearchRequest request2 = buildRequest("bar", generator);
    controller.search(request1, searchListener);
    controller.search(request2, searchListener);
    Awaitility.await().untilAsserted(() -> {
        Mockito.verify(searchListener).started(request1);
        Mockito.verify(searchListener).started(request2);
        Mockito.verify(searchListener).cancelled();
        Mockito.verify(searchListener).finished(Mockito.eq(request2), Mockito.any());
    });
} |
// Fans the requested config resources out to the brokers that own them (a null
// key means "any broker"), issues one DescribeConfigs RPC per target node, and
// merges the per-resource futures into a single DescribeConfigsResult.
@Override
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options) {
    // Partition the requested config resources based on which broker they must be sent to with the
    // null broker being used for config resources which can be obtained from any broker
    final Map<Integer, Map<ConfigResource, KafkaFutureImpl<Config>>> nodeFutures = new HashMap<>(configResources.size());
    for (ConfigResource resource : configResources) {
        Integer broker = nodeFor(resource);
        // NOTE(review): nodeFutures.computeIfAbsent(broker, k -> new HashMap<>()).put(...)
        // would express this grouping more directly than compute().
        nodeFutures.compute(broker, (key, value) -> {
            if (value == null) {
                value = new HashMap<>();
            }
            value.put(resource, new KafkaFutureImpl<>());
            return value;
        });
    }
    final long now = time.milliseconds();
    for (Map.Entry<Integer, Map<ConfigResource, KafkaFutureImpl<Config>>> entry : nodeFutures.entrySet()) {
        final Integer node = entry.getKey();
        Map<ConfigResource, KafkaFutureImpl<Config>> unified = entry.getValue();
        // Pin the call to the owning broker when known; otherwise let the least
        // loaded broker (or active controller) answer.
        runnable.call(new Call("describeConfigs", calcDeadlineMs(now, options.timeoutMs()),
            node != null ? new ConstantNodeIdProvider(node, true) : new LeastLoadedBrokerOrActiveKController()) {
            @Override
            DescribeConfigsRequest.Builder createRequest(int timeoutMs) {
                // Null configuration keys requests ALL configs for each resource.
                return new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData()
                    .setResources(unified.keySet().stream()
                        .map(config ->
                            new DescribeConfigsRequestData.DescribeConfigsResource()
                                .setResourceName(config.name())
                                .setResourceType(config.type().id())
                                .setConfigurationKeys(null))
                        .collect(Collectors.toList()))
                    .setIncludeSynonyms(options.includeSynonyms())
                    .setIncludeDocumentation(options.includeDocumentation()));
            }
            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
                for (Map.Entry<ConfigResource, DescribeConfigsResponseData.DescribeConfigsResult> entry : response.resultMap().entrySet()) {
                    ConfigResource configResource = entry.getKey();
                    DescribeConfigsResponseData.DescribeConfigsResult describeConfigsResult = entry.getValue();
                    KafkaFutureImpl<Config> future = unified.get(configResource);
                    if (future == null) {
                        // The response mentions a resource we never asked this node for: log and drop.
                        if (node != null) {
                            log.warn("The config {} in the response from node {} is not in the request",
                                configResource, node);
                        } else {
                            log.warn("The config {} in the response from the least loaded broker is not in the request",
                                configResource);
                        }
                    } else {
                        if (describeConfigsResult.errorCode() != Errors.NONE.code()) {
                            future.completeExceptionally(Errors.forCode(describeConfigsResult.errorCode())
                                .exception(describeConfigsResult.errorMessage()));
                        } else {
                            future.complete(describeConfigResult(describeConfigsResult));
                        }
                    }
                }
                // Fail any future the response never mentioned so callers don't hang.
                completeUnrealizedFutures(
                    unified.entrySet().stream(),
                    configResource -> "The node response did not contain a result for config resource " + configResource);
            }
            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(unified.values(), throwable);
            }
        }, now);
    }
    // Flatten the per-node future maps; duplicate resources would be a bug, so fail fast.
    return new DescribeConfigsResult(
        nodeFutures.entrySet()
            .stream()
            .flatMap(x -> x.getValue().entrySet().stream())
            .collect(Collectors.toMap(
                Map.Entry::getKey,
                Map.Entry::getValue,
                (oldValue, newValue) -> {
                    // Duplicate keys should not occur, throw an exception to signal this issue
                    throw new IllegalStateException(String.format("Duplicate key for values: %s and %s", oldValue, newValue));
                },
                HashMap::new
            ))
    );
} | @Test
public void testDescribeBrokerConfigs() throws Exception {
    ConfigResource broker0Resource = new ConfigResource(ConfigResource.Type.BROKER, "0");
    ConfigResource broker1Resource = new ConfigResource(ConfigResource.Type.BROKER, "1");
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponseFrom(new DescribeConfigsResponse(
            new DescribeConfigsResponseData().setResults(singletonList(new DescribeConfigsResponseData.DescribeConfigsResult()
                .setResourceName(broker0Resource.name()).setResourceType(broker0Resource.type().id()).setErrorCode(Errors.NONE.code())
                .setConfigs(emptyList())))), env.cluster().nodeById(0));
        env.kafkaClient().prepareResponseFrom(new DescribeConfigsResponse(
            new DescribeConfigsResponseData().setResults(singletonList(new DescribeConfigsResponseData.DescribeConfigsResult()
                .setResourceName(broker1Resource.name()).setResourceType(broker1Resource.type().id()).setErrorCode(Errors.NONE.code())
                .setConfigs(emptyList())))), env.cluster().nodeById(1));
        Map<ConfigResource, KafkaFuture<Config>> result = env.adminClient().describeConfigs(asList(
            broker0Resource,
            broker1Resource)).values();
        assertEquals(new HashSet<>(asList(broker0Resource, broker1Resource)), result.keySet());
        result.get(broker0Resource).get();
        result.get(broker1Resource).get();
    }
} |
// Clones the getKiePMMLDiscretize template body, renames its KiePMMLDiscretize
// variable, prepends one generated declaration block per DiscretizeBin, and
// patches the constructor arguments with the Discretize model's values.
static BlockStmt getDiscretizeVariableDeclaration(final String variableName, final Discretize discretize) {
    final MethodDeclaration methodDeclaration =
        DISCRETIZE_TEMPLATE.getMethodsByName(GETKIEPMMLDISCRETIZE).get(0).clone();
    final BlockStmt discretizeBody =
        methodDeclaration.getBody().orElseThrow(() -> new KiePMMLException(String.format(MISSING_BODY_TEMPLATE, methodDeclaration)));
    final VariableDeclarator variableDeclarator =
        getVariableDeclarator(discretizeBody, DISCRETIZE).orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_IN_BODY, DISCRETIZE, discretizeBody)));
    variableDeclarator.setName(variableName);
    final BlockStmt toReturn = new BlockStmt();
    int counter = 0;
    final NodeList<Expression> arguments = new NodeList<>();
    // Emit one nested declaration block per bin; the generated variable names are
    // later injected as the constructor's bin-list arguments (index 2 below).
    for (DiscretizeBin discretizeBin : discretize.getDiscretizeBins()) {
        String nestedVariableName = String.format(VARIABLE_NAME_TEMPLATE, variableName, counter);
        arguments.add(new NameExpr(nestedVariableName));
        BlockStmt toAdd = getDiscretizeBinVariableDeclaration(nestedVariableName, discretizeBin);
        toAdd.getStatements().forEach(toReturn::addStatement);
        counter++;
    }
    final ObjectCreationExpr objectCreationExpr = variableDeclarator.getInitializer()
        .orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_INITIALIZER_TEMPLATE,
            DISCRETIZE, toReturn)))
        .asObjectCreationExpr();
    final Expression nameExpr = new StringLiteralExpr(discretize.getField());
    final Expression mapMissingToExpr = getExpressionForObject(discretize.getMapMissingTo());
    final Expression defaultValueExpr = getExpressionForObject(discretize.getDefaultValue());
    final Expression dataTypeExpression = getExpressionForDataType(discretize.getDataType());
    // Argument 1 keeps its templated value; argument 2 is a method call whose
    // arguments are replaced with the generated bin variable names.
    objectCreationExpr.getArguments().set(0, nameExpr);
    objectCreationExpr.getArguments().get(2).asMethodCallExpr().setArguments(arguments);
    objectCreationExpr.getArguments().set(3, mapMissingToExpr);
    objectCreationExpr.getArguments().set(4, defaultValueExpr);
    objectCreationExpr.getArguments().set(5, dataTypeExpression);
    discretizeBody.getStatements().forEach(toReturn::addStatement);
    return toReturn;
} | @Test
void getDiscretizeVariableDeclaration() throws IOException {
    String variableName = "variableName";
    Discretize discretize = new Discretize();
    discretize.setField(NAME);
    discretize.setDataType(dataType);
    discretize.setMapMissingTo(MAP_MISSING_TO);
    discretize.setDefaultValue(DEFAULTVALUE);
    discretize.addDiscretizeBins(discretizeBins.toArray(new DiscretizeBin[0]));
    BlockStmt retrieved = KiePMMLDiscretizeFactory.getDiscretizeVariableDeclaration(variableName,
        discretize);
    String dataTypeString = getDATA_TYPEString(discretize.getDataType());
    String text = getFileContent(TEST_01_SOURCE);
    Statement expected = JavaParserUtils.parseBlock(String.format(text, variableName, NAME, MAP_MISSING_TO,
        DEFAULTVALUE, dataTypeString));
    assertThat(JavaParserUtils.equalsNode(expected, retrieved)).isTrue();
    List<Class<?>> imports = Arrays.asList(Arrays.class, Collections.class, KiePMMLDiscretize.class,
        KiePMMLDiscretizeBin.class, KiePMMLInterval.class);
    commonValidateCompilationWithImports(retrieved, imports);
} |
// Asks the shared cache manager whether a resource is cached for this app.
// Returns the localized URL on a cache hit, null on a miss; any RPC failure is
// wrapped in YarnException.
@Override
public URL use(ApplicationId applicationId, String resourceKey)
    throws YarnException {
    Path resourcePath = null;
    UseSharedCacheResourceRequest request = Records.newRecord(
        UseSharedCacheResourceRequest.class);
    request.setAppId(applicationId);
    request.setResourceKey(resourceKey);
    try {
        UseSharedCacheResourceResponse response = this.scmClient.use(request);
        if (response != null && response.getPath() != null) {
            resourcePath = new Path(response.getPath());
        }
    } catch (Exception e) {
        // Just catching IOException isn't enough.
        // RPC call can throw ConnectionException.
        // We don't handle different exceptions separately at this point.
        throw new YarnException(e);
    }
    if (resourcePath != null) {
        URL pathURL = URL.fromPath(resourcePath);
        return pathURL;
    } else {
        // The resource was not in the cache.
        return null;
    }
} | @Test
// A null path in the response (cache miss) must yield a null URL.
public void testUseCacheMiss() throws Exception {
    UseSharedCacheResourceResponse response =
        new UseSharedCacheResourceResponsePBImpl();
    response.setPath(null);
    when(cProtocol.use(isA(UseSharedCacheResourceRequest.class))).thenReturn(
        response);
    URL newURL = client.use(mock(ApplicationId.class), "key");
    assertNull("The path is not null!", newURL);
} |
// Returns the ids of the persistent queries inserting into the given source
// that also pass the supplied filter predicate.
// NOTE(review): if insertQueries holds an id with no matching entry in
// persistentQueries, map() yields null and the predicate receives a null
// query — confirm the two maps are always kept in sync.
@Override
public Set<QueryId> getInsertQueries(
    final SourceName sourceName,
    final BiPredicate<SourceName, PersistentQueryMetadata> filterQueries) {
    return insertQueries.getOrDefault(sourceName, Collections.emptySet()).stream()
        .map(persistentQueries::get)
        .filter(query -> filterQueries.test(sourceName, query))
        .map(QueryMetadata::getQueryId)
        .collect(Collectors.toSet());
} | @Test
public void shouldGetQueriesInsertingIntoOrReadingFromSource() {
    givenCreate(registry, "q1", "source", Optional.of("sink1"), CREATE_AS);
    givenCreate(registry, "q2", "source", Optional.of("sink1"), INSERT);
    givenCreate(registry, "q3", "source", Optional.of("sink1"), INSERT);
    givenCreate(registry, "q4", "sink1", Optional.of("sink2"), INSERT);
    // When:
    final Set<QueryId> queries = registry.getInsertQueries(SourceName.of("sink1"), (n, q) -> true);
    // Then:
    assertThat(queries, contains(new QueryId("q2"), new QueryId("q3"), new QueryId("q4")));
} |
// Finalizes the upload under the lock: flushes/uploads the current part, then
// snapshots the upload state into a Committer for a later commit.
@Override
public Committer closeForCommit() throws IOException {
    lock();
    try {
        closeAndUploadPart();
        return upload.snapshotAndGetCommitter();
    } finally {
        unlock();
    }
} | @Test(expected = IOException.class)
// A second closeForCommit on an already-committed stream must fail with IOException.
public void closeForCommitOnClosedStreamShouldFail() throws IOException {
    streamUnderTest.closeForCommit().commit();
    streamUnderTest.closeForCommit().commit();
} |
// Invokes the wrapped function with retries driven by the backoff policy:
// non-repeatable UserCodeExecutionExceptions are rethrown immediately; when the
// backoff signals STOP, the last repeatable error (or a generic failure) is thrown.
OutputT apply(InputT input) throws UserCodeExecutionException {
    Optional<UserCodeExecutionException> latestError = Optional.empty();
    long waitFor = 0L;
    while (waitFor != BackOff.STOP) {
        try {
            sleepIfNeeded(waitFor);
            incIfPresent(getCallCounter());
            return getThrowableFunction().apply(input);
        } catch (UserCodeExecutionException e) {
            if (!e.shouldRepeat()) {
                throw e;
            }
            latestError = Optional.of(e);
        } catch (InterruptedException e) {
            // Fix: restore the interrupt status instead of silently swallowing it,
            // so the owning thread/executor can still observe the cancellation.
            Thread.currentThread().interrupt();
        }
        try {
            incIfPresent(getBackoffCounter());
            waitFor = getBackOff().nextBackOffMillis();
        } catch (IOException e) {
            throw new UserCodeExecutionException(e);
        }
    }
    throw latestError.orElse(
        new UserCodeExecutionException("failed to process for input: " + input));
} | @Test
public void givenCallerNonRepeatableError_emitsIntoFailurePCollection() {
    PCollectionTuple pct =
        pipeline
            .apply(Create.of(1))
            .apply(
                ParDo.of(
                    new DoFnWithRepeaters(
                        new CallerImpl(1, UserCodeExecutionException.class),
                        new SetupTeardownImpl(0)))
                .withOutputTags(OUTPUT_TAG, TupleTagList.of(FAILURE_TAG)));
    PAssert.that(pct.get(OUTPUT_TAG)).empty();
    PAssert.that(pct.get(FAILURE_TAG))
        .containsInAnyOrder(UserCodeExecutionException.class.getName());
    pipeline.run();
} |
// Maps a single bean to the target type; a null source maps to null.
public static <T, S> T convert(S source, Class<T> clazz) {
    if (source == null) {
        return null;
    }
    return BEAN_MAPPER_BUILDER.map(source, clazz);
} | @Test
// Element-wise conversion of a list should preserve its size.
public void ListToListConvertTest() {
    final List<Person> people = new ArrayList<>();
    people.add(Person.builder().name("one").age(1).build());
    people.add(Person.builder().name("two").age(2).build());
    people.add(Person.builder().name("three").age(3).build());
    final List<PersonVo> converted = BeanUtil.convert(people, PersonVo.class);
    Assert.assertEquals(people.size(), converted.size());
} |
// Test hook: resolves the local file-cache directory for the given user under
// this localizer's base directory.
@VisibleForTesting
File getLocalUserFileCacheDir(String userName) {
    return LocalizedResource.getLocalUserFileCacheDir(localBaseDir, userName).toFile();
} | @Test
// With blobstore ACL validation enabled and an admin-only ACL, fetching the
// blob for user1 must raise AuthorizationException.
public void testFailAcls() {
    assertThrows(AuthorizationException.class, () -> {
        try (TmpPath tmp = new TmpPath()) {
            Map<String, Object> conf = new HashMap<>();
            // set clean time really high so doesn't kick in
            conf.put(DaemonConfig.SUPERVISOR_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS, 60 * 60 * 1000);
            // enable blobstore acl validation
            conf.put(Config.STORM_BLOBSTORE_ACL_VALIDATION_ENABLED, true);
            String topo1 = "topo1";
            String key1 = "key1";
            TestLocalizer localizer = new TestLocalizer(conf, tmp.getPath());
            ReadableBlobMeta rbm = new ReadableBlobMeta();
            // set acl so user doesn't have read access
            AccessControl acl = new AccessControl(AccessControlType.USER, BlobStoreAclHandler.ADMIN);
            acl.set_name(user1);
            rbm.set_settable(new SettableBlobMeta(Collections.singletonList(acl)));
            when(mockBlobStore.getBlobMeta(anyString())).thenReturn(rbm);
            when(mockBlobStore.getBlob(key1)).thenReturn(new TestInputStreamWithMeta(1));
            File user1Dir = localizer.getLocalUserFileCacheDir(user1);
            assertTrue(user1Dir.mkdirs(), "failed to create user dir");
            LocalAssignment topo1Assignment = constructLocalAssignment(topo1, user1, Collections.emptyList());
            PortAndAssignment topo1Pna = new PortAndAssignmentImpl(1, topo1Assignment);
            // This should throw AuthorizationException because auth failed
            localizer.getBlob(new LocalResource(key1, false, false), topo1Pna, null);
        }
    });
} |
// Builds the CreateTableCommand for a query sink. When the key format is
// windowed AND an emit strategy is supplied, the window info is rebuilt with
// the strategy's output refinement; otherwise it is passed through untouched.
// NOTE(review): outputNode.getSinkName().get() assumes the sink name is always
// present for table outputs — confirm.
public CreateTableCommand createTableCommand(
    final KsqlStructuredDataOutputNode outputNode,
    final Optional<RefinementInfo> emitStrategy
) {
    Optional<WindowInfo> windowInfo =
        outputNode.getKsqlTopic().getKeyFormat().getWindowInfo();
    if (windowInfo.isPresent() && emitStrategy.isPresent()) {
        final WindowInfo info = windowInfo.get();
        windowInfo = Optional.of(WindowInfo.of(
            info.getType(),
            info.getSize(),
            Optional.of(emitStrategy.get().getOutputRefinement())
        ));
    }
    return new CreateTableCommand(
        outputNode.getSinkName().get(),
        outputNode.getSchema(),
        outputNode.getTimestampColumn(),
        outputNode.getKsqlTopic().getKafkaTopicName(),
        Formats.from(outputNode.getKsqlTopic()),
        windowInfo,
        Optional.of(outputNode.getOrReplace()),
        Optional.of(false)
    );
} | @Test
public void shouldThrowOnNoElementsInCreateTable() {
    // Given:
    final CreateTable statement
        = new CreateTable(SOME_NAME, TableElements.of(),
        false, true, withProperties, false);
    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> createSourceFactory.createTableCommand(statement, ksqlConfig)
    );
    // Then:
    assertThat(e.getMessage(), containsString(
        "The statement does not define any columns."));
} |
// Verifies per-chunk checksums over the data buffer via the native helper;
// throws ChecksumException (with fileName/basePos context) on a mismatch.
public static void verifyChunkedSums(int bytesPerSum, int checksumType,
    ByteBuffer sums, ByteBuffer data, String fileName, long basePos)
    throws ChecksumException {
    // The trailing 'true' selects verify mode (as opposed to computing sums).
    nativeComputeChunkedSums(bytesPerSum, checksumType,
        sums, sums.position(),
        data, data.position(), data.remaining(),
        fileName, basePos, true);
} | @Test
public void testVerifyChunkedSumsFail() {
    allocateDirectByteBuffers();
    fillDataAndInvalidChecksums();
    assertThrows(ChecksumException.class,
        () -> NativeCrc32.verifyChunkedSums(bytesPerChecksum, checksumType.id,
            checksums, data, fileName, BASE_POSITION));
} |
// Asynchronously deletes the account's data from the storage service using
// per-account basic-auth credentials; the future completes exceptionally with
// SecureStorageException on any non-successful HTTP status.
public CompletableFuture<Void> deleteStoredData(final UUID accountUuid) {
    final ExternalServiceCredentials credentials = storageServiceCredentialsGenerator.generateForUuid(accountUuid);
    final HttpRequest request = HttpRequest.newBuilder()
        .uri(deleteUri)
        .DELETE()
        .header(HttpHeaders.AUTHORIZATION, basicAuthHeader(credentials))
        .build();
    return httpClient.sendAsync(request, HttpResponse.BodyHandlers.ofString()).thenApply(response -> {
        if (HttpUtils.isSuccessfulResponse(response.statusCode())) {
            return null;
        }
        throw new SecureStorageException("Failed to delete storage service data: " + response.statusCode());
    });
} | @Test
// A 400 from the storage service must surface as SecureStorageException.
void deleteStoredDataFailure() {
    final String username = RandomStringUtils.randomAlphabetic(16);
    final String password = RandomStringUtils.randomAlphanumeric(32);
    when(credentialsGenerator.generateForUuid(accountUuid)).thenReturn(
        new ExternalServiceCredentials(username, password));
    wireMock.stubFor(delete(urlEqualTo(SecureStorageClient.DELETE_PATH))
        .withBasicAuth(username, password)
        .willReturn(aResponse().withStatus(400)));
    final CompletionException completionException = assertThrows(CompletionException.class,
        () -> secureStorageClient.deleteStoredData(accountUuid).join());
    assertTrue(completionException.getCause() instanceof SecureStorageException);
} |
// Renders the CPU amount with exactly two decimal places, e.g. "1.20 cores".
public String toHumanReadableString() {
    // RoundingMode.HALF_UP replaces the deprecated BigDecimal.ROUND_HALF_UP int
    // constant; the former stripTrailingZeros() call was a no-op because %.2f
    // always re-pads to two decimals anyway (Formatter rounds HALF_UP too).
    return String.format(
        "%.2f cores", getValue().setScale(2, java.math.RoundingMode.HALF_UP));
} | @Test
void toHumanReadableString() {
    assertThat(new CPUResource(0).toHumanReadableString()).isEqualTo("0.00 cores");
    assertThat(new CPUResource(1).toHumanReadableString()).isEqualTo("1.00 cores");
    assertThat(new CPUResource(1.2).toHumanReadableString()).isEqualTo("1.20 cores");
    assertThat(new CPUResource(1.23).toHumanReadableString()).isEqualTo("1.23 cores");
    assertThat(new CPUResource(1.234).toHumanReadableString()).isEqualTo("1.23 cores");
    assertThat(new CPUResource(1.235).toHumanReadableString()).isEqualTo("1.24 cores");
    assertThat(new CPUResource(10).toHumanReadableString()).isEqualTo("10.00 cores");
    assertThat(new CPUResource(100).toHumanReadableString()).isEqualTo("100.00 cores");
    assertThat(new CPUResource(1000).toHumanReadableString()).isEqualTo("1000.00 cores");
    assertThat(new CPUResource(123456789).toHumanReadableString())
        .isEqualTo("123456789.00 cores");
    assertThat(new CPUResource(12345.6789).toHumanReadableString()).isEqualTo("12345.68 cores");
} |
// Returns a previously allocated batch buffer to the pool. When the pool
// already retains the maximum number of batches the buffer is dropped (and the
// allocation count shrinks); otherwise the cleared buffer is kept for reuse.
@Override
public void release(ByteBuffer previouslyAllocated) {
    lock.lock();
    try {
        previouslyAllocated.clear();
        // Reject foreign buffers: only buffers of exactly batchSize belong here.
        if (previouslyAllocated.capacity() != batchSize) {
            throw new IllegalArgumentException("Released buffer with unexpected size "
                + previouslyAllocated.capacity());
        }
        // Free the buffer if the number of pooled buffers is already the maximum number of batches.
        // Otherwise return the buffer to the memory pool.
        if (free.size() >= maxRetainedBatches) {
            numAllocatedBatches--;
        } else {
            free.offer(previouslyAllocated);
        }
    } finally {
        lock.unlock();
    }
} | @Test
public void testReleaseBufferNotMatchingBatchSize() {
    int batchSize = 1024;
    int maxRetainedBatches = 3;
    BatchMemoryPool pool = new BatchMemoryPool(maxRetainedBatches, batchSize);
    ByteBuffer buffer = ByteBuffer.allocate(1023);
    assertThrows(IllegalArgumentException.class, () -> pool.release(buffer));
} |
// Opens a seekable read channel for the given GCS path and wraps it so reads
// are counted per bucket.
public SeekableByteChannel open(GcsPath path) throws IOException {
    String bucket = path.getBucket();
    SeekableByteChannel channel =
        googleCloudStorage.open(new StorageResourceId(path.getBucket(), path.getObject()));
    return wrapInCounting(channel, bucket);
} | @Test
// Closing the returned channel twice must be safe (idempotent close).
public void testGCSChannelCloseIdempotent() throws IOException {
    GcsOptions pipelineOptions = gcsOptionsWithTestCredential();
    GcsUtil gcsUtil = pipelineOptions.getGcsUtil();
    GoogleCloudStorageReadOptions readOptions =
        GoogleCloudStorageReadOptions.builder().setFastFailOnNotFound(false).build();
    SeekableByteChannel channel =
        gcsUtil.open(GcsPath.fromComponents("testbucket", "testobject"), readOptions);
    channel.close();
    channel.close();
} |
/**
 * Changes the root logger level and toggles SQL logging (enabled only for TRACE).
 * NOTE(review): rejection of unsupported levels (e.g. WARN, per the test) is not
 * visible here — presumably enforced inside helper.changeRoot or the
 * LogLevelConfig; confirm.
 */
public void changeLevel(LoggerLevel level) {
    Level logbackLevel = Level.toLevel(level.name());
    // SQL statement logging is only meaningful at the most verbose level.
    database.enableSqlLogging(level == TRACE);
    helper.changeRoot(serverProcessLogging.getLogLevelConfig(), logbackLevel);
    LoggerFactory.getLogger(ServerLogging.class).info("Level of logs changed to {}", level);
} | @Test
// WARN is outside the supported levels and must be rejected with an IAE.
public void changeLevel_fails_with_IAE_when_level_is_WARN() {
    assertThatThrownBy(() -> underTest.changeLevel(WARN))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("WARN log level is not supported (allowed levels are [TRACE, DEBUG, INFO])");
} |
@Override
public boolean match(Message msg, StreamRule rule) {
    // Fetch the field once instead of twice (null check + value use in the
    // original), avoiding a redundant lookup.
    final Object fieldValue = msg.getField(rule.getField());
    if (fieldValue == null) {
        // Absent field: a normal rule cannot match, an inverted rule does.
        return rule.getInverted();
    }
    try {
        // Compiled patterns are cached; compilation is comparatively expensive.
        final Pattern pattern = patternCache.get(rule.getValue());
        // InterruptibleCharSequence allows aborting runaway regex evaluation.
        final CharSequence charSequence = new InterruptibleCharSequence(fieldValue.toString());
        // XOR flips the match result for inverted rules.
        return rule.getInverted() ^ pattern.matcher(charSequence).find();
    } catch (ExecutionException e) {
        LOG.error("Unable to get pattern from regex cache: ", e);
    }
    // Cache failure: treat as "no match" regardless of inversion.
    return false;
} | @Test
// A rule with an alternation/quantifier regex must match a conforming field value.
public void testSuccessfulComplexRegexMatch() {
    StreamRule rule = getSampleRule();
    rule.setField("some_field");
    rule.setValue("foo=^foo|bar\\d.+wat");
    Message msg = getSampleMessage();
    msg.addField("some_field", "bar1foowat");
    StreamRuleMatcher matcher = getMatcher(rule);
    assertTrue(matcher.match(msg, rule));
} |
/**
 * Builds a mutation detector for the given value. Null values cannot be
 * mutated, so a no-op detector suffices; otherwise mutations are detected by
 * comparing encoded forms of the value via the supplied coder.
 */
public static <T> MutationDetector forValueWithCoder(T value, Coder<T> coder)
    throws CoderException {
    return value == null
        ? noopMutationDetector()
        : new CodedValueMutationDetector<>(value, coder);
} | @Test
// A detector over an unmodified value must pass verification.
public void testStructuralValue() throws Exception {
    Set<Integer> value = Sets.newHashSet(Arrays.asList(1, 2, 3, 4));
    MutationDetector detector =
        MutationDetectors.forValueWithCoder(value, IterableCoder.of(VarIntCoder.of()));
    detector.verifyUnmodified();
} |
// Convenience overload: creates a topic with the given queue count,
// topicSysFlag 0 and no extra attributes.
public void createTopic(String key, String newTopic, int queueNum) throws MQClientException {
    createTopic(key, newTopic, queueNum, 0, null);
} | @Test
// Smoke test: topic creation with an empty key must not throw.
public void testCreateTopic() throws MQClientException {
    mqAdminImpl.createTopic("", defaultTopic, 6);
} |
public static Select select(String fieldName) {
    // Entry point of the fluent YQL builder: begins a SELECT of the given field.
    return new Select(fieldName);
} | @Test
// The fluent builder must render long comparisons and range() into valid YQL.
void long_numeric_operations() {
    String q = Q.select("*")
        .from("sd1")
        .where("f1").le(1L)
        .and("f2").lt(2L)
        .and("f3").ge(3L)
        .and("f4").gt(4L)
        .and("f5").eq(5L)
        .and("f6").inRange(6L, 7L)
        .build();
    assertEquals(q, "yql=select * from sd1 where f1 <= 1L and f2 < 2L and f3 >= 3L and f4 > 4L and f5 = 5L and range(f6, 6L, 7L)");
} |
/**
 * Lists the regions available for this session. A region pinned on the
 * bookmark takes precedence; otherwise the service catalog is consulted,
 * default regions sorted first.
 */
@Override
public Set<Name> getLocations() {
    final Set<Name> locations = new LinkedHashSet<>();
    if(StringUtils.isNotBlank(session.getHost().getRegion())) {
        locations.add(new SwiftRegion(session.getHost().getRegion()));
    }
    else {
        final List<Region> regions = new ArrayList<>(session.getClient().getRegions());
        // Sort default regions first. Boolean.compare keeps the comparator
        // antisymmetric; the previous version returned -1 whenever r1 was a
        // default region without checking r2, which violates the Comparator
        // contract when both regions are flagged as default.
        regions.sort(new Comparator<Region>() {
            @Override
            public int compare(final Region r1, final Region r2) {
                return Boolean.compare(r2.isDefault(), r1.isDefault());
            }
        });
        for(Region region : regions) {
            if(StringUtils.isBlank(region.getRegionId())) {
                // v1 authentication contexts do not have region support
                continue;
            }
            locations.add(new SwiftRegion(region.getRegionId()));
        }
    }
    return locations;
} | @Test
// The catalog must contain the known regions, with the default (DFW) first.
public void testGetLocations() throws Exception {
    final Set<Location.Name> locations = new SwiftLocationFeature(session).getLocations();
    assertTrue(locations.contains(new SwiftLocationFeature.SwiftRegion("DFW")));
    assertTrue(locations.contains(new SwiftLocationFeature.SwiftRegion("ORD")));
    assertTrue(locations.contains(new SwiftLocationFeature.SwiftRegion("SYD")));
    assertEquals(new SwiftLocationFeature.SwiftRegion("DFW"), locations.iterator().next());
} |
/**
 * Starts (mode == null) or ends (mode == "disable") a NETCONF notification
 * subscription on the device. Returns false when this node is not master,
 * the mode is invalid, or the device cannot be reached.
 */
@Override
public boolean subscribe(String mode) {
    DriverHandler handler = handler();
    NetconfController controller = handler.get(NetconfController.class);
    MastershipService mastershipService = handler.get(MastershipService.class);
    DeviceId ncDeviceId = handler.data().deviceId();
    checkNotNull(controller, "Netconf controller is null");
    // Only the master instance may talk to the device.
    if (!mastershipService.isLocalMaster(ncDeviceId)) {
        log.warn("Not master for {} Use {} to execute command",
            ncDeviceId,
            mastershipService.getMasterFor(ncDeviceId));
        return false;
    }
    // The only accepted non-null mode is "disable".
    if (mode != null) {
        if (!DISABLE.equals(mode)) {
            log.error("Invalid mode: {}", mode);
            return false;
        }
    }
    try {
        if (mode != null) {
            // mode == "disable": tear down the existing subscription.
            controller.getDevicesMap().get(ncDeviceId).getSession().
                endSubscription();
        } else {
            // mode == null: subscribe to volt NE alert notifications.
            StringBuilder request = new StringBuilder();
            request.append(ANGLE_LEFT + NOTIFY_ALERT + SPACE);
            request.append(VOLT_NE_NAMESPACE + SLASH + ANGLE_RIGHT);
            controller.getDevicesMap().get(ncDeviceId).getSession().
                startSubscription(request.toString());
        }
    } catch (NetconfException e) {
        log.error("Cannot communicate to device {} exception {}", ncDeviceId, e);
        return false;
    }
    return true;
} | @Test
// null starts a subscription, "disable" ends it, anything else is rejected.
public void testSubscribe() throws Exception {
    assertTrue("Incorrect response", voltConfig.subscribe(null));
    assertFalse("Incorrect response", voltConfig.subscribe("false"));
    assertTrue("Incorrect response", voltConfig.subscribe("disable"));
} |
// Static factory for a new, unconfigured async Feign builder.
public static <C> AsyncBuilder<C> builder() {
    return new AsyncBuilder<>();
} | @Test
// A decoder IOException must surface as a FeignException that carries both the
// request description and the original request body.
void throwsFeignExceptionIncludingBody() throws Throwable {
    server.enqueue(new MockResponse().setBody("success!"));
    TestInterfaceAsync api = AsyncFeign.builder().decoder((response, type) -> {
        throw new IOException("timeout");
    })
        .target(TestInterfaceAsync.class, "http://localhost:" + server.getPort());
    CompletableFuture<?> cf = api.body("Request body");
    server.takeRequest();
    try {
        unwrap(cf);
    } catch (FeignException e) {
        assertThat(e.getMessage())
            .contains("timeout reading POST http://localhost:" + server.getPort() + "/");
        assertThat(e.contentUTF8()).isEqualTo("Request body");
        return;
    }
    fail("");
} |
@Override
public Integer parse(final String value) {
    // Integer.valueOf yields the same result as parseInt with autoboxing and
    // may reuse cached instances for small values. Throws NumberFormatException
    // for non-numeric input, like the previous form.
    return Integer.valueOf(value);
} | @Test
// "1" must parse to the integer 1.
void assertParse() {
    assertThat(new PostgreSQLIntValueParser().parse("1"), is(1));
} |
// Creates (or empties) the directory, then mounts it as a static web context
// under the given context path.
@VisibleForTesting
StandardContext addStaticDir(Tomcat tomcat, String contextPath, File dir) {
    try {
        fs.createOrCleanupDir(dir);
    } catch (IOException e) {
        // Startup cannot proceed without the deploy directory; fail fast with the cause.
        throw new IllegalStateException(format("Fail to create or clean-up directory %s", dir.getAbsolutePath()), e);
    }
    return addContext(tomcat, contextPath, dir);
} | @Test
// The static dir must be (re)created and mounted as a webapp context.
public void create_dir_and_configure_static_directory() throws Exception {
    File dir = temp.newFolder();
    dir.delete();
    underTest.addStaticDir(tomcat, "/deploy", dir);
    assertThat(dir).isDirectory().exists();
    verify(tomcat).addWebapp("/deploy", dir.getAbsolutePath());
} |
/**
 * Parses a CGM Specific Ops Control Point response: validates the packet
 * length (with or without a trailing CRC), verifies the CRC when present,
 * then dispatches the decoded operand to the callback matching the op code.
 * Malformed packets go to onInvalidDataReceived; CRC mismatches go to
 * onCGMSpecificOpsResponseReceivedWithCrcError.
 */
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    super.onDataReceived(device, data);
    // Minimum packet: op code + at least one operand byte.
    if (data.size() < 2) {
        onInvalidDataReceived(device, data);
        return;
    }
    // Read the Op Code
    final int opCode = data.getIntValue(Data.FORMAT_UINT8, 0);
    // Estimate the expected operand size based on the Op Code
    int expectedOperandSize;
    switch (opCode) {
        case OP_CODE_COMMUNICATION_INTERVAL_RESPONSE ->
            // UINT8
            expectedOperandSize = 1;
        case OP_CODE_CALIBRATION_VALUE_RESPONSE ->
            // Calibration Value
            expectedOperandSize = 10;
        case OP_CODE_PATIENT_HIGH_ALERT_LEVEL_RESPONSE,
            OP_CODE_PATIENT_LOW_ALERT_LEVEL_RESPONSE,
            OP_CODE_HYPO_ALERT_LEVEL_RESPONSE,
            OP_CODE_HYPER_ALERT_LEVEL_RESPONSE,
            OP_CODE_RATE_OF_DECREASE_ALERT_LEVEL_RESPONSE,
            OP_CODE_RATE_OF_INCREASE_ALERT_LEVEL_RESPONSE ->
            // SFLOAT
            expectedOperandSize = 2;
        case OP_CODE_RESPONSE_CODE ->
            // Request Op Code (UINT8), Response Code Value (UINT8)
            expectedOperandSize = 2;
        default -> {
            // Unknown op code: cannot size the operand, reject the packet.
            onInvalidDataReceived(device, data);
            return;
        }
    }
    // Verify packet length: exact operand size, optionally followed by a 2-byte CRC.
    if (data.size() != 1 + expectedOperandSize && data.size() != 1 + expectedOperandSize + 2) {
        onInvalidDataReceived(device, data);
        return;
    }
    // Verify CRC if present
    final boolean crcPresent = data.size() == 1 + expectedOperandSize + 2; // opCode + expected operand + CRC
    if (crcPresent) {
        // CRC covers op code + operand; the last two bytes carry the expected value.
        final int expectedCrc = data.getIntValue(Data.FORMAT_UINT16_LE, 1 + expectedOperandSize);
        final int actualCrc = CRC16.MCRF4XX(data.getValue(), 0, 1 + expectedOperandSize);
        if (expectedCrc != actualCrc) {
            onCGMSpecificOpsResponseReceivedWithCrcError(device, data);
            return;
        }
    }
    // Op codes with structured (non-SFLOAT) operands are handled first.
    switch (opCode) {
        case OP_CODE_COMMUNICATION_INTERVAL_RESPONSE -> {
            final int interval = data.getIntValue(Data.FORMAT_UINT8, 1);
            onContinuousGlucoseCommunicationIntervalReceived(device, interval, crcPresent);
            return;
        }
        case OP_CODE_CALIBRATION_VALUE_RESPONSE -> {
            // Operand layout per offsets below: SFLOAT concentration, UINT16 time,
            // packed type/location nibbles, UINT16 next time, UINT16 record no, UINT8 status.
            final float glucoseConcentrationOfCalibration = data.getFloatValue(Data.FORMAT_SFLOAT, 1);
            final int calibrationTime = data.getIntValue(Data.FORMAT_UINT16_LE, 3);
            final int calibrationTypeAndSampleLocation = data.getIntValue(Data.FORMAT_UINT8, 5);
            @SuppressLint("WrongConstant") final int calibrationType = calibrationTypeAndSampleLocation & 0x0F;
            final int calibrationSampleLocation = calibrationTypeAndSampleLocation >> 4;
            final int nextCalibrationTime = data.getIntValue(Data.FORMAT_UINT16_LE, 6);
            final int calibrationDataRecordNumber = data.getIntValue(Data.FORMAT_UINT16_LE, 8);
            final int calibrationStatus = data.getIntValue(Data.FORMAT_UINT8, 10);
            onContinuousGlucoseCalibrationValueReceived(device, glucoseConcentrationOfCalibration,
                calibrationTime, nextCalibrationTime, calibrationType, calibrationSampleLocation,
                calibrationDataRecordNumber, new CGMCalibrationStatus(calibrationStatus), crcPresent);
            return;
        }
        case OP_CODE_RESPONSE_CODE -> {
            final int requestCode = data.getIntValue(Data.FORMAT_UINT8, 1); // ignore
            final int responseCode = data.getIntValue(Data.FORMAT_UINT8, 2);
            if (responseCode == CGM_RESPONSE_SUCCESS) {
                onCGMSpecificOpsOperationCompleted(device, requestCode, crcPresent);
            } else {
                onCGMSpecificOpsOperationError(device, requestCode, responseCode, crcPresent);
            }
            return;
        }
    }
    // Remaining op codes all carry a single SFLOAT alert level.
    // Read SFLOAT value
    final float value = data.getFloatValue(Data.FORMAT_SFLOAT, 1);
    switch (opCode) {
        case OP_CODE_PATIENT_HIGH_ALERT_LEVEL_RESPONSE ->
            onContinuousGlucosePatientHighAlertReceived(device, value, crcPresent);
        case OP_CODE_PATIENT_LOW_ALERT_LEVEL_RESPONSE ->
            onContinuousGlucosePatientLowAlertReceived(device, value, crcPresent);
        case OP_CODE_HYPO_ALERT_LEVEL_RESPONSE ->
            onContinuousGlucoseHypoAlertReceived(device, value, crcPresent);
        case OP_CODE_HYPER_ALERT_LEVEL_RESPONSE ->
            onContinuousGlucoseHyperAlertReceived(device, value, crcPresent);
        case OP_CODE_RATE_OF_DECREASE_ALERT_LEVEL_RESPONSE ->
            onContinuousGlucoseRateOfDecreaseAlertReceived(device, value, crcPresent);
        case OP_CODE_RATE_OF_INCREASE_ALERT_LEVEL_RESPONSE ->
            onContinuousGlucoseRateOfIncreaseAlertReceived(device, value, crcPresent);
    }
} | @Test
// Hyper alert response (op code 18) must decode the SFLOAT level 1000, no CRC.
public void onContinuousGlucoseHyperAlertReceived() {
    final Data data = new Data(new byte[] { 18, 10, 32});
    callback.onDataReceived(null, data);
    assertEquals("Level", 1000f, hyperAlertLevel, 0.01);
    assertFalse(secured);
} |
/**
 * Returns (and caches) the single consumer used to feed elements into the
 * given PCollection. Wraps the registered downstream consumer(s) with metric
 * tracking and, when exactly one splitting-capable consumer is registered,
 * with split support; multiple consumers get a multiplexing wrapper.
 */
public FnDataReceiver<WindowedValue<?>> getMultiplexingConsumer(String pCollectionId) {
    return pCollectionIdsToWrappedConsumer.computeIfAbsent(
        pCollectionId,
        pcId -> {
            if (!processBundleDescriptor.containsPcollections(pCollectionId)) {
                throw new IllegalArgumentException(
                    String.format("Unknown PCollection id %s", pCollectionId));
            }
            String coderId =
                processBundleDescriptor.getPcollectionsOrThrow(pCollectionId).getCoderId();
            Coder<?> coder;
            OutputSampler<?> sampler = null;
            try {
                Coder<?> maybeWindowedValueInputCoder = rehydratedComponents.getCoder(coderId);
                // Optional data sampling hook for debugging/inspection.
                if (dataSampler != null) {
                    sampler = dataSampler.sampleOutput(pCollectionId, maybeWindowedValueInputCoder);
                }
                // TODO: Stop passing windowed value coders within PCollections.
                if (maybeWindowedValueInputCoder instanceof WindowedValue.WindowedValueCoder) {
                    coder = ((WindowedValueCoder) maybeWindowedValueInputCoder).getValueCoder();
                } else {
                    coder = maybeWindowedValueInputCoder;
                }
            } catch (IOException e) {
                throw new IllegalStateException(
                    String.format("Unable to materialize coder %s", coderId), e);
            }
            List<ConsumerAndMetadata> consumerAndMetadatas =
                pCollectionIdsToConsumers.computeIfAbsent(
                    pCollectionId, (unused) -> new ArrayList<>());
            if (consumerAndMetadatas.size() == 1) {
                ConsumerAndMetadata consumerAndMetadata = consumerAndMetadatas.get(0);
                // A single splittable consumer keeps SDF split support.
                if (consumerAndMetadata.getConsumer() instanceof HandlesSplits) {
                    return new SplittingMetricTrackingFnDataReceiver(
                        pcId, coder, consumerAndMetadata, sampler);
                }
                return new MetricTrackingFnDataReceiver(pcId, coder, consumerAndMetadata, sampler);
            } else {
                /* TODO(SDF), Consider supporting splitting each consumer individually. This would never
                come up in the existing SDF expansion, but might be useful to support fused SDF nodes.
                This would require dedicated delivery of the split results to each of the consumers
                separately. */
                return new MultiplexingMetricTrackingFnDataReceiver(
                    pcId, coder, consumerAndMetadatas, sampler);
            }
        });
} | @Test
public void noConsumers() throws Exception {
ShortIdMap shortIds = new ShortIdMap();
BundleProgressReporter.InMemory reporterAndRegistrar = new BundleProgressReporter.InMemory();
PCollectionConsumerRegistry consumers =
new PCollectionConsumerRegistry(
sampler.create(), shortIds, reporterAndRegistrar, TEST_DESCRIPTOR);
FnDataReceiver<WindowedValue<String>> wrapperConsumer =
(FnDataReceiver<WindowedValue<String>>)
(FnDataReceiver) consumers.getMultiplexingConsumer(P_COLLECTION_A);
String elementValue = "elem";
WindowedValue<String> element = valueInGlobalWindow(elementValue);
int numElements = 10;
for (int i = 0; i < numElements; i++) {
wrapperConsumer.accept(element);
}
List<MonitoringInfo> expected = new ArrayList<>();
SimpleMonitoringInfoBuilder builder = new SimpleMonitoringInfoBuilder();
builder.setUrn(MonitoringInfoConstants.Urns.ELEMENT_COUNT);
builder.setLabel(MonitoringInfoConstants.Labels.PCOLLECTION, P_COLLECTION_A);
builder.setInt64SumValue(numElements);
expected.add(builder.build());
long elementByteSize = StringUtf8Coder.of().getEncodedElementByteSize(elementValue);
builder = new SimpleMonitoringInfoBuilder();
builder.setUrn(Urns.SAMPLED_BYTE_SIZE);
builder.setLabel(MonitoringInfoConstants.Labels.PCOLLECTION, P_COLLECTION_A);
builder.setInt64DistributionValue(
DistributionData.create(
numElements * elementByteSize, numElements, elementByteSize, elementByteSize));
expected.add(builder.build());
Map<String, ByteString> actualData = new HashMap<>();
reporterAndRegistrar.updateFinalMonitoringData(actualData);
// Clear the timestamp before comparison.
Iterable<MonitoringInfo> result =
Iterables.filter(
shortIds.toMonitoringInfo(actualData),
monitoringInfo -> monitoringInfo.containsLabels(Labels.PCOLLECTION));
assertThat(result, containsInAnyOrder(expected.toArray()));
} |
private Keys() {} | @Test
// Keys.create() must project each KV onto its key, preserving duplicates.
@Category(ValidatesRunner.class)
public void testKeys() {
    PCollection<KV<String, Integer>> input =
        p.apply(
            Create.of(Arrays.asList(TABLE))
                .withCoder(KvCoder.of(StringUtf8Coder.of(), BigEndianIntegerCoder.of())));
    PCollection<String> output = input.apply(Keys.create());
    PAssert.that(output).containsInAnyOrder("one", "two", "three", "dup", "dup");
    p.run();
} |
// GET /{id}: looks up a single post by id; yields an empty Mono when absent.
@GetMapping(value = "/{id}")
public Mono<Post> get(@PathVariable(value = "id") Long id) {
    return this.posts.findById(id);
} | @Test
public void getPostById() throws Exception {
this.client
.get()
.uri("/posts/1")
.accept(APPLICATION_JSON)
.exchange()
.expectBody()
.jsonPath("$.title")
.isEqualTo("post one");
this.client
.get()
.uri("/posts/2")
.accept(APPLICATION_JSON)
.exchange()
.expectBody()
.jsonPath("$.title")
.isEqualTo("post two");
} |
/**
 * Returns whether two ordered, initial (not-yet-advanced) pointers overlap
 * or are adjacent closely enough to be merged. Preconditions (asserted):
 * both pointers have the same direction, neither has advanced
 * (lastEntryKeyData == null), and left.from <= right.from per the comparator.
 * A null bound means +/- infinity.
 */
public static boolean overlapsOrdered(IndexIterationPointer left, IndexIterationPointer right, Comparator comparator) {
    assert left.isDescending() == right.isDescending() : "Cannot compare pointer with different directions";
    assert left.lastEntryKeyData == null && right.lastEntryKeyData == null : "Can merge only initial pointers";
    // fast path for the same instance
    if (left == right) {
        return true;
    }
    assert comparator.compare(left.from, right.from) <= 0 : "Pointers must be ordered";
    // if one of the ends is +/-inf respectively -> overlap
    if (left.to == null || right.from == null) {
        return true;
    }
    // if given end is equal the ranges overlap (or at least are adjacent)
    // if at least one of the ranges is inclusive
    boolean eqOverlaps = left.isToInclusive() || right.isFromInclusive();
    // Check non-inf values, do not need to check the other way around because pointers are ordered
    // Thanks to order we do not have to check `right.to`, we only need to check
    // if `right.from` belongs to `left` pointer range.
    // we must take into account inclusiveness, so we do not merge < X and > X ranges
    int rfCmpLt = comparator.compare(right.from, left.to);
    return eqOverlaps ? rfCmpLt <= 0 : rfCmpLt < 0;
} | @Test
void overlapsOrderedRanges() {
// ranges unbounded on the same side
assertTrue(overlapsOrdered(pointer(lessThan(5)), pointer(lessThan(5)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR));
assertTrue(overlapsOrdered(pointer(lessThan(5)), pointer(lessThan(6)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR));
assertTrue(overlapsOrdered(pointer(greaterThan(5)), pointer(greaterThan(5)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR));
assertTrue(overlapsOrdered(pointer(greaterThan(5)), pointer(greaterThan(6)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR));
// adjacent ranges unbounded on different sides
assertFalse(overlapsOrdered(pointer(lessThan(5)), pointer(greaterThan(5)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR),
"Ranges with the same open end should not be adjacent");
assertTrue(overlapsOrdered(pointer(lessThan(5)), pointer(atLeast(5)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR));
assertTrue(overlapsOrdered(pointer(atMost(5)), pointer(greaterThan(5)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR));
// overlapping ranges unbounded on different sides
assertTrue(overlapsOrdered(pointer(lessThan(6)), pointer(greaterThan(5)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR));
assertTrue(overlapsOrdered(pointer(lessThan(6)), pointer(atLeast(5)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR));
assertTrue(overlapsOrdered(pointer(atMost(6)), pointer(greaterThan(5)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR));
assertTrue(overlapsOrdered(pointer(atMost(6)), pointer(atLeast(5)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR));
// non-overlapping ranges unbounded on different sides
assertFalse(overlapsOrdered(pointer(lessThan(5)), pointer(greaterThan(6)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR));
assertFalse(overlapsOrdered(pointer(lessThan(5)), pointer(atLeast(6)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR));
assertFalse(overlapsOrdered(pointer(atMost(5)), pointer(greaterThan(6)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR));
assertFalse(overlapsOrdered(pointer(atMost(5)), pointer(atLeast(6)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR));
} |
// Sets the compiler identifier and returns this builder for chaining.
public ApplicationBuilder compiler(String compiler) {
    this.compiler = compiler;
    return getThis();
} | @Test
// The builder must propagate the compiler value to the built application.
void compiler() {
    ApplicationBuilder builder = new ApplicationBuilder();
    builder.compiler("compiler");
    Assertions.assertEquals("compiler", builder.build().getCompiler());
} |
/**
 * Reports the durations of recently successful Compute Engine tasks,
 * resolving each task's entity UUID to its key for metric labelling.
 */
@Override
public void run() {
    try (DbSession dbSession = dbClient.openSession(false)) {
        List<CeActivityDto> recentSuccessfulTasks = getRecentSuccessfulTasks(dbSession);
        Collection<String> entityUuids = recentSuccessfulTasks.stream()
            .map(CeActivityDto::getEntityUuid)
            .toList();
        List<EntityDto> entities = dbClient.entityDao().selectByUuids(dbSession, entityUuids);
        Map<String, String> entityUuidAndKeys = entities.stream()
            .collect(Collectors.toMap(EntityDto::getUuid, EntityDto::getKey));
        reportObservedDurationForTasks(recentSuccessfulTasks, entityUuidAndKeys);
    }
    // Remember when we last reported so the next run only picks up newer tasks.
    lastUpdatedTimestamp = system.now();
} | @Test
// Only the successful task of the two must be reported (one observation).
public void run_given1SuccessfulTasksAnd1Failing_observeDurationFor1Tasks() {
    RecentTasksDurationTask task = new RecentTasksDurationTask(dbClient, metrics, config, system);
    List<CeActivityDto> recentTasks = createTasks(1, 1);
    when(entityDao.selectByUuids(any(), any())).thenReturn(createEntityDtos(1));
    when(ceActivityDao.selectNewerThan(any(), anyLong())).thenReturn(recentTasks);
    task.run();
    verify(metrics, times(1)).observeComputeEngineTaskDuration(anyLong(), any(), any());
} |
@Override
public boolean addAll(IntSet set) {
    // RangeSet is a read-only view; every mutator rejects the call.
    throw new UnsupportedOperationException("RangeSet is immutable");
} | @Test(expected = UnsupportedOperationException.class)
// Any mutation attempt on the immutable RangeSet must throw.
public void addAll() throws Exception {
    RangeSet rs = new RangeSet(4);
    RangeSet rs2 = new RangeSet(5);
    rs.addAll(rs2);
} |
/**
 * Narrows the invoker list using either request-tag routing or configured
 * flow rules, then delegates to the next handler in the chain.
 */
@Override
public Object handle(String targetService, List<Object> invokers, Object invocation, Map<String, String> queryMap,
    String serviceInterface) {
    // Short-circuit when the invoker list does not need routing.
    if (!shouldHandle(invokers)) {
        return invokers;
    }
    List<Object> targetInvokers;
    // Two routing strategies, selected by configuration.
    if (routerConfig.isUseRequestRouter()) {
        targetInvokers = getTargetInvokersByRequest(targetService, invokers, invocation);
    } else {
        targetInvokers = getTargetInvokersByRules(invokers, invocation, queryMap, targetService, serviceInterface);
    }
    return super.handle(targetService, targetInvokers, invocation, queryMap, serviceInterface);
} | @Test
public void testGetTargetInstancesByRequestWithNoTags() {
config.setRequestTags(null);
config.setUseRequestRouter(true);
List<Object> invokers = new ArrayList<>();
ApacheInvoker<Object> invoker1 = new ApacheInvoker<>("1.0.0",
Collections.singletonMap(RouterConstant.PARAMETERS_KEY_PREFIX + "bar", "bar1"));
invokers.add(invoker1);
ApacheInvoker<Object> invoker2 = new ApacheInvoker<>("1.0.1",
Collections.singletonMap(RouterConstant.PARAMETERS_KEY_PREFIX + "foo", "bar2"));
invokers.add(invoker2);
ApacheInvoker<Object> invoker3 = new ApacheInvoker<>("1.0.1");
invokers.add(invoker3);
Invocation invocation = new ApacheInvocation();
Map<String, String> queryMap = new HashMap<>();
queryMap.put("side", "consumer");
queryMap.put("group", "fooGroup");
queryMap.put("version", "0.0.1");
queryMap.put("interface", "io.sermant.foo.FooTest");
DubboCache.INSTANCE.putApplication("io.sermant.foo.FooTest", "foo");
// when the test is not tags
List<Object> targetInvokers = (List<Object>) flowRouteHandler.handle(
DubboCache.INSTANCE.getApplication("io.sermant.foo.FooTest")
, invokers, invocation, queryMap, "io.sermant.foo.FooTest");
Assert.assertEquals(invokers, targetInvokers);
} |
// SQL cast INTEGER -> TINYINT; rejects values outside the signed byte range.
@ScalarOperator(CAST)
@SqlType(StandardTypes.TINYINT)
public static long castToTinyint(@SqlType(StandardTypes.INTEGER) long value)
{
    try {
        return SignedBytes.checkedCast(value);
    }
    catch (IllegalArgumentException e) {
        // Surface range violations as an engine-level numeric error, keeping the cause.
        throw new PrestoException(NUMERIC_VALUE_OUT_OF_RANGE, "Out of range for tinyint: " + value, e);
    }
} | @Test
// In-range integers must cast cleanly to tinyint.
public void testCastToTinyint()
{
    assertFunction("cast(INTEGER'37' as tinyint)", TINYINT, (byte) 37);
    assertFunction("cast(INTEGER'17' as tinyint)", TINYINT, (byte) 17);
} |
/**
 * Resolves where the scan metadata dump is written: the configured path
 * (which must be absolute, resolved against the project base dir) or the
 * default dump file in the project work dir.
 */
public Path metadataFilePath() {
    Optional<String> metadataFilePath = configuration.get(METADATA_FILE_PATH_KEY);
    if (metadataFilePath.isPresent()) {
        Path metadataPath = Paths.get(metadataFilePath.get());
        if (!metadataPath.isAbsolute()) {
            throw MessageException.of(String.format("Property '%s' must point to an absolute path: %s", METADATA_FILE_PATH_KEY, metadataFilePath.get()));
        }
        // Resolving an absolute path against the base dir returns it unchanged.
        return project.getBaseDir().resolve(metadataPath);
    } else {
        return project.getWorkDir().resolve(METADATA_DUMP_FILENAME);
    }
} | @Test
// An absolute configured path must be returned as-is.
public void should_define_metadata_file_path() throws IOException {
    Path path = temp.newFolder().toPath().resolve("report");
    settings.setProperty("sonar.scanner.metadataFilePath", path.toString());
    assertThat(underTest.metadataFilePath()).isEqualTo(path);
} |
// Convenience wrapper: url-patterns declared for the named filter in web.xml.
public static Set<String> getFilterUrlPatterns( Document webXml, String filterName )
{
    return getUrlPatterns( "filter", webXml, filterName );
} | @Test
public void testGetFilterUrlPatterns() throws Exception
{
// Setup fixture.
final Document webXml = WebXmlUtils.asDocument( new File( Objects.requireNonNull(WebXmlUtilsTest.class.getResource("/org/jivesoftware/util/test-web.xml")).toURI() ) );
final String filterName = "LocaleFilter";
// Execute system under test.
final Set<String> results = WebXmlUtils.getFilterUrlPatterns( webXml, filterName );
// Verify result.
assertNotNull( results );
assertEquals( 2, results.size() );
assertTrue( results.contains( "*.jsp" ));
assertTrue( results.contains( "foo.bar" ));
} |
@Override
public Mono<ExtensionStore> fetchByName(String name) {
    // The store name is the repository primary key.
    return repository.findById(name);
} | @Test
// fetchByName must delegate to the repository keyed by store name.
void fetchByName() {
    var expectedExtension =
        new ExtensionStore("/registry/posts/hello-world", "this is post".getBytes(), 1L);
    when(repository.findById("/registry/posts/hello-halo"))
        .thenReturn(Mono.just(expectedExtension));
    var gotExtension = client.fetchByName("/registry/posts/hello-halo").blockOptional();
    assertTrue(gotExtension.isPresent());
    assertEquals(expectedExtension, gotExtension.get());
} |
/**
 * Serializes a chart's raw data rows to JSON in tabular (Excel-like) form:
 * one header collection (the column names) followed by one value collection
 * per row.
 */
@Override
public String getChartData(Chart chart) {
    // 根据id查询数据库
    List<Map<String, String>> chartData = baseMapper.getChartDataByChartId(chart.getId());
    List<Collection<String>> excelData = new ArrayList<>();
    if (!chartData.isEmpty()) {
        // Header: the column names taken from the first row's key set.
        excelData.add(chartData.get(0).keySet());
        // Emit the values of EVERY row, including the first one. The previous
        // version only used the first row for its keys and skipped its values,
        // silently dropping one record from the output.
        for (Map<String, String> row : chartData) {
            excelData.add(row.values());
        }
    }
    return JSONUtil.toJsonStr(excelData);
} | @Test
// Smoke test only: prints the serialized chart data, no assertions.
void getChartData() {
    Chart chart = new Chart();
    chart.setId(123456789L);
    String chartData = chartService.getChartData(chart);
    System.out.println(chartData);
} |
// Fetches paged account logs from the client and maps the raw map response
// onto the typed result via Jackson's convertValue.
public AccountLogsResult getAccountLogs(long accountId, String deviceName, String appCode, AccountLogsRequest request) {
    Map<String, Object> resultMap = accountClient.getAccountLogs(accountId, deviceName, appCode, request.getPageSize(),
        request.getPageId(), request.getQuery());
    return objectMapper.convertValue(resultMap, AccountLogsResult.class);
} | @Test
public void testGetAccountLogs() {
AccountLogsRequest request = new AccountLogsRequest();
request.setPageId(1);
request.setPageSize(2);
request.setQuery("q");
List<Map<String, Object>> logs = List.of(
Map.of(
"id", 6,
"name", "log name"));
Map<String, Object> result = Map.of(
"status", "OK",
"error", "custom error",
"total_items", 3,
"total_pages", 5,
"results", logs);
when(accountClient.getAccountLogs(anyLong(), anyString(), anyString(), any(), any(), anyString())).thenReturn(result);
AccountLogsResult accountLogs = accountService.getAccountLogs(1, "deviceName", "appCode", request);
assertEquals(Status.OK, accountLogs.getStatus());
assertEquals("custom error", accountLogs.getError());
assertEquals(3, accountLogs.getTotalItems());
assertEquals(5, accountLogs.getTotalPages());
assertEquals(1, accountLogs.getResults().size());
} |
// Registers a subscription for the topic and pushes it to all brokers via heartbeat.
public void subscribe(String topic, String subExpression) throws MQClientException {
    try {
        SubscriptionData subscriptionData = FilterAPI.buildSubscriptionData(topic, subExpression);
        this.rebalanceImpl.getSubscriptionInner().put(topic, subscriptionData);
        // The client factory may not exist yet (before start()); the heartbeat
        // is then sent later when the client starts.
        if (this.mQClientFactory != null) {
            this.mQClientFactory.sendHeartbeatToAllBrokerWithLock();
        }
    } catch (Exception e) {
        // Wrap any failure (filter build, heartbeat) into the client-facing exception type.
        throw new MQClientException("subscription exception", e);
    }
} | @Test
// Subscribing must register exactly one entry in the rebalance subscription table.
public void testSubscribe() throws MQClientException {
    defaultMQPushConsumerImpl.subscribe(defaultTopic, "fullClassname", "filterClassSource");
    RebalanceImpl actual = defaultMQPushConsumerImpl.getRebalanceImpl();
    assertEquals(1, actual.getSubscriptionInner().size());
} |
@Override
public Optional<ScmInfo> getScmInfo(Component component) {
    requireNonNull(component, "Component cannot be null");
    // SCM data only exists for files; other component types have none.
    if (component.getType() != Component.Type.FILE) {
        return Optional.empty();
    }
    // Memoize per component: the underlying lookup is expensive.
    return scmInfoCache.computeIfAbsent(component, this::getScmInfoForComponent);
} | @Test
public void read_from_DB_with_missing_lines_if_no_report_and_file_unchanged() {
createDbScmInfoWithMissingLine();
when(fileStatuses.isUnchanged(FILE_SAME)).thenReturn(true);
// should clear revision and author
ScmInfo scmInfo = underTest.getScmInfo(FILE_SAME).get();
assertThat(scmInfo.getAllChangesets()).hasSize(2);
assertChangeset(scmInfo.getChangesetForLine(1), null, null, 10L);
assertThat(scmInfo.hasChangesetForLine(2)).isFalse();
verify(fileStatuses).isUnchanged(FILE_SAME);
verify(dbLoader).getScmInfo(FILE_SAME);
verifyNoMoreInteractions(dbLoader);
verifyNoMoreInteractions(fileStatuses);
verifyNoInteractions(diff);
} |
/**
 * Initializes the FileSystemAccess service: configures Hadoop authentication
 * (kerberos via keytab login, or simple), locates the Hadoop configuration
 * directory, loads the service/file-system configurations and the name-node
 * whitelist. Throws ServiceException on any misconfiguration.
 */
@Override
protected void init() throws ServiceException {
    LOG.info("Using FileSystemAccess JARs version [{}]", VersionInfo.getVersion());
    String security = getServiceConfig().get(AUTHENTICATION_TYPE, "simple").trim();
    if (security.equals("kerberos")) {
        // Default keytab/principal are derived from the server name and host.
        String defaultName = getServer().getName();
        String keytab = System.getProperty("user.home") + "/" + defaultName + ".keytab";
        keytab = getServiceConfig().get(KERBEROS_KEYTAB, keytab).trim();
        if (keytab.length() == 0) {
            throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_KEYTAB);
        }
        String principal = defaultName + "/localhost@LOCALHOST";
        principal = getServiceConfig().get(KERBEROS_PRINCIPAL, principal).trim();
        if (principal.length() == 0) {
            throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_PRINCIPAL);
        }
        Configuration conf = new Configuration();
        conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);
        try {
            UserGroupInformation.loginUserFromKeytab(principal, keytab);
        } catch (IOException ex) {
            throw new ServiceException(FileSystemAccessException.ERROR.H02, ex.getMessage(), ex);
        }
        LOG.info("Using FileSystemAccess Kerberos authentication, principal [{}] keytab [{}]", principal, keytab);
    } else if (security.equals("simple")) {
        Configuration conf = new Configuration();
        conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
        UserGroupInformation.setConfiguration(conf);
        LOG.info("Using FileSystemAccess simple/pseudo authentication, principal [{}]", System.getProperty("user.name"));
    } else {
        // Any other value is an unsupported authentication type.
        throw new ServiceException(FileSystemAccessException.ERROR.H09, security);
    }
    // Fall back to the server config dir when the configured dir is missing.
    String hadoopConfDirProp = getServiceConfig().get(HADOOP_CONF_DIR, getServer().getConfigDir());
    File hadoopConfDir = new File(hadoopConfDirProp).getAbsoluteFile();
    if (!hadoopConfDir.exists()) {
        hadoopConfDir = new File(getServer().getConfigDir()).getAbsoluteFile();
    }
    if (!hadoopConfDir.exists()) {
        throw new ServiceException(FileSystemAccessException.ERROR.H10, hadoopConfDir);
    }
    try {
        serviceHadoopConf = loadHadoopConf(hadoopConfDir);
        fileSystemConf = getNewFileSystemConfiguration();
    } catch (IOException ex) {
        throw new ServiceException(FileSystemAccessException.ERROR.H11, ex.toString(), ex);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("FileSystemAccess FileSystem configuration:");
        for (Map.Entry entry : serviceHadoopConf) {
            LOG.debug("  {} = {}", entry.getKey(), entry.getValue());
        }
    }
    setRequiredServiceHadoopConf(serviceHadoopConf);
    nameNodeWhitelist = toLowerCase(getServiceConfig().getTrimmedStringCollection(NAME_NODE_WHITELIST));
} | @Test
@TestDir
public void simpleSecurity() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Assert.assertNotNull(server.get(FileSystemAccess.class));
server.destroy();
} |
@Override
public T extractOutput(VarianceAccumulator accumulator) {
    // Convert the accumulated variance into the SQL output type (e.g. decimal).
    return decimalConverter.apply(getVariance(accumulator));
} | @Test
// extractOutput must convert the accumulator into the expected result.
public void testExtractsOutput() {
    assertEquals(expectedExtractedResult, varianceFn.extractOutput(testAccumulatorInput));
} |
/**
 * Reads exactly {@code length} bytes into {@code bytes} starting at
 * {@code offset}, throwing EOFException when the stream ends short.
 */
public static void readFully(InputStream stream, byte[] bytes, int offset, int length)
    throws IOException {
    // readRemaining loops until either the requested count or EOF.
    final int read = readRemaining(stream, bytes, offset, length);
    if (read < length) {
        throw new EOFException(
            "Reached the end of stream with " + (length - read) + " bytes left to read");
    }
} | @Test
// readFully must assemble the full range across multiple short reads.
public void testReadFullySmallReads() throws Exception {
    byte[] buffer = new byte[5];
    MockInputStream stream = new MockInputStream(2, 3, 3);
    IOUtil.readFully(stream, buffer, 0, buffer.length);
    assertThat(buffer)
        .as("Byte array contents should match")
        .containsExactly(Arrays.copyOfRange(MockInputStream.TEST_ARRAY, 0, 5));
    assertThat(stream.getPos()).as("Stream position should reflect bytes read").isEqualTo(5);
} |
@Override
// Read requests are not handled by this processor; it always answers null.
// NOTE(review): callers appear to treat null as "no result" -- confirm upstream.
public Response onRequest(ReadRequest request) {
return null;
} | @Test
void testOnRequest() {
Response response = serviceMetadataProcessor.onRequest(ReadRequest.getDefaultInstance());
assertNull(response);
} |
/**
 * Determines whether the given file exists by probing its attributes on the
 * server. A missing node is reported as {@code false} rather than as an error.
 */
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
    try {
        // The attribute lookup only succeeds for nodes present on the server
        new SDSAttributesFinderFeature(session, nodeid).find(file, listener);
    }
    catch(NotfoundException e) {
        return false;
    }
    return true;
} | @Test
public void testFindFile() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path file = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new SDSTouchFeature(session, nodeid).touch(file, new TransferStatus());
assertTrue(new SDSFindFeature(session, nodeid).find(file));
assertFalse(new SDSFindFeature(session, nodeid).find(new Path(file.getAbsolute(), EnumSet.of(Path.Type.directory))));
new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
// Connects this keyed stream with another keyed stream and applies a two-input
// (non-broadcast) process function, yielding a non-keyed result stream.
public <T_OTHER, OUT> ProcessConfigurableAndNonKeyedPartitionStream<OUT> connectAndProcess(
KeyedPartitionStream<K, T_OTHER> other,
TwoInputNonBroadcastStreamProcessFunction<V, T_OTHER, OUT> processFunction) {
// Only states declared with IDENTICAL redistribution mode are permitted here.
validateStates(
processFunction.usesStates(),
new HashSet<>(
Collections.singletonList(StateDeclaration.RedistributionMode.IDENTICAL)));
// Derive the output type from the process function and both input types.
TypeInformation<OUT> outTypeInfo =
StreamUtils.getOutputTypeForTwoInputNonBroadcastProcessFunction(
processFunction,
getType(),
((KeyedPartitionStreamImpl<K, T_OTHER>) other).getType());
KeyedTwoInputNonBroadcastProcessOperator<K, V, T_OTHER, OUT> processOperator =
new KeyedTwoInputNonBroadcastProcessOperator<>(processFunction);
Transformation<OUT> outTransformation =
StreamUtils.getTwoInputTransformation(
"Keyed-TwoInput-Process",
this,
(KeyedPartitionStreamImpl<K, T_OTHER>) other,
outTypeInfo,
processOperator);
// Register the transformation so it becomes part of the job graph.
environment.addOperator(outTransformation);
return StreamUtils.wrapWithConfigureHandle(
new NonKeyedPartitionStreamImpl<>(environment, outTransformation));
} | @Test
void testConnectKeyedStream() throws Exception {
ExecutionEnvironmentImpl env = StreamTestUtils.getEnv();
KeyedPartitionStream<Integer, Integer> stream = createKeyedStream(env);
stream.connectAndProcess(
createKeyedStream(
env,
new TestingTransformation<>("t2", Types.LONG, 1),
(KeySelector<Long, Integer>) Math::toIntExact),
new StreamTestUtils.NoOpTwoInputNonBroadcastStreamProcessFunction());
stream.connectAndProcess(
createKeyedStream(
env,
new TestingTransformation<>("t3", Types.LONG, 1),
(KeySelector<Long, Integer>) Math::toIntExact),
new StreamTestUtils.NoOpTwoInputNonBroadcastStreamProcessFunction(),
(KeySelector<Long, Integer>) Math::toIntExact);
List<Transformation<?>> transformations = env.getTransformations();
assertThat(transformations).hasSize(2);
assertProcessType(transformations.get(0), TwoInputTransformation.class, Types.LONG);
assertProcessType(transformations.get(1), TwoInputTransformation.class, Types.LONG);
} |
/**
 * Swaps repository tuples back into a rule configuration for the given rule type.
 *
 * @param ruleTypeName rule type name matched against the RepositoryTupleEntity annotation value
 * @param repositoryTuples tuples to swap; an empty collection yields an empty Optional
 * @return the swapped rule configuration, or empty when no registered swapper matches
 */
@SuppressWarnings("rawtypes")
public Optional<RuleConfiguration> swapToRuleConfiguration(final String ruleTypeName, final Collection<RepositoryTuple> repositoryTuples) {
if (repositoryTuples.isEmpty()) {
return Optional.empty();
}
YamlRuleConfigurationSwapperEngine yamlSwapperEngine = new YamlRuleConfigurationSwapperEngine();
for (YamlRuleConfigurationSwapper each : ShardingSphereServiceLoader.getServiceInstances(YamlRuleConfigurationSwapper.class)) {
Class<? extends YamlRuleConfiguration> yamlRuleConfigClass = getYamlRuleConfigurationClass(each);
// Match the SPI swapper whose YAML class is annotated with the requested rule type name.
if (ruleTypeName.equals(Objects.requireNonNull(yamlRuleConfigClass.getAnnotation(RepositoryTupleEntity.class)).value())) {
Optional<YamlRuleConfiguration> yamlRuleConfig = swapToYamlRuleConfiguration(repositoryTuples, yamlRuleConfigClass);
return yamlRuleConfig.map(yamlSwapperEngine::swapToRuleConfiguration);
}
}
return Optional.empty();
} | @Test
void assertSwapToRuleConfiguration() {
assertFalse(new RepositoryTupleSwapperEngine().swapToRuleConfiguration("leaf", Collections.singleton(new RepositoryTuple("/rules/leaf/versions/0", "value: foo"))).isPresent());
} |
/**
 * Command-line entry point translating schema files between formats
 * (defaults: pdsc source to pdl destination).
 *
 * <p>Expects three positional arguments -- resolverPath, sourceRoot and
 * destinationPath -- and exits with status 1 on invalid input.
 */
public static void main(String[] args) throws Exception
{
try
{
final CommandLineParser parser = new GnuParser();
CommandLine cl = parser.parse(OPTIONS, args);
if (cl.hasOption('h'))
{
help();
System.exit(0);
}
// Source/destination formats default to the pdsc and pdl file types.
String sourceFormat = cl.getOptionValue('s', SchemaParser.FILETYPE).trim();
String destFormat = cl.getOptionValue('d', PdlSchemaParser.FILETYPE).trim();
boolean keepOriginal = cl.hasOption('o');
String preserveSourceCmd = cl.getOptionValue('p');
boolean skipVerification = cl.hasOption('k');
boolean forcePdscFullyQualifiedNames = cl.hasOption('q');
String[] cliArgs = cl.getArgs();
if (cliArgs.length != 3)
{
LOGGER.error("Missing arguments, expected 3 ([resolverPath] [sourceRoot] [destinationPath]), got "
+ cliArgs.length);
help();
System.exit(1);
}
int i = 0;
// The resolver-path argument may point at a file containing the real value.
String resolverPaths = RestLiToolsUtils.readArgFromFileIfNeeded(cliArgs[i++]);
String sourcePath = cliArgs[i++];
String destPath = cliArgs[i++];
File sourceDir = new File(sourcePath);
File destDir = new File(destPath);
if (!sourceDir.exists() || !sourceDir.canRead())
{
LOGGER.error("Source directory does not exist or cannot be read: " + sourceDir.getAbsolutePath());
System.exit(1);
}
// Destination is created on demand, then re-checked for writability.
destDir.mkdirs();
if (!destDir.exists() || !destDir.canWrite())
{
LOGGER.error("Destination directory does not exist or cannot be written to: " + destDir.getAbsolutePath());
System.exit(1);
}
SchemaFormatTranslator translator = new SchemaFormatTranslator(
resolverPaths,
sourceDir,
destDir,
sourceFormat,
destFormat,
keepOriginal,
preserveSourceCmd,
skipVerification,
forcePdscFullyQualifiedNames);
translator.translateFiles();
}
catch (ParseException e)
{
LOGGER.error("Invalid arguments: " + e.getMessage());
help();
System.exit(1);
}
} | @Test(dataProvider = "fullClassName")
public void testTranslatePdscFromConvertedPdlInSchema(String packageName, String className) throws Exception
{
FileUtil.FileExtensionFilter pdscFilter = new FileUtil.FileExtensionFilter(SchemaParser.FILE_EXTENSION);
FileUtil.FileExtensionFilter pdlFilter = new FileUtil.FileExtensionFilter(PdlSchemaParser.FILE_EXTENSION);
// pdsc to pdl, keep source fields ('-o' flag)
String pdlTemp = Files.createTempDirectory("restli").toFile().getAbsolutePath();
// Keep original in source root.
SchemaFormatTranslator.main(new String[]{"-o", RESOLVER_DIR, SOURCE_ROOT, pdlTemp});
// Source files are not deleted
List<File> sourceFiles = FileUtil.listFiles(new File(SOURCE_ROOT), pdscFilter);
Assert.assertTrue(sourceFiles.size() > 0);
List<File> destFiles = FileUtil.listFiles(new File(pdlTemp), pdlFilter);
Assert.assertTrue(destFiles.size() > 0);
// All source files are translated.
Assert.assertEquals(destFiles.size(), sourceFiles.size());
// pdl to pdsc, delete source files (no '-o' flag)
int inputPdlFileCount = destFiles.size();
String pdscTemp = Files.createTempDirectory("restli").toFile().getAbsolutePath();
String pdlResolverPath = EXTERNAL_RESOURCES + File.pathSeparator + pdlTemp;
SchemaFormatTranslator.main(new String[]{"-spdl", "-dpdsc", pdlResolverPath, pdlTemp, pdscTemp});
destFiles = FileUtil.listFiles(new File(pdscTemp), pdscFilter);
Assert.assertTrue(destFiles.size() > 0);
Assert.assertEquals(destFiles.size(), inputPdlFileCount);
// Source files are deleted.
Assert.assertTrue(FileUtil.listFiles(new File(pdlTemp), pdlFilter).isEmpty());
MultiFormatDataSchemaResolver sourceResolver = MultiFormatDataSchemaResolver.withBuiltinFormats(RESOLVER_DIR);
MultiFormatDataSchemaResolver translatedResolver =
MultiFormatDataSchemaResolver.withBuiltinFormats(pdscTemp + File.pathSeparator + EXTERNAL_RESOURCES);
assertSameSchemas(packageName + "." + className, sourceResolver, translatedResolver);
} |
/**
 * Returns a matcher that accepts only requests whose operation equals {@code operation}.
 *
 * @throws NullPointerException if {@code operation} is null
 * @throws IllegalArgumentException if {@code operation} is empty
 */
public static <Req extends MessagingRequest> Matcher<Req> operationEquals(String operation) {
  if (operation == null) throw new NullPointerException("operation == null");
  // An empty operation is a present-but-invalid argument: IllegalArgumentException
  // is the conventional exception here, not NullPointerException.
  if (operation.isEmpty()) throw new IllegalArgumentException("operation is empty");
  return new MessagingOperationEquals<Req>(operation);
} | @Test void operationEquals_unmatched_null() {
assertThat(operationEquals("send").matches(request)).isFalse();
} |
/**
 * Converts the in-memory (mutable) realtime segment into an immutable on-disk segment.
 *
 * @param segmentVersion target segment format version; null keeps the generator default
 * @param serverMetrics sink for per-table partition gauges; only used when a segment
 *        partition config is present
 * @throws Exception if segment generation fails
 */
public void build(@Nullable SegmentVersion segmentVersion, ServerMetrics serverMetrics)
throws Exception {
SegmentGeneratorConfig genConfig = new SegmentGeneratorConfig(_tableConfig, _dataSchema);
// The segment generation code in SegmentColumnarIndexCreator will throw
// exception if start and end time in time column are not in acceptable
// range. We don't want the realtime consumption to stop (if an exception
// is thrown) and thus the time validity check is explicitly disabled for
// realtime segment generation
genConfig.setSegmentTimeValueCheck(false);
if (_columnIndicesForRealtimeTable.getInvertedIndexColumns() != null) {
genConfig.setIndexOn(StandardIndexes.inverted(), IndexConfig.ENABLED,
_columnIndicesForRealtimeTable.getInvertedIndexColumns());
}
if (_columnIndicesForRealtimeTable.getVarLengthDictionaryColumns() != null) {
genConfig.setVarLengthDictionaryColumns(_columnIndicesForRealtimeTable.getVarLengthDictionaryColumns());
}
if (segmentVersion != null) {
genConfig.setSegmentVersion(segmentVersion);
}
genConfig.setTableName(_tableName);
genConfig.setOutDir(_outputPath);
genConfig.setSegmentName(_segmentName);
addIndexOrDefault(genConfig, StandardIndexes.text(), _columnIndicesForRealtimeTable.getTextIndexColumns(),
new TextIndexConfigBuilder(genConfig.getFSTIndexType()).build());
addIndexOrDefault(genConfig, StandardIndexes.fst(), _columnIndicesForRealtimeTable.getFstIndexColumns(),
new FstIndexConfig(genConfig.getFSTIndexType()));
SegmentPartitionConfig segmentPartitionConfig = _realtimeSegmentImpl.getSegmentPartitionConfig();
genConfig.setSegmentPartitionConfig(segmentPartitionConfig);
genConfig.setNullHandlingEnabled(_nullHandlingEnabled);
genConfig.setSegmentZKPropsConfig(_segmentZKPropsConfig);
// flush any artifacts to disk to improve mutable to immutable segment conversion
_realtimeSegmentImpl.commit();
SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
try (PinotSegmentRecordReader recordReader = new PinotSegmentRecordReader()) {
// When a sorted column is configured, iterate docs in that column's sorted
// order so the immutable segment is physically sorted on it.
int[] sortedDocIds = _columnIndicesForRealtimeTable.getSortedColumn() != null
? _realtimeSegmentImpl.getSortedDocIdIterationOrderWithSortedColumn(
_columnIndicesForRealtimeTable.getSortedColumn()) : null;
recordReader.init(_realtimeSegmentImpl, sortedDocIds);
RealtimeSegmentSegmentCreationDataSource dataSource =
new RealtimeSegmentSegmentCreationDataSource(_realtimeSegmentImpl, recordReader);
driver.init(genConfig, dataSource, RecordEnricherPipeline.getPassThroughPipeline(),
TransformPipeline.getPassThroughPipeline());
if (!_enableColumnMajor) {
driver.build();
} else {
driver.buildByColumn(_realtimeSegmentImpl);
}
}
if (segmentPartitionConfig != null) {
// Report the observed partition count for every partitioned column.
Map<String, ColumnPartitionConfig> columnPartitionMap = segmentPartitionConfig.getColumnPartitionMap();
for (String columnName : columnPartitionMap.keySet()) {
int numPartitions = driver.getSegmentStats().getColumnProfileFor(columnName).getPartitions().size();
serverMetrics.addValueToTableGauge(_tableName, ServerGauge.REALTIME_SEGMENT_NUM_PARTITIONS, numPartitions);
}
}
} | @Test
public void testNoRecordsIndexedRowMajorSegmentBuilder()
throws Exception {
File tmpDir = new File(TMP_DIR, "tmp_" + System.currentTimeMillis());
TableConfig tableConfig =
new TableConfigBuilder(TableType.REALTIME).setTableName("testTable").setTimeColumnName(DATE_TIME_COLUMN)
.setInvertedIndexColumns(Lists.newArrayList(STRING_COLUMN1)).setSortedColumn(LONG_COLUMN1)
.setRangeIndexColumns(Lists.newArrayList(STRING_COLUMN2))
.setNoDictionaryColumns(Lists.newArrayList(LONG_COLUMN2))
.setVarLengthDictionaryColumns(Lists.newArrayList(STRING_COLUMN3))
.setOnHeapDictionaryColumns(Lists.newArrayList(LONG_COLUMN3)).setColumnMajorSegmentBuilderEnabled(false)
.build();
Schema schema = new Schema.SchemaBuilder().addSingleValueDimension(STRING_COLUMN1, FieldSpec.DataType.STRING)
.addSingleValueDimension(STRING_COLUMN2, FieldSpec.DataType.STRING)
.addSingleValueDimension(STRING_COLUMN3, FieldSpec.DataType.STRING)
.addSingleValueDimension(STRING_COLUMN4, FieldSpec.DataType.STRING)
.addSingleValueDimension(LONG_COLUMN1, FieldSpec.DataType.LONG)
.addSingleValueDimension(LONG_COLUMN2, FieldSpec.DataType.LONG)
.addSingleValueDimension(LONG_COLUMN3, FieldSpec.DataType.LONG)
.addMultiValueDimension(MV_INT_COLUMN, FieldSpec.DataType.INT).addMetric(LONG_COLUMN4, FieldSpec.DataType.LONG)
.addDateTime(DATE_TIME_COLUMN, FieldSpec.DataType.LONG, "1:MILLISECONDS:EPOCH", "1:MILLISECONDS").build();
String tableNameWithType = tableConfig.getTableName();
String segmentName = "testTable__0__0__123456";
IndexingConfig indexingConfig = tableConfig.getIndexingConfig();
DictionaryIndexConfig varLengthDictConf = new DictionaryIndexConfig(false, true);
RealtimeSegmentConfig.Builder realtimeSegmentConfigBuilder =
new RealtimeSegmentConfig.Builder().setTableNameWithType(tableNameWithType).setSegmentName(segmentName)
.setStreamName(tableNameWithType).setSchema(schema).setTimeColumnName(DATE_TIME_COLUMN).setCapacity(1000)
.setAvgNumMultiValues(3)
.setIndex(Sets.newHashSet(LONG_COLUMN2), StandardIndexes.dictionary(), DictionaryIndexConfig.DISABLED)
.setIndex(Sets.newHashSet(Sets.newHashSet(STRING_COLUMN3)), StandardIndexes.dictionary(), varLengthDictConf)
.setIndex(Sets.newHashSet(STRING_COLUMN1), StandardIndexes.inverted(), IndexConfig.ENABLED)
.setSegmentZKMetadata(getSegmentZKMetadata(segmentName)).setOffHeap(true)
.setMemoryManager(new DirectMemoryManager(segmentName))
.setStatsHistory(RealtimeSegmentStatsHistory.deserialzeFrom(new File(tmpDir, "stats")))
.setConsumerDir(new File(tmpDir, "consumerDir").getAbsolutePath());
// create mutable segment impl
MutableSegmentImpl mutableSegmentImpl = new MutableSegmentImpl(realtimeSegmentConfigBuilder.build(), null);
File outputDir = new File(tmpDir, "outputDir");
SegmentZKPropsConfig segmentZKPropsConfig = new SegmentZKPropsConfig();
segmentZKPropsConfig.setStartOffset("1");
segmentZKPropsConfig.setEndOffset("100");
ColumnIndicesForRealtimeTable cdc = new ColumnIndicesForRealtimeTable(indexingConfig.getSortedColumn().get(0),
indexingConfig.getInvertedIndexColumns(), null, null, indexingConfig.getNoDictionaryColumns(),
indexingConfig.getVarLengthDictionaryColumns());
RealtimeSegmentConverter converter =
new RealtimeSegmentConverter(mutableSegmentImpl, segmentZKPropsConfig, outputDir.getAbsolutePath(), schema,
tableNameWithType, tableConfig, segmentName, cdc, false);
converter.build(SegmentVersion.v3, null);
File indexDir = new File(outputDir, segmentName);
SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(indexDir);
assertEquals(segmentMetadata.getTotalDocs(), 0);
assertEquals(segmentMetadata.getTimeColumn(), DATE_TIME_COLUMN);
assertEquals(segmentMetadata.getTimeUnit(), TimeUnit.MILLISECONDS);
assertEquals(segmentMetadata.getStartTime(), segmentMetadata.getEndTime());
assertTrue(segmentMetadata.getAllColumns().containsAll(schema.getColumnNames()));
assertEquals(segmentMetadata.getStartOffset(), "1");
assertEquals(segmentMetadata.getEndOffset(), "100");
} |
@Override
// Re-evaluates resource availability whenever new resources arrive; may
// trigger leaving the waiting state if desired/sufficient resources now exist.
public void onNewResourcesAvailable() {
checkDesiredOrSufficientResourcesAvailable();
} | @Test
void testStabilizationTimeoutReset() {
Duration initialResourceTimeout = Duration.ofMillis(-1);
Duration stabilizationTimeout = Duration.ofMillis(50L);
WaitingForResources wfr =
new WaitingForResources(
ctx,
LOG,
initialResourceTimeout,
stabilizationTimeout,
ctx.getClock(),
null);
ctx.setHasDesiredResources(() -> false);
// notify about resources, trigger stabilization timeout
ctx.setHasSufficientResources(() -> true);
ctx.advanceTimeByMillis(40); // advance time, but don't trigger stabilizationTimeout
wfr.onNewResourcesAvailable();
// notify again, but insufficient (reset stabilization timeout)
ctx.setHasSufficientResources(() -> false);
ctx.advanceTimeByMillis(40);
wfr.onNewResourcesAvailable();
// notify again, but sufficient, trigger timeout
ctx.setHasSufficientResources(() -> true);
ctx.advanceTimeByMillis(40);
wfr.onNewResourcesAvailable();
// sanity check: no state transition has been triggered so far
assertThat(ctx.hasStateTransition()).isFalse();
assertThat(ctx.getTestDuration()).isGreaterThan(stabilizationTimeout);
ctx.setExpectCreatingExecutionGraph();
ctx.advanceTimeByMillis(1);
assertThat(ctx.hasStateTransition()).isFalse();
ctx.advanceTimeByMillis(stabilizationTimeout.toMillis());
assertThat(ctx.hasStateTransition()).isTrue();
} |
static boolean allowDestinationRange(String prev, String next)
{
if (prev.isEmpty() || next.isEmpty())
{
return false;
}
int prevCode = prev.codePointAt(0);
int nextCode = next.codePointAt(0);
// Allow extending the destination range only if:
// 1. allowCodeRange accepts the two leading code points (defined elsewhere;
//    presumably sequential within the same high-order byte -- confirm), and
// 2. the previous string is exactly one code point (a single char or one
//    surrogate pair) -- longer strings cannot be expressed as a range.
return allowCodeRange(prevCode, nextCode) && prev.codePointCount(0, prev.length()) == 1;
} | @Test
void testAllowDestinationRangeSurrogates()
{
// Check surrogates
StringBuilder endOfBMP = new StringBuilder();
endOfBMP.appendCodePoint(0xFFFF);
StringBuilder beyondBMP = new StringBuilder();
beyondBMP.appendCodePoint(0x10000);
StringBuilder cjk1 = new StringBuilder();
cjk1.appendCodePoint(0x2F884);
StringBuilder cjk2 = new StringBuilder();
cjk2.appendCodePoint(0x2F885);
StringBuilder cjk3 = new StringBuilder();
cjk3.appendCodePoint(0x2F886);
// Denied (overflow)
assertFalse(
ToUnicodeWriter.allowDestinationRange(endOfBMP.toString(), beyondBMP.toString()));
// Allowed (sequential surrogates)
assertTrue(ToUnicodeWriter.allowDestinationRange(cjk1.toString(), cjk2.toString()));
assertTrue(ToUnicodeWriter.allowDestinationRange(cjk2.toString(), cjk3.toString()));
// Denied (non sequential surrogates)
assertFalse(ToUnicodeWriter.allowDestinationRange(cjk1.toString(), cjk3.toString()));
} |
// Returns the thread group that threads created by this factory belong to.
public ThreadGroup getThreadGroup() {
return mGroup;
} | @Test
public void testGetThreadGroup() {
NamedThreadFactory threadFactory = new NamedThreadFactory();
ThreadGroup threadGroup = threadFactory.getThreadGroup();
assertNotNull(threadGroup);
} |
// Static factory producing an immutable AutoValue-backed DistributionData
// with the given sum, count, min and max.
public static DistributionData create(long sum, long count, long min, long max) {
return new AutoValue_DistributionData(sum, count, min, max);
} | @Test
public void testCreate() {
DistributionData data = DistributionData.create(5, 2, 1, 4);
assertEquals(5, data.sum());
assertEquals(2, data.count());
assertEquals(1, data.min());
assertEquals(4, data.max());
} |
@Override
// Delegates to the underlying gauge source for the current long value.
public long read() {
return gaugeSource.read();
} | @Test
public void whenDoubleProbe() {
metricsRegistry.registerStaticProbe(this, "foo", MANDATORY,
(DoubleProbeFunction<LongGaugeImplTest>) source -> 10);
LongGauge gauge = metricsRegistry.newLongGauge("foo");
long actual = gauge.read();
assertEquals(10, actual);
} |
/**
 * Persists every received API doc registration through the register repository.
 *
 * @param dataList registrations to persist; an empty collection is a no-op
 */
@Override
public void executor(final Collection<ApiDocRegisterDTO> dataList) {
    dataList.forEach(shenyuClientRegisterRepository::persistApiDoc);
} | @Test
public void testExecutorWithEmptyData() {
Collection<ApiDocRegisterDTO> dataList = new ArrayList<>();
executorSubscriber.executor(dataList);
verify(shenyuClientRegisterRepository, never()).persistApiDoc(any());
} |
/**
 * Writes each character of {@code v} as a single byte, truncating to the low
 * 8 bits. Input is expected to be ASCII for a faithful encoding.
 */
public void writeAscii(CharSequence v) {
  int length = v.length();
  for (int i = 0; i < length; i++) {
    int b = v.charAt(i) & 0xff;
    writeByte(b);
  }
} | @Test void writeAscii_long() {
assertThat(writeAscii(-1005656679588439279L))
.isEqualTo("-1005656679588439279");
assertThat(writeAscii(0L))
.isEqualTo("0");
assertThat(writeAscii(-9223372036854775808L /* Long.MIN_VALUE */))
.isEqualTo("-9223372036854775808");
assertThat(writeAscii(123456789L))
.isEqualTo("123456789");
} |
/**
 * Appends a serialized record or event to this subpartition, dispatching on
 * the data type.
 *
 * @throws InterruptedException if blocked while waiting to write
 */
public void append(ByteBuffer record, DataType dataType) throws InterruptedException {
    if (!dataType.isEvent()) {
        writeRecord(record, dataType);
    } else {
        writeEvent(record, dataType);
    }
} | @Test
void testAppendEventNotRequestBuffer() throws Exception {
CompletableFuture<Void> requestBufferFuture = new CompletableFuture<>();
HsMemoryDataManagerOperation memoryDataManagerOperation =
TestingMemoryDataManagerOperation.builder()
.setRequestBufferFromPoolSupplier(
() -> {
requestBufferFuture.complete(null);
return null;
})
.build();
HsSubpartitionMemoryDataManager subpartitionMemoryDataManager =
createSubpartitionMemoryDataManager(memoryDataManagerOperation);
subpartitionMemoryDataManager.append(createRecord(0), DataType.EVENT_BUFFER);
assertThat(requestBufferFuture).isNotDone();
} |
@Override
// True when this instance represents the local cluster member (set at build time).
public boolean localMember() {
return localMember;
} | @Test
public void testConstructor_withLiteMember_isFalse() {
MemberImpl member = new MemberImpl.Builder(address)
.version(MemberVersion.of("3.8.0"))
.localMember(true)
.uuid(newUnsecureUUID())
.build();
assertBasicMemberImplFields(member);
assertTrue(member.localMember());
assertFalse(member.isLiteMember());
} |
/**
 * Returns whether the given value is NaN.
 */
@Override
protected boolean isNan(Double number) {
    // Double.isNaN on the unboxed value is equivalent to number.isNaN()
    return Double.isNaN(number);
} | @Test
void testIsNan() {
DoubleSummaryAggregator ag = new DoubleSummaryAggregator();
assertThat(ag.isNan(-1.0)).isFalse();
assertThat(ag.isNan(0.0)).isFalse();
assertThat(ag.isNan(23.0)).isFalse();
assertThat(ag.isNan(Double.MAX_VALUE)).isFalse();
assertThat(ag.isNan(Double.MIN_VALUE)).isFalse();
assertThat(ag.isNan(Double.NaN)).isTrue();
} |
/**
 * Serializes the accumulated RESTCONF errors as a JSON object of the form
 * {"ietf-restconf:errors": [ ...one entry per error... ]}.
 */
public ObjectNode toRestconfErrorJson() {
    ObjectMapper mapper = new ObjectMapper();
    ObjectNode errorsNode = mapper.createObjectNode();
    ArrayNode errorArray = mapper.createArrayNode();
    restconfErrors.forEach(error -> errorArray.add(error.toJson()));
    errorsNode.put("ietf-restconf:errors", errorArray);
    return errorsNode;
} | @Test
public void testToRestconfErrorJson() {
IllegalArgumentException ie = new IllegalArgumentException("This is a test");
RestconfException e = new RestconfException("Error in system", ie,
RestconfError.ErrorTag.DATA_EXISTS, Response.Status.BAD_REQUEST,
Optional.of("/some/path"));
e.addToErrors(error1);
e.addToErrors(error2);
e.addToErrors(error3);
assertEquals("{\"ietf-restconf:errors\":[" +
"{\"error\":{" +
"\"error-type\":\"application\"," +
"\"error-tag\":\"data-exists\"," +
"\"error-path\":\"/some/path\"," +
"\"error-message\":\"Error in system\"," +
"\"error-info\":\"This is a test\"}}," +
"{\"error\":{" +
"\"error-type\":\"transport\"," +
"\"error-tag\":\"access-denied\"}}," +
"{\"error\":{" +
"\"error-type\":\"transport\"," +
"\"error-tag\":\"bad-attribute\"}}," +
"{\"error\":{" +
"\"error-type\":\"rpc\"," +
"\"error-tag\":\"bad-element\"," +
"\"error-app-tag\":\"my-app-tag\"," +
"\"error-path\":\"/a/b/c\"," +
"\"error-message\":\"a message about the error\"," +
"\"error-info\":\"info about the error\"}}]}",
e.toRestconfErrorJson().toString());
} |
/**
 * Renders this result set as a text table: a header row of column names
 * followed by one row per result row.
 *
 * @return the formatted table; cells that fail to render contain "ERROR"
 */
@Override
public String toString() {
  int numColumns = getColumnCount();
  TextTable table = new TextTable();
  String[] columnNames = new String[numColumns];
  for (int c = 0; c < numColumns; c++) {
    columnNames[c] = _columnsArray.get(c).asText();
  }
  table.addHeader(columnNames);
  int numRows = getRowCount();
  for (int r = 0; r < numRows; r++) {
    String[] columnValues = new String[numColumns];
    for (int c = 0; c < numColumns; c++) {
      try {
        columnValues[c] = getString(r, c);
      } catch (Exception e) {
        // Bug fix: mark the failing CELL -- the original wrote into the
        // columnNames (header) array, leaving this cell null.
        columnValues[c] = "ERROR";
      }
    }
    table.addRow(columnValues);
  }
  return table.toString();
} | @Test
public void testToString() {
// Run the test
final String result = _selectionResultSetUnderTest.toString();
// Verify the results
assertNotEquals("", result);
} |
@Override
// Builds a time-windowed view over this grouped stream, forwarding the
// grouping's serdes, builder state and graph node unchanged.
public <W extends Window> TimeWindowedKStream<K, V> windowedBy(final Windows<W> windows) {
return new TimeWindowedKStreamImpl<>(
windows,
builder,
subTopologySourceNodes,
name,
keySerde,
valueSerde,
aggregateBuilder,
graphNode
);
} | @Test
public void shouldNotHaveNullWindowsWithSlidingWindowedReduce() {
assertThrows(NullPointerException.class, () -> groupedStream.windowedBy((SlidingWindows) null));
} |
@Override
// No LIKE-wildcard escape string is supported by this metadata implementation.
public String getSearchStringEscape() {
return null;
} | @Test
void assertGetSearchStringEscape() {
assertNull(metaData.getSearchStringEscape());
} |
/**
 * Enforces the provider-side "executes" concurrency limit for the invoked method.
 * Rejects the call with LIMIT_EXCEEDED_EXCEPTION when the in-flight count would
 * exceed the configured maximum.
 */
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
URL url = invoker.getUrl();
String methodName = RpcUtils.getMethodName(invocation);
// Default 0 -- presumably treated as "unlimited" by RpcStatus.beginCount; confirm.
int max = url.getMethodParameter(methodName, EXECUTES_KEY, 0);
if (!RpcStatus.beginCount(url, methodName, max)) {
throw new RpcException(
RpcException.LIMIT_EXCEEDED_EXCEPTION,
"Failed to invoke method " + RpcUtils.getMethodName(invocation) + " in provider " + url
+ ", cause: The service using threads greater than <dubbo:service executes=\"" + max
+ "\" /> limited.");
}
// Start time is recorded for later consumption; the matching endCount is not
// visible in this block -- presumably handled by a response/error listener.
invocation.put(EXECUTE_LIMIT_FILTER_START_TIME, System.currentTimeMillis());
try {
return invoker.invoke(invocation);
} catch (Throwable t) {
// Preserve runtime exceptions as-is; wrap checked throwables in RpcException.
if (t instanceof RuntimeException) {
throw (RuntimeException) t;
} else {
throw new RpcException("unexpected exception when ExecuteLimitFilter", t);
}
}
} | @Test
void testNoExecuteLimitInvoke() {
Invoker invoker = Mockito.mock(Invoker.class);
when(invoker.invoke(any(Invocation.class))).thenReturn(new AppResponse("result"));
when(invoker.getUrl()).thenReturn(URL.valueOf("test://test:11/test?accesslog=true&group=dubbo&version=1.1"));
Invocation invocation = Mockito.mock(Invocation.class);
when(invocation.getMethodName()).thenReturn("testNoExecuteLimitInvoke");
Result result = executeLimitFilter.invoke(invoker, invocation);
Assertions.assertEquals("result", result.getValue());
} |
/**
 * Builds the job-exceptions response, honoring the optional upper-limit and
 * failure-label-filter query parameters (falling back to defaults when absent).
 */
@Override
protected JobExceptionsInfoWithHistory handleRequest(
        HandlerRequest<EmptyRequestBody> request, ExecutionGraphInfo executionGraph) {
    final List<Integer> maxSizes =
            request.getQueryParameter(UpperLimitExceptionParameter.class);
    final int exceptionToReportMaxSize =
            maxSizes.isEmpty() ? MAX_NUMBER_EXCEPTION_TO_REPORT : maxSizes.get(0);
    List<FailureLabelFilterParameter.FailureLabel> failureLabelFilter =
            request.getQueryParameter(FailureLabelFilterParameter.class);
    if (failureLabelFilter.isEmpty()) {
        failureLabelFilter = EMPTY_FAILURE_LABEL_FILTER;
    }
    return createJobExceptionsInfo(
            executionGraph, exceptionToReportMaxSize, failureLabelFilter);
} | @Test
void testWithExceptionHistoryWithTruncationThroughParameter()
throws HandlerRequestException, ExecutionException, InterruptedException {
final RootExceptionHistoryEntry rootCause =
fromGlobalFailure(new RuntimeException("exception #0"), System.currentTimeMillis());
final RootExceptionHistoryEntry otherFailure =
new RootExceptionHistoryEntry(
new RuntimeException("exception #1"),
System.currentTimeMillis(),
CompletableFuture.completedFuture(Collections.singletonMap("key", "value")),
"task name",
new LocalTaskManagerLocation(),
Collections.emptySet());
final ExecutionGraphInfo executionGraphInfo =
createExecutionGraphInfo(rootCause, otherFailure);
final HandlerRequest<EmptyRequestBody> request =
createRequest(executionGraphInfo.getJobId(), 1);
final JobExceptionsInfoWithHistory response =
testInstance.handleRequest(request, executionGraphInfo);
assertThat(response.getExceptionHistory().getEntries())
.satisfies(
matching(
contains(
historyContainsGlobalFailure(
rootCause.getException(),
rootCause.getTimestamp()))));
assertThat(response.getExceptionHistory().getEntries())
.satisfies(matching(iterableWithSize(1)));
assertThat(response.getExceptionHistory().isTruncated()).isTrue();
} |
/**
 * Sorts the table rows on two columns: primary key {@code id1} in direction
 * {@code dir1}, ties broken by {@code id2} in direction {@code dir2}.
 */
public void sort(String id1, SortDir dir1, String id2, SortDir dir2) {
    rows.sort(new RowComparator(id1, dir1, id2, dir2));
} | @Test
public void sortAlphaDescNumberAsc() {
tm = unsortedDoubleTableModel();
verifyRowOrder("unsorted", tm, UNSORTED_IDS);
tm.sort(ALPHA, SortDir.DESC, NUMBER, SortDir.ASC);
verifyRowOrder("adna", tm, ROW_ORDER_AD_NA);
} |
/**
 * Authenticates the request as a system call using the TLS peer principal.
 * Plaintext connections are a programming error and fail fast.
 */
@Override
public void handle(final RoutingContext routingContext) {
    final HttpConnection connection = routingContext.request().connection();
    if (!connection.isSsl()) {
        throw new IllegalStateException("Should only have ssl connections");
    }
    final Principal principal = getPeerPrincipal(connection.sslSession());
    routingContext.setUser(new SystemUser(principal));
    routingContext.next();
} | @Test (expected = IllegalStateException.class)
public void shouldNotSetUser_noSsl() {
when(routingContext.request()).thenReturn(request);
when(request.connection()).thenReturn(connection);
when(connection.isSsl()).thenReturn(false);
SystemAuthenticationHandler handler = new SystemAuthenticationHandler();
handler.handle(routingContext);
verify(routingContext, never()).setUser(any());
verify(routingContext).next();
} |
/**
 * Records the outcome of one heartbeat attempt: updates the matching
 * last-seen timestamp and the consecutive success/failure counters.
 */
public void heartbeat(boolean successful, Instant lastHeartbeatAttempt) {
    if (!successful) {
        heartbeatLastFailure = lastHeartbeatAttempt;
        heartbeatFailuresSinceLastSuccess++;
        heartbeatSuccessesSinceLastFailure = 0;
    } else {
        heartbeatLastSuccess = lastHeartbeatAttempt;
        heartbeatSuccessesSinceLastFailure++;
        heartbeatFailuresSinceLastSuccess = 0;
    }
} | @Test
void happy() {
HeartbeatState state =
new HeartbeatState(
clock,
clock.now(),
new HeartbeatConfig(Duration.ofMinutes(1), 4, Duration.ofMinutes(4)));
assertOk(state);
clock.tick(Duration.ofSeconds(30));
state.heartbeat(true, clock.now());
assertOk(state);
clock.tick(Duration.ofSeconds(60));
state.heartbeat(false, clock.now());
assertFailing(state, 1, 0.25);
} |
/**
 * Aggregates per-partition date column statistics into a single
 * ColumnStatisticsObj for the column.
 *
 * Strategy visible in this method:
 *  - If every partition carries a mergeable NDV estimator (bit vector), the
 *    estimators are merged to obtain the aggregate NDV.
 *  - Otherwise, when stats exist for every requested partition, NDV is
 *    estimated either via a density function (when
 *    useDensityFunctionForNDVEstimation is set) or by interpolating between
 *    the lower and upper bound using ndvTuner.
 *  - When stats are missing for some partitions, values are extrapolated from
 *    the partitions that do have them.
 *
 * @param colStatsWithSourceInfo per-partition stats with their source info
 * @param partNames names of all partitions the aggregation was requested for
 * @param areAllPartsFound whether stats were found for all requested partitions
 * @return the aggregated statistics object
 * @throws MetaException if the input statistics list fails validation
 */
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
List<String> partNames, boolean areAllPartsFound) throws MetaException {
checkStatisticsList(colStatsWithSourceInfo);
ColumnStatisticsObj statsObj = null;
String colType;
String colName = null;
// check if all the ColumnStatisticsObjs contain stats and all the ndv are
// bitvectors
boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
NumDistinctValueEstimator ndvEstimator = null;
boolean areAllNDVEstimatorsMergeable = true;
// First pass: initialize the result object from the first entry and decide
// whether every partition's NDV estimator exists and can be merged.
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
if (statsObj == null) {
colName = cso.getColName();
colType = cso.getColType();
statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
cso.getStatsData().getSetField());
LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName, doAllPartitionContainStats);
}
DateColumnStatsDataInspector columnStatsData = dateInspectorFromStats(cso);
// check if we can merge NDV estimators
if (columnStatsData.getNdvEstimator() == null) {
areAllNDVEstimatorsMergeable = false;
break;
} else {
NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator();
if (ndvEstimator == null) {
ndvEstimator = estimator;
} else {
if (!ndvEstimator.canMerge(estimator)) {
areAllNDVEstimatorsMergeable = false;
break;
}
}
}
}
// Start from an empty estimator of the same kind so per-partition estimators
// can be merged into it below.
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
LOG.debug("all of the bit vectors can merge for {} is {}", colName, areAllNDVEstimatorsMergeable);
ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
// Case 1: stats are present for every requested partition (or there is at
// most one entry), so no extrapolation is needed.
if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
DateColumnStatsDataInspector aggregateData = null;
long lowerBound = 0;
long higherBound = 0;
double densityAvgSum = 0.0;
DateColumnStatsMerger merger = new DateColumnStatsMerger();
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
DateColumnStatsDataInspector newData = dateInspectorFromStats(cso);
// lowerBound: max per-partition NDV; higherBound: sum of per-partition NDVs.
lowerBound = Math.max(lowerBound, newData.getNumDVs());
higherBound += newData.getNumDVs();
if (newData.isSetLowValue() && newData.isSetHighValue()) {
densityAvgSum += ((double) diff(newData.getHighValue(), newData.getLowValue())) / newData.getNumDVs();
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(merger.mergeLowValue(
merger.getLowValue(aggregateData), merger.getLowValue(newData)));
aggregateData.setHighValue(merger.mergeHighValue(
merger.getHighValue(aggregateData), merger.getHighValue(newData)));
aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(), newData.getNumDVs()));
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
// if all the ColumnStatisticsObjs contain bitvectors, we do not need to
// use uniform distribution assumption because we can merge bitvectors
// to get a good estimation.
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
} else {
long estimation;
if (useDensityFunctionForNDVEstimation && aggregateData != null
&& aggregateData.isSetLowValue() && aggregateData.isSetHighValue()) {
// We have estimation, lowerbound and higherbound. We use estimation
// if it is between lowerbound and higherbound.
double densityAvg = densityAvgSum / partNames.size();
estimation = (long) (diff(aggregateData.getHighValue(), aggregateData.getLowValue()) / densityAvg);
if (estimation < lowerBound) {
estimation = lowerBound;
} else if (estimation > higherBound) {
estimation = higherBound;
}
} else {
// Interpolate between the bounds; ndvTuner weights toward the upper bound.
estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
}
aggregateData.setNumDVs(estimation);
}
columnStatisticsData.setDateStats(aggregateData);
} else {
// TODO: bail out if missing stats are over a certain threshold
// we need extrapolation
LOG.debug("start extrapolation for {}", colName);
Map<String, Integer> indexMap = new HashMap<>();
for (int index = 0; index < partNames.size(); index++) {
indexMap.put(partNames.get(index), index);
}
Map<String, Double> adjustedIndexMap = new HashMap<>();
Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
// while we scan the css, we also get the densityAvg, lowerbound and
// higherbound when useDensityFunctionForNDVEstimation is true.
double densityAvgSum = 0.0;
if (!areAllNDVEstimatorsMergeable) {
// if not every partition uses bitvector for ndv, we just fall back to
// the traditional extrapolation methods.
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
DateColumnStatsData newData = cso.getStatsData().getDateStats();
if (useDensityFunctionForNDVEstimation && newData.isSetLowValue() && newData.isSetHighValue()) {
densityAvgSum += ((double) diff(newData.getHighValue(), newData.getLowValue())) / newData.getNumDVs();
}
adjustedIndexMap.put(partName, (double) indexMap.get(partName));
adjustedStatsMap.put(partName, cso.getStatsData());
}
} else {
// we first merge all the adjacent bitvectors that we could merge and
// derive new partition names and index.
StringBuilder pseudoPartName = new StringBuilder();
double pseudoIndexSum = 0;
int length = 0;
// curIndex == -1 is a sentinel meaning "no partition processed yet".
int curIndex = -1;
DateColumnStatsDataInspector aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
DateColumnStatsDataInspector newData = dateInspectorFromStats(cso);
// newData.isSetBitVectors() should be true for sure because we
// already checked it before.
if (indexMap.get(partName) != curIndex) {
// There is bitvector, but it is not adjacent to the previous ones.
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setDateStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += ((double) diff(aggregateData.getHighValue(), aggregateData.getLowValue()))
/ aggregateData.getNumDVs();
}
// reset everything
pseudoPartName = new StringBuilder();
pseudoIndexSum = 0;
length = 0;
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
aggregateData = null;
}
curIndex = indexMap.get(partName);
pseudoPartName.append(partName);
pseudoIndexSum += curIndex;
length++;
// Advance the expected index so the next partition is only considered
// "adjacent" if its index immediately follows this one.
curIndex++;
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(min(aggregateData.getLowValue(), newData.getLowValue()));
aggregateData.setHighValue(max(aggregateData.getHighValue(), newData.getHighValue()));
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
}
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
// Flush the trailing pseudo-partition accumulated by the loop above.
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setDateStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += ((double) diff(aggregateData.getHighValue(), aggregateData.getLowValue()))
/ aggregateData.getNumDVs();
}
}
}
extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
}
LOG.debug(
"Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}",
colName, columnStatisticsData.getDateStats().getNumDVs(), partNames.size(),
colStatsWithSourceInfo.size());
// Histograms are merged independently of the NDV path; a null result means
// no partition carried a histogram.
KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo);
if (mergedKllHistogramEstimator != null) {
columnStatisticsData.getDateStats().setHistogram(mergedKllHistogramEstimator.serialize());
}
statsObj.setStatsData(columnStatisticsData);
return statsObj;
} | @Test
// Verifies aggregation when stats exist for only a subset of partitions and
// the NDV sketches (FM sketch vs. HLL) cannot be merged: extrapolation is
// used, and enabling the density function changes the resulting NDV estimate.
public void testAggregateMultiStatsOnlySomeAvailableButUnmergeableBitVector() throws MetaException {
List<String> partitions = Arrays.asList("part1", "part2", "part3");
long[] values1 = { DATE_1.getDaysSinceEpoch(), DATE_2.getDaysSinceEpoch(), DATE_6.getDaysSinceEpoch() };
ColumnStatisticsData data1 = new ColStatsBuilder<>(Date.class).numNulls(1).numDVs(3)
.low(DATE_1).high(DATE_6).fmSketch(values1).kll(values1).build();
long[] values3 = { DATE_7.getDaysSinceEpoch() };
ColumnStatisticsData data3 = new ColStatsBuilder<>(Date.class).numNulls(3).numDVs(1)
.low(DATE_7).high(DATE_7).hll(DATE_7.getDaysSinceEpoch()).kll(values3).build();
List<ColStatsObjWithSourceInfo> statsList = Arrays.asList(
createStatsWithInfo(data1, TABLE, COL, partitions.get(0)),
createStatsWithInfo(data3, TABLE, COL, partitions.get(2)));
DateColumnStatsAggregator aggregator = new DateColumnStatsAggregator();
ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, false);
// hll in case of missing stats is left as null, only numDVs is updated
ColumnStatisticsData expectedStats = new ColStatsBuilder<>(Date.class).numNulls(6).numDVs(3)
.low(DATE_1).high(DATE_7).kll(Longs.concat(values1, values3)).build();
assertEqualStatistics(expectedStats, computedStatsObj.getStatsData());
aggregator.useDensityFunctionForNDVEstimation = true;
computedStatsObj = aggregator.aggregate(statsList, partitions, false);
// the use of the density function leads to a different estimation for numNDV
expectedStats = new ColStatsBuilder<>(Date.class).numNulls(6).numDVs(4)
.low(DATE_1).high(DATE_7).kll(Longs.concat(values1, values3)).build();
assertEqualStatistics(expectedStats, computedStatsObj.getStatsData());
} |
/**
 * Returns the portion of {@code s} that precedes the last occurrence of
 * {@code splitter}.
 *
 * <p>Fix: the original called {@code s.substring(0, s.lastIndexOf(splitter))}
 * unconditionally, which throws {@link StringIndexOutOfBoundsException} when
 * the splitter does not occur ({@code lastIndexOf} returns -1). We now return
 * the input unchanged in that case, matching the conventional
 * "substringBeforeLast" contract (e.g. Commons Lang).
 *
 * @param s the string to cut; must not be null
 * @param splitter the separator to search for; must not be null
 * @return the substring before the last occurrence of {@code splitter}, or
 *         {@code s} itself when the splitter is absent
 */
public static String substringBeforeLast(String s, String splitter) {
int idx = s.lastIndexOf(splitter);
// Guard against "splitter not found": substring(0, -1) would throw.
return idx < 0 ? s : s.substring(0, idx);
} | @Test
// Verifies that everything after the LAST '!' (the nested-jar entry path) is
// stripped while earlier '!' separators are preserved.
void testSubstringBeforeLast() {
String input = "jar:file:/home/ronald/Projects/Personal/JobRunr/bugs/jobrunr_issue/target/demo-0.0.1-SNAPSHOT.jar!/BOOT-INF/lib/jobrunr-1.0.0-SNAPSHOT.jar!/org/jobrunr/storage/sql/common/migrations";
assertThat(substringBeforeLast(input, "!")).isEqualTo("jar:file:/home/ronald/Projects/Personal/JobRunr/bugs/jobrunr_issue/target/demo-0.0.1-SNAPSHOT.jar!/BOOT-INF/lib/jobrunr-1.0.0-SNAPSHOT.jar");
} |
/**
 * Returns whether a request is allowed under the rate limit right now.
 *
 * <p>Non-blocking: a caller that cannot acquire the lock is simply denied
 * rather than waiting. When a full period has elapsed since the last allowed
 * request, the timestamp is advanced and the request is allowed.
 *
 * @return {@code true} if the request is allowed, {@code false} otherwise
 */
public boolean isAllowed() {
// Guard clause: contention means immediate denial, never blocking.
if (!lock.tryLock()) {
return false;
}
try {
boolean periodElapsed = lastAllowed.plus(perDuration).isBefore(now());
if (periodElapsed) {
lastAllowed = now();
}
return periodElapsed;
} finally {
lock.unlock();
}
} | @Test
// Verifies a 1-request-per-second limiter: the first call in a fresh period is
// allowed, the immediate second call is denied, and after roughly a second a
// call is allowed again.
void testRateLimit() {
final RateLimiter rateLimit = rateLimit().at1Request().per(SECOND);
await()
.pollInterval(ofMillis(20))
.atMost(ofMillis(150))
.untilAsserted(() -> {
assertThat(rateLimit.isAllowed()).isTrue();
assertThat(rateLimit.isAllowed()).isFalse();
});
await()
.pollInterval(ofMillis(20))
.atMost(ofMillis(1050))
.untilAsserted(() -> assertThat(rateLimit.isAllowed()).isTrue());
} |
/**
 * Builds a {@code DropSourceCommand} for a DROP STREAM statement by
 * delegating to the shared overload with the stream data-source type.
 *
 * @param statement the parsed DROP STREAM statement
 * @return the DDL command to execute
 */
public DropSourceCommand create(final DropStream statement) {
final DataSourceType sourceType = DataSourceType.KSTREAM;
return create(statement.getName(), statement.getIfExists(), statement.isDeleteTopic(), sourceType);
} | @Test
// Verifies that the factory produces a DropSourceCommand for a DROP TABLE
// statement (table counterpart of the stream overload).
public void shouldCreateCommandForDropTable() {
// Given:
final DropTable ddlStatement = new DropTable(TABLE_NAME, true, true);
// When:
final DdlCommand result = dropSourceFactory.create(ddlStatement);
// Then:
assertThat(result, instanceOf(DropSourceCommand.class));
} |
/**
 * Returns whether the given string is a valid URL according to this
 * validator's scheme/authority/path/query/fragment rules.
 *
 * @param value the candidate URL; null is invalid
 * @return true if the value passes URI parsing and all component checks
 */
public boolean isValid(String value) {
if (value == null) {
return false;
}
URI uri; // ensure value is a valid URI
try {
uri = new URI(value);
} catch (URISyntaxException e) {
return false;
}
// OK, perform additional validation
String scheme = uri.getScheme();
if (!isValidScheme(scheme)) {
return false;
}
String authority = uri.getRawAuthority();
if ("file".equals(scheme) && (authority == null || "".equals(authority))) { // Special case - file: allows an empty authority
return true; // this is a local file - nothing more to do here
} else if ("file".equals(scheme) && authority != null && authority.contains(":")) {
// file: URLs must not carry a port (colon in the authority).
return false;
} else {
// Validate the authority
if (!isValidAuthority(authority)) {
return false;
}
}
if (!isValidPath(uri.getRawPath())) {
return false;
}
if (!isValidQuery(uri.getRawQuery())) {
return false;
}
if (!isValidFragment(uri.getRawFragment())) {
return false;
}
return true;
} | @Test
// Regression test for VALIDATOR-339: trailing dots in hostnames are accepted
// for valid TLDs but must not make an invalid domain valid.
public void testValidator339() {
UrlValidator urlValidator = new UrlValidator();
assertTrue(urlValidator.isValid("http://www.cnn.com/WORLD/?hpt=sitenav")); // without
assertTrue(urlValidator.isValid("http://www.cnn.com./WORLD/?hpt=sitenav")); // with
assertFalse(urlValidator.isValid("http://www.cnn.com../")); // doubly dotty
assertFalse(urlValidator.isValid("http://www.cnn.invalid/"));
assertFalse(urlValidator.isValid("http://www.cnn.invalid./")); // check . does not affect invalid domains
} |
/**
 * Translates a web {@link SearchRequest} into an {@link IssueQuery}.
 *
 * <p>Dates are interpreted in the request's time zone when provided,
 * otherwise in the server clock's zone. When some requested rule keys do not
 * resolve to rules, a sentinel "non-existing-uuid" is added so the query
 * matches nothing for those rules instead of silently widening the search.
 *
 * @param request the issue search request
 * @return the constructed query
 */
public IssueQuery create(SearchRequest request) {
try (DbSession dbSession = dbClient.openSession(false)) {
final ZoneId timeZone = parseTimeZone(request.getTimeZone()).orElse(clock.getZone());
Collection<RuleDto> ruleDtos = ruleKeysToRuleId(dbSession, request.getRules());
Collection<String> ruleUuids = ruleDtos.stream().map(RuleDto::getUuid).collect(Collectors.toSet());
Collection<String> issueKeys = collectIssueKeys(dbSession, request);
// Some requested rules were not found: force an impossible uuid so the
// query does not match issues of unrelated rules.
if (request.getRules() != null && request.getRules().stream().collect(Collectors.toSet()).size() != ruleDtos.size()) {
ruleUuids.add("non-existing-uuid");
}
IssueQuery.Builder builder = IssueQuery.builder()
.issueKeys(issueKeys)
.severities(request.getSeverities())
.cleanCodeAttributesCategories(request.getCleanCodeAttributesCategories())
.impactSoftwareQualities(request.getImpactSoftwareQualities())
.impactSeverities(request.getImpactSeverities())
.statuses(request.getStatuses())
.resolutions(request.getResolutions())
.issueStatuses(request.getIssueStatuses())
.resolved(request.getResolved())
.prioritizedRule(request.getPrioritizedRule())
.rules(ruleDtos)
.ruleUuids(ruleUuids)
.assigneeUuids(request.getAssigneeUuids())
.authors(request.getAuthors())
.scopes(request.getScopes())
.languages(request.getLanguages())
.tags(request.getTags())
.types(request.getTypes())
.pciDss32(request.getPciDss32())
.pciDss40(request.getPciDss40())
.owaspAsvs40(request.getOwaspAsvs40())
.owaspAsvsLevel(request.getOwaspAsvsLevel())
.owaspTop10(request.getOwaspTop10())
.owaspTop10For2021(request.getOwaspTop10For2021())
.stigAsdR5V3(request.getStigAsdV5R3())
.casa(request.getCasa())
.sansTop25(request.getSansTop25())
.cwe(request.getCwe())
.sonarsourceSecurity(request.getSonarsourceSecurity())
.assigned(request.getAssigned())
.createdAt(parseStartingDateOrDateTime(request.getCreatedAt(), timeZone))
.createdBefore(parseEndingDateOrDateTime(request.getCreatedBefore(), timeZone))
.facetMode(request.getFacetMode())
.timeZone(timeZone)
.codeVariants(request.getCodeVariants());
List<ComponentDto> allComponents = new ArrayList<>();
// Component handling mutates allComponents and may restrict the query scope.
boolean effectiveOnComponentOnly = mergeDeprecatedComponentParameters(dbSession, request, allComponents);
addComponentParameters(builder, dbSession, effectiveOnComponentOnly, allComponents, request);
setCreatedAfterFromRequest(dbSession, builder, request, allComponents, timeZone);
String sort = request.getSort();
if (!isNullOrEmpty(sort)) {
builder.sort(sort);
builder.asc(request.getAsc());
}
return builder.build();
}
} | @Test
// Verifies that createdBefore is parsed in the time zone supplied by the
// request (Europe/Moscow, UTC+3) rather than the server default.
public void use_provided_timezone_to_parse_createdBefore() {
SearchRequest request = new SearchRequest()
.setCreatedBefore("2020-04-16")
.setTimeZone("Europe/Moscow");
IssueQuery query = underTest.create(request);
assertThat(query.createdBefore()).isEqualTo(parseDateTime("2020-04-17T00:00:00+0300"));
} |
/**
 * Returns the {@code oriTableName} recorded in this DDL result.
 *
 * @return the original table name, or {@code null} when it was never set
 */
public String getOriTableName() {
return this.oriTableName;
} | @Test
// Verifies the getter returns null on a freshly constructed DdlResult (the
// field's default value).
public void getOriTableNameOutputNull() {
// Arrange
final DdlResult objectUnderTest = new DdlResult();
// Act
final String actual = objectUnderTest.getOriTableName();
// Assert result
Assert.assertNull(actual);
} |
/**
 * Deprecated overload that auto-generates a processor name and delegates to
 * the {@code Named}-based variant.
 *
 * @deprecated callers should use the overload accepting a {@code Named}
 */
@Override
@Deprecated
public void process(final org.apache.kafka.streams.processor.ProcessorSupplier<? super K, ? super V> processorSupplier,
final String... stateStoreNames) {
process(processorSupplier, Named.as(builder.newProcessorName(PROCESSOR_NAME)), stateStoreNames);
} | @Test
// Verifies the Named-based process overload rejects a null Named argument
// with a descriptive NullPointerException.
public void shouldNotAllowNullNamedOnProcessWithStores() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.process(processorSupplier, (Named) null, "storeName"));
assertThat(exception.getMessage(), equalTo("named can't be null"));
} |
/**
 * Returns the current position in the output stream.
 *
 * @return the locally tracked byte position
 * @throws IOException declared by the overridden contract; never thrown here
 */
@Override
public long getPos() throws IOException {
return position;
} | @Test
// Verifies getPos() reflects the position the stream was constructed with.
public void shouldReturnPosition() throws IOException {
assertEquals(position, fsDataOutputStream.getPos());
} |
/**
 * Binds the transport and blocks until the server is started, using the
 * default 45-second timeout.
 *
 * @return the started {@link DisposableServer}
 */
public final DisposableServer bindNow() {
final Duration defaultTimeout = Duration.ofSeconds(45);
return bindNow(defaultTimeout);
} | @Test
// Verifies that binding a transport backed by an empty Mono fails with a
// NullPointerException rather than hanging.
void testBindMonoEmpty() {
assertThatExceptionOfType(NullPointerException.class)
.isThrownBy(() -> new TestServerTransport(Mono.empty()).bindNow(Duration.ofMillis(Long.MAX_VALUE)));
} |
/**
 * Returns true when none of the elements represented by {@code value} occur
 * in {@code collection}.
 *
 * <p>Both arguments may be Java collections, parseable collection strings
 * (e.g. "a, b"), or Jackson ArrayNodes; scalar values are compared after
 * formatting against the element type of the target collection.
 *
 * @param collection the collection to check against; must not be null
 * @param value the element(s) to look for; must not be null
 * @return true if there is no overlap, false otherwise
 * @throws IllegalArgumentException when either argument is null
 */
public static boolean noneOf(Object collection, Object value) {
if (collection == null) {
throw new IllegalArgumentException("collection cannot be null");
}
if (value == null) {
throw new IllegalArgumentException("value cannot be null");
}
// collection to check against
Collection targetCollection = getTargetCollection(collection, value);
// elements to check
if (DMNParseUtil.isParseableCollection(value)) {
Collection valueCollection = DMNParseUtil.parseCollection(value, targetCollection);
return !CollectionUtils.containsAny(targetCollection, valueCollection);
} else if (DMNParseUtil.isJavaCollection(value)) {
return !CollectionUtils.containsAny(targetCollection, (Collection) value);
} else if (DMNParseUtil.isArrayNode(value)) {
Collection valueCollection = DMNParseUtil.getCollectionFromArrayNode((ArrayNode) value);
return !CollectionUtils.containsAny(targetCollection, valueCollection);
} else {
// Scalar case: format the single value to match the target's element type.
Object formattedValue = DMNParseUtil.getFormattedValue(value, targetCollection);
return !targetCollection.contains(formattedValue);
}
} | @Test
// Exercises noneOf across the three supported input shapes: Java lists,
// comma-separated strings, and Jackson JSON array nodes.
public void noneOf() {
assertThat(CollectionUtil.noneOf(Arrays.asList("group1", "group2"), Arrays.asList("group3", "group4"))).isTrue();
assertThat(CollectionUtil.noneOf(Arrays.asList("group1", "group2"), Arrays.asList("group1", "group2"))).isFalse();
assertThat(CollectionUtil.noneOf(Arrays.asList("group1", "group2"), Arrays.asList("group2", "group3"))).isFalse();
assertThat(CollectionUtil.noneOf(Arrays.asList("group1", "group2"), "group3")).isTrue();
assertThat(CollectionUtil.noneOf(Arrays.asList("group1", "group2"), "group2")).isFalse();
assertThat(CollectionUtil.noneOf("group1, group2", "group3, group4")).isTrue();
assertThat(CollectionUtil.noneOf("group1, group2", "group1, group2")).isFalse();
assertThat(CollectionUtil.noneOf("group1, group2", "group2, group3")).isFalse();
ObjectMapper mapper = new ObjectMapper();
assertThat(CollectionUtil.noneOf(mapper.valueToTree(Arrays.asList("group1", "group2")), mapper.valueToTree(Arrays.asList("group3", "group4"))))
.isTrue();
assertThat(CollectionUtil.noneOf(mapper.valueToTree(Arrays.asList("group1", "group2")), mapper.valueToTree(Arrays.asList("group1", "group2"))))
.isFalse();
assertThat(CollectionUtil.noneOf(mapper.valueToTree(Arrays.asList("group1", "group2")), mapper.valueToTree(Arrays.asList("group2", "group3"))))
.isFalse();
} |
/**
 * Submits an application to one of the federated sub-clusters, retrying on
 * failure against other active sub-clusters.
 *
 * <p>The retry count is capped at the number of active sub-clusters so we
 * never retry more times than there are candidates. Failed sub-clusters are
 * collected in {@code blacklist} and skipped on subsequent attempts.
 *
 * @param request the submission request; must carry a submission context with
 *        an application id
 * @return the successful submission response
 * @throws YarnException if validation fails or all retries are exhausted
 * @throws IOException declared by the client protocol
 */
@Override
public SubmitApplicationResponse submitApplication(
SubmitApplicationRequest request) throws YarnException, IOException {
if (request == null || request.getApplicationSubmissionContext() == null
|| request.getApplicationSubmissionContext().getApplicationId() == null) {
routerMetrics.incrAppsFailedSubmitted();
String errMsg =
"Missing submitApplication request or applicationSubmissionContext information.";
RouterAuditLogger.logFailure(user.getShortUserName(), SUBMIT_NEW_APP, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, errMsg);
RouterServerUtil.logAndThrowException(errMsg, null);
}
long startTime = clock.getTime();
ApplicationId applicationId =
request.getApplicationSubmissionContext().getApplicationId();
List<SubClusterId> blacklist = new ArrayList<>();
try {
// We need to handle this situation,
// the user will provide us with an expected submitRetries,
// but if the number of Active SubClusters is less than this number at this time,
// we should provide a high number of retry according to the number of Active SubClusters.
int activeSubClustersCount = federationFacade.getActiveSubClustersCount();
int actualRetryNums = Math.min(activeSubClustersCount, numSubmitRetries);
// Try calling the SubmitApplication method
SubmitApplicationResponse response =
((FederationActionRetry<SubmitApplicationResponse>) (retryCount) ->
invokeSubmitApplication(blacklist, request, retryCount)).
runWithRetries(actualRetryNums, submitIntervalTime);
if (response != null) {
long stopTime = clock.getTime();
routerMetrics.succeededAppsSubmitted(stopTime - startTime);
return response;
}
} catch (Exception e) {
routerMetrics.incrAppsFailedSubmitted();
RouterAuditLogger.logFailure(user.getShortUserName(), SUBMIT_NEW_APP, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, e.getMessage(), applicationId);
RouterServerUtil.logAndThrowException(e.getMessage(), e);
}
// Reached only when all retries returned null responses.
routerMetrics.incrAppsFailedSubmitted();
String msg = String.format("Application %s with appId %s failed to be submitted.",
request.getApplicationSubmissionContext().getApplicationName(), applicationId);
RouterAuditLogger.logFailure(user.getShortUserName(), SUBMIT_NEW_APP, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, msg, applicationId);
throw new YarnException(msg);
} | @Test
// Verifies that submitting the same application twice succeeds and that the
// home sub-cluster recorded in the state store is unchanged by the retry.
public void testSubmitApplicationMultipleSubmission()
throws YarnException, IOException, InterruptedException {
LOG.info(
"Test FederationClientInterceptor: Submit Application - Multiple");
ApplicationId appId =
ApplicationId.newInstance(System.currentTimeMillis(), 1);
SubmitApplicationRequest request = mockSubmitApplicationRequest(appId);
// First attempt
SubmitApplicationResponse response = interceptor.submitApplication(request);
Assert.assertNotNull(response);
SubClusterId scIdResult = stateStoreUtil.queryApplicationHomeSC(appId);
Assert.assertNotNull(scIdResult);
// First retry
response = interceptor.submitApplication(request);
Assert.assertNotNull(response);
SubClusterId scIdResult2 = stateStoreUtil.queryApplicationHomeSC(appId);
Assert.assertNotNull(scIdResult2);
// Fix: the original asserted assertEquals(scIdResult, scIdResult) — a
// self-comparison that can never fail. The intent is clearly to check the
// retry did not move the app to a different home sub-cluster.
Assert.assertEquals(scIdResult, scIdResult2);
} |
/**
 * Counts the non-overlapping occurrences of {@code searchFor} in
 * {@code string} (the scan resumes after each match).
 *
 * <p>Fix: the original guarded against a null/empty {@code string} but then
 * dereferenced {@code searchFor} unconditionally, throwing a
 * NullPointerException for a null search term. A null search term now yields
 * 0, consistent with the empty-search-term behavior.
 *
 * @param string the string to scan; null or empty yields 0
 * @param searchFor the substring to count; null or empty yields 0
 * @return the number of non-overlapping occurrences
 */
public static int getOccurenceString( String string, String searchFor ) {
if ( string == null || string.length() == 0 || searchFor == null ) {
return 0;
}
int counter = 0;
int len = searchFor.length();
if ( len > 0 ) {
int start = string.indexOf( searchFor );
while ( start != -1 ) {
counter++;
// Skip past this match so overlapping occurrences are not double-counted.
start = string.indexOf( searchFor, start + len );
}
}
return counter;
} | @Test
// Verifies occurrence counting for empty input, a missing term, and a term
// appearing twice.
public void testGetOccurenceString() {
assertEquals( 0, Const.getOccurenceString( "", "" ) );
assertEquals( 0, Const.getOccurenceString( "foo bar bazfoo", "cat" ) );
assertEquals( 2, Const.getOccurenceString( "foo bar bazfoo", "foo" ) );
} |
/**
 * Factory entry point: validates the supplied options and constructs a
 * {@code HiveCatalog} from them.
 *
 * @param context factory context carrying the catalog name and options
 * @return the configured HiveCatalog
 */
@Override
public Catalog createCatalog(Context context) {
final FactoryUtil.CatalogFactoryHelper helper =
FactoryUtil.createCatalogFactoryHelper(this, context);
// Fails fast with a ValidationException on missing/invalid options.
helper.validate();
return new HiveCatalog(
context.getName(),
helper.getOptions().get(DEFAULT_DATABASE),
helper.getOptions().get(HIVE_CONF_DIR),
helper.getOptions().get(HADOOP_CONF_DIR),
helper.getOptions().get(HIVE_VERSION));
} | @Test
// Verifies that creating a Hive catalog with only the type option (no hive
// configuration) is rejected during option validation.
public void testDisallowEmbedded() {
expectedException.expect(ValidationException.class);
final Map<String, String> options = new HashMap<>();
options.put(CommonCatalogOptions.CATALOG_TYPE.key(), HiveCatalogFactoryOptions.IDENTIFIER);
FactoryUtil.createCatalog(
"my_catalog", options, null, Thread.currentThread().getContextClassLoader());
} |
/**
 * Checks a username/password pair against the configured credentials,
 * caching both valid and invalid results to skip repeated hashing.
 *
 * <p>NOTE(review): the invalid-credentials cache grows with every distinct
 * wrong pair — confirm it is bounded elsewhere. The hash comparison uses
 * String.equals, which is not constant-time; acceptable only if timing
 * attacks are out of scope here — confirm.
 *
 * @param username the supplied username; null is rejected
 * @param password the supplied password; null is rejected
 * @return true when the pair matches the configured username and password hash
 */
@Override
public boolean checkCredentials(String username, String password) {
if (username == null || password == null) {
return false;
}
Credentials credentials = new Credentials(username, password);
// Fast path: previously seen pairs skip the (expensive) hash computation.
if (validCredentialsCache.contains(credentials)) {
return true;
} else if (invalidCredentialsCache.contains(credentials)) {
return false;
}
boolean isValid =
this.username.equals(username)
&& this.passwordHash.equals(
generatePasswordHash(
algorithm, salt, iterations, keyLength, password));
if (isValid) {
validCredentialsCache.add(credentials);
} else {
invalidCredentialsCache.add(credentials);
}
return isValid;
} | @Test
// Verifies that a PBKDF2 hash supplied in upper case without colon separators
// is accepted: only the exact valid username/password pair authenticates.
public void testPBKDF2WithHmacSHA256_upperCaseWithoutColon() throws Exception {
String algorithm = "PBKDF2WithHmacSHA256";
int iterations = 1000;
int keyLength = 128;
String hash =
"B6:9C:5C:8A:10:3E:41:7B:BA:18:FC:E1:F2:0C:BC:D9:65:70:D3:53:AB:97:EE:2F:3F:A8:88:AF:43:EA:E6:D7:FB"
+ ":70:14:23:F9:51:29:5C:3A:9F:65:C3:20:EE:09:C9:C6:8A:B7:D3:0A:E1:F3:10:2B:9B:36:3F:1F:B6:1D:52:A7"
+ ":9C:CB:AD:55:25:46:C5:73:09:6C:38:9C:F2:FD:82:7F:90:E5:31:EF:7E:3E:6B:B2:0C:38:77:23:EC:3A:CF:29"
+ ":F7:E5:4D:4E:CC:35:7A:C2:E5:CB:E3:B3:E5:09:2B:CC:B9:40:26:A4:28:E9:5F:2D:18:B2:14:41:E7:4D:5B";
hash = hash.toUpperCase().replace(":", "");
PBKDF2Authenticator PBKDF2Authenticator =
new PBKDF2Authenticator(
"/", VALID_USERNAME, hash, algorithm, SALT, iterations, keyLength);
for (String username : TEST_USERNAMES) {
for (String password : TEST_PASSWORDS) {
boolean expectedIsAuthenticated =
VALID_USERNAME.equals(username) && VALID_PASSWORD.equals(password);
boolean actualIsAuthenticated =
PBKDF2Authenticator.checkCredentials(username, password);
assertEquals(expectedIsAuthenticated, actualIsAuthenticated);
}
}
} |
/**
 * Creates a filter matching stanzas whose "from" equals the given address in
 * bare form (the boolean argument enables bare matching; see the
 * bare-compare tests for the accepted/rejected combinations).
 *
 * @param address the address to match against
 * @return a new bare-matching FromMatchesFilter
 */
public static FromMatchesFilter createBare(Jid address) {
return new FromMatchesFilter(address, true);
} | @Test
// Verifies a bare filter built from a service JID accepts only that exact
// service JID, rejecting other services, user bare JIDs, and full JIDs.
public void bareCompareMatchingServiceJid() {
FromMatchesFilter filter = FromMatchesFilter.createBare(SERVICE_JID1);
Stanza packet = StanzaBuilder.buildMessage().build();
packet.setFrom(SERVICE_JID1);
assertTrue(filter.accept(packet));
packet.setFrom(SERVICE_JID2);
assertFalse(filter.accept(packet));
packet.setFrom(BASE_JID1);
assertFalse(filter.accept(packet));
packet.setFrom(FULL_JID1_R1);
assertFalse(filter.accept(packet));
packet.setFrom(BASE_JID3);
assertFalse(filter.accept(packet));
} |
/**
 * Determines whether the given path exists on the server by attempting an
 * attribute lookup and translating "not found" into {@code false}.
 *
 * @param file the path to probe
 * @param listener progress listener forwarded to the attribute lookup
 * @return true when the path exists, false when the server reports not-found
 * @throws BackgroundException for failures other than a missing path
 */
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
final SDSAttributesFinderFeature finder = new SDSAttributesFinderFeature(session, nodeid);
try {
// A successful lookup implies existence; the attributes are discarded.
finder.find(file, listener);
return true;
}
catch(final NotfoundException e) {
return false;
}
} | @Test
// Creates a remote directory, verifies find() reports it present (and a
// same-named file path absent), then cleans up.
public void testFindDirectory() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path folder = new SDSDirectoryFeature(session, nodeid).mkdir(
new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
assertTrue(new SDSFindFeature(session, nodeid).find(folder));
assertFalse(new SDSFindFeature(session, nodeid).find(new Path(folder.getAbsolute(), EnumSet.of(Path.Type.file))));
new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
/**
 * Registers an SDK worker's status stream with this service.
 *
 * <p>If a client future already exists for the worker id: a completed future
 * means a stale prior connection, which is closed and replaced; an incomplete
 * future is completed with the new client so waiters are released.
 *
 * @param requestObserver observer for status responses sent by the worker
 * @return the request observer the worker should receive status requests on
 * @throws IllegalStateException if the service has been closed
 */
@Override
public StreamObserver<WorkerStatusResponse> workerStatus(
StreamObserver<WorkerStatusRequest> requestObserver) {
if (isClosed.get()) {
throw new IllegalStateException("BeamWorkerStatusGrpcService already closed.");
}
String workerId = headerAccessor.getSdkWorkerId();
LOG.info("Beam Fn Status client connected with id {}", workerId);
WorkerStatusClient fnApiStatusClient =
WorkerStatusClient.forRequestObserver(workerId, requestObserver);
// compute() gives an atomic read-modify-write per worker id, so concurrent
// reconnects for the same worker cannot race.
connectedClient.compute(
workerId,
(k, existingClientFuture) -> {
if (existingClientFuture != null) {
try {
if (existingClientFuture.isDone()) {
LOG.info(
"SDK Worker {} was connected to status server previously, disconnecting old client",
workerId);
existingClientFuture.get().close();
} else {
existingClientFuture.complete(fnApiStatusClient);
return existingClientFuture;
}
} catch (IOException | InterruptedException | ExecutionException e) {
LOG.warn("Error closing worker status client", e);
}
}
return CompletableFuture.completedFuture(fnApiStatusClient);
});
return fnApiStatusClient.getResponseObserver();
} | @Test
// Verifies that after a worker opens the status stream, a status client is
// registered for its id.
public void testClientConnected() throws Exception {
stub.workerStatus(mockObserver);
WorkerStatusClient client = waitAndGetStatusClient(ID);
assertNotNull(client);
} |
/**
 * Validates that offset data, when present, is a Map; Map-level validation
 * is delegated to the Map overload.
 *
 * @param offsetData the offsets to validate; null is treated as valid (no-op)
 * @throws DataException if the offsets are not a Map
 */
@SuppressWarnings("unchecked")
public static void validateFormat(Object offsetData) {
if (offsetData == null)
return;
if (!(offsetData instanceof Map))
throw new DataException("Offsets must be specified as a Map");
// Cast is checked above; key/value types are validated by the Map overload.
validateFormat((Map<Object, Object>) offsetData);
} | @Test
// Verifies that a well-formed offsets map passes validation silently.
public void testValidateFormatWithValidFormat() {
Map<Object, Object> offsetData = Collections.singletonMap("key", 1);
// Expect no exception to be thrown
OffsetUtils.validateFormat(offsetData);
} |
/**
 * Authenticates client requests signed with an access key.
 *
 * <p>Rejects requests with a missing app id (400). When the app has access
 * keys configured, the request must carry a timestamp within the allowed
 * clock-skew tolerance and a signature matching one of the app's secrets
 * (401 otherwise). Apps without configured secrets pass through unchecked.
 */
@Override
public void doFilter(ServletRequest req, ServletResponse resp, FilterChain chain)
throws IOException, ServletException {
HttpServletRequest request = (HttpServletRequest) req;
HttpServletResponse response = (HttpServletResponse) resp;
String appId = accessKeyUtil.extractAppIdFromRequest(request);
if (StringUtils.isBlank(appId)) {
response.sendError(HttpServletResponse.SC_BAD_REQUEST, "InvalidAppId");
return;
}
List<String> availableSecrets = accessKeyUtil.findAvailableSecret(appId);
if (!CollectionUtils.isEmpty(availableSecrets)) {
String timestamp = request.getHeader(Signature.HTTP_HEADER_TIMESTAMP);
String authorization = request.getHeader(HttpHeaders.AUTHORIZATION);
// check timestamp, valid within 1 minute
if (!checkTimestamp(timestamp)) {
logger.warn("Invalid timestamp. appId={},timestamp={}", appId, timestamp);
response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "RequestTimeTooSkewed");
return;
}
// check signature
String uri = request.getRequestURI();
String query = request.getQueryString();
if (!checkAuthorization(authorization, availableSecrets, timestamp, uri, query)) {
logger.warn("Invalid authorization. appId={},authorization={}", appId, authorization);
response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "Unauthorized");
return;
}
}
chain.doFilter(request, response);
} | @Test
// Happy path: a request with a known app id, a fresh timestamp, and a
// matching signature passes the filter with no error responses.
public void testAuthorizedSuccessfully() throws Exception {
String appId = "someAppId";
String availableSignature = "someSignature";
List<String> secrets = Lists.newArrayList("someSecret");
String oneMinAgoTimestamp = Long.toString(System.currentTimeMillis());
String correctAuthorization = "Apollo someAppId:someSignature";
when(accessKeyUtil.extractAppIdFromRequest(any())).thenReturn(appId);
when(accessKeyUtil.findAvailableSecret(appId)).thenReturn(secrets);
when(accessKeyUtil.buildSignature(any(), any(), any(), any())).thenReturn(availableSignature);
when(request.getHeader(Signature.HTTP_HEADER_TIMESTAMP)).thenReturn(oneMinAgoTimestamp);
when(request.getHeader(HttpHeaders.AUTHORIZATION)).thenReturn(correctAuthorization);
when(bizConfig.accessKeyAuthTimeDiffTolerance()).thenReturn(60);
clientAuthenticationFilter.doFilter(request, response, filterChain);
verify(response, never()).sendError(HttpServletResponse.SC_BAD_REQUEST, "InvalidAppId");
verify(response, never()).sendError(HttpServletResponse.SC_UNAUTHORIZED, "RequestTimeTooSkewed");
verify(response, never()).sendError(HttpServletResponse.SC_UNAUTHORIZED, "Unauthorized");
verify(filterChain, times(1)).doFilter(request, response);
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.