focal_method stringlengths 13 60.9k | test_case stringlengths 25 109k |
|---|---|
/**
 * Allocate a buffer of the given size, blocking for up to {@code maxTimeToBlockMs}
 * milliseconds until enough memory becomes available.
 *
 * @param size the requested buffer size in bytes; must not exceed the pool's total memory
 * @param maxTimeToBlockMs maximum time in milliseconds to block waiting for memory
 * @return a pooled buffer when {@code size == poolableSize} and one is free, otherwise a
 *         freshly allocated buffer
 * @throws InterruptedException if the thread is interrupted while waiting for memory
 */
public ByteBuffer allocate(int size, long maxTimeToBlockMs) throws InterruptedException {
if (size > this.totalMemory)
throw new IllegalArgumentException("Attempt to allocate " + size
+ " bytes, but there is a hard limit of "
+ this.totalMemory
+ " on memory allocations.");
ByteBuffer buffer = null;
this.lock.lock();
// Fail fast when the pool is already closed; we are not inside the try block yet,
// so the lock must be released manually before throwing.
if (this.closed) {
this.lock.unlock();
throw new KafkaException("Producer closed while allocating memory");
}
try {
// check if we have a free buffer of the right size pooled
if (size == poolableSize && !this.free.isEmpty())
return this.free.pollFirst();
// now check if the request is immediately satisfiable with the
// memory on hand or if we need to block
int freeListSize = freeSize() * this.poolableSize;
if (this.nonPooledAvailableMemory + freeListSize >= size) {
// we have enough unallocated or pooled memory to immediately
// satisfy the request, but need to allocate the buffer
freeUp(size);
this.nonPooledAvailableMemory -= size;
} else {
// we are out of memory and will have to block
int accumulated = 0;
Condition moreMemory = this.lock.newCondition();
try {
long remainingTimeToBlockNs = TimeUnit.MILLISECONDS.toNanos(maxTimeToBlockMs);
this.waiters.addLast(moreMemory);
// loop over and over until we have a buffer or have reserved
// enough memory to allocate one
while (accumulated < size) {
long startWaitNs = time.nanoseconds();
long timeNs;
boolean waitingTimeElapsed;
// Record the wait time even when await() throws (e.g. InterruptedException).
try {
waitingTimeElapsed = !moreMemory.await(remainingTimeToBlockNs, TimeUnit.NANOSECONDS);
} finally {
long endWaitNs = time.nanoseconds();
timeNs = Math.max(0L, endWaitNs - startWaitNs);
recordWaitTime(timeNs);
}
if (this.closed)
throw new KafkaException("Producer closed while allocating memory");
if (waitingTimeElapsed) {
this.metrics.sensor("buffer-exhausted-records").record();
throw new BufferExhaustedException("Failed to allocate " + size + " bytes within the configured max blocking time "
+ maxTimeToBlockMs + " ms. Total memory: " + totalMemory() + " bytes. Available memory: " + availableMemory()
+ " bytes. Poolable size: " + poolableSize() + " bytes");
}
remainingTimeToBlockNs -= timeNs;
// check if we can satisfy this request from the free list,
// otherwise allocate memory
if (accumulated == 0 && size == this.poolableSize && !this.free.isEmpty()) {
// just grab a buffer from the free list
buffer = this.free.pollFirst();
accumulated = size;
} else {
// we'll need to allocate memory, but we may only get
// part of what we need on this iteration
freeUp(size - accumulated);
int got = (int) Math.min(size - accumulated, this.nonPooledAvailableMemory);
this.nonPooledAvailableMemory -= got;
accumulated += got;
}
}
// Don't reclaim memory on throwable since nothing was thrown
accumulated = 0;
} finally {
// When this loop was not able to successfully terminate don't loose available memory
this.nonPooledAvailableMemory += accumulated;
this.waiters.remove(moreMemory);
}
}
} finally {
// signal any additional waiters if there is more memory left
// over for them
try {
if (!(this.nonPooledAvailableMemory == 0 && this.free.isEmpty()) && !this.waiters.isEmpty())
this.waiters.peekFirst().signal();
} finally {
// Another finally... otherwise find bugs complains
lock.unlock();
}
}
// A pooled buffer was grabbed inside the loop, otherwise allocate a new one outside the lock.
if (buffer == null)
return safeAllocateByteBuffer(size);
else
return buffer;
} | @Test
public void testBufferExhaustedExceptionIsThrown() throws Exception {
BufferPool pool = new BufferPool(2, 1, metrics, time, metricGroup);
pool.allocate(1, maxBlockTimeMs);
assertThrows(BufferExhaustedException.class, () -> pool.allocate(2, maxBlockTimeMs));
} |
/**
 * Converts an AclRule custom resource into one or more internal SimpleAclRule objects.
 * At most one of the new list-valued {@code operations} field and the deprecated
 * single-valued {@code operation} field may be set.
 *
 * @param rule the custom-resource ACL rule to convert
 * @return one SimpleAclRule per operation in the rule
 * @throws InvalidResourceException when both {@code operations} and {@code operation} are set
 */
@SuppressWarnings("deprecation")
public static List<SimpleAclRule> fromCrd(AclRule rule) {
if (rule.getOperations() != null && rule.getOperation() != null) {
throw new InvalidResourceException("Both fields `operations` and `operation` cannot be filled in at the same time");
} else if (rule.getOperations() != null) {
// New-style rule: expand the operations list into one SimpleAclRule each.
List<SimpleAclRule> simpleAclRules = new ArrayList<>();
for (AclOperation operation : rule.getOperations()) {
simpleAclRules.add(new SimpleAclRule(rule.getType(), SimpleAclRuleResource.fromCrd(rule.getResource()), rule.getHost(), operation));
}
return simpleAclRules;
} else {
// Deprecated single-operation form.
// NOTE(review): if neither field is set this creates a rule with a null operation —
// confirm upstream CRD validation guarantees at least one of the two is present.
return List.of(new SimpleAclRule(rule.getType(), SimpleAclRuleResource.fromCrd(rule.getResource()), rule.getHost(), rule.getOperation()));
}
} | @Test
public void testFromCrd() {
AclRule rule = new AclRuleBuilder()
.withType(AclRuleType.ALLOW)
.withResource(ACL_RULE_TOPIC_RESOURCE)
.withHost("127.0.0.1")
.withOperation(AclOperation.READ)
.build();
List<SimpleAclRule> simpleAclRules = SimpleAclRule.fromCrd(rule);
assertThat(simpleAclRules.get(0).getOperation(), is(AclOperation.READ));
assertThat(simpleAclRules.get(0).getType(), is(AclRuleType.ALLOW));
assertThat(simpleAclRules.get(0).getHost(), is("127.0.0.1"));
assertThat(simpleAclRules.get(0).getResource(), is(RESOURCE));
} |
/**
 * Evaluates the configured transform functions against the record and stores the results.
 * A column is transformed when it has no value yet; nested values (arrays, collections,
 * maps) may additionally be overridden for backward compatibility, because such fields
 * were not covered by record transformation in earlier versions.
 *
 * @param record the row to transform in place
 * @return the same record instance, with transformed column values filled in
 */
@Override
public GenericRow transform(GenericRow record) {
for (Map.Entry<String, FunctionEvaluator> entry : _expressionEvaluators.entrySet()) {
String column = entry.getKey();
FunctionEvaluator transformFunctionEvaluator = entry.getValue();
Object existingValue = record.getValue(column);
if (existingValue == null) {
try {
// No value yet for this column: evaluate the transform function to produce one.
record.putValue(column, transformFunctionEvaluator.evaluate(record));
} catch (Exception e) {
if (!_continueOnError) {
throw new RuntimeException("Caught exception while evaluation transform function for column: " + column, e);
} else {
// Best-effort mode: log and mark the record incomplete instead of failing.
LOGGER.debug("Caught exception while evaluation transform function for column: {}", column, e);
record.putValue(GenericRow.INCOMPLETE_RECORD_KEY, true);
}
}
// BUG FIX: the original tested `existingValue instanceof Collections` — that is the
// java.util.Collections utility class, which no value can be an instance of, so the
// nested-field override branch could only trigger for arrays and maps. The intended
// check is against the java.util.Collection interface.
} else if (existingValue.getClass().isArray() || existingValue instanceof java.util.Collection
|| existingValue instanceof Map) {
try {
Object transformedValue = transformFunctionEvaluator.evaluate(record);
// For backward compatibility, nested fields (array / collection / map) are
// overridden since they were not included in record transformation before.
// NOTE(review): the override fires only when the transformed value's type is NOT
// compatible with the existing one — confirm this inverted check is intended.
if (!isTypeCompatible(existingValue, transformedValue)) {
record.putValue(column, transformedValue);
}
} catch (Exception e) {
// Override is best-effort: keep the existing nested value on failure.
LOGGER.debug("Caught exception while evaluation transform function for column: {}", column, e);
}
}
}
return record;
} | @Test
public void testTransformConfigsFromSchema() {
Schema pinotSchema = new Schema.SchemaBuilder().addSingleValueDimension("userId", FieldSpec.DataType.LONG)
.addSingleValueDimension("fullName", FieldSpec.DataType.STRING)
.addMultiValueDimension("bids", FieldSpec.DataType.INT)
.addSingleValueDimension("maxBid", FieldSpec.DataType.INT)
.addMultiValueDimension("map1__KEYS", FieldSpec.DataType.INT)
.addMultiValueDimension("map1__VALUES", FieldSpec.DataType.STRING).addMetric("cost", FieldSpec.DataType.DOUBLE)
.addDateTime("hoursSinceEpoch", FieldSpec.DataType.LONG, "1:HOURS:EPOCH", "1:HOURS").build();
// only specified in schema
pinotSchema.getFieldSpecFor("maxBid").setTransformFunction("Groovy({bids.max{ it.toBigDecimal() }}, bids)");
// also specified in table config, ignore the schema setting
pinotSchema.getFieldSpecFor("hoursSinceEpoch").setTransformFunction("Groovy({timestamp/(1000)}, timestamp)");
List<TransformConfig> transformConfigs = Arrays.asList(
new TransformConfig("userId", "Groovy({user_id}, user_id)"),
new TransformConfig("fullName", "Groovy({firstName+' '+lastName}, firstName, lastName)"),
new TransformConfig("hoursSinceEpoch", "Groovy({timestamp/(1000*60*60)}, timestamp)"));
IngestionConfig ingestionConfig = new IngestionConfig();
ingestionConfig.setTransformConfigs(transformConfigs);
TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName("testTransformFunctions")
.setIngestionConfig(ingestionConfig).build();
ExpressionTransformer expressionTransformer = new ExpressionTransformer(tableConfig, pinotSchema);
GenericRow genericRow = new GenericRow();
genericRow.putValue("user_id", 1L);
genericRow.putValue("firstName", "John");
genericRow.putValue("lastName", "Denver");
genericRow.putValue("bids", Arrays.asList(10, 20));
HashMap<String, String> map1 = new HashMap<>(); // keys in Map from avro are always in STRING
map1.put("30", "foo");
map1.put("200", "bar");
genericRow.putValue("map1", map1);
HashMap<String, Integer> map2 = new HashMap<>();
map2.put("k1", 10);
map2.put("k2", 20);
genericRow.putValue("map2", map2);
genericRow.putValue("cost", 1000.0);
genericRow.putValue("timestamp", 1574000000000L);
// expression transformer
expressionTransformer.transform(genericRow);
// extract userId
Assert.assertEquals(genericRow.getValue("userId"), 1L);
// concat fullName
Assert.assertEquals(genericRow.getValue("fullName"), "John Denver");
Assert.assertTrue(((List) genericRow.getValue("bids")).containsAll(Arrays.asList(10, 20)));
// find max bid from bids
Assert.assertEquals(genericRow.getValue("maxBid"), 20);
// Backward compatible way to support MAP - __KEYS indicates keys of map1
ArrayList map1Keys = (ArrayList) genericRow.getValue("map1__KEYS");
Assert.assertEquals(map1Keys.get(0), "200");
Assert.assertEquals(map1Keys.get(1), "30");
// Backward compatible way to support MAP - __VALUES indicates values of map1
ArrayList map1Values = (ArrayList) genericRow.getValue("map1__VALUES");
Assert.assertEquals(map1Values.get(0), "bar");
Assert.assertEquals(map1Values.get(1), "foo");
Assert.assertEquals(genericRow.getValue("cost"), 1000.0);
// calculate hoursSinceEpoch
Assert.assertEquals(genericRow.getValue("hoursSinceEpoch").toString(), "437222.2222222222");
} |
/**
 * Parses the given endpoint URI into its component parts.
 * Delegates to {@code doParseUri} with lenient mode disabled.
 * NOTE(review): per the accompanying test, the returned array appears to be
 * [scheme, path, query] — the exact contract lives in doParseUri, not visible here.
 *
 * @param uri the endpoint URI to parse
 * @return the URI components as a String array
 */
public static String[] parseUri(String uri) {
return doParseUri(uri, false);
} | @Test
public void testParseNoPathButSlash() {
String[] out1 = CamelURIParser.parseUri("file:/");
assertEquals("file", out1[0]);
assertEquals("/", out1[1]);
assertNull(out1[2]);
String[] out2 = CamelURIParser.parseUri("file:///");
assertEquals("file", out2[0]);
assertEquals("/", out2[1]);
assertNull(out2[2]);
} |
/**
 * Returns the stored length value.
 *
 * @return the length held by this instance
 */
public int getLength() {
return length;
} | @Test
public void testGetLength() {
assertEquals(TestParameters.length, dle.getLength());
} |
/**
 * Loads all keys from the mapped table by running the configured key query.
 * Returns an empty iterable when key loading is disabled via configuration.
 *
 * @return an iterable over the id-column values; its iterator closes the underlying
 *         SqlResult when iteration completes (per the MapLoader.loadAllKeys contract)
 */
@Override
public Iterable<K> loadAllKeys() {
// If loadAllKeys property is disabled, don't load anything
if (!genericMapStoreProperties.loadAllKeys) {
return Collections.emptyList();
}
// Block until the mapping/initialization has completed before querying.
awaitSuccessfulInit();
String sql = queries.loadAllKeys();
SqlResult keysResult = sqlService.execute(sql);
// The contract for loadAllKeys says that if iterator implements Closable
// then it will be closed when the iteration is over
// NOTE(review): if the caller never iterates/closes the returned iterator,
// keysResult is never closed — confirm callers always consume it.
return () -> new MappingClosingIterator<>(
keysResult.iterator(),
(SqlRow row) -> row.getObject(genericMapStoreProperties.idColumn),
keysResult::close
);
} | @Test
public void givenTrue_whenLoadAllKeys_thenReturnKeys() {
ObjectSpec spec = objectProvider.createObject(mapName, true);
objectProvider.insertItems(spec, 1);
Properties properties = new Properties();
properties.setProperty(DATA_CONNECTION_REF_PROPERTY, TEST_DATABASE_REF);
properties.setProperty(ID_COLUMN_PROPERTY, "person-id");
properties.setProperty(LOAD_ALL_KEYS_PROPERTY, "true");
mapLoader = createMapLoader(properties, hz);
List<Integer> ids = newArrayList(mapLoader.loadAllKeys());
assertThat(ids).contains(0);
} |
/**
 * Case-insensitive lookup: the key is lower-cased before delegating to the wrapped map.
 * NOTE(review): uses the default locale for toLowerCase — keys containing 'I' can
 * misbehave under e.g. the Turkish locale; consider toLowerCase(Locale.ROOT).
 * A null key throws NullPointerException.
 *
 * @param key the lookup key (case is ignored)
 * @return the mapped value, or null if absent
 */
public T get(String key) {
return delegate.get(key.toLowerCase());
} | @Test
public void testGet() throws Exception {
String someKey = "someKey";
Object someValue = mock(Object.class);
when(someMap.get(someKey.toLowerCase())).thenReturn(someValue);
assertEquals(someValue, caseInsensitiveMapWrapper.get(someKey));
verify(someMap, times(1)).get(someKey.toLowerCase());
} |
/**
 * Chat-message handler that keeps tracked item charges in sync.
 * Matches game/spam messages against the known check/use/break message patterns for
 * charged items (dodgy necklace, binding necklace, ring of forging, amulets, chronicle,
 * slayer bracelets, blood essence, bracelet of clay) and updates the stored charge
 * counts, firing user notifications on breakage where configured.
 *
 * @param event the chat message event delivered by the event bus
 */
@Subscribe
public void onChatMessage(ChatMessage event)
{
if (event.getType() == ChatMessageType.GAMEMESSAGE || event.getType() == ChatMessageType.SPAM)
{
String message = Text.removeTags(event.getMessage());
// One matcher per known item message pattern; at most one branch below fires.
Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message);
Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message);
Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message);
Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message);
Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message);
Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message);
Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message);
Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message);
Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message);
Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message);
Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message);
Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message);
Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message);
Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message);
Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message);
Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message);
Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message);
Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message);
Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message);
Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message);
if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE))
{
notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered");
}
else if (dodgyBreakMatcher.find())
{
// On break, reset the stored count to a fresh necklace's charge count.
notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust.");
updateDodgyNecklaceCharges(MAX_DODGY_CHARGES);
}
else if (dodgyCheckMatcher.find())
{
updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1)));
}
else if (dodgyProtectMatcher.find())
{
updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1)));
}
else if (amuletOfChemistryCheckMatcher.find())
{
updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1)));
}
else if (amuletOfChemistryUsedMatcher.find())
{
// The game spells out "one" rather than "1" for the last charge.
final String match = amuletOfChemistryUsedMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateAmuletOfChemistryCharges(charges);
}
else if (amuletOfChemistryBreakMatcher.find())
{
notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust.");
updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES);
}
else if (amuletOfBountyCheckMatcher.find())
{
updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1)));
}
else if (amuletOfBountyUsedMatcher.find())
{
updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1)));
}
else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT))
{
updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES);
}
else if (message.contains(BINDING_BREAK_TEXT))
{
notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT);
// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1);
}
else if (bindingNecklaceUsedMatcher.find())
{
// NOTE(review): unlike the ring-of-forging and bracelet-of-clay branches below,
// `equipment` is not null-checked before contains() — confirm it cannot be null here.
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
if (equipment.contains(ItemID.BINDING_NECKLACE))
{
updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1);
}
}
else if (bindingNecklaceCheckMatcher.find())
{
final String match = bindingNecklaceCheckMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateBindingNecklaceCharges(charges);
}
else if (ringOfForgingCheckMatcher.find())
{
final String match = ringOfForgingCheckMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateRingOfForgingCharges(charges);
}
else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY))
{
final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
// Determine if the player smelted with a Ring of Forging equipped.
if (equipment == null)
{
return;
}
// NOTE(review): `inventory` may be null here while `equipment` is checked —
// inventory.count() would NPE on the Varrock-platebody path; confirm.
if (equipment.contains(ItemID.RING_OF_FORGING) && (message.equals(RING_OF_FORGING_USED_TEXT) || inventory.count(ItemID.IRON_ORE) > 1))
{
int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES);
updateRingOfForgingCharges(charges);
}
}
else if (message.equals(RING_OF_FORGING_BREAK_TEXT))
{
notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted.");
// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1);
}
else if (chronicleAddMatcher.find())
{
final String match = chronicleAddMatcher.group(1);
if (match.equals("one"))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
}
else
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match));
}
}
else if (chronicleUseAndCheckMatcher.find())
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1)));
}
else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
}
else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0);
}
else if (message.equals(CHRONICLE_FULL_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000);
}
else if (slaughterActivateMatcher.find())
{
// group(1) is absent when the bracelet broke rather than reporting a count.
final String found = slaughterActivateMatcher.group(1);
if (found == null)
{
updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES);
notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT);
}
else
{
updateBraceletOfSlaughterCharges(Integer.parseInt(found));
}
}
else if (slaughterCheckMatcher.find())
{
updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1)));
}
else if (expeditiousActivateMatcher.find())
{
final String found = expeditiousActivateMatcher.group(1);
if (found == null)
{
updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES);
notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT);
}
else
{
updateExpeditiousBraceletCharges(Integer.parseInt(found));
}
}
else if (expeditiousCheckMatcher.find())
{
updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1)));
}
else if (bloodEssenceCheckMatcher.find())
{
updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1)));
}
else if (bloodEssenceExtractMatcher.find())
{
updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1)));
}
else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT))
{
updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES);
}
else if (braceletOfClayCheckMatcher.find())
{
updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1)));
}
else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN))
{
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
// Determine if the player mined with a Bracelet of Clay equipped.
if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY))
{
final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
// Charge is not used if only 1 inventory slot is available when mining in Prifddinas
boolean ignore = inventory != null
&& inventory.count() == 27
&& message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN);
if (!ignore)
{
int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES);
updateBraceletOfClayCharges(charges);
}
}
}
else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT))
{
notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust");
updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES);
}
}
} | @Test
public void testChronicleCheckFull()
{
ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", CHRONICLE_CHECK_CHARGES_FULL, "", 0);
itemChargePlugin.onChatMessage(chatMessage);
verify(configManager).setRSProfileConfiguration(ItemChargeConfig.GROUP, ItemChargeConfig.KEY_CHRONICLE, 1000);
} |
/**
 * Fetches the streaming computation config for the given computation id from the
 * Dataflow service, retrying on transient failures.
 *
 * @param computationId the computation to fetch config for; must be non-empty
 * @return the computation config, or empty when the service returns no work item
 * @throws IllegalArgumentException if {@code computationId} is empty
 */
@Override
public Optional<ComputationConfig> fetchConfig(String computationId) {
Preconditions.checkArgument(
!computationId.isEmpty(),
"computationId is empty. Cannot fetch computation config without a computationId.");
return fetchConfigWithRetry(
() -> dataflowServiceClient.getStreamingConfigWorkItem(computationId))
.flatMap(StreamingEngineComputationConfigFetcher::createComputationConfig);
} | @Test
public void testGetComputationConfig_noComputationPresent() throws IOException {
Set<StreamingEnginePipelineConfig> receivedPipelineConfig = new HashSet<>();
streamingEngineConfigFetcher =
createConfigFetcher(/* waitForInitialConfig= */ false, 0, receivedPipelineConfig::add);
when(mockDataflowServiceClient.getStreamingConfigWorkItem(anyString()))
.thenReturn(Optional.empty());
Optional<ComputationConfig> pipelineConfig =
streamingEngineConfigFetcher.fetchConfig("someComputationId");
assertFalse(pipelineConfig.isPresent());
assertThat(receivedPipelineConfig).isEmpty();
} |
/**
 * Checks whether a JSON document conforms to the given schema.
 *
 * @param schemaText the schema to validate against
 * @param jsonText the JSON document to validate
 * @return true when validateJson reports no errors, false otherwise
 * @throws IOException if either input cannot be parsed by validateJson
 */
public static boolean isJsonValid(String schemaText, String jsonText) throws IOException {
List<String> errors = validateJson(schemaText, jsonText);
if (!errors.isEmpty()) {
// Individual errors are discarded here; only the boolean outcome is surfaced.
log.debug("Get validation errors, returning false");
return false;
}
return true;
} | @Test
void testValidateJsonFailureFromYaml() {
boolean valid = false;
String schemaText = null;
String jsonText = "{\"name\": \"Laurent Broudoux\", \"email\": \"laurent@microcks.io\", \"age\": 41}";
try {
// Load schema from file.
schemaText = FileUtils.readFileToString(
new File("target/test-classes/io/github/microcks/util/asyncapi/user-signedup-schema.yaml"));
// Validate Json according schema.
valid = AsyncAPISchemaValidator.isJsonValid(schemaText, jsonText);
} catch (Exception e) {
fail("Exception should not be thrown");
}
// Assert Json object is not valid.
assertFalse(valid);
} |
/**
 * Executes the given command line and returns its output as a String,
 * decoded with the system default charset.
 *
 * @param cmds the command and its arguments
 * @return the command's output
 * @throws IORuntimeException when execution or reading the output fails
 */
public static String execForStr(String... cmds) throws IORuntimeException {
return execForStr(CharsetUtil.systemCharset(), cmds);
} | @Test
@Disabled
public void execCmdTest() {
String str = RuntimeUtil.execForStr("cmd /c dir");
Console.log(str);
} |
/**
 * Builds a PackageRevision from the plugin's latest-revision JSON response.
 *
 * @param responseBody the JSON response returned by the package plugin
 * @return the parsed package revision
 * @throws RuntimeException when parsing yields null
 *         NOTE(review): the "Empty response body" message is raised whenever
 *         toPackageRevision returns null, not only for literally empty bodies — confirm.
 */
@Override
public PackageRevision responseMessageForLatestRevision(String responseBody) {
PackageRevision packageRevision = toPackageRevision(responseBody);
if (packageRevision == null) {
throw new RuntimeException("Empty response body");
} else return packageRevision;
} | @Test
public void shouldBuildPackageRevisionFromLatestRevisionResponse() throws Exception {
String responseBody = "{\"revision\":\"abc.rpm\",\"timestamp\":\"2011-07-14T19:43:37.100Z\",\"user\":\"some-user\",\"revisionComment\":\"comment\"," +
"\"trackbackUrl\":\"http:\\\\localhost:9999\",\"data\":{\"dataKeyOne\":\"data-value-one\",\"dataKeyTwo\":\"data-value-two\"}}";
PackageRevision packageRevision = messageHandler.responseMessageForLatestRevision(responseBody);
assertPackageRevision(packageRevision, "abc.rpm", "some-user", "2011-07-14T19:43:37.100Z", "comment", "http:\\localhost:9999");
} |
/**
 * Extracts the first IP address embedded in an arbitrary string (e.g. a URL or JDBC URL).
 * Bracketed IPv6 literals ("[...]") take precedence; otherwise the first IPv4-looking
 * substring matched by IPV4_PATTERN is returned. Candidates that fail validation
 * yield an empty string.
 *
 * @param str the string to scan; blank or null input yields ""
 * @return the bracketed IPv6 literal, the first valid IPv4 address, or "" if none found
 */
public static String getIPFromString(String str) {
if (StringUtils.isBlank(str)) {
return "";
}
String result = "";
if (StringUtils.containsIgnoreCase(str, IPV6_START_MARK) && StringUtils.containsIgnoreCase(str,
IPV6_END_MARK)) {
// Take the span from the FIRST '[' to the FIRST ']' and validate it as IPv6.
result = str.substring(str.indexOf(IPV6_START_MARK), (str.indexOf(IPV6_END_MARK) + 1));
if (!isIPv6(result)) {
result = "";
}
} else {
Matcher m = IPV4_PATTERN.matcher(str);
if (m.find()) {
result = m.group();
}
}
return result;
} | @Test
void testGetIPFromString() {
assertEquals("[::1]", InternetAddressUtil.getIPFromString("http://[::1]:666/xzdsfasdf/awerwef" + "?eewer=2&xxx=3"));
assertEquals("[::1]", InternetAddressUtil.getIPFromString(
"jdbc:mysql://[::1]:3306/nacos_config_test?characterEncoding=utf8&connectTimeout=1000"
+ "&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC"));
assertEquals("127.0.0.1",
InternetAddressUtil.getIPFromString("http://127.0.0.1:666/xzdsfasdf/awerwef" + "?eewer=2&xxx=3"));
assertEquals("127.0.0.1", InternetAddressUtil.getIPFromString(
"jdbc:mysql://127.0.0.1:3306/nacos_config_test?characterEncoding=utf8&connectTimeout=1000"
+ "&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC"));
assertEquals("", InternetAddressUtil.getIPFromString("http://[::1:666"));
assertEquals("", InternetAddressUtil.getIPFromString("http://[dddd]:666/xzdsfasdf/awerwef" + "?eewer=2&xxx=3"));
assertEquals("", InternetAddressUtil.getIPFromString(
"jdbc:mysql://[127.0.0.1]:3306/nacos_config_test?characterEncoding=utf8&connectTimeout=1000"
+ "&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC"));
assertEquals("", InternetAddressUtil.getIPFromString(
"jdbc:mysql://666.288.333.444:3306/nacos_config_test?characterEncoding=utf8&connectTimeout=1000"
+ "&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC"));
assertEquals("", InternetAddressUtil.getIPFromString(
"jdbc:mysql://292.168.1.1:3306/nacos_config_test?characterEncoding=utf8&connectTimeout=1000"
+ "&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC"));
assertEquals("", InternetAddressUtil.getIPFromString(
"jdbc:mysql://29.168.1.288:3306/nacos_config_test?characterEncoding=utf8&connectTimeout=1000"
+ "&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC"));
assertEquals("", InternetAddressUtil.getIPFromString(
"jdbc:mysql://29.168.288.28:3306/nacos_config_test?characterEncoding=utf8&connectTimeout=1000"
+ "&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC"));
assertEquals("", InternetAddressUtil.getIPFromString(
"jdbc:mysql://29.288.28.28:3306/nacos_config_test?characterEncoding=utf8&connectTimeout=1000"
+ "&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC"));
assertEquals("", InternetAddressUtil.getIPFromString(""));
assertEquals("", InternetAddressUtil.getIPFromString(null));
} |
/**
 * Validates each container-update request in the AllocateRequest and partitions the
 * valid ones into increase / decrease / promotion / demotion buckets. Invalid requests
 * are reported via {@code updateErrors} instead of being included.
 *
 * @param rmContext the RM context used to resolve containers via the scheduler
 * @param request the allocate request carrying the update requests
 * @param maximumAllocation upper bound for resource increase/decrease requests
 * @param updateErrors out-parameter collecting an error per rejected request
 * @return the partitioned, validated container updates
 */
public static ContainerUpdates
validateAndSplitUpdateResourceRequests(RMContext rmContext,
AllocateRequest request, Resource maximumAllocation,
List<UpdateContainerError> updateErrors) {
ContainerUpdates updateRequests =
new ContainerUpdates();
// Tracks containers already matched to an update in this request, so a container
// gets at most one outstanding update.
Set<ContainerId> outstandingUpdate = new HashSet<>();
for (UpdateContainerRequest updateReq : request.getUpdateRequests()) {
RMContainer rmContainer = rmContext.getScheduler().getRMContainer(
updateReq.getContainerId());
// msg stays null when the container id / version / duplicate checks all pass.
String msg = validateContainerIdAndVersion(outstandingUpdate,
updateReq, rmContainer);
ContainerUpdateType updateType = updateReq.getContainerUpdateType();
if (msg == null) {
if ((updateType != ContainerUpdateType.PROMOTE_EXECUTION_TYPE) &&
(updateType !=ContainerUpdateType.DEMOTE_EXECUTION_TYPE) {
if (validateIncreaseDecreaseRequest(
rmContext, updateReq, maximumAllocation)) {
if (ContainerUpdateType.INCREASE_RESOURCE == updateType) {
updateRequests.getIncreaseRequests().add(updateReq);
} else {
updateRequests.getDecreaseRequests().add(updateReq);
}
outstandingUpdate.add(updateReq.getContainerId());
} else {
msg = RESOURCE_OUTSIDE_ALLOWED_RANGE;
}
} else {
// Execution-type change: only opposite-direction transitions are recorded;
// a no-op target == original is silently ignored.
// NOTE(review): relies on validateContainerIdAndVersion returning non-null msg
// whenever rmContainer is null — otherwise this dereference would NPE; confirm.
ExecutionType original = rmContainer.getExecutionType();
ExecutionType target = updateReq.getExecutionType();
if (target != original) {
if (target == ExecutionType.GUARANTEED &&
original == ExecutionType.OPPORTUNISTIC) {
updateRequests.getPromotionRequests().add(updateReq);
outstandingUpdate.add(updateReq.getContainerId());
} else if (target == ExecutionType.OPPORTUNISTIC &&
original == ExecutionType.GUARANTEED) {
updateRequests.getDemotionRequests().add(updateReq);
outstandingUpdate.add(updateReq.getContainerId());
}
}
}
}
checkAndcreateUpdateError(updateErrors, updateReq, rmContainer, msg);
}
return updateRequests;
} | @Test
public void testValidateAndSplitUpdateResourceRequests() {
List<UpdateContainerRequest> updateRequests = new ArrayList<>();
int containerVersion = 10;
int resource = 10;
Resource maxAllocation = Resource.newInstance(resource, resource);
UpdateContainerRequestPBImpl updateContainerRequestPBFail =
new UpdateContainerRequestPBImpl();
updateContainerRequestPBFail.setContainerVersion(containerVersion);
updateContainerRequestPBFail
.setCapability(Resource.newInstance(resource + 1, resource + 1));
updateContainerRequestPBFail
.setContainerId(Mockito.mock(ContainerId.class));
ContainerId containerIdOk = Mockito.mock(ContainerId.class);
Resource capabilityOk = Resource.newInstance(resource - 1, resource - 1);
UpdateContainerRequestPBImpl updateContainerRequestPBOk =
new UpdateContainerRequestPBImpl();
updateContainerRequestPBOk.setContainerVersion(containerVersion);
updateContainerRequestPBOk.setCapability(capabilityOk);
updateContainerRequestPBOk.setContainerUpdateType(INCREASE_RESOURCE);
updateContainerRequestPBOk.setContainerId(containerIdOk);
updateRequests.add(updateContainerRequestPBOk);
updateRequests.add(updateContainerRequestPBFail);
Dispatcher dispatcher = Mockito.mock(Dispatcher.class);
RMContext rmContext = Mockito.mock(RMContext.class);
ResourceScheduler scheduler = Mockito.mock(ResourceScheduler.class);
Mockito.when(rmContext.getScheduler()).thenReturn(scheduler);
Mockito.when(rmContext.getDispatcher()).thenReturn(dispatcher);
RMContainer rmContainer = Mockito.mock(RMContainer.class);
Mockito.when(scheduler.getRMContainer(Mockito.any()))
.thenReturn(rmContainer);
Container container = Mockito.mock(Container.class);
Mockito.when(container.getVersion()).thenReturn(containerVersion);
Mockito.when(rmContainer.getContainer()).thenReturn(container);
Mockito.when(scheduler.getNormalizedResource(capabilityOk, maxAllocation))
.thenReturn(capabilityOk);
AllocateRequest allocateRequest =
AllocateRequest.newInstance(1, 0.5f, new ArrayList<ResourceRequest>(),
new ArrayList<ContainerId>(), updateRequests, null);
List<UpdateContainerError> updateErrors = new ArrayList<>();
ContainerUpdates containerUpdates =
RMServerUtils.validateAndSplitUpdateResourceRequests(rmContext,
allocateRequest, maxAllocation, updateErrors);
Assert.assertEquals(1, updateErrors.size());
Assert.assertEquals(resource + 1, updateErrors.get(0)
.getUpdateContainerRequest().getCapability().getMemorySize());
Assert.assertEquals(resource + 1, updateErrors.get(0)
.getUpdateContainerRequest().getCapability().getVirtualCores());
Assert.assertEquals(RESOURCE_OUTSIDE_ALLOWED_RANGE,
updateErrors.get(0).getReason());
Assert.assertEquals(1, containerUpdates.getIncreaseRequests().size());
UpdateContainerRequest increaseRequest =
containerUpdates.getIncreaseRequests().get(0);
Assert.assertEquals(capabilityOk.getVirtualCores(),
increaseRequest.getCapability().getVirtualCores());
Assert.assertEquals(capabilityOk.getMemorySize(),
increaseRequest.getCapability().getMemorySize());
Assert.assertEquals(containerIdOk, increaseRequest.getContainerId());
} |
/**
 * Creates a RowMutationInformation from a numeric sequence number.
 * The sequence number is encoded as its lower-case hexadecimal string form.
 *
 * @param mutationType the type of row mutation
 * @param sequenceNumber a non-negative ordering value for the mutation
 * @return the constructed RowMutationInformation
 * @throws IllegalArgumentException if {@code sequenceNumber} is negative
 * @deprecated use the String changeSequenceNumber overload instead
 */
@Deprecated
public static RowMutationInformation of(MutationType mutationType, long sequenceNumber) {
checkArgument(sequenceNumber >= 0, "sequenceNumber must be non-negative");
return new AutoValue_RowMutationInformation(
mutationType, null, Long.toHexString(sequenceNumber));
} | @Test
public void givenEmptySegment_throws() {
IllegalArgumentException error =
assertThrows(
IllegalArgumentException.class,
() -> RowMutationInformation.of(RowMutationInformation.MutationType.UPSERT, "0/1//3"));
assertEquals(
"changeSequenceNumber: 0/1//3 does not match expected pattern: ^([0-9A-Fa-f]{1,16})(/([0-9A-Fa-f]{1,16})){0,3}$",
error.getMessage());
} |
@Override
public void createNamespace(Namespace namespace, Map<String, String> meta) {
Preconditions.checkArgument(
!namespace.isEmpty(), "Cannot create namespace with invalid name: %s", namespace);
Preconditions.checkArgument(
isValidateNamespace(namespace),
"Cannot support multi part namespace in Hive Metastore: %s",
namespace);
Preconditions.checkArgument(
meta.get(HMS_DB_OWNER_TYPE) == null || meta.get(HMS_DB_OWNER) != null,
"Create namespace setting %s without setting %s is not allowed",
HMS_DB_OWNER_TYPE,
HMS_DB_OWNER);
try {
clients.run(
client -> {
client.createDatabase(convertToDatabase(namespace, meta));
return null;
});
LOG.info("Created namespace: {}", namespace);
} catch (AlreadyExistsException e) {
throw new org.apache.iceberg.exceptions.AlreadyExistsException(
e, "Namespace already exists: %s", namespace);
} catch (TException e) {
throw new RuntimeException(
"Failed to create namespace " + namespace + " in Hive Metastore", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(
"Interrupted in call to createDatabase(name) " + namespace + " in Hive Metastore", e);
}
} | @Test
public void testNamespaceExists() throws TException {
Namespace namespace = Namespace.of("dbname_exists");
catalog.createNamespace(namespace, META);
assertThat(catalog.namespaceExists(namespace)).as("Should true to namespace exist").isTrue();
assertThat(catalog.namespaceExists(Namespace.of("db2", "db2", "ns2")))
.as("Should false to namespace doesn't exist")
.isFalse();
} |
@Override
protected JsonObject convert(final JsonObject data) {
return data.getAsJsonObject(ConfigGroupEnum.APP_AUTH.name());
} | @Test
public void testConvert() {
JsonObject jsonObject = new JsonObject();
JsonObject expectJsonObject = new JsonObject();
jsonObject.add(ConfigGroupEnum.APP_AUTH.name(), expectJsonObject);
assertThat(mockAppAuthDataRefresh.convert(jsonObject), is(expectJsonObject));
} |
public boolean isSameProtocolAndStorageTypes() {
return storageTypes.values().stream().allMatch(protocolType::equals);
} | @Test
void assertIsSameProtocolAndStorageTypes() {
GenericSchemaBuilderMaterial material = new GenericSchemaBuilderMaterial(TypedSPILoader.getService(DatabaseType.class, "FIXTURE"),
Collections.singletonMap("foo", TypedSPILoader.getService(DatabaseType.class, "FIXTURE")),
Collections.emptyMap(), Collections.emptyList(), new ConfigurationProperties(new Properties()), "");
assertTrue(material.isSameProtocolAndStorageTypes());
} |
@Override
public Set<Endpoints> endpointses() {
return ImmutableSet.copyOf(k8sEndpointsStore.endpointses());
} | @Test
public void testGetEndpoints() {
createBasicEndpoints();
assertEquals("Number of endpoints did not match", 1, target.endpointses().size());
} |
OutputT apply(InputT input) throws UserCodeExecutionException {
Optional<UserCodeExecutionException> latestError = Optional.empty();
long waitFor = 0L;
while (waitFor != BackOff.STOP) {
try {
sleepIfNeeded(waitFor);
incIfPresent(getCallCounter());
return getThrowableFunction().apply(input);
} catch (UserCodeExecutionException e) {
if (!e.shouldRepeat()) {
throw e;
}
latestError = Optional.of(e);
} catch (InterruptedException ignored) {
}
try {
incIfPresent(getBackoffCounter());
waitFor = getBackOff().nextBackOffMillis();
} catch (IOException e) {
throw new UserCodeExecutionException(e);
}
}
throw latestError.orElse(
new UserCodeExecutionException("failed to process for input: " + input));
} | @Test
public void givenRepeatableErrorBelowLimit_emitsIntoOutputPCollection() {
PCollectionTuple pct =
pipeline
.apply(Create.of(1))
.apply(
ParDo.of(
new DoFnWithRepeaters(
new CallerImpl(LIMIT - 1, UserCodeQuotaException.class),
new SetupTeardownImpl(0)))
.withOutputTags(OUTPUT_TAG, TupleTagList.of(FAILURE_TAG)));
PAssert.that(pct.get(OUTPUT_TAG)).containsInAnyOrder(2);
PAssert.that(pct.get(FAILURE_TAG)).empty();
pipeline.run();
} |
public List<String> generate(String tableName, String columnName, boolean isAutoGenerated) throws SQLException {
return generate(tableName, singleton(columnName), isAutoGenerated);
} | @Test
public void generate_unknown_dialect() throws SQLException {
Dialect mockDialect = mock(Dialect.class);
when(mockDialect.getId()).thenReturn("unknown-db-vendor");
when(db.getDialect()).thenReturn(mockDialect);
when(dbConstraintFinder.findConstraintName(TABLE_NAME)).thenReturn(Optional.of(CONSTRAINT));
assertThatThrownBy(() -> underTest.generate(TABLE_NAME, PK_COLUMN, true))
.isInstanceOf(IllegalStateException.class);
} |
public static DwrfProto.ColumnEncoding toColumnEncoding(int nodeId, ColumnEncoding columnEncoding)
{
checkArgument(
!columnEncoding.getAdditionalSequenceEncodings().isPresent(),
"Non-zero sequence IDs for column encoding %s",
columnEncoding);
return DwrfProto.ColumnEncoding.newBuilder()
.setKind(toColumnEncodingKind(columnEncoding.getColumnEncodingKind()))
.setDictionarySize(columnEncoding.getDictionarySize())
.setColumn(nodeId)
.setSequence(0)
.build();
} | @Test
public void testToColumnEncodingDirect()
{
int expectedDictionarySize = 0;
ColumnEncoding columnEncoding = new ColumnEncoding(DIRECT, expectedDictionarySize);
DwrfProto.ColumnEncoding actual = toColumnEncoding(COLUMN_ID, columnEncoding);
assertEquals(actual.getColumn(), COLUMN_ID);
assertEquals(actual.getKind(), DwrfProto.ColumnEncoding.Kind.DIRECT);
assertEquals(actual.getDictionarySize(), expectedDictionarySize);
assertEquals(actual.getSequence(), 0);
} |
public String convert(ILoggingEvent event) {
String formattedMessage = event.getFormattedMessage();
if (formattedMessage != null) {
String result = CR_PATTERN.matcher(formattedMessage).replaceAll("\\\\r");
result = LF_PATTERN.matcher(result).replaceAll("\\\\n");
return result;
}
return null;
} | @Test
public void convert_message_with_LF() {
ILoggingEvent event = createILoggingEvent("simple\n message\n with\n LF");
assertThat(underTest.convert(event)).isEqualTo("simple\\n message\\n with\\n LF");
} |
public static <InputT> Builder<InputT> withoutHold(AppliedPTransform<?, ?, ?> transform) {
return new Builder(transform, BoundedWindow.TIMESTAMP_MAX_VALUE);
} | @Test
public void noBundlesNoAdditionalOutputProducedOutputsFalse() {
TransformResult<Integer> result = StepTransformResult.<Integer>withoutHold(transform).build();
assertThat(result.getOutputTypes(), emptyIterable());
} |
static boolean parseScheme(final StringReader reader, final Value<String> scheme) {
final StringBuilder stringBuilder = new StringBuilder();
int tracker = reader.position;
while(!reader.endOfString()) {
final char c = (char) reader.read();
if(Character.isAlphabetic(c)
|| Character.isDigit(c)
|| URI_SCHEME.indexOf(c) != -1) {
if(c == '.') {
// THIS IS VIOLATION OF RFC.
// There can be '.' in URIs.
// This works against "s3.amazonaws.com:443".
reader.skip(tracker - reader.position);
return false;
}
stringBuilder.append(c);
}
else if(c == ':') {
tracker = reader.position;
break;
}
else {
if(c == ' ' && stringBuilder.length() == 0) {
continue;
}
// Invalid character inside scheme.
reader.skip(tracker - reader.position);
return false;
}
}
reader.skip(tracker - reader.position);
scheme.setValue(stringBuilder.toString());
return true; // valid. Break to return stringbuilder
} | @Test
public void testParseScheme() {
final HostParser.Value<String> value = new HostParser.Value<>();
final String test = "https:";
final HostParser.StringReader reader = new HostParser.StringReader(test);
assertTrue(HostParser.parseScheme(reader, value));
assertEquals("https", value.getValue());
} |
Snapshot(int id, String name, INodeDirectory dir) {
this(id, dir, dir);
this.root.setLocalName(DFSUtil.string2Bytes(name));
} | @Test
public void testSnapshot() throws Throwable {
try {
runTestSnapshot(SNAPSHOT_ITERATION_NUMBER);
} catch(Throwable t) {
SnapshotTestHelper.LOG.info("FAILED", t);
SnapshotTestHelper.dumpTree("FAILED", cluster);
throw t;
}
} |
@Override
public void addProducer(ConnectionContext context, ProducerInfo info) throws Exception {
// JMS allows producers to be created without first specifying a destination. In these cases, every send
// operation must specify a destination. Because of this, we only authorize 'addProducer' if a destination is
// specified. If not specified, the authz check in the 'send' method below will ensure authorization.
if (info.getDestination() != null) {
DestinationAction action = new DestinationAction(context, info.getDestination(), "write");
assertAuthorized(action, "write to");
}
super.addProducer(context, info);
} | @Test(expected=UnauthorizedException.class)
public void testAddProducerNotAuthorized() throws Exception {
String name = "myTopic";
ActiveMQDestination dest = new ActiveMQTopic(name);
Subject subject = new PermsSubject();
ConnectionContext context = createContext(subject);
ProducerInfo info = new ProducerInfo(null);
info.setDestination(dest);
filter.addProducer(context, info);
} |
public double[][] test(DataFrame data) {
DataFrame x = formula.x(data);
int n = x.nrow();
int ntrees = trees.length;
double[][] prediction = new double[ntrees][n];
for (int j = 0; j < n; j++) {
Tuple xj = x.get(j);
double base = b;
for (int i = 0; i < ntrees; i++) {
base += shrinkage * trees[i].predict(xj);
prediction[i][j] = base;
}
}
return prediction;
} | @Test
public void testCpuQuantile() {
test(Loss.quantile(0.5), "CPU", CPU.formula, CPU.data, 66.0549);
} |
public static FileDeletionTask convertProtoToFileDeletionTask(
DeletionServiceDeleteTaskProto proto, DeletionService deletionService,
int taskId) {
String user = proto.hasUser() ? proto.getUser() : null;
Path subdir = null;
if (proto.hasSubdir()) {
subdir = new Path(proto.getSubdir());
}
List<Path> basePaths = null;
List<String> basedirs = proto.getBasedirsList();
if (basedirs != null && basedirs.size() > 0) {
basePaths = new ArrayList<>(basedirs.size());
for (String basedir : basedirs) {
basePaths.add(new Path(basedir));
}
}
return new FileDeletionTask(taskId, deletionService, user, subdir,
basePaths);
} | @Test
public void testConvertProtoToFileDeletionTask() throws Exception {
DeletionService deletionService = mock(DeletionService.class);
int id = 0;
String user = "user";
Path subdir = new Path("subdir");
Path basedir = new Path("basedir");
DeletionServiceDeleteTaskProto.Builder protoBuilder =
DeletionServiceDeleteTaskProto.newBuilder();
protoBuilder
.setId(id)
.setUser("user")
.setSubdir(subdir.getName())
.addBasedirs(basedir.getName());
DeletionServiceDeleteTaskProto proto = protoBuilder.build();
DeletionTask deletionTask =
NMProtoUtils.convertProtoToFileDeletionTask(proto, deletionService, id);
assertEquals(DeletionTaskType.FILE.name(),
deletionTask.getDeletionTaskType().name());
assertEquals(id, deletionTask.getTaskId());
assertEquals(subdir, ((FileDeletionTask) deletionTask).getSubDir());
assertEquals(basedir,
((FileDeletionTask) deletionTask).getBaseDirs().get(0));
} |
public static String getHttpMethod(Exchange exchange, Endpoint endpoint) {
// 1. Use method provided in header.
Object method = exchange.getIn().getHeader(Exchange.HTTP_METHOD);
if (method instanceof String) {
return (String) method;
} else if (method instanceof Enum) {
return ((Enum<?>) method).name();
} else if (method != null) {
return exchange.getContext().getTypeConverter().tryConvertTo(String.class, exchange, method);
}
// 2. GET if query string is provided in header.
if (exchange.getIn().getHeader(Exchange.HTTP_QUERY) != null) {
return GET_METHOD;
}
// 3. GET if endpoint is configured with a query string.
if (endpoint.getEndpointUri().indexOf('?') != -1) {
return GET_METHOD;
}
// 4. POST if there is data to send (body is not null).
if (exchange.getIn().getBody() != null) {
return POST_METHOD;
}
// 5. GET otherwise.
return GET_METHOD;
} | @Test
public void testGetMethodBodyNotNull() {
Endpoint endpoint = Mockito.mock(Endpoint.class);
Exchange exchange = Mockito.mock(Exchange.class);
Message message = Mockito.mock(Message.class);
Mockito.when(endpoint.getEndpointUri()).thenReturn(TEST_URI);
Mockito.when(exchange.getIn()).thenReturn(message);
Mockito.when(message.getHeader(Exchange.HTTP_URI, String.class)).thenReturn(TEST_URI);
Mockito.when(message.getBody()).thenReturn("Message Body");
assertEquals(AbstractHttpSpanDecorator.POST_METHOD,
AbstractHttpSpanDecorator.getHttpMethod(exchange, endpoint));
} |
public ConcurrentLongHashMap<CompletableFuture<Producer>> getProducers() {
return producers;
} | @Test(timeOut = 30000)
public void testProducerCommandWithAuthorizationPositive() throws Exception {
AuthorizationService authorizationService = mock(AuthorizationService.class);
doReturn(CompletableFuture.completedFuture(true)).when(authorizationService)
.allowTopicOperationAsync(Mockito.any(),
Mockito.any(), Mockito.any(), Mockito.any());
doReturn(authorizationService).when(brokerService).getAuthorizationService();
svcConfig.setAuthenticationEnabled(true);
resetChannel();
setChannelConnected();
// test PRODUCER success case
ByteBuf clientCommand = Commands.newProducer(successTopicName, 1 /* producer id */, 1 /* request id */,
"prod-name", Collections.emptyMap(), false);
channel.writeInbound(clientCommand);
assertEquals(getResponse().getClass(), CommandProducerSuccess.class);
PersistentTopic topicRef = (PersistentTopic) brokerService.getTopicReference(successTopicName).get();
assertNotNull(topicRef);
assertEquals(topicRef.getProducers().size(), 1);
channel.finish();
assertEquals(topicRef.getProducers().size(), 0);
} |
@Override
public Path copy(final Path source, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
final ThreadPool pool = ThreadPoolFactory.get("largeupload", concurrency);
try {
final Map<String, String> fileinfo = new HashMap<>(status.getMetadata());
if(null != status.getModified()) {
fileinfo.put(X_BZ_INFO_SRC_LAST_MODIFIED_MILLIS, String.valueOf(status.getModified()));
}
if(null != status.getCreated()) {
fileinfo.put(X_BZ_INFO_SRC_CREATION_DATE_MILLIS, String.valueOf(status.getCreated()));
}
final Checksum checksum = status.getChecksum();
if(Checksum.NONE != checksum) {
switch(checksum.algorithm) {
case sha1:
fileinfo.put(X_BZ_INFO_LARGE_FILE_SHA1, status.getChecksum().hash);
break;
}
}
final B2StartLargeFileResponse response = session.getClient().startLargeFileUpload(fileid.getVersionId(containerService.getContainer(target)),
containerService.getKey(target), status.getMime(), fileinfo);
final long size = status.getLength();
// Submit file segments for concurrent upload
final List<Future<B2UploadPartResponse>> parts = new ArrayList<Future<B2UploadPartResponse>>();
long remaining = status.getLength();
long offset = 0;
final List<B2UploadPartResponse> completed = new ArrayList<B2UploadPartResponse>();
for(int partNumber = 1; remaining > 0; partNumber++) {
final Long length = Math.min(Math.max((size / B2LargeUploadService.MAXIMUM_UPLOAD_PARTS), partSize), remaining);
// Submit to queue
parts.add(this.submit(pool, source, response.getFileId(), status, partNumber, offset, length, callback));
if(log.isDebugEnabled()) {
log.debug(String.format("Part %s submitted with size %d and offset %d", partNumber, length, offset));
}
remaining -= length;
offset += length;
}
for(Future<B2UploadPartResponse> f : parts) {
final B2UploadPartResponse part = Interruptibles.await(f);
completed.add(part);
listener.sent(part.getContentLength());
}
completed.sort(new Comparator<B2UploadPartResponse>() {
@Override
public int compare(final B2UploadPartResponse o1, final B2UploadPartResponse o2) {
return o1.getPartNumber().compareTo(o2.getPartNumber());
}
});
final List<String> checksums = new ArrayList<String>();
for(B2UploadPartResponse part : completed) {
checksums.add(part.getContentSha1());
}
session.getClient().finishLargeFileUpload(response.getFileId(), checksums.toArray(new String[checksums.size()]));
if(log.isInfoEnabled()) {
log.info(String.format("Finished large file upload %s with %d parts", target, completed.size()));
}
fileid.cache(target, response.getFileId());
return target.withAttributes(new PathAttributes(source.attributes()).withVersionId(response.getFileId()));
}
catch(B2ApiException e) {
throw new B2ExceptionMappingService(fileid).map("Cannot copy {0}", e, source);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Cannot copy {0}", e, source);
}
finally {
pool.shutdown(false);
}
} | @Test
public void testCopyToExistingFile() throws Exception {
final B2VersionIdProvider fileid = new B2VersionIdProvider(session);
final Path container = new Path("test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path folder = new B2DirectoryFeature(session, fileid).mkdir(new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final String name = new AlphanumericRandomStringService().random();
final byte[] content = RandomUtils.nextBytes(6 * 1000 * 1000);
final Path test = new Path(folder, name, EnumSet.of(Path.Type.file));
final OutputStream out = new B2WriteFeature(session, fileid).write(test, new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
new StreamCopier(new TransferStatus(), new TransferStatus().withLength(content.length)).transfer(new ByteArrayInputStream(content), out);
out.close();
final Path copy = new B2TouchFeature(session, fileid).touch(new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
assertTrue(new B2FindFeature(session, fileid).find(new Path(folder, name, EnumSet.of(Path.Type.file))));
assertTrue(new B2FindFeature(session, fileid).find(copy));
new B2LargeCopyFeature(session, fileid, 5 * 1000L * 1000L, 1).copy(test, copy,
new TransferStatus().exists(true).withLength(content.length), new DisabledConnectionCallback(), new DisabledStreamListener());
final Find find = new DefaultFindFeature(session);
assertTrue(find.find(test));
assertTrue(find.find(copy));
new B2DeleteFeature(session, fileid).delete(Arrays.asList(test, copy), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
static <K, V> StateSerdes<K, V> prepareStoreSerde(final StateStoreContext context,
final String storeName,
final String changelogTopic,
final Serde<K> keySerde,
final Serde<V> valueSerde,
final PrepareFunc<V> prepareValueSerdeFunc) {
return new StateSerdes<>(
changelogTopic,
prepareSerde(WrappingNullableUtils::prepareKeySerde, storeName, keySerde, new SerdeGetter(context), true, context.taskId()),
prepareSerde(prepareValueSerdeFunc, storeName, valueSerde, new SerdeGetter(context), false, context.taskId())
);
} | @Test
public void shouldThrowStreamsExceptionOnUndefinedValueSerdeForStateStoreContext() {
final MockInternalNewProcessorContext<String, String> context = new MockInternalNewProcessorContext<>();
utilsMock.when(() -> WrappingNullableUtils.prepareValueSerde(any(), any()))
.thenThrow(new ConfigException("Please set StreamsConfig#DEFAULT_VALUE_SERDE_CLASS_CONFIG"));
final Throwable exception = assertThrows(StreamsException.class,
() -> StoreSerdeInitializer.prepareStoreSerde((StateStoreContext) context, "myStore", "topic",
new Serdes.StringSerde(), new Serdes.StringSerde(), WrappingNullableUtils::prepareValueSerde));
assertThat(exception.getMessage(), equalTo("Failed to initialize value serdes for store myStore"));
assertThat(exception.getCause().getMessage(), equalTo("Please set StreamsConfig#DEFAULT_VALUE_SERDE_CLASS_CONFIG"));
} |
@Override
public double sum() {
return get(sumAsync(60, TimeUnit.SECONDS));
} | @Test
public void testSum() {
RDoubleAdder adder1 = redisson.getDoubleAdder("test1");
RDoubleAdder adder2 = redisson.getDoubleAdder("test1");
RDoubleAdder adder3 = redisson.getDoubleAdder("test1");
adder1.add(2.38);
adder2.add(4.14);
adder3.add(1.48);
Assertions.assertThat(adder1.sum()).isEqualTo(8);
Assertions.assertThat(adder2.sum()).isEqualTo(8);
Assertions.assertThat(adder3.sum()).isEqualTo(8);
} |
@VisibleForTesting
public void validateConfigKeyUnique(Long id, String key) {
ConfigDO config = configMapper.selectByKey(key);
if (config == null) {
return;
}
// 如果 id 为空,说明不用比较是否为相同 id 的参数配置
if (id == null) {
throw exception(CONFIG_KEY_DUPLICATE);
}
if (!config.getId().equals(id)) {
throw exception(CONFIG_KEY_DUPLICATE);
}
} | @Test
public void testValidateConfigKeyUnique_keyDuplicateForCreate() {
// 准备参数
String key = randomString();
// mock 数据
configMapper.insert(randomConfigDO(o -> o.setConfigKey(key)));
// 调用,校验异常
assertServiceException(() -> configService.validateConfigKeyUnique(null, key),
CONFIG_KEY_DUPLICATE);
} |
public long mergeMaxColLen(long oldValue, long newValue) {
return Math.max(oldValue, newValue);
} | @Test
public void testMergeMaxColLen() {
assertEquals(3, MERGER.mergeMaxColLen(1, 3));
assertEquals(3, MERGER.mergeMaxColLen(3, 1));
} |
@VisibleForTesting
protected AudioManager getAudioManager() {
return mAudioManager;
} | @Test
public void testLoadAndUnloadSystemSounds() {
ShadowAskAudioManager shadowAudioManager =
(ShadowAskAudioManager) Shadows.shadowOf(mAnySoftKeyboardUnderTest.getAudioManager());
Assert.assertEquals(
getApplicationContext().getResources().getBoolean(R.bool.settings_default_sound_on),
shadowAudioManager.areSoundEffectsLoaded());
SharedPrefsHelper.setPrefsValue(R.string.settings_key_sound_on, true);
Assert.assertTrue(shadowAudioManager.areSoundEffectsLoaded());
SharedPrefsHelper.setPrefsValue(R.string.settings_key_sound_on, false);
Assert.assertFalse(shadowAudioManager.areSoundEffectsLoaded());
SharedPrefsHelper.setPrefsValue(R.string.settings_key_sound_on, true);
Assert.assertTrue(shadowAudioManager.areSoundEffectsLoaded());
mAnySoftKeyboardController.destroy();
Assert.assertFalse(shadowAudioManager.areSoundEffectsLoaded());
} |
static PiActionProfileGroup translate(Group group, PiPipeconf pipeconf, Device device)
throws PiTranslationException {
if (!SUPPORTED_GROUP_TYPES.contains(group.type())) {
throw new PiTranslationException(format(
"group type %s not supported", group.type()));
}
// Get action profile from group key.
// TODO: define proper field in group class.
if (!(group.appCookie() instanceof PiGroupKey)) {
throw new PiTranslationException(
"group app cookie is not PI (class should be PiGroupKey)");
}
final PiGroupKey groupKey = (PiGroupKey) group.appCookie();
final PiActionProfileId actionProfileId = groupKey.actionProfileId();
// Check validity of action profile against pipeconf.
final PiActionProfileModel actionProfileModel = pipeconf.pipelineModel()
.actionProfiles(actionProfileId)
.orElseThrow(() -> new PiTranslationException(format(
"no such action profile '%s'", actionProfileId)));
if (!actionProfileModel.hasSelector()) {
throw new PiTranslationException(format(
"action profile '%s' does not support dynamic selection",
actionProfileId));
}
// Check if the table associated with the action profile supports only
// one-shot action profile programming.
boolean isTableOneShot = actionProfileModel.tables().stream()
.map(tableId -> pipeconf.pipelineModel().table(tableId))
.allMatch(piTableModel -> piTableModel.isPresent() &&
piTableModel.get().oneShotOnly());
if (isTableOneShot) {
throw new PiTranslationException(format(
"Table associated to action profile '%s' supports only one-shot action profile programming",
actionProfileId));
}
// Check group validity.
if (actionProfileModel.maxGroupSize() > 0
&& group.buckets().buckets().size() > actionProfileModel.maxGroupSize()) {
throw new PiTranslationException(format(
"too many buckets, max group size for action profile '%s' is %d",
actionProfileId, actionProfileModel.maxGroupSize()));
}
// If not INDIRECT, we set the maximum group size as specified in the
// model, however this might be highly inefficient for some HW targets
// which pre-allocate resources for the whole group.
final int maxGroupSize = group.type() == GroupDescription.Type.INDIRECT
? 1 : actionProfileModel.maxGroupSize();
final PiActionProfileGroup.Builder piActionGroupBuilder = PiActionProfileGroup.builder()
.withId(PiActionProfileGroupId.of(group.id().id()))
.withActionProfileId(groupKey.actionProfileId())
.withMaxSize(maxGroupSize);
// Translate group buckets to PI group members
final PiPipelineInterpreter interpreter = getInterpreterOrNull(device, pipeconf);
short bucketIdx = 0;
for (GroupBucket bucket : group.buckets().buckets()) {
/*
FIXME: the way member IDs are computed can cause collisions!
Problem: In P4Runtime action profile members, i.e. action buckets,
are associated to a numeric ID chosen at member insertion time. This
ID must be unique for the whole action profile (i.e. the group table
in OpenFlow). In ONOS, GroupBucket doesn't specify any ID.
Solutions:
- Change GroupBucket API to force application wanting to perform
group operations to specify a member id.
- Maintain state to dynamically allocate/deallocate member IDs, e.g.
in a dedicated service, or in a P4Runtime Group Provider.
Hack: Statically derive member ID by combining groupId and position
of the bucket in the list.
*/
final int memberId = Objects.hash(group.id(), bucketIdx);
if (memberId == 0) {
throw new PiTranslationException(
"GroupBucket produces PiActionProfileMember " +
"with invalid ID 0");
}
bucketIdx++;
final PiTableAction tableAction = translateTreatment(
bucket.treatment(), interpreter,
groupKey.tableId(), pipeconf.pipelineModel());
if (tableAction == null) {
throw new PiTranslationException(
"bucket treatment translator returned null");
}
if (tableAction.type() != ACTION) {
throw new PiTranslationException(format(
"action of type '%s' cannot be used in action profile members",
tableAction.type()));
}
final PiActionProfileMember member = PiActionProfileMember.builder()
.forActionProfile(groupKey.actionProfileId())
.withId(PiActionProfileMemberId.of(memberId))
.withAction((PiAction) tableAction)
.build();
// NOTE Indirect groups have weight set to -1 which is not supported
// by P4RT - setting to 1 to avoid problems with the p4rt server.
final int weight = group.type() == GroupDescription.Type.INDIRECT ? 1 : bucket.weight();
piActionGroupBuilder.addMember(member, weight);
}
return piActionGroupBuilder.build();
} | @Test
public void testTranslateGroupsOneShotError() throws Exception {
thrown.expect(PiTranslationException.class);
thrown.expectMessage(format("Table associated to action profile '%s' " +
"supports only one-shot action profile programming",
INGRESS_WCMP_CONTROL_WCMP_SELECTOR.id()));
PiGroupTranslatorImpl.translate(SELECT_GROUP, pipeconfOneShot, null);
} |
@Override
public boolean isSingleton() {
return true;
} | @Test
public void testIfSpringEmbeddedCacheManagerFactoryBeanDeclaresItselfToOnlyProduceSingletons() {
objectUnderTest = new SpringEmbeddedCacheManagerFactoryBean();
assertTrue("isSingleton() should always return true. However, it returned false",
objectUnderTest.isSingleton());
} |
@Override
public Collection<String> getDistributedTableNames() {
return logicalTableNames;
} | @Test
void assertGetDistributedTableMapper() {
assertThat(new LinkedList<>(ruleAttribute.getDistributedTableNames()), is(Collections.singletonList("foo_tbl")));
} |
@Override
@Transactional(rollbackFor = Exception.class)
public Long createJob(JobSaveReqVO createReqVO) throws SchedulerException {
validateCronExpression(createReqVO.getCronExpression());
// 校验唯一性
if (jobMapper.selectByHandlerName(createReqVO.getHandlerName()) != null) {
throw exception(JOB_HANDLER_EXISTS);
}
// 插入
JobDO job = BeanUtils.toBean(createReqVO, JobDO.class);
job.setStatus(JobStatusEnum.INIT.getStatus());
fillJobMonitorTimeoutEmpty(job);
jobMapper.insert(job);
// 添加 Job 到 Quartz 中
schedulerManager.addJob(job.getId(), job.getHandlerName(), job.getHandlerParam(), job.getCronExpression(),
createReqVO.getRetryCount(), createReqVO.getRetryInterval());
// 更新
JobDO updateObj = JobDO.builder().id(job.getId()).status(JobStatusEnum.NORMAL.getStatus()).build();
jobMapper.updateById(updateObj);
// 返回
return job.getId();
} | @Test
public void testCreateJob_jobHandlerExists() throws SchedulerException {
// 准备参数 指定 Cron 表达式
JobSaveReqVO reqVO = randomPojo(JobSaveReqVO.class, o -> o.setCronExpression("0 0/1 * * * ? *"));
// 调用
jobService.createJob(reqVO);
// 调用,并断言异常
assertServiceException(() -> jobService.createJob(reqVO), JOB_HANDLER_EXISTS);
} |
public static <T> Map<String, T> translateDeprecatedConfigs(Map<String, T> configs, String[][] aliasGroups) {
return translateDeprecatedConfigs(configs, Stream.of(aliasGroups)
.collect(Collectors.toMap(x -> x[0], x -> Stream.of(x).skip(1).collect(Collectors.toList()))));
} | @Test
public void testAllowsNewKey() {
Map<String, String> config = new HashMap<>();
config.put("foo.bar", "baz");
Map<String, String> newConfig = ConfigUtils.translateDeprecatedConfigs(config, new String[][]{
{"foo.bar", "foo.bar.deprecated"},
{"chicken", "rooster", "hen"},
{"cow", "beef", "heifer", "steer"}
});
assertNotNull(newConfig);
assertEquals("baz", newConfig.get("foo.bar"));
assertNull(newConfig.get("foo.bar.deprecated"));
} |
public String disassemble(ScriptDefinition script) throws IOException
{
int[] instructions = script.getInstructions();
int[] iops = script.getIntOperands();
String[] sops = script.getStringOperands();
Map<Integer, Integer>[] switches = script.getSwitches();
assert iops.length == instructions.length;
assert sops.length == instructions.length;
boolean[] jumps = needLabel(script);
StringBuilder writer = new StringBuilder();
writerHeader(writer, script);
for (int i = 0; i < instructions.length; ++i)
{
int opcode = instructions[i];
int iop = iops[i];
String sop = sops[i];
Instruction ins = this.instructions.find(opcode);
if (ins == null)
{
logger.warn("Unknown instruction {} in script {}", opcode, script.getId());
}
if (jumps[i])
{
// something jumps here
writer.append("LABEL").append(i).append(":\n");
}
String name;
if (ins != null && ins.getName() != null)
{
name = ins.getName();
}
else
{
name = String.format("%03d", opcode);
}
writer.append(String.format(" %-22s", name));
if (shouldWriteIntOperand(opcode, iop))
{
if (isJump(opcode))
{
writer.append(" LABEL").append(i + iop + 1);
}
else
{
writer.append(" ").append(iop);
}
}
if (sop != null)
{
writer.append(" \"").append(ESCAPER.escape(sop)).append("\"");
}
if (opcode == Opcodes.SWITCH)
{
Map<Integer, Integer> switchMap = switches[iop];
for (Entry<Integer, Integer> entry : switchMap.entrySet())
{
int value = entry.getKey();
int jump = entry.getValue();
writer.append("\n");
writer.append(" ").append(value).append(": LABEL").append(i + jump + 1);
}
}
writer.append("\n");
}
return writer.toString();
} | @Test
public void test() throws IOException
{
File outDir = folder.newFolder();
int count = 0;
try (Store store = new Store(StoreLocation.LOCATION))
{
store.load();
Storage storage = store.getStorage();
Index index = store.getIndex(IndexType.CLIENTSCRIPT);
ScriptLoader loader = new ScriptLoader();
for (Archive archive : index.getArchives())
{
byte[] contents = archive.decompress(storage.loadArchive(archive));
if (contents == null)
{
continue;
}
ScriptDefinition script = loader.load(archive.getArchiveId(), contents);
File outFile = new File(outDir, archive.getArchiveId() + ".rs2asm");
Disassembler disassembler = new Disassembler();
String out = disassembler.disassemble(script);
Files.write(out.getBytes(StandardCharsets.UTF_8), outFile);
++count;
}
}
logger.info("Dumped {} scripts to {}", count, outDir);
} |
@Bean
public LocaleResolver localeResolver() {
AcceptHeaderLocaleResolver localeResolver = new AcceptHeaderLocaleResolver();
localeResolver.setDefaultLocale(Locale.US);
return localeResolver;
} | @Test
void localeResolver_ReturnsAcceptHeaderLocaleResolver() {
LocaleConfig localeConfig = context.getBean(LocaleConfig.class);
LocaleResolver localeResolver = localeConfig.localeResolver();
assertInstanceOf(AcceptHeaderLocaleResolver.class, localeResolver);
} |
public QueryMetadata parse(String queryString) {
    // A null or empty query declares no parameters.
    if (Strings.isNullOrEmpty(queryString)) {
        return QueryMetadata.empty();
    }
    // Every placeholder occurrence, keyed by placeholder name in first-seen
    // order, with its 1-based line number and column span.
    Map<String, List<SubstringMultilinePosition>> positions = new LinkedHashMap<>();
    final String[] lines = queryString.split("\n");
    for (int line = 0; line < lines.length; line++) {
        final Matcher matcher = PLACEHOLDER_PATTERN.matcher(lines[line]);
        while (matcher.find()) {
            final String name = matcher.group(1);
            // computeIfAbsent replaces the original containsKey/put/get dance.
            positions.computeIfAbsent(name, k -> new ArrayList<>())
                    .add(SubstringMultilinePosition.create(line + 1, matcher.start(), matcher.end()));
        }
    }
    final ImmutableSet<QueryParam> params = positions.entrySet().stream()
            .map(entry -> QueryParam.create(entry.getKey(), entry.getValue()))
            .collect(ImmutableSet.toImmutableSet());
    return QueryMetadata.builder()
            .usedParameters(params)
            .build();
} | @Test
void testStringsContainingDollars() {
assertThat(parse("foo:bar$")).isEmpty();
assertThat(parse("foo:bar$ OR foo:$baz")).isEmpty();
assertThat(parse("foo:bar$ OR foo:$baz$")).containsExactly("baz");
assertThat(parse("foo:$bar$ OR foo:$baz")).containsExactly("bar");
assertThat(parse("foo:bar$ AND baz$:$baz$")).containsExactly("baz");
assertThat(parse("foo:$$")).isEmpty();
assertThat(parse("foo:$foo$ AND bar:$$")).containsExactly("foo");
} |
void handleLine(final String line) {
    // Treat a null line as empty and drop surrounding whitespace before
    // deciding whether there is anything to execute.
    final String trimmed = (line == null) ? "" : line.trim();
    if (trimmed.isEmpty()) {
        return;
    }
    handleStatements(trimmed);
} | @Test
public void shouldHandleSetPropertyAsPartOfMultiStatementLine() {
// When:
localCli.handleLine("set 'auto.offset.reset'='earliest';");
// Then:
assertThat(terminal.getOutputString(),
containsString("Successfully changed local property 'auto.offset.reset' to 'earliest'"));
} |
public void terminateCluster(final List<String> deleteTopicPatterns) {
// Tear the cluster down in dependency order: stop all persistent queries so
// nothing keeps producing, delete sink topics matching the given patterns,
// delete the internally managed topics, and finally close the engine.
terminatePersistentQueries();
deleteSinkTopics(deleteTopicPatterns);
deleteTopics(managedTopics);
ksqlEngine.close();
} | @Test
public void shouldCleanUpSchemasForTopicListWithPattern() throws Exception {
// Given:
givenTopicsExistInKafka("K_Fo", "K_Foo", "K_Fooo", "NotMatched");
givenSinkTopicsExistInMetastore(FormatFactory.AVRO, "K_Fo", "K_Foo", "K_Fooo", "NotMatched");
givenSchemasForTopicsExistInSchemaRegistry("K_Fo", "K_Foo", "K_Fooo", "NotMatched");
// When:
clusterTerminator.terminateCluster(ImmutableList.of("K_Fo.*"));
// Then:
verifySchemaDeletedForTopics("K_Foo", "K_Fooo", "K_Fo");
verifySchemaNotDeletedForTopic("NotMatched");
} |
UuidGenerator loadUuidGenerator() {
// Generator class explicitly selected via options, if any.
Class<? extends UuidGenerator> objectFactoryClass = options.getUuidGeneratorClass();
ClassLoader classLoader = classLoaderSupplier.get();
// Discover all UuidGenerator implementations visible to the class loader.
ServiceLoader<UuidGenerator> loader = ServiceLoader.load(UuidGenerator.class, classLoader);
if (objectFactoryClass == null) {
// No explicit selection: fall back to the single discovered generator or a default.
return loadSingleUuidGeneratorOrDefault(loader);
}
// Explicit selection: pick the configured implementation among those discovered.
return loadSelectedUuidGenerator(loader, objectFactoryClass);
} | @Test
void test_case_4() {
Options options = () -> null;
UuidGeneratorServiceLoader loader = new UuidGeneratorServiceLoader(
() -> new ServiceLoaderTestClassLoader(UuidGenerator.class,
RandomUuidGenerator.class,
IncrementingUuidGenerator.class,
OtherGenerator.class),
options);
assertThat(loader.loadUuidGenerator(), instanceOf(OtherGenerator.class));
} |
@Override
public Messages process(Messages messages) {
// Time the full pipeline run via the executionTime metric.
try (Timer.Context ignored = executionTime.time()) {
// The latest state decides whether per-rule metrics are recorded.
final State latestState = stateUpdater.getLatestState();
if (latestState.enableRuleMetrics()) {
return process(messages, new RuleMetricsListener(metricRegistry), latestState);
}
// Metrics disabled: run with a listener that records nothing.
return process(messages, new NoopInterpreterListener(), latestState);
}
} | @Test
public void testMatchAllDoesNotContinueIfNotAllRulesMatched() {
final RuleService ruleService = mock(MongoDbRuleService.class);
when(ruleService.loadAll()).thenReturn(ImmutableList.of(RULE_TRUE, RULE_FALSE, RULE_ADD_FOOBAR));
final PipelineService pipelineService = mock(MongoDbPipelineService.class);
when(pipelineService.loadAll()).thenReturn(Collections.singleton(
PipelineDao.create("p1", "title", "description",
"pipeline \"pipeline\"\n" +
"stage 0 match all\n" +
" rule \"true\";\n" +
" rule \"false\";\n" +
"stage 1 match either\n" +
" rule \"add_foobar\";\n" +
"end\n",
Tools.nowUTC(),
null)
));
final Map<String, Function<?>> functions = ImmutableMap.of(SetField.NAME, new SetField());
final PipelineInterpreter interpreter = createPipelineInterpreter(ruleService, pipelineService, functions);
final Messages processed = interpreter.process(messageInDefaultStream("message", "test"));
final List<Message> messages = ImmutableList.copyOf(processed);
assertThat(messages).hasSize(1);
final Message actualMessage = messages.get(0);
assertThat(actualMessage.hasField("foobar")).isFalse();
} |
public Collection<ViewParameterSummaryDTO> forValue() {
    // Ids of the searches referenced by any view.
    final Set<String> searches = viewService.streamAll()
            .map(ViewDTO::searchId)
            .collect(Collectors.toSet());
    // Index (by id) the referenced searches that declare at least one parameter.
    final Map<String, Search> qualifyingSearches = this.searchDbService.findByIds(searches).stream()
            .filter(search -> !search.parameters().isEmpty())
            .collect(Collectors.toMap(Search::id, Functions.identity()));
    // Summarize every view whose search has parameters.
    // (containsKey replaces the original keySet().contains — same result, idiomatic map lookup.)
    return viewService.streamAll()
            .filter(view -> qualifyingSearches.containsKey(view.searchId()))
            .map(view -> ViewParameterSummaryDTO.create(view, qualifyingSearches.get(view.searchId())))
            .collect(Collectors.toSet());
} | @Test
public void returnEmptyListWhenNoViewsArePresent() {
final QualifyingViewsService service = new QualifyingViewsService(mockSearchService(), mockViewService());
final Collection<ViewParameterSummaryDTO> result = service.forValue();
assertThat(result).isEmpty();
} |
public boolean shouldExecute(DefaultPostJobDescriptor descriptor) {
    // Execute the post job only when every property it requires is present;
    // otherwise log the skip at debug level.
    final boolean requirementsMet = settingsCondition(descriptor);
    if (requirementsMet) {
        return true;
    }
    LOG.debug("'{}' skipped because one of the required properties is missing", descriptor.name());
    return false;
} | @Test
public void should_optimize_on_settings() {
DefaultPostJobDescriptor descriptor = new DefaultPostJobDescriptor()
.requireProperty("sonar.foo.reportPath");
optimizer = new PostJobOptimizer(settings.asConfig());
assertThat(optimizer.shouldExecute(descriptor)).isFalse();
settings.setProperty("sonar.foo.reportPath", "foo");
optimizer = new PostJobOptimizer(settings.asConfig());
assertThat(optimizer.shouldExecute(descriptor)).isTrue();
} |
@Override
public KTable<K, V> reduce(final Reducer<V> adder,
final Reducer<V> subtractor,
final Materialized<K, V, KeyValueStore<Bytes, byte[]>> materialized) {
// Convenience overload: delegate to the named variant with an empty name.
return reduce(adder, subtractor, NamedInternal.empty(), materialized);
} | @Test
public void shouldThrowNullPointerOnReduceWhenSubtractorIsNull() {
assertThrows(NullPointerException.class, () -> groupedTable.reduce(
MockReducer.STRING_ADDER,
null,
Materialized.as("store")));
} |
@Override
public Iterator<Object> iterateObjects() {
// Iterate objects across all concrete stores through one composite iterator.
// NOTE(review): the meaning of the boolean flag is defined by
// CompositeObjectIterator — confirm against its constructor.
return new CompositeObjectIterator(concreteStores, true);
} | @Test
public void queryBySubtypeDoesNotReturnSuperType() throws Exception {
insertObjectWithFactHandle(new SubClass());
insertObjectWithFactHandle(new SuperClass());
Collection<Object> result = collect(underTest.iterateObjects(SubClass.class));
assertThat(result).hasSize(1);
assertThat(result).hasAtLeastOneElementOfType(SubClass.class);
} |
@Override
public boolean encode(
@NonNull Resource<GifDrawable> resource, @NonNull File file, @NonNull Options options) {
GifDrawable drawable = resource.get();
// A UnitTransformation means no per-frame transformation was applied.
Transformation<Bitmap> transformation = drawable.getFrameTransformation();
boolean isTransformed = !(transformation instanceof UnitTransformation);
if (isTransformed && options.get(ENCODE_TRANSFORMATION)) {
// Re-encode frame by frame so the transformation is baked into the file.
return encodeTransformedToFile(drawable, file);
} else {
// Untransformed, or re-encoding disabled: write the original bytes as-is.
return writeDataDirect(drawable.getBuffer(), file);
}
} | @Test
public void testEncode_WithEncodeTransformationFalse_whenOsThrows_returnsFalse()
throws IOException {
options.set(ReEncodingGifResourceEncoder.ENCODE_TRANSFORMATION, false);
byte[] data = "testString".getBytes("UTF-8");
when(gifDrawable.getBuffer()).thenReturn(ByteBuffer.wrap(data));
assertThat(file.mkdirs()).isTrue();
assertFalse(encoder.encode(resource, file, options));
} |
public void write(Writer writer) throws IOException {
// Emit the buffered lines verbatim, in order; the writer is not closed here.
for (String line : lines) {
writer.write(line);
}
} | @Test
public void testWrite() throws Exception {
CodeBuffer buffer = new CodeBuffer();
buffer.printf("public static void main(String[] args) throws Exception {%n");
buffer.incrementIndent();
buffer.printf("System.out.println(\"%s\");%n", "hello world");
buffer.decrementIndent();
buffer.printf("}%n");
StringWriter stringWriter = new StringWriter();
buffer.write(stringWriter);
assertEquals(
stringWriter.toString(),
String.format("public static void main(String[] args) throws Exception {%n") +
String.format(" System.out.println(\"hello world\");%n") +
String.format("}%n"));
} |
@Override
public <T> void storeObject(
    String accountName,
    ObjectType objectType,
    String objectKey,
    T obj,
    String filename,
    boolean isAnUpdate) {
  // Persist obj as JSON into the table matching its object type, keyed by
  // objectKey. accountName, filename and isAnUpdate are not used by this SQL
  // backend (kept for interface compatibility).
  // Take a single timestamp so createdAt == updatedAt on a freshly stored
  // record; the original called Instant.now() twice per record, which made
  // the two fields differ by microseconds.
  Instant now = Instant.now();
  if (objectType.equals(ObjectType.CANARY_RESULT_ARCHIVE)) {
    var entity = new SqlCanaryArchive();
    entity.setId(objectKey);
    entity.setContent(mapToJson(obj, objectType));
    entity.setCreatedAt(now);
    entity.setUpdatedAt(now);
    sqlCanaryArchiveRepo.save(entity);
    return;
  }
  if (objectType.equals(ObjectType.CANARY_CONFIG)) {
    var entity = new SqlCanaryConfig();
    entity.setId(objectKey);
    entity.setContent(mapToJson(obj, objectType));
    entity.setCreatedAt(now);
    entity.setUpdatedAt(now);
    sqlCanaryConfigRepo.save(entity);
    return;
  }
  if (objectType.equals(ObjectType.METRIC_SET_PAIR_LIST)) {
    var entity = new SqlMetricSetPairs();
    entity.setId(objectKey);
    entity.setContent(mapToJson(obj, objectType));
    entity.setCreatedAt(now);
    entity.setUpdatedAt(now);
    sqlMetricSetPairsRepo.save(entity);
    return;
  }
  if (objectType.equals(ObjectType.METRIC_SET_LIST)) {
    var entity = new SqlMetricSets();
    entity.setId(objectKey);
    entity.setContent(mapToJson(obj, objectType));
    entity.setCreatedAt(now);
    entity.setUpdatedAt(now);
    sqlMetricSetsRepo.save(entity);
    return;
  }
  // Unknown type: fail loudly rather than silently dropping the object.
  throw new IllegalArgumentException("Unsupported object type: " + objectType);
} | @Test
public void testStoreObjectWhenMetricSets() {
var testAccountName = UUID.randomUUID().toString();
var testObjectType = ObjectType.METRIC_SET_LIST;
var testObjectKey = UUID.randomUUID().toString();
var testMetricSet = createTestMetricSet();
sqlStorageService.storeObject(
testAccountName, testObjectType, testObjectKey, List.of(testMetricSet));
verify(sqlMetricSetsRepo).save(any(SqlMetricSets.class));
} |
@Override
public boolean shouldSample() {
// Rate limiter: allow at most maxSamplesPerPeriod positive answers per fixed
// window of periodLengthInNanos nanoseconds.
long now = nanoClock.nanoTimeNow();
// Integer division buckets the clock into consecutive period indices.
long period = now / periodLengthInNanos;
synchronized (this) {
if (period != currentSamplingPeriod) {
// First call in a new period: reset the counter and sample.
currentSamplingPeriod = period;
samplesInCurrentPeriod = 1;
return true;
}
if (samplesInCurrentPeriod >= maxSamplesPerPeriod) {
// Budget for this period is exhausted.
return false;
}
++samplesInCurrentPeriod;
return true;
}
} | @Test
void samples_exceeding_period_count_return_false() {
var clock = MockUtils.mockedClockReturning(1000L, 1100L, 1200L);
var sampler = new MaxSamplesPerPeriod(clock, 1000L, 2L);
assertTrue(sampler.shouldSample());
assertTrue(sampler.shouldSample());
assertFalse(sampler.shouldSample());
} |
@Override
protected boolean useXML( HttpServletRequest request ) {
// This endpoint always responds in XML, regardless of the request contents.
return true;
} | @Test
public void testUseXML() {
assertTrue( servlet.useXML(null) );
} |
public void validateAuth(final Long memberId) {
// Guard: the given id must be this member's id, otherwise the caller is not
// authorized to act on this member.
// NOTE(review): throws NullPointerException if this.id is null (e.g. an
// unsaved entity) — presumably ids are always assigned here; confirm.
if (!this.id.equals(memberId)) {
throw new MemberAuthInvalidException();
}
} | @Test
void 멤버가_일치하는지_확인한다() {
// given
Member member = Member.builder()
.id(1L)
.build();
// when & then
assertDoesNotThrow(() -> member.validateAuth(member.getId()));
} |
static String describe(Throwable t) {
    // Nothing to describe: a null throwable maps to null.
    if (t == null) {
        return null;
    }
    // Format as "SimpleName: 'message'"; the message part is omitted when absent.
    final String type = t.getClass().getSimpleName();
    final String detail = t.getMessage();
    if (detail == null) {
        return type;
    }
    return type + ": '" + detail + "'";
} | @Test
void describingNullIsNull() {
assertThat(ExceptionUtils.describe(null), is(nullValue()));
} |
@Override
public Optional<Track<T>> clean(Track<T> track) {
// Fill in missing altitudes: back-fill the leading gap from the first point
// that has an altitude, then walk forward repairing each subsequent gap by
// interpolation (gap bounded on both sides) or extrapolation (trailing gap).
TreeSet<Point<T>> points = new TreeSet<>(track.points());
Optional<Point<T>> firstNonNull = firstPointWithAltitude(points);
if (!firstNonNull.isPresent()) {
// No point has an altitude at all — nothing to anchor the repair on.
return Optional.empty();
}
// Leading gap: all points strictly before the first altitude-bearing one.
SortedSet<Point<T>> pointsMissingAltitude = points.headSet(firstNonNull.get());
TreeSet<Point<T>> fixedPoints = extrapolateAltitudes(pointsMissingAltitude, firstNonNull.get());
// headSet/tailSet/subSet are live views: clearing one removes the broken
// points from `points` before the corrected copies are added back.
pointsMissingAltitude.clear();
points.addAll(fixedPoints);
Optional<Point<T>> gapStart;
Optional<Point<T>> gapEnd = firstNonNull;
while (gapEnd.isPresent()) {
// Next run of altitude-less points at or after the last known-good point.
gapStart = firstPointWithoutAltitude(points.tailSet(gapEnd.get()));
if (!gapStart.isPresent()) {
break;
}
gapEnd = firstPointWithAltitude(points.tailSet(gapStart.get()));
if (!gapEnd.isPresent()) {
// Trailing gap: no altitude after it — extrapolate from the point before it.
pointsMissingAltitude = points.tailSet(gapStart.get());
fixedPoints = extrapolateAltitudes(pointsMissingAltitude, points.lower(gapStart.get()));
pointsMissingAltitude.clear();
points.addAll(fixedPoints);
} else {
// Interior gap: bounded on both sides — interpolate between the bounds.
pointsMissingAltitude = points.subSet(gapStart.get(), gapEnd.get());
fixedPoints = interpolateAltitudes(pointsMissingAltitude, points.lower(gapStart.get()), gapEnd.get());
pointsMissingAltitude.clear();
points.addAll(fixedPoints);
}
}
return Optional.of(Track.of(points));
} | @Test
public void testFillingInitialAltitudes() {
Track<NoRawData> testTrack = trackWithNoInitialAltitudes();
Track<NoRawData> cleanedTrack = (new FillMissingAltitudes<NoRawData>()).clean(testTrack).get();
ArrayList<Point<NoRawData>> points = new ArrayList<>(cleanedTrack.points());
assertTrue(
points.get(0).altitude().equals(points.get(1).altitude()) &&
points.get(1).altitude().equals(points.get(2).altitude()),
"The first points should have their altitudes filled"
);
} |
static DynamicState stateMachineStep(DynamicState dynamicState, StaticState staticState) throws Exception {
// Advance the slot state machine by one step: dispatch to the handler for
// the current machine state, which computes and returns the next state.
LOG.debug("STATE {}", dynamicState.state);
switch (dynamicState.state) {
case EMPTY:
return handleEmpty(dynamicState, staticState);
case RUNNING:
return handleRunning(dynamicState, staticState);
case WAITING_FOR_WORKER_START:
return handleWaitingForWorkerStart(dynamicState, staticState);
case KILL_BLOB_UPDATE:
return handleKillBlobUpdate(dynamicState, staticState);
case KILL_AND_RELAUNCH:
return handleKillAndRelaunch(dynamicState, staticState);
case KILL:
return handleKill(dynamicState, staticState);
case WAITING_FOR_BLOB_LOCALIZATION:
return handleWaitingForBlobLocalization(dynamicState, staticState);
case WAITING_FOR_BLOB_UPDATE:
return handleWaitingForBlobUpdate(dynamicState, staticState);
default:
// Any newly added machine state must get a handler case above.
throw new IllegalStateException("Code not ready to handle a state of " + dynamicState.state);
}
} | @Test
public void testRunWithProfileActions() throws Exception {
try (SimulatedTime ignored = new SimulatedTime(1010)) {
int port = 8080;
String cTopoId = "CURRENT";
List<ExecutorInfo> cExecList = mkExecutorInfoList(1, 2, 3, 4, 5);
LocalAssignment cAssignment =
mkLocalAssignment(cTopoId, cExecList, mkWorkerResources(100.0, 100.0, 100.0));
Container cContainer = mock(Container.class);
LSWorkerHeartbeat chb = mkWorkerHB(cTopoId, port, cExecList, Time.currentTimeSecs() + 100); //NOT going to timeout for a while
when(cContainer.readHeartbeat()).thenReturn(chb, chb, chb, chb, chb, chb);
when(cContainer.runProfiling(any(ProfileRequest.class), anyBoolean())).thenReturn(true);
AsyncLocalizer localizer = mock(AsyncLocalizer.class);
BlobChangingCallback cb = mock(BlobChangingCallback.class);
ContainerLauncher containerLauncher = mock(ContainerLauncher.class);
ISupervisor iSuper = mock(ISupervisor.class);
LocalState state = mock(LocalState.class);
StaticState staticState = new StaticState(localizer, 5000, 120000, 1000, 1000,
containerLauncher, "localhost", port, iSuper, state, cb, null, null, new SlotMetrics(new StormMetricsRegistry()));
Set<TopoProfileAction> profileActions = new HashSet<>();
ProfileRequest request = new ProfileRequest();
request.set_action(ProfileAction.JPROFILE_STOP);
NodeInfo info = new NodeInfo();
info.set_node("localhost");
info.add_to_port(port);
request.set_nodeInfo(info);
request.set_time_stamp(Time.currentTimeMillis() + 3000);//3 seconds from now
TopoProfileAction profile = new TopoProfileAction(cTopoId, request);
profileActions.add(profile);
Set<TopoProfileAction> expectedPending = new HashSet<>();
expectedPending.add(profile);
SlotMetrics slotMetrics = new SlotMetrics(new StormMetricsRegistry());
DynamicState dynamicState = new DynamicState(cAssignment, cContainer, cAssignment, slotMetrics)
.withProfileActions(profileActions, Collections.emptySet());
DynamicState nextState = Slot.stateMachineStep(dynamicState, staticState);
assertEquals(MachineState.RUNNING, nextState.state);
verify(cContainer).runProfiling(request, false);
assertEquals(expectedPending, nextState.pendingStopProfileActions);
assertEquals(expectedPending, nextState.profileActions);
assertTrue(Time.currentTimeMillis() > 1000);
nextState = Slot.stateMachineStep(nextState, staticState);
assertEquals(MachineState.RUNNING, nextState.state);
assertEquals(expectedPending, nextState.pendingStopProfileActions);
assertEquals(expectedPending, nextState.profileActions);
assertTrue(Time.currentTimeMillis() > 2000);
nextState = Slot.stateMachineStep(nextState, staticState);
assertEquals(MachineState.RUNNING, nextState.state);
assertEquals(expectedPending, nextState.pendingStopProfileActions);
assertEquals(expectedPending, nextState.profileActions);
assertTrue(Time.currentTimeMillis() > 3000);
nextState = Slot.stateMachineStep(nextState, staticState);
assertEquals(MachineState.RUNNING, nextState.state);
verify(cContainer).runProfiling(request, true);
assertEquals(Collections.<TopoProfileAction>emptySet(), nextState.pendingStopProfileActions);
assertEquals(Collections.<TopoProfileAction>emptySet(), nextState.profileActions);
assertTrue(Time.currentTimeMillis() > 4000);
nextState = Slot.stateMachineStep(nextState, staticState);
assertEquals(MachineState.RUNNING, nextState.state);
assertEquals(Collections.<TopoProfileAction>emptySet(), nextState.pendingStopProfileActions);
assertEquals(Collections.<TopoProfileAction>emptySet(), nextState.profileActions);
assertTrue(Time.currentTimeMillis() > 5000);
}
} |
public static List<UniqueConstraint> parse(String defaultCatalogName, String defaultDbName,
String defaultTableName, String constraintDescs) {
// Parse a ';'-separated list of unique-constraint descriptions into
// UniqueConstraint objects, qualifying unqualified names with the defaults.
// NOTE(review): returns null (not an empty list) for a blank input — callers
// may depend on that distinction; confirm before changing.
if (Strings.isNullOrEmpty(constraintDescs)) {
return null;
}
String[] constraintArray = constraintDescs.split(";");
List<UniqueConstraint> uniqueConstraints = Lists.newArrayList();
for (String constraintDesc : constraintArray) {
// Skip empty segments produced by stray/trailing semicolons.
if (Strings.isNullOrEmpty(constraintDesc)) {
continue;
}
// descResult: (fully qualified table name, constraint column names).
Pair<TableName, List<String>> descResult = parseUniqueConstraintDesc(
defaultCatalogName, defaultDbName, defaultTableName, constraintDesc);
uniqueConstraints.add(new UniqueConstraint(descResult.first.getCatalog(),
descResult.first.getDb(), descResult.first.getTbl(),
descResult.second.stream().map(ColumnId::create).collect(Collectors.toList())));
}
return uniqueConstraints;
} | @Test
public void testParseException() {
String constraintDescs = "hive_catalog.db1.table1.col1, col2, hive_catalog.db1.table2.col3";
exception.expect(SemanticException.class);
exception.expectMessage("unique constraint column should be in same table");
UniqueConstraint.parse(null, null, null, constraintDescs);
} |
@Override
public void init(TbContext ctx, TbNodeConfiguration configuration) throws TbNodeException {
this.config = TbNodeUtils.convert(configuration, TbJsonPathNodeConfiguration.class);
this.jsonPathValue = config.getJsonPath();
// Only build the evaluation machinery when the expression differs from the
// default; presumably the default path is handled without evaluation — confirm.
if (!TbJsonPathNodeConfiguration.DEFAULT_JSON_PATH.equals(this.jsonPathValue)) {
// The Jackson provider keeps JsonPath results as Jackson JsonNode trees.
this.configurationJsonPath = Configuration.builder()
.jsonProvider(new JacksonJsonNodeJsonProvider())
.build();
this.jsonPath = JsonPath.compile(config.getJsonPath());
}
} | @Test
void givenJsonArrayWithFilter_whenOnMsg_thenVerifyOutput() throws Exception {
config.setJsonPath("$.Attribute_2[?(@.voltage > 200)]");
nodeConfiguration = new TbNodeConfiguration(JacksonUtil.valueToTree(config));
node.init(ctx, nodeConfiguration);
String data = "{\"Attribute_1\":22.5,\"Attribute_2\":[{\"voltage\":220}, {\"voltage\":250}, {\"voltage\":110}]}";
VerifyOutputMsg(data, 1, JacksonUtil.toJsonNode("[{\"voltage\":220}, {\"voltage\":250}]"));
} |
@Override
public Mono<Void> execute(final ServerWebExchange exchange, final ShenyuPluginChain chain) {
ServerHttpRequest request = exchange.getRequest();
ShenyuContext shenyuContext = exchange.getAttribute(Constants.CONTEXT);
if (Objects.nonNull(shenyuContext)) {
// Choose the parameter-extraction strategy from the request content type:
// JSON body, form data, or (for anything else, including no content type)
// the query string.
MediaType mediaType = request.getHeaders().getContentType();
if (MediaType.APPLICATION_JSON.isCompatibleWith(mediaType)) {
return body(exchange, request, chain);
}
if (MediaType.APPLICATION_FORM_URLENCODED.isCompatibleWith(mediaType)) {
return formData(exchange, request, chain);
}
return query(exchange, request, chain);
}
// No Shenyu context attached: nothing to transform, continue the chain.
return chain.execute(exchange);
} | @Test
public void testNoBody() {
ServerWebExchange exchange = MockServerWebExchange.from(
MockServerHttpRequest.post("localhost"));
Mockito.when(chain.execute(exchange)).thenReturn(Mono.empty());
ShenyuContext context = new ShenyuContext();
context.setRpcType(RpcTypeEnum.DUBBO.getName());
exchange.getAttributes().put(Constants.CONTEXT, context);
Mono<Void> result = rpcParamTransformPlugin.execute(exchange, chain);
StepVerifier.create(result).expectSubscription().verifyComplete();
} |
@Override
public InputStream getAsciiStream(final int columnIndex) throws SQLException {
// Delegate to the merged result set, requesting the ASCII stream variant.
return mergeResultSet.getInputStream(columnIndex, ASCII);
} | @Test
void assertGetAsciiStreamWithColumnLabel() throws SQLException {
InputStream inputStream = mock(InputStream.class);
when(mergeResultSet.getInputStream(1, "Ascii")).thenReturn(inputStream);
assertThat(shardingSphereResultSet.getAsciiStream("label"), instanceOf(InputStream.class));
} |
void runOnce() {
// One iteration of the sender loop: service transactional/idempotent state
// first (which may consume the whole iteration), then drain accumulated
// producer data and poll the network client.
if (transactionManager != null) {
try {
transactionManager.maybeResolveSequences();
RuntimeException lastError = transactionManager.lastError();
// do not continue sending if the transaction manager is in a failed state
if (transactionManager.hasFatalError()) {
if (lastError != null)
maybeAbortBatches(lastError);
// Keep polling so in-flight responses/cleanup still progress.
client.poll(retryBackoffMs, time.milliseconds());
return;
}
if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) {
return;
}
// Check whether we need a new producerId. If so, we will enqueue an InitProducerId
// request which will be sent below
transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();
if (maybeSendAndPollTransactionalRequest()) {
// A transactional request was sent and polled; skip the data path this round.
return;
}
} catch (AuthenticationException e) {
// This is already logged as error, but propagated here to perform any clean ups.
log.trace("Authentication exception while processing transactional request", e);
transactionManager.authenticationFailed(e);
}
}
long currentTimeMs = time.milliseconds();
long pollTimeout = sendProducerData(currentTimeMs);
client.poll(pollTimeout, currentTimeMs);
} | @Test
public void testUnresolvedSequencesAreNotFatal() throws Exception {
ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
apiVersions.update("0", NodeApiVersions.create(ApiKeys.INIT_PRODUCER_ID.id, (short) 0, (short) 3));
TransactionManager txnManager = new TransactionManager(logContext, "testUnresolvedSeq", 60000, 100, apiVersions);
setupWithTransactionState(txnManager);
doInitTransactions(txnManager, producerIdAndEpoch);
txnManager.beginTransaction();
txnManager.maybeAddPartition(tp0);
client.prepareResponse(buildAddPartitionsToTxnResponseData(0, Collections.singletonMap(tp0, Errors.NONE)));
sender.runOnce();
// Send first ProduceRequest
Future<RecordMetadata> request1 = appendToAccumulator(tp0);
sender.runOnce(); // send request
time.sleep(1000L);
appendToAccumulator(tp0);
sender.runOnce(); // send request
assertEquals(2, client.inFlightRequestCount());
sendIdempotentProducerResponse(0, tp0, Errors.NOT_LEADER_OR_FOLLOWER, -1);
sender.runOnce(); // receive first response
Node node = metadata.fetch().nodes().get(0);
time.sleep(1000L);
client.disconnect(node.idString());
client.backoff(node, 10);
sender.runOnce(); // now expire the first batch.
assertFutureFailure(request1, TimeoutException.class);
assertTrue(txnManager.hasUnresolvedSequence(tp0));
// Loop once and confirm that the transaction manager does not enter a fatal error state
sender.runOnce();
assertTrue(txnManager.hasAbortableError());
} |
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
if(directory.isRoot()) {
// Root listing: expose only root folders of type 0 ("My Files") and 1 ("Common").
final AttributedList<Path> list = new AttributedList<>();
for(RootFolder root : session.roots()) {
switch(root.getRootFolderType()) {
case 0: // My Files
case 1: // Common
list.add(new Path(directory, PathNormalizer.name(root.getName()), EnumSet.of(Path.Type.directory, Path.Type.volume),
attributes.toAttributes(root)));
break;
}
listener.chunk(directory, list);
}
return list;
}
else {
try {
// Page through the folder contents until the server-reported total is reached.
final AttributedList<Path> children = new AttributedList<>();
int pageIndex = 0;
int fileCount = 0;
FileContents files;
do {
files = new FilesApi(this.session.getClient()).filesGetById(URIEncoder.encode(fileid.getFileId(directory)),
pageIndex,
chunksize,
"Name asc",
0, // All
true,
false,
false
);
for(File f : files.getFiles()) {
final PathAttributes attrs = attributes.toAttributes(f);
// Flag bit 0 marks a directory entry; everything else is a file.
final EnumSet<Path.Type> type = (f.getFlags() & 1) == 1 ?
EnumSet.of(Path.Type.directory) :
EnumSet.of(Path.Type.file);
children.add(new Path(directory, f.getName(), type, attrs));
}
pageIndex++;
fileCount += files.getFiles().size();
// Notify the listener after each page so callers see incremental progress.
listener.chunk(directory, children);
}
// NOTE(review): if the server ever returns an empty page while
// fileCount < getTotalRowCount(), this loops forever — confirm the API
// guarantees progress on every page.
while(fileCount < files.getTotalRowCount());
return children;
}
catch(ApiException e) {
throw new StoregateExceptionMappingService(fileid).map("Listing directory {0} failed", e, directory);
}
}
} | @Test
public void testListDefaultPath() throws Exception {
final StoregateIdProvider nodeid = new StoregateIdProvider(session);
final Set<String> common = new StoregateListService(session, nodeid).list(
new Path("/common", EnumSet.of(AbstractPath.Type.directory, Path.Type.volume)), new DisabledListProgressListener()).toStream().map(Path::getName).collect(Collectors.toSet());
assertEquals(common, new StoregateListService(session, nodeid).list(
new Path("/Common", EnumSet.of(AbstractPath.Type.directory, Path.Type.volume)), new DisabledListProgressListener()).toStream().map(Path::getName).collect(Collectors.toSet()));
assertEquals(common, new StoregateListService(session, nodeid).list(
new Path("/Common files", EnumSet.of(AbstractPath.Type.directory, Path.Type.volume)), new DisabledListProgressListener()).toStream().map(Path::getName).collect(Collectors.toSet()));
final Set<String> home = new StoregateListService(session, nodeid).list(
new Path("/mduck", EnumSet.of(AbstractPath.Type.directory, Path.Type.volume)), new DisabledListProgressListener()).toStream().map(Path::getName).collect(Collectors.toSet());
assertEquals(home, new StoregateListService(session, nodeid).list(
new Path("/Home/mduck", EnumSet.of(AbstractPath.Type.directory, Path.Type.volume)), new DisabledListProgressListener()).toStream().map(Path::getName).collect(Collectors.toSet()));
} |
public static FieldScope fromSetFields(Message message) {
// Convenience overload using the default Any type registry and the default
// extension registry.
return fromSetFields(
message, AnyUtils.defaultTypeRegistry(), AnyUtils.defaultExtensionRegistry());
} | @Test
public void testFromSetFields_iterables_errorIfDescriptorMismatchesSubject() {
// Don't run this test twice.
if (!testIsRunOnce()) {
return;
}
Message message =
TestMessage2.newBuilder().setOInt(1).addRString("foo").addRString("bar").build();
Message eqMessage =
TestMessage2.newBuilder().setOInt(1).addRString("foo").addRString("bar").build();
try {
assertThat(message)
.withPartialScope(
FieldScopes.fromSetFields(
TestMessage3.newBuilder().setOInt(2).build(),
TestMessage3.newBuilder().addRString("foo").build()))
.isEqualTo(eqMessage);
fail("Expected failure.");
} catch (RuntimeException expected) {
expect
.that(expected)
.hasMessageThat()
.contains(
"Message given to FieldScopes.fromSetFields() "
+ "does not have the same descriptor as the message being tested");
expect.that(expected).hasMessageThat().contains(TestMessage2.getDescriptor().getFullName());
expect.that(expected).hasMessageThat().contains(TestMessage3.getDescriptor().getFullName());
}
} |
@Override
public synchronized void restartConnectorAndTasks(RestartRequest request, Callback<ConnectorStateInfo> cb) {
// Restart a connector and/or its tasks as described by the request: stop the
// affected pieces (marking each restarting), start them again, then report
// the resulting state via the callback. Unknown connectors and missing status
// complete the callback with NotFoundException.
// Ensure the connector exists
String connectorName = request.connectorName();
if (!configState.contains(connectorName)) {
cb.onCompletion(new NotFoundException("Unknown connector: " + connectorName, null), null);
return;
}
// The plan decides, from current status, which of connector/tasks to restart.
Optional<RestartPlan> maybePlan = buildRestartPlan(request);
if (!maybePlan.isPresent()) {
cb.onCompletion(new NotFoundException("Status for connector " + connectorName + " not found", null), null);
return;
}
RestartPlan plan = maybePlan.get();
// If requested, stop the connector and any tasks, marking each as restarting
log.info("Received {}", plan);
if (plan.shouldRestartConnector()) {
worker.stopAndAwaitConnector(connectorName);
onRestart(connectorName);
}
if (plan.shouldRestartTasks()) {
// Stop the tasks and mark as restarting
worker.stopAndAwaitTasks(plan.taskIdsToRestart());
plan.taskIdsToRestart().forEach(this::onRestart);
}
// Now restart the connector and tasks
if (plan.shouldRestartConnector()) {
log.debug("Restarting connector '{}'", connectorName);
// Restart outcome is only logged; the callback reports the plan regardless.
startConnector(connectorName, (error, targetState) -> {
if (error == null) {
log.info("Connector '{}' restart successful", connectorName);
} else {
log.error("Connector '{}' restart failed", connectorName, error);
}
});
}
if (plan.shouldRestartTasks()) {
log.debug("Restarting {} of {} tasks for {}", plan.restartTaskCount(), plan.totalTaskCount(), request);
createConnectorTasks(connectorName, plan.taskIdsToRestart());
log.debug("Restarted {} of {} tasks for {} as requested", plan.restartTaskCount(), plan.totalTaskCount(), request);
}
// Complete the restart request
log.info("Completed {}", plan);
cb.onCompletion(null, plan.restartConnectorStateInfo());
} | @Test
public void testRestartConnectorAndTasksUnknownConnector() {
initialize(false);
FutureCallback<ConnectorStateInfo> restartCallback = new FutureCallback<>();
RestartRequest restartRequest = new RestartRequest("UnknownConnector", false, true);
herder.restartConnectorAndTasks(restartRequest, restartCallback);
ExecutionException ee = assertThrows(ExecutionException.class, () -> restartCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS));
assertInstanceOf(NotFoundException.class, ee.getCause());
} |
public static int[] getPrecisionAndScale(String typeStr) {
// Extract {precision, scale} from a type string such as "decimal(10,2)"
// (case-insensitive). Throws StarRocksConnectorException when the string
// does not match the decimal pattern.
// NOTE(review): the Pattern is recompiled on every call; hoisting it to a
// static final field (outside this method) would avoid that.
Matcher matcher = Pattern.compile(DECIMAL_PATTERN).matcher(typeStr.toLowerCase(Locale.ROOT));
if (matcher.find()) {
return new int[] {Integer.parseInt(matcher.group(1)), Integer.parseInt(matcher.group(2))};
}
throw new StarRocksConnectorException("Failed to get precision and scale at " + typeStr);
} | @Test
public void testDecimalString() {
String t1 = "decimal(3,2)";
int[] res = getPrecisionAndScale(t1);
Assert.assertEquals(3, res[0]);
Assert.assertEquals(2, res[1]);
t1 = "decimal(222233,4442)";
res = getPrecisionAndScale(t1);
Assert.assertEquals(222233, res[0]);
Assert.assertEquals(4442, res[1]);
t1 = "decimal(3, 2)";
res = getPrecisionAndScale(t1);
Assert.assertEquals(3, res[0]);
Assert.assertEquals(2, res[1]);
try {
t1 = "decimal(3.222,2)";
getPrecisionAndScale(t1);
Assert.fail();
} catch (StarRocksConnectorException e) {
Assert.assertTrue(e.getMessage().contains("Failed to get"));
}
try {
t1 = "decimal(a,2)";
getPrecisionAndScale(t1);
Assert.fail();
} catch (StarRocksConnectorException e) {
Assert.assertTrue(e.getMessage().contains("Failed to get"));
}
try {
t1 = "decimal(-1,2)";
getPrecisionAndScale(t1);
Assert.fail();
} catch (StarRocksConnectorException e) {
Assert.assertTrue(e.getMessage().contains("Failed to get"));
}
try {
t1 = "decimal()";
getPrecisionAndScale(t1);
Assert.fail();
} catch (StarRocksConnectorException e) {
Assert.assertTrue(e.getMessage().contains("Failed to get"));
}
try {
t1 = "decimal(1)";
getPrecisionAndScale(t1);
Assert.fail();
} catch (StarRocksConnectorException e) {
Assert.assertTrue(e.getMessage().contains("Failed to get"));
}
} |
@Override
// Pure delegation: reports the wrapped ("original") plugin manager's system version unchanged.
public String getSystemVersion() {
return original.getSystemVersion();
} | @Test
public void getSystemVersion() {
assertEquals(pluginManager.getSystemVersion(), wrappedPluginManager.getSystemVersion());
} |
// Signs a raw transaction with the given credentials and returns the encoded signed bytes.
// EIP-4844 (blob) transactions use a dedicated encoding path; all other types go through
// the generic encode(...) helper.
public static byte[] signMessage(RawTransaction rawTransaction, Credentials credentials) {
byte[] encodedTransaction;
if (rawTransaction.getTransaction().getType().isEip4844()) {
encodedTransaction = encode4844(rawTransaction);
} else {
encodedTransaction = encode(rawTransaction);
}
// Produce the ECDSA signature over the encoded payload...
Sign.SignatureData signatureData =
Sign.signMessage(encodedTransaction, credentials.getEcKeyPair());
// ...then re-encode the transaction together with the signature data.
return encode(rawTransaction, signatureData);
} | @Test
public void testEip1559Transaction() {
assertArrayEquals(
TransactionEncoder.signMessage(
createEip1559RawTransaction(), SampleKeys.CREDENTIALS_ETH_EXAMPLE),
(Numeric.hexStringToByteArray(
"02f8698206178082162e8310c8e082753094627306090abab3a6e1400e9345bc60c78a8bef577b80c001a0d1f9ee3bdde4d4e0792c7089b84059fb28e17f494556d8a775450b1dd6c318a1a038bd3e2fb9e018528e0a41f57c7a32a8d23b2693e0451aa6ef4519b234466e7f")));
assertArrayEquals(
TransactionEncoder.signMessage(
createEip1559RawTransaction(), 1559L, SampleKeys.CREDENTIALS_ETH_EXAMPLE),
(Numeric.hexStringToByteArray(
"02f8698206178082162e8310c8e082753094627306090abab3a6e1400e9345bc60c78a8bef577b80c001a0d1f9ee3bdde4d4e0792c7089b84059fb28e17f494556d8a775450b1dd6c318a1a038bd3e2fb9e018528e0a41f57c7a32a8d23b2693e0451aa6ef4519b234466e7f")));
} |
// Runs a docker CLI command through the privileged-operation executor and returns its
// trimmed output. Failures surface as ContainerExecutionException carrying the exit code
// and captured output/error streams.
public static String executeDockerCommand(DockerCommand dockerCommand,
String containerId, Map<String, String> env,
PrivilegedOperationExecutor privilegedOperationExecutor,
boolean disableFailureLogging, Context nmContext)
throws ContainerExecutionException {
PrivilegedOperation dockerOp = dockerCommand.preparePrivilegedOperation(
dockerCommand, containerId, env, nmContext);
// Callers that expect (and handle) failures can suppress error logging to avoid noise.
if (disableFailureLogging) {
dockerOp.disableFailureLogging();
}
LOG.debug("Running docker command: {}", dockerCommand);
try {
String result = privilegedOperationExecutor
.executePrivilegedOperation(null, dockerOp, null,
env, true, false);
// Normalize output: strip surrounding whitespace/newlines from the CLI response.
if (result != null && !result.isEmpty()) {
result = result.trim();
}
return result;
} catch (PrivilegedOperationException e) {
// NOTE(review): e is not chained as the cause; only its fields are copied into the
// new exception -- the original stack trace is lost unless the ctor chains it.
throw new ContainerExecutionException("Docker operation failed",
e.getExitCode(), e.getOutput(), e.getErrorOutput());
}
} | @Test
public void testExecuteDockerLoad() throws Exception {
DockerLoadCommand dockerCommand =
new DockerLoadCommand(MOCK_LOCAL_IMAGE_NAME);
DockerCommandExecutor.executeDockerCommand(dockerCommand, MOCK_CONTAINER_ID,
env, mockExecutor, false, nmContext);
List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
.capturePrivilegedOperations(mockExecutor, 1, true);
List<String> dockerCommands = getValidatedDockerCommands(ops);
assertEquals(1, ops.size());
assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
ops.get(0).getOperationType().name());
assertEquals(3, dockerCommands.size());
assertEquals("[docker-command-execution]", dockerCommands.get(0));
assertEquals(" docker-command=load", dockerCommands.get(1));
assertEquals(" image=" + MOCK_LOCAL_IMAGE_NAME, dockerCommands.get(2));
} |
@Override
// Resolves a host to a single address by delegating to addresses(...) and taking the first
// entry (presumably firstAddress yields null when there are no entries -- confirm there).
public InetAddress address(String inetHost, ResolvedAddressTypes resolvedAddressTypes) {
return firstAddress(addresses(inetHost, resolvedAddressTypes));
} | @Test
public void shouldRefreshHostsFileContentAfterRefreshInterval() throws Exception {
Map<String, List<InetAddress>> v4Addresses = Maps.newHashMap(LOCALHOST_V4_ADDRESSES);
Map<String, List<InetAddress>> v6Addresses = Maps.newHashMap(LOCALHOST_V6_ADDRESSES);
DefaultHostsFileEntriesResolver resolver =
new DefaultHostsFileEntriesResolver(givenHostsParserWith(v4Addresses, v6Addresses), /*nanos*/1);
String newHost = UUID.randomUUID().toString();
InetAddress address = resolver.address(newHost, ResolvedAddressTypes.IPV6_ONLY);
assertNull(address);
/*let refreshIntervalNanos = 1 elapse*/
Thread.sleep(1);
v4Addresses.put(newHost, Collections.<InetAddress>singletonList(NetUtil.LOCALHOST4));
v6Addresses.put(newHost, Collections.<InetAddress>singletonList(NetUtil.LOCALHOST6));
assertEquals(NetUtil.LOCALHOST4, resolver.address(newHost, ResolvedAddressTypes.IPV4_ONLY));
assertEquals(NetUtil.LOCALHOST6, resolver.address(newHost, ResolvedAddressTypes.IPV6_ONLY));
} |
// Parses the markdown lines into a two-level map: H1 heading -> (H2 heading -> body text).
// Keys are case-insensitive. Driven by a small state machine (START / PARSED_H1 / PARSED_H2)
// fed one item at a time by parseOneItem(), which also sets headLevel.
public Map<String, Map<String, String>> parse() throws UserException {
while (nextToRead < lines.size()) {
Map.Entry<String, String> keyValue = parseOneItem();
if (keyValue == null) {
// Parse over!
continue;
}
switch (state) {
case START:
if (headLevel == 1) {
// First H1 opens the first document.
head = keyValue.getKey();
keyValues = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER);
state = ParseState.PARSED_H1;
} else {
// State error
throw new UserException("Head first read is not h1.");
}
break;
case PARSED_H1:
if (headLevel == 1) {
// Empty document, step over, do nothing
documents.put(head, keyValues);
head = keyValue.getKey();
keyValues = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER);
} else if (headLevel == 2) {
keyValues.put(keyValue.getKey(), keyValue.getValue());
state = ParseState.PARSED_H2;
} else {
throw new UserException("Unknown head level.");
}
break;
case PARSED_H2:
if (headLevel == 1) {
// One document read over.
documents.put(head, keyValues);
head = keyValue.getKey();
keyValues = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER);
} else if (headLevel == 2) {
keyValues.put(keyValue.getKey(), keyValue.getValue());
} else {
//Ignore headlevel greater than 2 instead of throwing a exception
//State error
//throw new UserException("Unknown head level when parsing head level(2)");
}
break;
default:
// State error
throw new UserException("Unknown parse state.");
}
}
// Flush the final in-progress document (the loop only flushes on seeing the next H1).
if (head != null) {
documents.put(head, keyValues);
}
return documents;
} | @Test
public void testMultiHeadLevel() throws UserException {
List<String> lines = Lists.newArrayList();
lines.add("# SHOW TABLES");
lines.add("## name");
lines.add(" SHOW TABLES");
lines.add("## description");
lines.add("###Syntax");
lines.add("SYNTAX:\n\tSHOW TABLES [FROM] database");
lines.add("####Parameter");
lines.add(">table_name");
lines.add("## example");
lines.add("show tables;");
lines.add("### Exam1");
lines.add("exam1");
lines.add("## keyword");
lines.add("SHOW, TABLES");
lines.add("## url");
lines.add("http://www.baidu.com");
MarkDownParser parser = new MarkDownParser(lines);
Map<String, Map<String, String>> map = parser.parse();
Assert.assertNotNull(map.get("SHOW TABLES"));
Assert.assertEquals(" SHOW TABLES\n", map.get("SHOW TABLES").get("name"));
Assert.assertEquals("Syntax\nSYNTAX:\n\tSHOW TABLES [FROM] database\nParameter\n>table_name\n",
map.get("SHOW TABLES").get("description"));
Assert.assertEquals("show tables;\n Exam1\nexam1\n", map.get("SHOW TABLES").get("example"));
Assert.assertEquals("SHOW, TABLES\n", map.get("SHOW TABLES").get("keyword"));
Assert.assertEquals("http://www.baidu.com\n", map.get("SHOW TABLES").get("url"));
} |
// URL-encodes a SAS token value via Camel's query-string encoder, then post-processes:
// '+' must be percent-encoded because the signature ("sig") is base64, while "%3A" is
// restored to ':' because the expiry ("se") uses a time separator.
static String encodeTokenValue(String value) throws URISyntaxException {
// Encode as the single query pair "x=<value>" and strip the leading "x=" (2 chars).
return URISupport.createQueryString(Collections.singletonMap("x", value)).substring(2)
.replace("+", "%2B") // sig is base64
.replace("%3A", ":"); // se has time separator
} | @Test
void encodeTokenValueShouldEncodeBase64PlusSlashAndPadding() throws Exception {
// e.g. for the sig base64 param on SAS token the encoding style must encode '+', '/', '='
assertEquals("%2B%2Fa%3D", FilesURIStrings.encodeTokenValue("+/a="));
} |
// Replays a Myers diff path between the two line lists (from the end backwards) and returns,
// for each line of `right` (the report), the 1-based matching line number in `left` (the
// database), or 0 (the int default) where no match exists.
public int[] findMatchingLines(List<String> left, List<String> right) {
int[] index = new int[right.size()];
// Cursors start one past the last line and are decremented while walking the path backwards.
int dbLine = left.size();
int reportLine = right.size();
try {
PathNode node = new MyersDiff<String>().buildPath(left, right);
while (node.prev != null) {
PathNode prevNode = node.prev;
if (!node.isSnake()) {
// additions
reportLine -= (node.j - prevNode.j);
// removals
dbLine -= (node.i - prevNode.i);
} else {
// matches: a snake segment pairs up equal lines one by one
for (int i = node.i; i > prevNode.i; i--) {
index[reportLine - 1] = dbLine;
reportLine--;
dbLine--;
}
}
node = prevNode;
}
} catch (DifferentiationFailedException e) {
// Best effort: log and return whatever was mapped so far (possibly all zeros).
LOG.error("Error finding matching lines", e);
return index;
}
return index;
} | @Test
public void shouldIgnoreDeletedLinesAtTheStartOfTheFile() {
List<String> database = new ArrayList<>();
database.add("line - 0");
database.add("line - 1");
database.add("line - 2");
database.add("line - 3");
List<String> report = new ArrayList<>();
report.add("line - 2");
report.add("line - 3");
int[] diff = new SourceLinesDiffFinder().findMatchingLines(database, report);
assertThat(diff).containsExactly(3, 4);
} |
@Override
// Acknowledges a batch by committing only the highest valid message-queue offset --
// presumably journal offsets are cumulative, so the max covers the whole batch (confirm
// against doAcknowledge / the journal implementation).
public void acknowledge(List<? extends Acknowledgeable> messages) {
@SuppressWarnings("ConstantConditions")
final Optional<Long> max =
messages.stream()
.map(Acknowledgeable::getMessageQueueId)
// Drop null / non-Long ids (see isValidMessageQueueId) before the cast.
.filter(this::isValidMessageQueueId)
.map(Long.class::cast)
.max(Long::compare);
max.ifPresent(this::doAcknowledge);
// NOTE(review): the metric counts all messages, including those filtered out above.
metrics.acknowledgedMessages().mark(messages.size());
} | @Test
void acknowledgeMessages(MessageFactory messageFactory) {
final Message firstMessage = messageFactory.createMessage("message", "source", DateTime.now(UTC));
firstMessage.setMessageQueueId(1L);
final Message nullOffsetMessage = messageFactory.createMessage("message", "source", DateTime.now(UTC));
final Message secondMessage = messageFactory.createMessage("message", "source", DateTime.now(UTC));
secondMessage.setMessageQueueId(2L);
final Message wrongOffsetTypeMessage = messageFactory.createMessage("message", "source", DateTime.now(UTC));
wrongOffsetTypeMessage.setMessageQueueId("foo");
acknowledger.acknowledge(ImmutableList.of(firstMessage, nullOffsetMessage, secondMessage, wrongOffsetTypeMessage));
verify(kafkaJournal).markJournalOffsetCommitted(2L);
} |
// Renders a JobId in its canonical toString() form.
public static String toString(JobId jid) {
return jid.toString();
} | @Test
@Timeout(120000)
public void testTaskIDtoString() {
TaskId tid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class);
tid.setJobId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class));
tid.getJobId().setAppId(ApplicationId.newInstance(0, 0));
tid.setTaskType(TaskType.MAP);
TaskType type = tid.getTaskType();
System.err.println(type);
type = TaskType.REDUCE;
System.err.println(type);
System.err.println(tid.getTaskType());
assertEquals("task_0_0000_m_000000", MRApps.toString(tid));
tid.setTaskType(TaskType.REDUCE);
assertEquals("task_0_0000_r_000000", MRApps.toString(tid));
} |
@Override
// Returns a stage that runs `action` only after both this stage and `other` complete:
// the other stage is adapted to a ParSeq Task, joined with Task.par, and the action is
// chained via flatMap so it executes after both parents finish.
public ParSeqBasedCompletionStage<Void> runAfterBoth(CompletionStage<?> other, Runnable action)
{
Task<?> that = getOrGenerateTaskFromStage(other);
return nextStageByComposingTask(Task.par(_task, that).flatMap("runAfterBoth", t -> Task.action(action::run)));
} | @Test public void testRunAfterBoth() throws Exception {
CompletionStage<String> completionStage1 = createTestStage(TESTVALUE1);
CompletionStage<String> completionStage2 = createTestStage(TESTVALUE2);
Runnable runnable = mock(Runnable.class);
finish(completionStage1.runAfterBoth(completionStage2, runnable));
verify(runnable, times(1)).run();
} |
/**
 * Rethrows {@code cause} with as much type fidelity as possible: an IOException,
 * RuntimeException or Error is propagated unchanged; any other throwable is wrapped
 * in a new IOException that keeps the original message and cause chain.
 */
public static void rethrowIOException(Throwable cause)
throws IOException {
// The three propagated categories are mutually exclusive classes, so guard-style
// checks behave identically to the original else-if chain.
if (cause instanceof IOException) {
throw (IOException) cause;
}
if (cause instanceof RuntimeException) {
throw (RuntimeException) cause;
}
if (cause instanceof Error) {
throw (Error) cause;
}
// Fallback: wrap, preserving the message and the full cause for diagnostics.
throw new IOException(cause.getMessage(), cause);
} | @Test
public void testRethrowOtherExceptionAsIOException() throws IOException {
Exception e = new Exception("test");
try {
rethrowIOException(e);
fail("Should rethrow IOException");
} catch (IOException ioe) {
assertEquals("test", ioe.getMessage());
assertSame(e, ioe.getCause());
}
} |
// Parses `svn log --xml` output into Modification objects; any parse failure (malformed
// XML, unexpected structure) is escalated via bomb(...) with the raw output for context.
public List<Modification> parse(String svnLogOutput, String path, SAXBuilder builder) {
try {
Document document = builder.build(new StringReader(svnLogOutput));
return parseDOMTree(document, path);
} catch (Exception e) {
throw bomb("Unable to parse svn log output: " + svnLogOutput, e);
}
} | @Test
public void shouldParseLogWithEmptyRevision() {
SvnLogXmlParser parser = new SvnLogXmlParser();
List<Modification> materialRevisions = parser.parse("""
<?xml version="1.0"?>
<log>
<logentry
revision="2">
</logentry>
<logentry
revision="3">
<author>cceuser</author>
<date>2008-03-11T07:52:41.162075Z</date>
<paths>
<path
action="A">/trunk/revision3.txt</path>
</paths>
</logentry>
</log>""", "", new SAXBuilder());
assertThat(materialRevisions.size()).isEqualTo(1);
Modification mod = materialRevisions.get(0);
assertThat(mod.getRevision()).isEqualTo("3");
assertThat(mod.getComment()).isNull();
} |
@Override
// Password check for a PostgreSQL user: an empty/absent stored password matches anything
// (open access); otherwise the supplied password must match exactly.
// NOTE(review): String.equals is not constant-time; acceptable only if timing attacks are
// out of scope here -- confirm.
public boolean authenticate(final ShardingSphereUser user, final Object[] authInfo) {
String password = (String) authInfo[0];
return Strings.isNullOrEmpty(user.getPassword()) || user.getPassword().equals(password);
} | @Test
void assertAuthenticateFailed() {
assertFalse(new PostgreSQLPasswordAuthenticator().authenticate(new ShardingSphereUser("root", "password", ""), new Object[]{"wrong", null}));
} |
// Queues cleanup tasks for internal topics whose names start with one of the given query
// application ids. Topic-listing failures are logged and the cleanup pass is skipped.
public void cleanupOrphanedInternalTopics(
final ServiceContext serviceContext,
final Set<String> queryApplicationIds
) {
final KafkaTopicClient topicClient = serviceContext.getTopicClient();
final Set<String> topicNames;
try {
topicNames = topicClient.listTopicNames();
} catch (KafkaResponseGetFailedException e) {
// Best effort: without the topic list we cannot determine orphans, so bail out.
LOG.error("Couldn't fetch topic names", e);
return;
}
// Find any transient query topics
final Set<String> orphanedQueryApplicationIds = topicNames.stream()
.map(topicName -> queryApplicationIds.stream().filter(topicName::startsWith).findFirst())
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.toSet());
for (final String queryApplicationId : orphanedQueryApplicationIds) {
cleanupService.addCleanupTask(
new QueryCleanupService.QueryCleanupTask(
serviceContext,
queryApplicationId,
Optional.empty(),
true,
// Fall back to the Streams default state dir when none is configured.
ksqlConfig.getKsqlStreamConfigProps()
.getOrDefault(
StreamsConfig.STATE_DIR_CONFIG,
StreamsConfig.configDef().defaultValues().get(StreamsConfig.STATE_DIR_CONFIG))
.toString(),
ksqlConfig.getString(KsqlConfig.KSQL_SERVICE_ID_CONFIG),
ksqlConfig.getString(KsqlConfig.KSQL_PERSISTENT_QUERY_NAME_PREFIX_CONFIG)));
}
} | @Test
public void shouldCleanup_allApplicationIds() {
// Given
when(topicClient.listTopicNames()).thenReturn(ImmutableSet.of(TOPIC1, TOPIC2, TOPIC3));
// When
cleaner.cleanupOrphanedInternalTopics(serviceContext, ImmutableSet.of(APP_ID_1, APP_ID_2));
// Then
verify(queryCleanupService, times(2)).addCleanupTask(taskCaptor.capture());
assertThat(taskCaptor.getAllValues().get(0).getAppId(), is(APP_ID_1));
assertThat(taskCaptor.getAllValues().get(1).getAppId(), is(APP_ID_2));
} |
@Override
// Bulk-indexes all authorization rows into the scopes derived from the index types that
// have not been initialized yet.
public void indexOnStartup(Set<IndexType> uninitializedIndexTypes) {
// TODO do not load everything in memory. Db rows should be scrolled.
List<IndexPermissions> authorizations = getAllAuthorizations();
Stream<AuthorizationScope> scopes = getScopes(uninitializedIndexTypes);
index(authorizations, scopes, Size.LARGE);
} | @Test
public void indexOnStartup_grants_access_on_many_projects() {
UserDto user1 = db.users().insertUser();
UserDto user2 = db.users().insertUser();
ProjectDto project = null;
for (int i = 0; i < 10; i++) {
project = createAndIndexPrivateProject();
db.users().insertProjectPermissionOnUser(user1, USER, project);
}
indexOnStartup();
verifyAnyoneNotAuthorized(project);
verifyAuthorized(project, user1);
verifyNotAuthorized(project, user2);
} |
@Override
// Tokenizes the input string and records observability metrics: the resulting sequence
// length and the wall-clock embedding latency.
public List<Integer> embed(String s, Context context) {
var start = System.nanoTime();
var tokens = tokenizer.embed(s, context);
runtime.sampleSequenceLength(tokens.size(), context);
// nanoTime delta converted to fractional milliseconds.
runtime.sampleEmbeddingLatency((System.nanoTime() - start)/1_000_000d, context);
return tokens;
} | @Test
public void testThatWrongTensorTypeThrows() {
var context = new Embedder.Context("schema.indexing");
String input = "This is a test";
assertThrows(IllegalArgumentException.class, () -> {
// throws because the target tensor type is mapped
embedder.embed(input, context, TensorType.fromSpec(("tensor<float>(x{})")));
});
assertThrows(IllegalArgumentException.class, () -> {
// throws because the target tensor is 0d
embedder.embed(input, context, TensorType.fromSpec(("tensor<float>(x[0]")));
});
assertThrows(IllegalArgumentException.class, () -> {
// throws because the target tensor is 2d
embedder.embed(input, context, TensorType.fromSpec(("tensor<float>(x{}, y[2])")));
});
} |
@Override
// Thin adapter: exposes the underlying map's local statistics unchanged.
public LocalMapStats getLocalMapStats() {
return map.getLocalMapStats();
} | @Test
public void testGetLocalMapStats() {
assertNotNull(adapter.getLocalMapStats());
assertEquals(0, adapter.getLocalMapStats().getOwnedEntryCount());
adapter.put(23, "value-23");
assertEquals(1, adapter.getLocalMapStats().getOwnedEntryCount());
} |
@Override
// Rack-aware standby assignment: for every stateful task, distribute its standby replicas
// to clients whose values for the configured rack-awareness tags differ from the client
// hosting the active task. Replicas that cannot satisfy the tag constraints are assigned
// to the least-loaded clients afterwards.
public boolean assign(final Map<ProcessId, ClientState> clients,
final Set<TaskId> allTaskIds,
final Set<TaskId> statefulTaskIds,
final AssignmentConfigs configs) {
final int numStandbyReplicas = configs.numStandbyReplicas();
final Set<String> rackAwareAssignmentTags = new HashSet<>(tagsFunction.apply(configs));
final Map<TaskId, Integer> tasksToRemainingStandbys = computeTasksToRemainingStandbys(
numStandbyReplicas,
statefulTaskIds
);
// Per-tag statistics: tag key -> observed values, and (key,value) -> clients carrying it.
final Map<String, Set<String>> tagKeyToValues = new HashMap<>();
final Map<TagEntry, Set<ProcessId>> tagEntryToClients = new HashMap<>();
fillClientsTagStatistics(clients, tagEntryToClients, tagKeyToValues);
final ConstrainedPrioritySet standbyTaskClientsByTaskLoad = createLeastLoadedPrioritySetConstrainedByAssignedTask(clients);
final Map<TaskId, ProcessId> pendingStandbyTasksToClientId = new HashMap<>();
for (final TaskId statefulTaskId : statefulTaskIds) {
for (final Map.Entry<ProcessId, ClientState> entry : clients.entrySet()) {
final ProcessId clientId = entry.getKey();
final ClientState clientState = entry.getValue();
// Only the client running the active task anchors this task's standby placement.
if (clientState.activeTasks().contains(statefulTaskId)) {
assignStandbyTasksToClientsWithDifferentTags(
numStandbyReplicas,
standbyTaskClientsByTaskLoad,
statefulTaskId,
clientId,
rackAwareAssignmentTags,
clients,
tasksToRemainingStandbys,
tagKeyToValues,
tagEntryToClients,
pendingStandbyTasksToClientId
);
}
}
}
if (!tasksToRemainingStandbys.isEmpty()) {
// Tag constraints could not place every replica; fall back to load-based placement.
assignPendingStandbyTasksToLeastLoadedClients(clients,
numStandbyReplicas,
standbyTaskClientsByTaskLoad,
tasksToRemainingStandbys);
}
// returning false, because standby task assignment will never require a follow-up probing rebalance.
return false;
} | @Test
public void shouldHandleOverlappingTagValuesBetweenDifferentTagKeys() {
final Map<ProcessId, ClientState> clientStates = mkMap(
mkEntry(PID_1, createClientStateWithCapacity(PID_1, 2, mkMap(mkEntry(ZONE_TAG, ZONE_1), mkEntry(CLUSTER_TAG, CLUSTER_1)), TASK_0_0)),
mkEntry(PID_2, createClientStateWithCapacity(PID_2, 2, mkMap(mkEntry(ZONE_TAG, CLUSTER_1), mkEntry(CLUSTER_TAG, CLUSTER_3))))
);
final Set<TaskId> allActiveTasks = findAllActiveTasks(clientStates);
final AssignmentConfigs assignmentConfigs = newAssignmentConfigs(1, ZONE_TAG, CLUSTER_TAG);
standbyTaskAssignor.assign(clientStates, allActiveTasks, allActiveTasks, assignmentConfigs);
assertTotalNumberOfStandbyTasksEqualsTo(clientStates, 1);
assertTrue(
standbyClientsHonorRackAwareness(
TASK_0_0,
clientStates,
singletonList(
mkSet(PID_2)
)
)
);
} |
@Delete(uri = "{namespace}/{id}")
@ExecuteOn(TaskExecutors.IO)
@Operation(tags = {"Flows"}, summary = "Delete a flow")
@ApiResponse(responseCode = "204", description = "On success")
// Deletes the flow identified by the resolved tenant, namespace and id:
// 204 No Content on success, 404 Not Found when the flow does not exist.
public HttpResponse<Void> delete(
@Parameter(description = "The flow namespace") @PathVariable String namespace,
@Parameter(description = "The flow id") @PathVariable String id
) {
Optional<Flow> flow = flowRepository.findById(tenantService.resolveTenant(), namespace, id);
if (flow.isPresent()) {
flowRepository.delete(flow.get());
return HttpResponse.status(HttpStatus.NO_CONTENT);
} else {
return HttpResponse.status(HttpStatus.NOT_FOUND);
}
} | @Test
void commaInSingleLabelsValue() {
String encodedCommaWithinLabel = URLEncoder.encode("project:foo,bar", StandardCharsets.UTF_8);
MutableHttpRequest<Object> searchRequest = HttpRequest
.GET("/api/v1/flows/search?labels=" + encodedCommaWithinLabel);
assertDoesNotThrow(() -> client.toBlocking().retrieve(searchRequest, PagedResults.class));
MutableHttpRequest<Object> exportRequest = HttpRequest
.GET("/api/v1/flows/export/by-query?labels=" + encodedCommaWithinLabel);
assertDoesNotThrow(() -> client.toBlocking().retrieve(exportRequest, byte[].class));
MutableHttpRequest<List<Object>> deleteRequest = HttpRequest
.DELETE("/api/v1/flows/delete/by-query?labels=" + encodedCommaWithinLabel);
assertDoesNotThrow(() -> client.toBlocking().retrieve(deleteRequest, BulkResponse.class));
MutableHttpRequest<List<Object>> disableRequest = HttpRequest
.POST("/api/v1/flows/disable/by-query?labels=" + encodedCommaWithinLabel, List.of());
assertDoesNotThrow(() -> client.toBlocking().retrieve(disableRequest, BulkResponse.class));
MutableHttpRequest<List<Object>> enableRequest = HttpRequest
.POST("/api/v1/flows/enable/by-query?labels=" + encodedCommaWithinLabel, List.of());
assertDoesNotThrow(() -> client.toBlocking().retrieve(enableRequest, BulkResponse.class));
} |
// Converts a single Avro-decoded value into the Java representation expected for the given
// Beam field type, recursing for arrays, maps and nested rows. Nulls are allowed only for
// nullable fields; timestamp handling (millis truncation vs. rejection) is controlled by
// the ConversionOptions.
public static Object convertAvroFormat(
FieldType beamFieldType, Object avroValue, BigQueryUtils.ConversionOptions options) {
TypeName beamFieldTypeName = beamFieldType.getTypeName();
if (avroValue == null) {
if (beamFieldType.getNullable()) {
return null;
} else {
throw new IllegalArgumentException(String.format("Field %s not nullable", beamFieldType));
}
}
switch (beamFieldTypeName) {
case BYTE:
case INT16:
case INT32:
case INT64:
case FLOAT:
case DOUBLE:
case STRING:
case BYTES:
case BOOLEAN:
return convertAvroPrimitiveTypes(beamFieldTypeName, avroValue);
case DATETIME:
// Expecting value in microseconds.
switch (options.getTruncateTimestamps()) {
case TRUNCATE:
return truncateToMillis(avroValue);
case REJECT:
return safeToMillis(avroValue);
default:
throw new IllegalArgumentException(
String.format(
"Unknown timestamp truncation option: %s", options.getTruncateTimestamps()));
}
case DECIMAL:
return convertAvroNumeric(avroValue);
case ARRAY:
return convertAvroArray(beamFieldType, avroValue, options);
case LOGICAL_TYPE:
// Dispatch on the logical-type identifier: civil date/time types first, then the
// SQL timestamp family (subject to the same truncation policy as DATETIME).
LogicalType<?, ?> logicalType = beamFieldType.getLogicalType();
assert logicalType != null;
String identifier = logicalType.getIdentifier();
if (SqlTypes.DATE.getIdentifier().equals(identifier)) {
return convertAvroDate(avroValue);
} else if (SqlTypes.TIME.getIdentifier().equals(identifier)) {
return convertAvroTime(avroValue);
} else if (SqlTypes.DATETIME.getIdentifier().equals(identifier)) {
return convertAvroDateTime(avroValue);
} else if (SQL_DATE_TIME_TYPES.contains(identifier)) {
switch (options.getTruncateTimestamps()) {
case TRUNCATE:
return truncateToMillis(avroValue);
case REJECT:
return safeToMillis(avroValue);
default:
throw new IllegalArgumentException(
String.format(
"Unknown timestamp truncation option: %s", options.getTruncateTimestamps()));
}
} else if (logicalType instanceof PassThroughLogicalType) {
// Pass-through types convert as their base type.
return convertAvroFormat(logicalType.getBaseType(), avroValue, options);
} else {
throw new RuntimeException("Unknown logical type " + identifier);
}
case ROW:
Schema rowSchema = beamFieldType.getRowSchema();
if (rowSchema == null) {
throw new IllegalArgumentException("Nested ROW missing row schema");
}
GenericData.Record record = (GenericData.Record) avroValue;
return toBeamRow(record, rowSchema, options);
case MAP:
return convertAvroRecordToMap(beamFieldType, avroValue, options);
default:
throw new RuntimeException(
"Does not support converting unknown type value: " + beamFieldTypeName);
}
} | @Test
public void testMicroPrecisionDateTimeType() {
LocalDateTime dt = LocalDateTime.parse("2020-06-04T12:34:56.789876");
assertThat(
BigQueryUtils.convertAvroFormat(
FieldType.logicalType(SqlTypes.DATETIME), new Utf8(dt.toString()), REJECT_OPTIONS),
equalTo(dt));
} |
/**
 * Performs a reverse lookup of the IP portion of an "ip[:port]" string.
 *
 * @param ipPort string of the form "ip" or "ip:port"; must match ipPortPattern
 * @return the resolved host name, or null when the input is null/malformed or the
 *         reverse lookup fails
 */
public static String getHostNameOfIP(String ipPort) {
if (null == ipPort || !ipPortPattern.matcher(ipPort).matches()) {
return null;
}
try {
// Strip an optional ":port" suffix. Reuse the already-computed colon index instead of
// scanning the string a second time (the original recomputed indexOf(':') here).
int colonIdx = ipPort.indexOf(':');
String ip = (-1 == colonIdx) ? ipPort
: ipPort.substring(0, colonIdx);
return InetAddress.getByName(ip).getHostName();
} catch (UnknownHostException e) {
// Unresolvable address: the contract is to return null rather than propagate.
return null;
}
} | @Test
public void testGetHostNameOfIP() {
assertNull(NetUtils.getHostNameOfIP(null));
assertNull(NetUtils.getHostNameOfIP(""));
assertNull(NetUtils.getHostNameOfIP("crazytown"));
assertNull(NetUtils.getHostNameOfIP("127.0.0.1:")); // no port
assertNull(NetUtils.getHostNameOfIP("127.0.0.1:-1")); // bogus port
assertNull(NetUtils.getHostNameOfIP("127.0.0.1:A")); // bogus port
assertNotNull(NetUtils.getHostNameOfIP("127.0.0.1"));
assertNotNull(NetUtils.getHostNameOfIP("127.0.0.1:1"));
} |
@Override
// Maps a SeaTunnel Column back to a PostgreSQL column definition (column type, data type,
// precision/scale). Out-of-range decimal/time/timestamp precision and scale values are
// clamped to PostgreSQL limits with a warning; unsupported SQL types raise a connector
// conversion error.
public BasicTypeDefine reconvert(Column column) {
BasicTypeDefine.BasicTypeDefineBuilder builder =
BasicTypeDefine.builder()
.name(column.getName())
.nullable(column.isNullable())
.comment(column.getComment())
.defaultValue(column.getDefaultValue());
switch (column.getDataType().getSqlType()) {
case BOOLEAN:
builder.columnType(PG_BOOLEAN);
builder.dataType(PG_BOOLEAN);
break;
case TINYINT:
case SMALLINT:
builder.columnType(PG_SMALLINT);
builder.dataType(PG_SMALLINT);
break;
case INT:
builder.columnType(PG_INTEGER);
builder.dataType(PG_INTEGER);
break;
case BIGINT:
builder.columnType(PG_BIGINT);
builder.dataType(PG_BIGINT);
break;
case FLOAT:
builder.columnType(PG_REAL);
builder.dataType(PG_REAL);
break;
case DOUBLE:
builder.columnType(PG_DOUBLE_PRECISION);
builder.dataType(PG_DOUBLE_PRECISION);
break;
case DECIMAL:
// Money columns round-trip as-is; everything else becomes numeric(p,s) with
// precision/scale clamped into PostgreSQL's supported range.
if (column.getSourceType() != null
&& column.getSourceType().equalsIgnoreCase(PG_MONEY)) {
builder.columnType(PG_MONEY);
builder.dataType(PG_MONEY);
} else {
DecimalType decimalType = (DecimalType) column.getDataType();
long precision = decimalType.getPrecision();
int scale = decimalType.getScale();
if (precision <= 0) {
precision = DEFAULT_PRECISION;
scale = DEFAULT_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is precision less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (precision > MAX_PRECISION) {
scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
precision = MAX_PRECISION;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum precision of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_PRECISION,
precision,
scale);
}
if (scale < 0) {
scale = 0;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is scale less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (scale > MAX_SCALE) {
scale = MAX_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_SCALE,
precision,
scale);
}
builder.columnType(String.format("%s(%s,%s)", PG_NUMERIC, precision, scale));
builder.dataType(PG_NUMERIC);
builder.precision(precision);
builder.scale(scale);
}
break;
case BYTES:
builder.columnType(PG_BYTEA);
builder.dataType(PG_BYTEA);
break;
case STRING:
// Bounded strings within varchar limits map to varchar(n); unbounded or oversized
// strings map to text.
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.columnType(PG_TEXT);
builder.dataType(PG_TEXT);
} else if (column.getColumnLength() <= MAX_VARCHAR_LENGTH) {
builder.columnType(
String.format("%s(%s)", PG_VARCHAR, column.getColumnLength()));
builder.dataType(PG_VARCHAR);
} else {
builder.columnType(PG_TEXT);
builder.dataType(PG_TEXT);
}
break;
case DATE:
builder.columnType(PG_DATE);
builder.dataType(PG_DATE);
break;
case TIME:
Integer timeScale = column.getScale();
if (timeScale != null && timeScale > MAX_TIME_SCALE) {
timeScale = MAX_TIME_SCALE;
log.warn(
"The time column {} type time({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to time({})",
column.getName(),
column.getScale(),
MAX_SCALE,
timeScale);
}
if (timeScale != null && timeScale > 0) {
builder.columnType(String.format("%s(%s)", PG_TIME, timeScale));
} else {
builder.columnType(PG_TIME);
}
builder.dataType(PG_TIME);
builder.scale(timeScale);
break;
case TIMESTAMP:
Integer timestampScale = column.getScale();
if (timestampScale != null && timestampScale > MAX_TIMESTAMP_SCALE) {
timestampScale = MAX_TIMESTAMP_SCALE;
log.warn(
"The timestamp column {} type timestamp({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to timestamp({})",
column.getName(),
column.getScale(),
MAX_TIMESTAMP_SCALE,
timestampScale);
}
if (timestampScale != null && timestampScale > 0) {
builder.columnType(String.format("%s(%s)", PG_TIMESTAMP, timestampScale));
} else {
builder.columnType(PG_TIMESTAMP);
}
builder.dataType(PG_TIMESTAMP);
builder.scale(timestampScale);
break;
case ARRAY:
// Arrays map element-wise to the corresponding PostgreSQL array type; unsupported
// element types raise a conversion error.
ArrayType arrayType = (ArrayType) column.getDataType();
SeaTunnelDataType elementType = arrayType.getElementType();
switch (elementType.getSqlType()) {
case BOOLEAN:
builder.columnType(PG_BOOLEAN_ARRAY);
builder.dataType(PG_BOOLEAN_ARRAY);
break;
case TINYINT:
case SMALLINT:
builder.columnType(PG_SMALLINT_ARRAY);
builder.dataType(PG_SMALLINT_ARRAY);
break;
case INT:
builder.columnType(PG_INTEGER_ARRAY);
builder.dataType(PG_INTEGER_ARRAY);
break;
case BIGINT:
builder.columnType(PG_BIGINT_ARRAY);
builder.dataType(PG_BIGINT_ARRAY);
break;
case FLOAT:
builder.columnType(PG_REAL_ARRAY);
builder.dataType(PG_REAL_ARRAY);
break;
case DOUBLE:
builder.columnType(PG_DOUBLE_PRECISION_ARRAY);
builder.dataType(PG_DOUBLE_PRECISION_ARRAY);
break;
case BYTES:
builder.columnType(PG_BYTEA);
builder.dataType(PG_BYTEA);
break;
case STRING:
builder.columnType(PG_TEXT_ARRAY);
builder.dataType(PG_TEXT_ARRAY);
break;
default:
throw CommonError.convertToConnectorTypeError(
DatabaseIdentifier.POSTGRESQL,
elementType.getSqlType().name(),
column.getName());
}
break;
default:
throw CommonError.convertToConnectorTypeError(
DatabaseIdentifier.POSTGRESQL,
column.getDataType().getSqlType().name(),
column.getName());
}
return builder.build();
} | @Test
public void testReconvertFloat() {
Column column =
PhysicalColumn.builder().name("test").dataType(BasicType.FLOAT_TYPE).build();
BasicTypeDefine typeDefine = PostgresTypeConverter.INSTANCE.reconvert(column);
Assertions.assertEquals(column.getName(), typeDefine.getName());
Assertions.assertEquals(PostgresTypeConverter.PG_REAL, typeDefine.getColumnType());
Assertions.assertEquals(PostgresTypeConverter.PG_REAL, typeDefine.getDataType());
} |
// Translates a portable (proto) Trigger definition into its runner-side TriggerStateMachine
// equivalent. Composite triggers recurse via stateMachinesForTriggers / stateMachineForTrigger.
public static TriggerStateMachine stateMachineForTrigger(RunnerApi.Trigger trigger) {
switch (trigger.getTriggerCase()) {
case AFTER_ALL:
return AfterAllStateMachine.of(
stateMachinesForTriggers(trigger.getAfterAll().getSubtriggersList()));
case AFTER_ANY:
return AfterFirstStateMachine.of(
stateMachinesForTriggers(trigger.getAfterAny().getSubtriggersList()));
case AFTER_END_OF_WINDOW:
return stateMachineForAfterEndOfWindow(trigger.getAfterEndOfWindow());
case ELEMENT_COUNT:
return AfterPaneStateMachine.elementCountAtLeast(
trigger.getElementCount().getElementCount());
case AFTER_SYNCHRONIZED_PROCESSING_TIME:
return AfterSynchronizedProcessingTimeStateMachine.ofFirstElement();
case DEFAULT:
return DefaultTriggerStateMachine.of();
case NEVER:
return NeverStateMachine.ever();
case ALWAYS:
return ReshuffleTriggerStateMachine.create();
case OR_FINALLY:
return stateMachineForTrigger(trigger.getOrFinally().getMain())
.orFinally(stateMachineForTrigger(trigger.getOrFinally().getFinally()));
case REPEAT:
return RepeatedlyStateMachine.forever(
stateMachineForTrigger(trigger.getRepeat().getSubtrigger()));
case AFTER_EACH:
return AfterEachStateMachine.inOrder(
stateMachinesForTriggers(trigger.getAfterEach().getSubtriggersList()));
case AFTER_PROCESSING_TIME:
return stateMachineForAfterProcessingTime(trigger.getAfterProcessingTime());
case TRIGGER_NOT_SET:
// A trigger message with no variant set is a construction-time error, not a runtime case.
throw new IllegalArgumentException(
String.format("Required field 'trigger' not set on %s", trigger));
default:
throw new IllegalArgumentException(String.format("Unknown trigger type %s", trigger));
}
} | @Test
public void testNeverTranslation() {
RunnerApi.Trigger trigger =
RunnerApi.Trigger.newBuilder()
.setNever(RunnerApi.Trigger.Never.getDefaultInstance())
.build();
checkNotNull(TriggerStateMachines.stateMachineForTrigger(trigger));
// No parameters, so if it doesn't crash, we win!
} |
@Override
public Object getValue(final int columnIndex, final Class<?> type) throws SQLException {
if (boolean.class == type) {
return resultSet.getBoolean(columnIndex);
}
if (byte.class == type) {
return resultSet.getByte(columnIndex);
}
if (short.class == type) {
return resultSet.getShort(columnIndex);
}
if (int.class == type) {
return resultSet.getInt(columnIndex);
}
if (long.class == type) {
return resultSet.getLong(columnIndex);
}
if (float.class == type) {
return resultSet.getFloat(columnIndex);
}
if (double.class == type) {
return resultSet.getDouble(columnIndex);
}
if (String.class == type) {
return resultSet.getString(columnIndex);
}
if (BigDecimal.class == type) {
return resultSet.getBigDecimal(columnIndex);
}
if (byte[].class == type) {
return resultSet.getBytes(columnIndex);
}
if (Date.class == type) {
return resultSet.getDate(columnIndex);
}
if (Time.class == type) {
return resultSet.getTime(columnIndex);
}
if (Timestamp.class == type) {
return resultSet.getTimestamp(columnIndex);
}
if (Blob.class == type) {
return resultSet.getBlob(columnIndex);
}
if (Clob.class == type) {
return resultSet.getClob(columnIndex);
}
if (Array.class == type) {
return resultSet.getArray(columnIndex);
}
return resultSet.getObject(columnIndex);
} | @Test
void assertGetValueByDate() throws SQLException {
ResultSet resultSet = mock(ResultSet.class);
when(resultSet.getDate(1)).thenReturn(new Date(0L));
assertThat(new JDBCStreamQueryResult(resultSet).getValue(1, Date.class), is(new Date(0L)));
} |
public List<R> scanForResourcesInPackage(String packageName, Predicate<String> packageFilter) {
requireValidPackageName(packageName);
requireNonNull(packageFilter, "packageFilter must not be null");
BiFunction<Path, Path, Resource> createResource = createPackageResource(packageName);
List<URI> rootUrisForPackage = getUrisForPackage(getClassLoader(), packageName);
return findResourcesForUris(rootUrisForPackage, packageName, packageFilter, createResource);
} | @Test
void scanForResourcesInPackage() {
String basePackageName = "io.cucumber.core.resource.test";
List<URI> resources = resourceScanner.scanForResourcesInPackage(basePackageName, aPackage -> true);
assertThat(resources, containsInAnyOrder(
URI.create("classpath:io/cucumber/core/resource/test/resource.txt"),
URI.create("classpath:io/cucumber/core/resource/test/other-resource.txt"),
URI.create("classpath:io/cucumber/core/resource/test/spaces%20in%20name%20resource.txt")));
} |
public static void main(String[] args) {
// prepare the objects
var businessDelegate = new BusinessDelegate();
var businessLookup = new BusinessLookup();
businessLookup.setNetflixService(new NetflixService());
businessLookup.setYouTubeService(new YouTubeService());
businessDelegate.setLookupService(businessLookup);
// create the client and use the business delegate
var client = new MobileClient(businessDelegate);
client.playbackMovie("Die Hard 2");
client.playbackMovie("Maradona: The Greatest Ever");
} | @Test
void shouldExecuteApplicationWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
} |
public double getStayDuration() {
return stayDuration;
} | @Test
public void getStayDuration() {
SAExposureConfig saExposureConfig = new SAExposureConfig(1,1,true);
assertEquals(1, saExposureConfig.getStayDuration(), 0.2);
} |
public FEELFnResult<Boolean> invoke(@ParameterName( "point1" ) Comparable point1, @ParameterName( "point2" ) Comparable point2) {
if ( point1 == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point1", "cannot be null"));
}
if ( point2 == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point2", "cannot be null"));
}
try {
boolean result = point1.compareTo( point2 ) > 0;
return FEELFnResult.ofResult( result );
} catch( Exception e ) {
// points are not comparable
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point1", "cannot be compared to point2"));
}
} | @Test
void invokeParamsCantBeCompared() {
FunctionTestUtil.assertResultError( afterFunction.invoke("a", BigDecimal.valueOf(2) ), InvalidParametersEvent.class );
} |
public String registerPCollection(PCollection<?> pCollection) throws IOException {
String existing = pCollectionIds.get(pCollection);
if (existing != null) {
return existing;
}
String uniqueName = uniqify(pCollection.getName(), pCollectionIds.values());
pCollectionIds.put(pCollection, uniqueName);
componentsBuilder.putPcollections(
uniqueName, PCollectionTranslation.toProto(pCollection, this));
return uniqueName;
} | @Test
public void registerPCollection() throws IOException {
PCollection<Long> pCollection = pipeline.apply(GenerateSequence.from(0)).setName("foo");
String id = components.registerPCollection(pCollection);
assertThat(id, equalTo("foo"));
components.toComponents().getPcollectionsOrThrow(id);
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.