focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Translates the Beam {@link Pipeline} into a Flink program.
 *
 * <p>If the pipeline contains any unbounded PCollection the runner is switched to
 * streaming mode. Depending on the (possibly updated) options either a streaming or a
 * batch execution environment and translator is created, the default transform
 * overrides are applied, and finally the pipeline is translated.
 */
public void translate(Pipeline pipeline) {
  this.flinkBatchEnv = null;
  this.flinkStreamEnv = null;
  final boolean hasUnboundedOutput = PipelineTranslationModeOptimizer.hasUnboundedOutput(pipeline);
  if (hasUnboundedOutput) {
    LOG.info("Found unbounded PCollection. Switching to streaming execution.");
    options.setStreaming(true);
  }
  // Staged files need to be set before initializing the execution environments
  prepareFilesToStageForRemoteClusterExecution(options);
  FlinkPipelineTranslator translator;
  if (options.isStreaming() || options.getUseDataStreamForBatch()) {
    this.flinkStreamEnv = FlinkExecutionEnvironments.createStreamExecutionEnvironment(options);
    // Unbounded sources need checkpointing to provide fault tolerance; warn if it is off.
    if (hasUnboundedOutput && !flinkStreamEnv.getCheckpointConfig().isCheckpointingEnabled()) {
      LOG.warn(
          "UnboundedSources present which rely on checkpointing, but checkpointing is disabled.");
    }
    translator =
        new FlinkStreamingPipelineTranslator(flinkStreamEnv, options, options.isStreaming());
    if (!options.isStreaming()) {
      flinkStreamEnv.setRuntimeMode(RuntimeExecutionMode.BATCH);
    }
  } else {
    this.flinkBatchEnv = FlinkExecutionEnvironments.createBatchExecutionEnvironment(options);
    translator = new FlinkBatchPipelineTranslator(flinkBatchEnv, options);
  }
  // Transform replacements need to receive the finalized PipelineOptions
  // including execution mode (batch/streaming) and parallelism.
  pipeline.replaceAll(FlinkTransformOverrides.getDefaultOverrides(options));
  translator.translate(pipeline);
}
// Verifies that a pipeline containing an unbounded source (GenerateSequence) flips the
// runner into streaming mode and therefore applies the streaming transform overrides,
// in particular the CreateStreamingFlinkView override for the CreateView transform.
@Test
public void shouldUseStreamingTransformOverridesWithUnboundedSources() {
  FlinkPipelineOptions options = getDefaultPipelineOptions();
  // no explicit streaming mode set
  options.setRunner(FlinkRunner.class);
  FlinkPipelineExecutionEnvironment flinkEnv = new FlinkPipelineExecutionEnvironment(options);
  Pipeline p = Mockito.spy(Pipeline.create(options));
  // Add unbounded source which will set the streaming mode to true
  p.apply(GenerateSequence.from(0));
  flinkEnv.translate(p);
  ArgumentCaptor<ImmutableList> captor = ArgumentCaptor.forClass(ImmutableList.class);
  Mockito.verify(p).replaceAll(captor.capture());
  ImmutableList<PTransformOverride> overridesList = captor.getValue();
  assertThat(
      overridesList,
      hasItem(
          PTransformOverride.of(
              PTransformMatchers.urnEqualTo(PTransformTranslation.CREATE_VIEW_TRANSFORM_URN),
              CreateStreamingFlinkView.Factory.INSTANCE)));
}
@Override
public int hashCode() {
    // Case-insensitive hash: each textual field is lower-cased so hashCode()
    // stays consistent with a case-insensitive equals() (equal objects must
    // produce equal hash codes). StringUtils.lowerCase is null-safe.
    final HashCodeBuilder builder = new HashCodeBuilder(303, 367);
    builder.append(StringUtils.lowerCase(name));
    builder.append(StringUtils.lowerCase(source));
    builder.append(StringUtils.lowerCase(value));
    builder.append(confidence);
    return builder.toHashCode();
}
// Hash-code contract: Evidence comparison is case-insensitive, so two instances
// differing only in letter case must be equal AND share the same hash code.
@Test public void testHashcodeContract() throws Exception { final Evidence titleCase = new Evidence("Manifest", "Implementation-Title", "Spring Framework", Confidence.HIGH); final Evidence lowerCase = new Evidence("manifest", "implementation-title", "spring framework", Confidence.HIGH); assertThat(titleCase, is(equalTo(lowerCase))); assertThat(titleCase.hashCode(), is(equalTo(lowerCase.hashCode()))); }
// Returns the configured index format version (set elsewhere — not visible in this chunk).
public int getVersion() { return _version; }
// Deserializing {"disabled": true} must set the disabled flag while leaving the
// version at its default value.
@Test public void withDisabledTrue() throws JsonProcessingException { String confStr = "{\"disabled\": true}"; RangeIndexConfig config = JsonUtils.stringToObject(confStr, RangeIndexConfig.class); assertTrue(config.isDisabled(), "Unexpected disabled"); assertEquals(config.getVersion(), RangeIndexConfig.DEFAULT.getVersion(), "Unexpected version"); }
@Override
public String buildContext() {
    final SelectorDO after = (SelectorDO) getAfter();
    final String eventType = StringUtils.lowerCase(getType().getType().toString());
    // No "before" snapshot means a creation-style event: there is nothing to diff,
    // so the message omits the contrast section.
    if (Objects.isNull(getBefore())) {
        return String.format("the namespace [%s] selector [%s] is %s",
                after.getNamespaceId(), after.getName(), eventType);
    }
    // Otherwise append the field-level contrast between before and after.
    return String.format("the namespace [%s] selector [%s] is %s : %s",
            after.getNamespaceId(), after.getName(), eventType, contrast());
}
// When "before" is null the context message must use the short format without the
// contrast section.
@Test void buildContextAndBeforeIsNull() { SelectorChangedEvent selectorChangedEvent = new SelectorChangedEvent(before, null, EventTypeEnum.SELECTOR_CREATE, "test-operator"); SelectorDO after = (SelectorDO) selectorChangedEvent.getAfter(); String expectMsg = String.format("the namespace [%s] selector [%s] is %s", after.getNamespaceId(), after.getName(), StringUtils.lowerCase(selectorChangedEvent.getType().getType().toString())); String actualMsg = selectorChangedEvent.buildContext(); assertEquals(expectMsg, actualMsg); }
/**
 * Builds the tooltip text listing, per combat style, how many more levels are needed
 * to reach the next combat level. A style's line is omitted once its contributing
 * skill(s) are already at the maximum real level.
 */
@VisibleForTesting
String getLevelsUntilTooltip() {
    // grab combat skills from player
    int attackLevel = client.getRealSkillLevel(Skill.ATTACK);
    int strengthLevel = client.getRealSkillLevel(Skill.STRENGTH);
    int defenceLevel = client.getRealSkillLevel(Skill.DEFENCE);
    int hitpointsLevel = client.getRealSkillLevel(Skill.HITPOINTS);
    int magicLevel = client.getRealSkillLevel(Skill.MAGIC);
    int rangeLevel = client.getRealSkillLevel(Skill.RANGED);
    int prayerLevel = client.getRealSkillLevel(Skill.PRAYER);
    // find the needed levels until level up
    int meleeNeed = Experience.getNextCombatLevelMelee(attackLevel, strengthLevel, defenceLevel, hitpointsLevel, magicLevel, rangeLevel, prayerLevel);
    int hpDefNeed = Experience.getNextCombatLevelHpDef(attackLevel, strengthLevel, defenceLevel, hitpointsLevel, magicLevel, rangeLevel, prayerLevel);
    int rangeNeed = Experience.getNextCombatLevelRange(attackLevel, strengthLevel, defenceLevel, hitpointsLevel, magicLevel, rangeLevel, prayerLevel);
    int magicNeed = Experience.getNextCombatLevelMagic(attackLevel, strengthLevel, defenceLevel, hitpointsLevel, magicLevel, rangeLevel, prayerLevel);
    int prayerNeed = Experience.getNextCombatLevelPrayer(attackLevel, strengthLevel, defenceLevel, hitpointsLevel, magicLevel, rangeLevel, prayerLevel);
    // create tooltip string
    StringBuilder sb = new StringBuilder();
    sb.append(ColorUtil.wrapWithColorTag("Next combat level:</br>", COMBAT_LEVEL_COLOUR));
    if ((attackLevel + strengthLevel) < Experience.MAX_REAL_LEVEL * 2) {
        sb.append(meleeNeed).append(" Attack/Strength</br>");
    }
    if ((hitpointsLevel + defenceLevel) < Experience.MAX_REAL_LEVEL * 2) {
        sb.append(hpDefNeed).append(" Defence/Hitpoints</br>");
    }
    if (rangeLevel < Experience.MAX_REAL_LEVEL) {
        sb.append(rangeNeed).append(" Ranged</br>");
    }
    if (magicLevel < Experience.MAX_REAL_LEVEL) {
        sb.append(magicNeed).append(" Magic</br>");
    }
    if (prayerLevel < Experience.MAX_REAL_LEVEL) {
        sb.append(prayerNeed).append(" Prayer");
    }
    return sb.toString();
}
// With Attack/Strength/Hitpoints/Magic/Ranged already maxed at 99, only the
// Defence/Hitpoints and Prayer lines should appear in the tooltip.
@Test public void testGetLevelsUntilTooltip() { when(client.getRealSkillLevel(Skill.ATTACK)).thenReturn(99); when(client.getRealSkillLevel(Skill.STRENGTH)).thenReturn(99); when(client.getRealSkillLevel(Skill.DEFENCE)).thenReturn(97); when(client.getRealSkillLevel(Skill.HITPOINTS)).thenReturn(99); when(client.getRealSkillLevel(Skill.MAGIC)).thenReturn(99); when(client.getRealSkillLevel(Skill.RANGED)).thenReturn(99); when(client.getRealSkillLevel(Skill.PRAYER)).thenReturn(94); assertEquals("<col=ff981f>Next combat level:</br></col>4 Defence/Hitpoints</br>8 Prayer", combatLevelOverlay.getLevelsUntilTooltip()); }
/**
 * Converts an ActiveMQ message into an AMQP {@link EncodedMessage}.
 *
 * <p>Standard JMS headers are mapped onto the AMQP Header/Properties sections, and
 * message properties are routed either to reserved AMQP sections (via the
 * {@code JMS_AMQP_*} prefixed keys) or, by default, to the ApplicationProperties
 * section. The AMQP sections are then encoded in the order mandated by the AMQP
 * message format: Header, DeliveryAnnotations, MessageAnnotations, Properties,
 * ApplicationProperties, body, Footer.
 *
 * @param message the ActiveMQ message to convert; may be null, yielding null
 * @return the encoded AMQP message, or null when the input was null
 * @throws Exception if reading message properties or encoding fails
 */
@Override
public EncodedMessage transform(ActiveMQMessage message) throws Exception {
    if (message == null) {
        return null;
    }
    long messageFormat = 0;
    Header header = null;
    Properties properties = null;
    Map<Symbol, Object> daMap = null;
    Map<Symbol, Object> maMap = null;
    Map<String,Object> apMap = null;
    Map<Object, Object> footerMap = null;
    Section body = convertBody(message);
    // Sections below are created lazily — only messages that need them carry them.
    if (message.isPersistent()) {
        if (header == null) { header = new Header(); }
        header.setDurable(true);
    }
    byte priority = message.getPriority();
    if (priority != Message.DEFAULT_PRIORITY) {
        if (header == null) { header = new Header(); }
        header.setPriority(UnsignedByte.valueOf(priority));
    }
    String type = message.getType();
    if (type != null) {
        if (properties == null) { properties = new Properties(); }
        properties.setSubject(type);
    }
    MessageId messageId = message.getMessageId();
    if (messageId != null) {
        if (properties == null) { properties = new Properties(); }
        properties.setMessageId(getOriginalMessageId(message));
    }
    ActiveMQDestination destination = message.getDestination();
    if (destination != null) {
        if (properties == null) { properties = new Properties(); }
        properties.setTo(destination.getQualifiedName());
        if (maMap == null) { maMap = new HashMap<>(); }
        maMap.put(JMS_DEST_TYPE_MSG_ANNOTATION, destinationType(destination));
    }
    ActiveMQDestination replyTo = message.getReplyTo();
    if (replyTo != null) {
        if (properties == null) { properties = new Properties(); }
        properties.setReplyTo(replyTo.getQualifiedName());
        if (maMap == null) { maMap = new HashMap<>(); }
        maMap.put(JMS_REPLY_TO_TYPE_MSG_ANNOTATION, destinationType(replyTo));
    }
    String correlationId = message.getCorrelationId();
    if (correlationId != null) {
        if (properties == null) { properties = new Properties(); }
        // Fall back to the raw string when the id cannot be parsed into an AMQP id object.
        try {
            properties.setCorrelationId(AMQPMessageIdHelper.INSTANCE.toIdObject(correlationId));
        } catch (AmqpProtocolException e) {
            properties.setCorrelationId(correlationId);
        }
    }
    long expiration = message.getExpiration();
    if (expiration != 0) {
        long ttl = expiration - System.currentTimeMillis();
        // Already-expired messages get a minimal 1ms TTL rather than a negative one.
        if (ttl < 0) {
            ttl = 1;
        }
        if (header == null) { header = new Header(); }
        header.setTtl(new UnsignedInteger((int) ttl));
        if (properties == null) { properties = new Properties(); }
        properties.setAbsoluteExpiryTime(new Date(expiration));
    }
    long timeStamp = message.getTimestamp();
    if (timeStamp != 0) {
        if (properties == null) { properties = new Properties(); }
        properties.setCreationTime(new Date(timeStamp));
    }
    // JMSX Message Properties
    int deliveryCount = message.getRedeliveryCounter();
    if (deliveryCount > 0) {
        if (header == null) { header = new Header(); }
        header.setDeliveryCount(UnsignedInteger.valueOf(deliveryCount));
    }
    String userId = message.getUserID();
    if (userId != null) {
        if (properties == null) { properties = new Properties(); }
        properties.setUserId(new Binary(userId.getBytes(StandardCharsets.UTF_8)));
    }
    String groupId = message.getGroupID();
    if (groupId != null) {
        if (properties == null) { properties = new Properties(); }
        properties.setGroupId(groupId);
    }
    int groupSequence = message.getGroupSequence();
    if (groupSequence > 0) {
        if (properties == null) { properties = new Properties(); }
        properties.setGroupSequence(UnsignedInteger.valueOf(groupSequence));
    }
    final Map<String, Object> entries;
    try {
        entries = message.getProperties();
    } catch (IOException e) {
        throw JMSExceptionSupport.create(e);
    }
    for (Map.Entry<String, Object> entry : entries.entrySet()) {
        String key = entry.getKey();
        Object value = entry.getValue();
        if (key.startsWith(JMS_AMQP_PREFIX)) {
            // Reserved keys steer values into specific AMQP sections instead of
            // the application-properties map.
            if (key.startsWith(NATIVE, JMS_AMQP_PREFIX_LENGTH)) {
                // skip transformer appended properties
                continue;
            } else if (key.startsWith(ORIGINAL_ENCODING, JMS_AMQP_PREFIX_LENGTH)) {
                // skip transformer appended properties
                continue;
            } else if (key.startsWith(MESSAGE_FORMAT, JMS_AMQP_PREFIX_LENGTH)) {
                messageFormat = (long) TypeConversionSupport.convert(entry.getValue(), Long.class);
                continue;
            } else if (key.startsWith(HEADER, JMS_AMQP_PREFIX_LENGTH)) {
                if (header == null) { header = new Header(); }
                continue;
            } else if (key.startsWith(PROPERTIES, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) { properties = new Properties(); }
                continue;
            } else if (key.startsWith(MESSAGE_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
                if (maMap == null) { maMap = new HashMap<>(); }
                String name = key.substring(JMS_AMQP_MESSAGE_ANNOTATION_PREFIX.length());
                maMap.put(Symbol.valueOf(name), value);
                continue;
            } else if (key.startsWith(FIRST_ACQUIRER, JMS_AMQP_PREFIX_LENGTH)) {
                if (header == null) { header = new Header(); }
                header.setFirstAcquirer((boolean) TypeConversionSupport.convert(value, Boolean.class));
                continue;
            } else if (key.startsWith(CONTENT_TYPE, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) { properties = new Properties(); }
                properties.setContentType(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class)));
                continue;
            } else if (key.startsWith(CONTENT_ENCODING, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) { properties = new Properties(); }
                properties.setContentEncoding(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class)));
                continue;
            } else if (key.startsWith(REPLYTO_GROUP_ID, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) { properties = new Properties(); }
                properties.setReplyToGroupId((String) TypeConversionSupport.convert(value, String.class));
                continue;
            } else if (key.startsWith(DELIVERY_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
                if (daMap == null) { daMap = new HashMap<>(); }
                String name = key.substring(JMS_AMQP_DELIVERY_ANNOTATION_PREFIX.length());
                daMap.put(Symbol.valueOf(name), value);
                continue;
            } else if (key.startsWith(FOOTER_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
                if (footerMap == null) { footerMap = new HashMap<>(); }
                String name = key.substring(JMS_AMQP_FOOTER_PREFIX.length());
                footerMap.put(Symbol.valueOf(name), value);
                continue;
            }
        } else if (key.startsWith(AMQ_SCHEDULED_MESSAGE_PREFIX )) {
            // strip off the scheduled message properties
            continue;
        }
        // The property didn't map into any other slot so we store it in the
        // Application Properties section of the message.
        if (apMap == null) { apMap = new HashMap<>(); }
        apMap.put(key, value);
        int messageType = message.getDataStructureType();
        if (messageType == CommandTypes.ACTIVEMQ_MESSAGE) {
            // Type of command to recognize advisory message
            Object data = message.getDataStructure();
            if(data != null) {
                apMap.put("ActiveMqDataStructureType", data.getClass().getSimpleName());
            }
        }
    }
    // Encode the populated sections in the AMQP-mandated order.
    final AmqpWritableBuffer buffer = new AmqpWritableBuffer();
    encoder.setByteBuffer(buffer);
    if (header != null) { encoder.writeObject(header); }
    if (daMap != null) { encoder.writeObject(new DeliveryAnnotations(daMap)); }
    if (maMap != null) { encoder.writeObject(new MessageAnnotations(maMap)); }
    if (properties != null) { encoder.writeObject(properties); }
    if (apMap != null) { encoder.writeObject(new ApplicationProperties(apMap)); }
    if (body != null) { encoder.writeObject(body); }
    if (footerMap != null) { encoder.writeObject(new Footer(footerMap)); }
    return new EncodedMessage(messageFormat, buffer.getArray(), 0, buffer.getArrayLength());
}
// An empty bytes message flagged with AMQP_VALUE_BINARY original-encoding should
// convert to an AmqpValue body holding a zero-length Binary.
@Test public void testConvertEmptyBytesMessageToAmqpMessageWithAmqpValueBody() throws Exception { ActiveMQBytesMessage outbound = createBytesMessage(); outbound.setShortProperty(JMS_AMQP_ORIGINAL_ENCODING, AMQP_VALUE_BINARY); outbound.onSend(); outbound.storeContent(); JMSMappingOutboundTransformer transformer = new JMSMappingOutboundTransformer(); EncodedMessage encoded = transformer.transform(outbound); assertNotNull(encoded); Message amqp = encoded.decode(); assertNotNull(amqp.getBody()); assertTrue(amqp.getBody() instanceof AmqpValue); assertTrue(((AmqpValue) amqp.getBody()).getValue() instanceof Binary); assertEquals(0, ((Binary) ((AmqpValue) amqp.getBody()).getValue()).getLength()); }
// Creates a custom-flow selector (with discovery upstream data) plus a bound rule in
// one call, publishing both through the subscriber; always responds with SUCCESS.
@PostMapping("/plugin/selectorAndRule") public Mono<String> selectorAndRule(@RequestBody final SelectorRuleData selectorRuleData) { SelectorData selectorData = SelectorData.builder() .pluginName(selectorRuleData.getPluginName()) .handle(selectorRuleData.getSelectorHandler()) .conditionList(selectorRuleData.getConditionDataList()) .type(SelectorTypeEnum.CUSTOM_FLOW.getCode()) .build(); SelectorData result = buildDefaultSelectorData(selectorData); subscriber.onSelectorSubscribe(result); saveDiscoveryUpstreamData(result); RuleData ruleData = RuleData.builder() .selectorId(result.getId()) .matchRestful(Boolean.FALSE) .pluginName(selectorRuleData.getPluginName()) .handle(selectorRuleData.getRuleHandler()) .conditionDataList(selectorRuleData.getConditionDataList()) .build(); subscriber.onRuleSubscribe(buildDefaultRuleData(ruleData)); return Mono.just(Constants.SUCCESS); }
// POSTing a SelectorRuleData must return 200 and leave the selector cached under its
// plugin name.
@Test public void testSelectorAndRule() throws Exception { final String testPluginName = "testPluginName"; final SelectorRuleData selectorRuleData = new SelectorRuleData(); selectorRuleData.setPluginName(testPluginName); this.mockMvc .perform(MockMvcRequestBuilders.post("/shenyu/plugin/selectorAndRule") .content(GsonUtils.getGson().toJson(selectorRuleData)) .contentType(MediaType.APPLICATION_JSON)) .andExpect(status().isOk()) .andReturn(); assertThat(baseDataCache.obtainSelectorData(testPluginName)).isNotNull(); }
// Thin HTTP endpoint: exposes the shared configuration as JSON by delegating to the
// config service.
@Operation(summary = "get configs", tags = { SwaggerConfig.SHARED }, operationId = "get_configurations", parameters = {@Parameter(ref = "API-V"), @Parameter(ref = "OS-T"), @Parameter(ref = "APP-V"), @Parameter(ref = "OS-V"), @Parameter(ref = "REL-T")}) @GetMapping(value = "config", produces = "application/json") @ResponseBody public ConfigResponse getConfig() throws SharedServiceClientException { return configService.getConfig(); }
// The controller must delegate exactly once to the config service.
@Test void validateIfCorrectProcessesAreCalledGetConfig() throws SharedServiceClientException { configController.getConfig(); verify(configService, times(1)).getConfig(); }
// Convenience overload: unnamed filter delegates to the named variant with an empty name.
@Override public KStream<K, V> filter(final Predicate<? super K, ? super V> predicate) { return filter(predicate, NamedInternal.empty()); }
// A null predicate must be rejected with an NPE carrying a descriptive message.
@Test public void shouldNotAllowNullPredicateOnFilter() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.filter(null)); assertThat(exception.getMessage(), equalTo("predicate can't be null")); }
// Runs the configured rate-limiting Lua script against Redis for the given id and
// handle, mapping the two returned longs to (allowed, tokensLeft). Redis errors
// degrade to "allowed" (1, -1) so rate limiting fails open; the algorithm callback
// fires on error for cleanup. NOTE(review): results.get(0) == 1L relies on Long
// unboxing of the list element — grounded here by the Flux<List<Long>> type.
@SuppressWarnings("unchecked") public Mono<RateLimiterResponse> isAllowed(final String id, final RateLimiterHandle limiterHandle) { double replenishRate = limiterHandle.getReplenishRate(); double burstCapacity = limiterHandle.getBurstCapacity(); double requestCount = limiterHandle.getRequestCount(); RateLimiterAlgorithm<?> rateLimiterAlgorithm = RateLimiterAlgorithmFactory.newInstance(limiterHandle.getAlgorithmName()); RedisScript<?> script = rateLimiterAlgorithm.getScript(); List<String> keys = rateLimiterAlgorithm.getKeys(id); List<String> scriptArgs = Stream.of(replenishRate, burstCapacity, Instant.now().getEpochSecond(), requestCount).map(String::valueOf).collect(Collectors.toList()); Flux<List<Long>> resultFlux = Singleton.INST.get(ReactiveRedisTemplate.class).execute(script, keys, scriptArgs); return resultFlux.onErrorResume(throwable -> Flux.just(Arrays.asList(1L, -1L))) .reduce(new ArrayList<Long>(), (longs, l) -> { longs.addAll(l); return longs; }).map(results -> { boolean allowed = results.get(0) == 1L; Long tokensLeft = results.get(1); return new RateLimiterResponse(allowed, tokensLeft, keys); }) .doOnError(throwable -> { rateLimiterAlgorithm.callback(rateLimiterAlgorithm.getScript(), keys, scriptArgs); LOG.error("Error occurred while judging if user is allowed by RedisRateLimiter:{}", throwable.getMessage()); }); }
// With the sliding window pre-seeded to zero tokens, the request must be denied and
// the remaining-token count reflect the consumed window.
@Test public void slidingWindowNotAllowedTest() { slidingWindowPreInit(0L, 0L); rateLimiterHandle.setAlgorithmName("slidingWindow"); Mono<RateLimiterResponse> responseMono = redisRateLimiter.isAllowed(DEFAULT_TEST_ID, rateLimiterHandle); StepVerifier.create(responseMono).assertNext(r -> { assertThat(r.getTokensRemaining(), is((long) DEFAULT_TEST_BURST_CAPACITY - 300L)); assertFalse(r.isAllowed()); }).verifyComplete(); }
@Override
public void setVariable(String name, Object value) {
    // Update an existing binding in this scope if present; otherwise delegate to
    // the parent scope. A root context (no parent) always stores locally.
    if (variables.containsKey(name) || parent == null) {
        setVariableLocally(name, value);
    } else {
        parent.setVariable(name, value);
    }
}
// Setting an existing key overwrites it locally; after removal, lookups fall through
// to the parent context's binding.
@Test public void testSetVariable() { ProcessContextImpl context = new ProcessContextImpl(); context.setVariable("key", "value"); context.setVariable("key", "value1"); Assertions.assertEquals("value1", context.getVariable("key")); context.removeVariable("key"); ProcessContextImpl parentContext = new ProcessContextImpl(); parentContext.setVariable("key", "value"); context.setParent(parentContext); Assertions.assertEquals("value", context.getVariable("key")); }
/**
 * Returns the next expression bound from the memo that matches the pattern, or null
 * once the search space is exhausted. Advances the per-group expression cursor on
 * each call and retries until a match is found or only the root index remains.
 */
public OptExpression next() {
    // For logic scan to physical scan, we only need to match once
    if (isPatternWithoutChildren && groupExpressionIndex.get(0) > 0) {
        return null;
    }
    OptExpression expression;
    do {
        this.groupTraceKey = 0;
        // Match with the next groupExpression of the last group node
        int lastNode = this.groupExpressionIndex.size() - 1;
        int lastNodeIndex = this.groupExpressionIndex.get(lastNode);
        this.groupExpressionIndex.set(lastNode, lastNodeIndex + 1);
        expression = match(pattern, groupExpression);
    } while (expression == null && this.groupExpressionIndex.size() != 1);
    nextIdx++;
    return expression;
}
// A join pattern with one concrete scan child and one PATTERN_LEAF should bind twice
// (once per alternative in the first child's group) and then be exhausted.
@Test public void testBinderDepth2Repeat2() { OptExpression expr1 = OptExpression.create(new MockOperator(OperatorType.LOGICAL_JOIN, 0), OptExpression.create(new MockOperator(OperatorType.LOGICAL_OLAP_SCAN, 1)), OptExpression.create(new MockOperator(OperatorType.LOGICAL_OLAP_SCAN, 2))); OptExpression expr2 = OptExpression.create(new MockOperator(OperatorType.LOGICAL_OLAP_SCAN, 3)); OptExpression expr3 = OptExpression.create(new MockOperator(OperatorType.LOGICAL_OLAP_SCAN, 4)); Memo memo = new Memo(); GroupExpression ge = memo.init(expr1); memo.copyIn(ge.inputAt(0), expr2); memo.copyIn(ge.inputAt(1), expr3); Pattern pattern = Pattern.create(OperatorType.LOGICAL_JOIN) .addChildren(Pattern.create(OperatorType.LOGICAL_OLAP_SCAN)) .addChildren(Pattern.create(OperatorType.PATTERN_LEAF)); Binder binder = new Binder(pattern, ge); OptExpression result; result = binder.next(); assertEquals(OperatorType.LOGICAL_JOIN, result.getOp().getOpType()); assertEquals(OperatorType.LOGICAL_OLAP_SCAN, result.inputAt(0).getOp().getOpType()); assertEquals(1, ((MockOperator) result.inputAt(0).getOp()).getValue()); assertEquals(OperatorType.LOGICAL_OLAP_SCAN, result.inputAt(1).getOp().getOpType()); assertEquals(2, ((MockOperator) result.inputAt(1).getOp()).getValue()); result = binder.next(); assertEquals(OperatorType.LOGICAL_JOIN, result.getOp().getOpType()); assertEquals(OperatorType.LOGICAL_OLAP_SCAN, result.inputAt(0).getOp().getOpType()); assertEquals(3, ((MockOperator) result.inputAt(0).getOp()).getValue()); assertEquals(OperatorType.LOGICAL_OLAP_SCAN, result.inputAt(1).getOp().getOpType()); assertEquals(2, ((MockOperator) result.inputAt(1).getOp()).getValue()); assertNull(binder.next()); }
/**
 * Maps a gateway flow rule plus the given parameter index onto a Sentinel
 * ParamFlowRule, also recording the index back on the gateway item.
 * NOTE(review): assumes gatewayRule.getParamItem() is non-null — confirm callers
 * guarantee this before invoking.
 */
static ParamFlowRule applyToParamRule(/*@Valid*/ GatewayFlowRule gatewayRule, int idx) {
    ParamFlowRule paramRule = new ParamFlowRule(gatewayRule.getResource())
        .setCount(gatewayRule.getCount())
        .setGrade(gatewayRule.getGrade())
        .setDurationInSec(gatewayRule.getIntervalSec())
        .setBurstCount(gatewayRule.getBurst())
        .setControlBehavior(gatewayRule.getControlBehavior())
        .setMaxQueueingTimeMs(gatewayRule.getMaxQueueingTimeoutMs())
        .setParamIdx(idx);
    GatewayParamFlowItem gatewayItem = gatewayRule.getParamItem();
    // Apply the current idx to gateway rule item.
    gatewayItem.setIndex(idx);
    // Apply for pattern-based parameters.
    String valuePattern = gatewayItem.getPattern();
    if (valuePattern != null) {
        paramRule.getParamFlowItemList().add(generateNonMatchPassParamItem());
    }
    return paramRule;
}
// Every field of the converted ParamFlowRule must mirror the source gateway rule, and
// the index must be written back onto the gateway param item.
@Test public void testConvertAndApplyToParamRule() { GatewayFlowRule routeRule1 = new GatewayFlowRule("routeId1") .setCount(2) .setIntervalSec(2) .setBurst(2) .setParamItem(new GatewayParamFlowItem() .setParseStrategy(SentinelGatewayConstants.PARAM_PARSE_STRATEGY_CLIENT_IP) ); int idx = 1; ParamFlowRule paramRule = GatewayRuleConverter.applyToParamRule(routeRule1, idx); assertEquals(routeRule1.getResource(), paramRule.getResource()); assertEquals(routeRule1.getCount(), paramRule.getCount(), 0.01); assertEquals(routeRule1.getControlBehavior(), paramRule.getControlBehavior()); assertEquals(routeRule1.getIntervalSec(), paramRule.getDurationInSec()); assertEquals(routeRule1.getBurst(), paramRule.getBurstCount()); assertEquals(idx, (int)paramRule.getParamIdx()); assertEquals(idx, (int)routeRule1.getParamItem().getIndex()); }
/**
 * Appends a batch of control records created by the given creator and returns the
 * offset of the last appended record. Holds the append lock throughout; the
 * allocated buffer is released on any failure before it is handed to the completed
 * batch (which otherwise owns the release).
 *
 * @throws IllegalStateException if a buffer cannot be allocated from the pool
 */
public long appendControlMessages(MemoryRecordsCreator valueCreator) {
    appendLock.lock();
    try {
        ByteBuffer buffer = memoryPool.tryAllocate(maxBatchSize);
        if (buffer != null) {
            try {
                forceDrain();
                MemoryRecords memoryRecords = valueCreator.create(nextOffset, epoch, compression, buffer);
                int numberOfRecords = validateMemoryRecordsAndReturnCount(memoryRecords);
                completed.add(new CompletedBatch<>(nextOffset, numberOfRecords, memoryRecords, memoryPool, buffer));
                nextOffset += numberOfRecords;
            } catch (Exception e) {
                // Release the buffer now since the buffer was not stored in completed for a delayed release
                memoryPool.release(buffer);
                throw e;
            }
        } else {
            throw new IllegalStateException("Could not allocate buffer for the control record");
        }
        return nextOffset - 1;
    } finally {
        appendLock.unlock();
    }
}
// A creator that builds a control batch containing zero records must be rejected
// with IllegalArgumentException by appendControlMessages.
@Test
public void testEmptyControlBatch() {
    int leaderEpoch = 17;
    long baseOffset = 157;
    int lingerMs = 50;
    int maxBatchSize = 512;
    ByteBuffer buffer = ByteBuffer.allocate(maxBatchSize);
    Mockito.when(memoryPool.tryAllocate(maxBatchSize)).thenReturn(buffer);
    BatchAccumulator.MemoryRecordsCreator creator = (offset, epoch, compression, buf) -> {
        long now = 1234;
        try (MemoryRecordsBuilder builder = controlRecordsBuilder(offset, epoch, compression, now, buf)) {
            // Create a control batch without any records
            return builder.build();
        }
    };
    try (BatchAccumulator<String> acc = buildAccumulator(leaderEpoch, baseOffset, lingerMs, maxBatchSize)) {
        assertThrows(IllegalArgumentException.class, () -> acc.appendControlMessages(creator));
    }
}
// Gathers master status, worker health and the job summary from the job master,
// serializes them as one JSON document to the print stream, and returns 0 on
// success or -1 if JSON serialization fails.
public int run() throws IOException { ObjectMapper objectMapper = new ObjectMapper(); List<JobMasterStatus> allMasterStatus = mJobMasterClient.getAllMasterStatus(); List<JobWorkerHealth> allWorkerHealth = mJobMasterClient.getAllWorkerHealth(); JobServiceSummary jobServiceSummary = mJobMasterClient.getJobServiceSummary(); JobServiceOutput jobServiceInfo = new JobServiceOutput( allMasterStatus, allWorkerHealth, jobServiceSummary); try { String json = objectMapper.writeValueAsString(jobServiceInfo); mPrintStream.println(json); } catch (JsonProcessingException e) { mPrintStream.println("Failed to convert jobServiceInfo output to JSON. " + "Check the command line log for the detailed error message."); LOG.error("Failed to output JSON object {}", jobServiceInfo); e.printStackTrace(); return -1; } return 0; }
// End-to-end check of the JSON report: mocks three job masters, one worker health
// record and two jobs, runs the command, then walks every section of the parsed
// JSON output.
@Test
public void testBasic() throws IOException, ParseException {
    long now = Instant.now().toEpochMilli();
    String startTimeStr = String.valueOf(now);
    JobMasterStatus primaryMaster = JobMasterStatus.newBuilder()
        .setMasterAddress(NetAddress.newBuilder().setHost("master-node-1").setRpcPort(19998).build())
        .setState("PRIMARY").setStartTime(now)
        .setVersion(BuildVersion.newBuilder().setVersion("alluxio-version-2.9").setRevision("abcdef").build())
        .build();
    JobMasterStatus standbyMaster1 = JobMasterStatus.newBuilder()
        .setMasterAddress(NetAddress.newBuilder().setHost("master-node-0").setRpcPort(19998).build())
        .setState("STANDBY").setStartTime(now)
        .setVersion(BuildVersion.newBuilder().setVersion("alluxio-version-2.10").setRevision("abcdef").build())
        .build();
    JobMasterStatus standbyMaster2 = JobMasterStatus.newBuilder()
        .setMasterAddress(NetAddress.newBuilder().setHost("master-node-2").setRpcPort(19998).build())
        .setState("STANDBY").setStartTime(now)
        .setVersion(BuildVersion.newBuilder().setVersion("alluxio-version-2.10").setRevision("bcdefg").build())
        .build();
    Mockito.when(mJobMasterClient.getAllMasterStatus())
        .thenReturn(Lists.newArrayList(primaryMaster, standbyMaster1, standbyMaster2));
    JobWorkerHealth jobWorkerHealth = new JobWorkerHealth(
        1, Lists.newArrayList(1.2, 0.9, 0.7), 10, 2, 2, "testHost",
        BuildVersion.newBuilder().setVersion("2.10.0-SNAPSHOT").setRevision("ac6a0616").build());
    Mockito.when(mJobMasterClient.getAllWorkerHealth())
        .thenReturn(Lists.newArrayList(jobWorkerHealth));
    List<JobInfo> jobInfos = new ArrayList<>();
    jobInfos.add(new PlanInfo(1, "Test1", Status.RUNNING, 1547697600000L, null));
    jobInfos.add(new PlanInfo(2, "Test2", Status.FAILED, 1547699415000L, null));
    Mockito.when(mJobMasterClient.getJobServiceSummary())
        .thenReturn(new JobServiceSummary(jobInfos));
    new JobServiceMetricsCommand(mJobMasterClient, mPrintStream, "yyyyMMdd-HHmmss").run();
    String output = new String(mOutputStream.toByteArray(), StandardCharsets.UTF_8);
    ObjectMapper mapper = new ObjectMapper();
    JsonNode jsonNode = mapper.readTree(output);
    // Master Status Section
    JsonNode masterStatuses = jsonNode.get("masterStatus");
    assertEquals(startTimeStr, masterStatuses.get(0).get("startTime").asText());
    assertEquals("abcdef", masterStatuses.get(0).get("revision").asText());
    assertEquals("19998", masterStatuses.get(0).get("port").asText());
    assertEquals("master-node-1", masterStatuses.get(0).get("host").asText());
    assertEquals("alluxio-version-2.9", masterStatuses.get(0).get("version").asText());
    assertEquals("PRIMARY", masterStatuses.get(0).get("state").asText());
    assertEquals(startTimeStr, masterStatuses.get(1).get("startTime").asText());
    assertEquals("abcdef", masterStatuses.get(1).get("revision").asText());
    assertEquals("19998", masterStatuses.get(1).get("port").asText());
    assertEquals("master-node-0", masterStatuses.get(1).get("host").asText());
    assertEquals("alluxio-version-2.10", masterStatuses.get(1).get("version").asText());
    assertEquals("STANDBY", masterStatuses.get(1).get("state").asText());
    assertEquals(startTimeStr, masterStatuses.get(2).get("startTime").asText());
    assertEquals("bcdefg", masterStatuses.get(2).get("revision").asText());
    assertEquals("19998", masterStatuses.get(2).get("port").asText());
    assertEquals("master-node-2", masterStatuses.get(2).get("host").asText());
    assertEquals("alluxio-version-2.10", masterStatuses.get(2).get("version").asText());
    assertEquals("STANDBY", masterStatuses.get(2).get("state").asText());
    // Worker Health Section
    JsonNode workerHealth = jsonNode.get("workerHealth");
    assertEquals("ac6a0616", workerHealth.get(0).get("revision").asText());
    assertEquals("2", workerHealth.get(0).get("activeTasks").asText());
    assertEquals("1.2", workerHealth.get(0).get("loadAverage").get(0).asText());
    assertEquals("0.9", workerHealth.get(0).get("loadAverage").get(1).asText());
    assertEquals("0.7", workerHealth.get(0).get("loadAverage").get(2).asText());
    assertEquals("10", workerHealth.get(0).get("taskPoolSize").asText());
    assertEquals("2", workerHealth.get(0).get("unfinishedTasks").asText());
    assertEquals("testHost", workerHealth.get(0).get("host").asText());
    assertEquals("2.10.0-SNAPSHOT", workerHealth.get(0).get("version").asText());
    // Group By Status
    JsonNode statusSummary = jsonNode.get("statusSummary");
    assertEquals("CREATED", statusSummary.get(0).get("status").asText());
    assertEquals("0", statusSummary.get(0).get("count").asText());
    assertEquals("CANCELED", statusSummary.get(1).get("status").asText());
    assertEquals("0", statusSummary.get(1).get("count").asText());
    assertEquals("FAILED", statusSummary.get(2).get("status").asText());
    assertEquals("1", statusSummary.get(2).get("count").asText());
    assertEquals("RUNNING", statusSummary.get(3).get("status").asText());
    assertEquals("1", statusSummary.get(3).get("count").asText());
    assertEquals("COMPLETED", statusSummary.get(4).get("status").asText());
    assertEquals("0", statusSummary.get(4).get("count").asText());
    // Top 10
    JsonNode recentModifiedJobs = jsonNode.get("recentModifiedJobs");
    assertEquals("2", recentModifiedJobs.get(0).get("id").asText());
    assertEquals("FAILED", recentModifiedJobs.get(0).get("status").asText());
    assertEquals("1547699415000", recentModifiedJobs.get(0).get("lastUpdatedTime").asText());
    assertEquals("Test2", recentModifiedJobs.get(0).get("name").asText());
    assertEquals("1", recentModifiedJobs.get(1).get("id").asText());
    assertEquals("RUNNING", recentModifiedJobs.get(1).get("status").asText());
    assertEquals("1547697600000", recentModifiedJobs.get(1).get("lastUpdatedTime").asText());
    assertEquals("Test1", recentModifiedJobs.get(1).get("name").asText());
    JsonNode recentFailedJobs = jsonNode.get("recentFailedJobs");
    assertEquals("2", recentFailedJobs.get(0).get("id").asText());
    assertEquals("FAILED", recentFailedJobs.get(0).get("status").asText());
    assertEquals("1547699415000", recentFailedJobs.get(0).get("lastUpdatedTime").asText());
    assertEquals("Test2", recentFailedJobs.get(0).get("name").asText());
    JsonNode longestRunningJobs = jsonNode.get("longestRunningJobs");
    assertEquals("1", longestRunningJobs.get(0).get("id").asText());
    assertEquals("RUNNING", longestRunningJobs.get(0).get("status").asText());
    assertEquals("1547697600000", longestRunningJobs.get(0).get("lastUpdatedTime").asText());
    assertEquals("Test1", longestRunningJobs.get(0).get("name").asText());
}
/**
 * Runs every registered {@link PreMeasuresComputationCheck} against a single shared
 * check context. A check that fails with {@link PreMeasuresComputationCheckException}
 * is reported to the user as a generic CE task message instead of aborting the step,
 * so the remaining checks still get a chance to run.
 */
@Override
public void execute(Context context) {
  PreMeasuresComputationCheck.Context checkContext = new ContextImpl();
  for (PreMeasuresComputationCheck check : extensions) {
    try {
      check.onCheck(checkContext);
    } catch (PreMeasuresComputationCheckException e) {
      // Surface the failure as a task message; deliberately do not rethrow.
      ceTaskMessages.add(new CeTaskMessages.Message(e.getMessage(), System2.INSTANCE.now(), MessageType.GENERIC));
    }
  }
}
@Test
public void context_contains_project_uuid_from_analysis_metadata_holder() throws PreMeasuresComputationCheckException {
  // Given: the analysis metadata holder knows about a (private) project.
  Project project = Project.from(newPrivateProjectDto());
  when(analysisMetadataHolder.getProject()).thenReturn(project);
  PreMeasuresComputationCheck check = mock(PreMeasuresComputationCheck.class);

  // When: the step runs, it must call the check with a context.
  newStep(check).execute(new TestComputationStepContext());

  // Then: the context handed to the check exposes the project's UUID.
  ArgumentCaptor<Context> contextArgumentCaptor = ArgumentCaptor.forClass(Context.class);
  verify(check).onCheck(contextArgumentCaptor.capture());
  assertThat(contextArgumentCaptor.getValue().getProjectUuid()).isEqualTo(project.getUuid());
}
/**
 * Convenience overload: propagates changes of the given partition schema to shared
 * copies elsewhere, matching them by the schema's own name.
 */
public synchronized void synchronizePartitionSchemas( PartitionSchema partitionSchema ) {
  final String schemaName = partitionSchema.getName();
  synchronizePartitionSchemas( partitionSchema, schemaName );
}
/**
 * Verifies that synchronizing a partition schema updates only the SHARED copies with
 * the same name; an unshared schema with the same name must keep its original value.
 */
@Test
public void synchronizePartitionSchemas_sync_shared_only() throws Exception {
  final String partitionSchemaName = "PartitionSchema";

  // Transformation 1: shared schema — should be synchronized.
  TransMeta transformarion1 = createTransMeta();
  PartitionSchema partitionSchema1 = createPartitionSchema( partitionSchemaName, true );
  transformarion1.setPartitionSchemas( Collections.singletonList( partitionSchema1 ) );
  spoonDelegates.trans.addTransformation( transformarion1 );

  // Transformation 2: NOT shared — must be left untouched.
  TransMeta transformarion2 = createTransMeta();
  PartitionSchema unsharedPartitionSchema2 = createPartitionSchema( partitionSchemaName, false );
  transformarion2.setPartitionSchemas( Collections.singletonList( unsharedPartitionSchema2 ) );
  spoonDelegates.trans.addTransformation( transformarion2 );

  // Transformation 3: shared schema that is modified and used as the sync source.
  TransMeta transformarion3 = createTransMeta();
  PartitionSchema partitionSchema3 = createPartitionSchema( partitionSchemaName, true );
  transformarion3.setPartitionSchemas( Collections.singletonList( partitionSchema3 ) );
  spoonDelegates.trans.addTransformation( transformarion3 );

  partitionSchema3.setNumberOfPartitionsPerSlave( AFTER_SYNC_VALUE );
  sharedUtil.synchronizePartitionSchemas( partitionSchema3 );

  // Shared copy picked up the new value; unshared copy kept the old one.
  assertThat( partitionSchema1.getNumberOfPartitionsPerSlave(), equalTo( AFTER_SYNC_VALUE ) );
  assertThat( unsharedPartitionSchema2.getNumberOfPartitionsPerSlave(), equalTo( BEFORE_SYNC_VALUE ) );
}
/**
 * Lists share groups cluster-wide. First fetches cluster metadata to discover all
 * brokers, then fans out one ListGroups request (filtered to SHARE-type groups and the
 * requested states) to every broker, accumulating listings and per-node errors in a
 * shared {@code ListShareGroupsResults} that completes {@code all} once every node has
 * responded or failed.
 */
@Override
public ListShareGroupsResult listShareGroups(ListShareGroupsOptions options) {
    final KafkaFutureImpl<Collection<Object>> all = new KafkaFutureImpl<>();
    final long nowMetadata = time.milliseconds();
    final long deadline = calcDeadlineMs(nowMetadata, options.timeoutMs());
    // Stage 1: discover the broker list via a metadata request to the least-loaded node.
    runnable.call(new Call("findAllBrokers", deadline, new LeastLoadedNodeProvider()) {
        @Override
        MetadataRequest.Builder createRequest(int timeoutMs) {
            return new MetadataRequest.Builder(new MetadataRequestData()
                .setTopics(Collections.emptyList())
                .setAllowAutoTopicCreation(true));
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse metadataResponse = (MetadataResponse) abstractResponse;
            Collection<Node> nodes = metadataResponse.brokers();
            // Throwing StaleMetadataException lets the retry machinery re-fetch metadata.
            if (nodes.isEmpty())
                throw new StaleMetadataException("Metadata fetch failed due to missing broker list");

            HashSet<Node> allNodes = new HashSet<>(nodes);
            // Shared accumulator; completes `all` when every node has been accounted for.
            final ListShareGroupsResults results = new ListShareGroupsResults(allNodes, all);

            // Stage 2: query each broker for the share groups it coordinates.
            for (final Node node : allNodes) {
                final long nowList = time.milliseconds();
                runnable.call(new Call("listShareGroups", deadline, new ConstantNodeIdProvider(node.id())) {
                    @Override
                    ListGroupsRequest.Builder createRequest(int timeoutMs) {
                        // Restrict the listing to share groups in the requested states.
                        List<String> states = options.states()
                            .stream()
                            .map(ShareGroupState::toString)
                            .collect(Collectors.toList());
                        List<String> types = Collections.singletonList(GroupType.SHARE.toString());
                        return new ListGroupsRequest.Builder(new ListGroupsRequestData()
                            .setStatesFilter(states)
                            .setTypesFilter(types)
                        );
                    }

                    // Converts one listed group into a ShareGroupListing (state is optional
                    // because older brokers may not report it) and records it.
                    private void maybeAddShareGroup(ListGroupsResponseData.ListedGroup group) {
                        final String groupId = group.groupId();
                        final Optional<ShareGroupState> state = group.groupState().isEmpty() ?
                            Optional.empty() : Optional.of(ShareGroupState.parse(group.groupState()));
                        final ShareGroupListing groupListing = new ShareGroupListing(groupId, state);
                        results.addListing(groupListing);
                    }

                    @Override
                    void handleResponse(AbstractResponse abstractResponse) {
                        final ListGroupsResponse response = (ListGroupsResponse) abstractResponse;
                        // `results` is shared across per-node callbacks; guard all mutation.
                        synchronized (results) {
                            Errors error = Errors.forCode(response.data().errorCode());
                            if (error == Errors.COORDINATOR_LOAD_IN_PROGRESS || error == Errors.COORDINATOR_NOT_AVAILABLE) {
                                // Transient coordinator states: rethrow so the call is retried.
                                throw error.exception();
                            } else if (error != Errors.NONE) {
                                results.addError(error.exception(), node);
                            } else {
                                for (ListGroupsResponseData.ListedGroup group : response.data().groups()) {
                                    maybeAddShareGroup(group);
                                }
                            }
                            results.tryComplete(node);
                        }
                    }

                    @Override
                    void handleFailure(Throwable throwable) {
                        synchronized (results) {
                            results.addError(throwable, node);
                            results.tryComplete(node);
                        }
                    }
                }, nowList);
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            // Could not even get the broker list: complete with a single wrapped error.
            KafkaException exception = new KafkaException("Failed to find brokers to send ListGroups", throwable);
            all.complete(Collections.singletonList(exception));
        }
    }, nowMetadata);
    return new ListShareGroupsResult(all);
}
/**
 * When the initial metadata fetch returns an empty broker list, listShareGroups()
 * must fail the whole result with a KafkaException (retries disabled here so the
 * failure surfaces immediately).
 */
@Test
public void testListShareGroupsMetadataFailure() throws Exception {
    final Cluster cluster = mockCluster(3, 0);
    final Time time = new MockTime();

    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster,
        AdminClientConfig.RETRIES_CONFIG, "0")) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());

        // Empty metadata causes the request to fail since we have no list of brokers
        // to send the ListGroups requests to
        env.kafkaClient().prepareResponse(
            RequestTestUtils.metadataResponse(
                Collections.emptyList(),
                env.cluster().clusterResource().clusterId(),
                -1,
                Collections.emptyList()));

        final ListShareGroupsResult result = env.adminClient().listShareGroups();
        TestUtils.assertFutureError(result.all(), KafkaException.class);
    }
}
/**
 * Serializes the full data set as one versioned JSON object. The section order below is
 * part of the export format and must not change: clients, grants, whitelisted sites,
 * blacklisted sites, authentication holders, access tokens, refresh tokens, system
 * scopes, then any extension data. Only the FIRST extension that supports this version
 * gets to write its data.
 *
 * @param writer an open JsonWriter positioned inside an object ({@code beginObject()}
 *               already called by the caller)
 * @throws IOException on any write failure
 */
@Override
public void exportData(JsonWriter writer) throws IOException {
	// version tag at the root
	writer.name(THIS_VERSION);

	writer.beginObject();

	// clients list
	writer.name(CLIENTS);
	writer.beginArray();
	writeClients(writer);
	writer.endArray();

	writer.name(GRANTS);
	writer.beginArray();
	writeGrants(writer);
	writer.endArray();

	writer.name(WHITELISTEDSITES);
	writer.beginArray();
	writeWhitelistedSites(writer);
	writer.endArray();

	writer.name(BLACKLISTEDSITES);
	writer.beginArray();
	writeBlacklistedSites(writer);
	writer.endArray();

	writer.name(AUTHENTICATIONHOLDERS);
	writer.beginArray();
	writeAuthenticationHolders(writer);
	writer.endArray();

	writer.name(ACCESSTOKENS);
	writer.beginArray();
	writeAccessTokens(writer);
	writer.endArray();

	writer.name(REFRESHTOKENS);
	writer.beginArray();
	writeRefreshTokens(writer);
	writer.endArray();

	writer.name(SYSTEMSCOPES);
	writer.beginArray();
	writeSystemScopes(writer);
	writer.endArray();

	// delegate to at most one matching extension for this format version
	for (MITREidDataServiceExtension extension : extensions) {
		if (extension.supportsVersion(THIS_VERSION)) {
			extension.exportExtensionData(writer);
			break;
		}
	}

	writer.endObject(); // end mitreid-connect-1.3
}
/**
 * Exports a data set containing only three whitelisted sites and checks that the JSON
 * output has the full versioned structure (all root arrays present) and that each site
 * round-trips its id and clientId.
 */
@Test
public void testExportWhitelistedSites() throws IOException {
	WhitelistedSite site1 = new WhitelistedSite();
	site1.setId(1L);
	site1.setClientId("foo");

	WhitelistedSite site2 = new WhitelistedSite();
	site2.setId(2L);
	site2.setClientId("bar");

	WhitelistedSite site3 = new WhitelistedSite();
	site3.setId(3L);
	site3.setClientId("baz");

	Set<WhitelistedSite> allWhitelistedSites = ImmutableSet.of(site1, site2, site3);

	// All other repositories are empty so only the whitelisted-sites section has content.
	Mockito.when(clientRepository.getAllClients()).thenReturn(new HashSet<ClientDetailsEntity>());
	Mockito.when(approvedSiteRepository.getAll()).thenReturn(new HashSet<ApprovedSite>());
	Mockito.when(blSiteRepository.getAll()).thenReturn(new HashSet<BlacklistedSite>());
	Mockito.when(wlSiteRepository.getAll()).thenReturn(allWhitelistedSites);
	Mockito.when(authHolderRepository.getAll()).thenReturn(new ArrayList<AuthenticationHolderEntity>());
	Mockito.when(tokenRepository.getAllAccessTokens()).thenReturn(new HashSet<OAuth2AccessTokenEntity>());
	Mockito.when(tokenRepository.getAllRefreshTokens()).thenReturn(new HashSet<OAuth2RefreshTokenEntity>());
	Mockito.when(sysScopeRepository.getAll()).thenReturn(new HashSet<SystemScope>());

	// do the data export
	StringWriter stringWriter = new StringWriter();
	JsonWriter writer = new JsonWriter(stringWriter);
	writer.beginObject();
	dataService.exportData(writer);
	writer.endObject();
	writer.close();

	// parse the output as a JSON object for testing
	JsonElement elem = new JsonParser().parse(stringWriter.toString());
	JsonObject root = elem.getAsJsonObject();

	// make sure the root is there
	assertThat(root.has(MITREidDataService.MITREID_CONNECT_1_3), is(true));

	JsonObject config = root.get(MITREidDataService.MITREID_CONNECT_1_3).getAsJsonObject();

	// make sure all the root elements are there
	assertThat(config.has(MITREidDataService.CLIENTS), is(true));
	assertThat(config.has(MITREidDataService.GRANTS), is(true));
	assertThat(config.has(MITREidDataService.WHITELISTEDSITES), is(true));
	assertThat(config.has(MITREidDataService.BLACKLISTEDSITES), is(true));
	assertThat(config.has(MITREidDataService.REFRESHTOKENS), is(true));
	assertThat(config.has(MITREidDataService.ACCESSTOKENS), is(true));
	assertThat(config.has(MITREidDataService.SYSTEMSCOPES), is(true));
	assertThat(config.has(MITREidDataService.AUTHENTICATIONHOLDERS), is(true));

	// make sure the root elements are all arrays
	assertThat(config.get(MITREidDataService.CLIENTS).isJsonArray(), is(true));
	assertThat(config.get(MITREidDataService.GRANTS).isJsonArray(), is(true));
	assertThat(config.get(MITREidDataService.WHITELISTEDSITES).isJsonArray(), is(true));
	assertThat(config.get(MITREidDataService.BLACKLISTEDSITES).isJsonArray(), is(true));
	assertThat(config.get(MITREidDataService.REFRESHTOKENS).isJsonArray(), is(true));
	assertThat(config.get(MITREidDataService.ACCESSTOKENS).isJsonArray(), is(true));
	assertThat(config.get(MITREidDataService.SYSTEMSCOPES).isJsonArray(), is(true));
	assertThat(config.get(MITREidDataService.AUTHENTICATIONHOLDERS).isJsonArray(), is(true));

	// check our scope list (this test)
	JsonArray sites = config.get(MITREidDataService.WHITELISTEDSITES).getAsJsonArray();

	assertThat(sites.size(), is(3));

	// check for both of our sites in turn
	Set<WhitelistedSite> checked = new HashSet<>();
	for (JsonElement e : sites) {
		assertThat(e.isJsonObject(), is(true));
		JsonObject site = e.getAsJsonObject();

		WhitelistedSite compare = null;
		if (site.get("id").getAsLong() == site1.getId().longValue()) {
			compare = site1;
		} else if (site.get("id").getAsLong() == site2.getId().longValue()) {
			compare = site2;
		} else if (site.get("id").getAsLong() == site3.getId().longValue()) {
			compare = site3;
		}

		if (compare == null) {
			fail("Could not find matching whitelisted site id: " + site.get("id").getAsString());
		} else {
			assertThat(site.get("clientId").getAsString(), equalTo(compare.getClientId()));
			checked.add(compare);
		}
	}
	// make sure all of our clients were found
	assertThat(checked.containsAll(allWhitelistedSites), is(true));
}
/**
 * ANDs the Iceberg translations of all given Spark filters into a single expression.
 * Starts from {@code alwaysTrue()}, so an empty array yields a tautology.
 *
 * @throws IllegalArgumentException if any filter has no Iceberg equivalent
 */
public static Expression convert(Filter[] filters) {
  Expression result = Expressions.alwaysTrue();
  for (Filter sparkFilter : filters) {
    Expression icebergExpr = convert(sparkFilter);
    Preconditions.checkArgument(
        icebergExpr != null, "Cannot convert filter to Iceberg: %s", sparkFilter);
    result = Expressions.and(result, icebergExpr);
  }
  return result;
}
/**
 * NOT IN must translate to notNull(col) AND notIn(col, ...): SQL three-valued logic
 * means a NULL value is neither "in" nor "not in" the set, so the null guard is required.
 */
@Test
public void testNotIn() {
  Not filter = Not.apply(In.apply("col", new Integer[] {1, 2}));

  Expression actual = SparkFilters.convert(filter);
  Expression expected =
      Expressions.and(Expressions.notNull("col"), Expressions.notIn("col", 1, 2));
  assertThat(actual.toString()).as("Expressions should match").isEqualTo(expected.toString());
}
/**
 * Resolves the exact parameterization of {@code rawSubtype} as a subtype of
 * {@code type}, then strips any capture variables from the result.
 */
public static Type getExactSubtype(Type type, Class<?> rawSubtype) {
  Type captured = GenericTypeReflector.getExactSubType(type, rawSubtype);
  return uncapture(captured);
}
/**
 * Resolving ArrayList as a subtype of Collection&lt;Person&gt; must propagate the type
 * argument: the result is ArrayList&lt;Person&gt;.
 */
@Test
public void getExactSubtype() {
  assertThat(
          Reflection.getExactSubtype(
              Types.parameterizedType(Collection.class, Person.class), ArrayList.class))
      .isEqualTo(Types.parameterizedType(ArrayList.class, Person.class));
}
/**
 * Returns a read-only view of this buffer. Already-read-only buffers are returned
 * as-is; otherwise the buffer is wrapped in an unmodifiable view.
 */
@SuppressWarnings("deprecation")
@Override
public ByteBuf asReadOnly() {
    return isReadOnly() ? this : Unpooled.unmodifiableBuffer(this);
}
// Verifies that the NIO buffers exposed by a read-only view are themselves read-only.
// (NOTE(review): "Ready" in the method name looks like a typo for "Read" — renaming
// would touch the shared assertion helper, so it is left as-is.)
@Test
public void testReadyOnlyNioBuffers() {
    assertReadyOnlyNioBuffers(buffer.asReadOnly());
}
/**
 * Reads all rows from the configured lookup (info) stream and loads them into the
 * step's in-memory cache. On the first row it resolves and validates the lookup key
 * field and any additional return fields, building {@code data.infoCache} (row meta of
 * the cached columns) and {@code data.indexOfCachedFields} (source indexes, key at
 * slot 0). Every row is then normalized (binary-string storage converted to normal)
 * and added to the cache; a null key is cached as the empty string.
 *
 * @return false if no lookup step is configured, true when the stream is exhausted
 * @throws KettleException if the key field or an additional field cannot be found
 */
private boolean readLookupValues() throws KettleException {
  data.infoStream = meta.getStepIOMeta().getInfoStreams().get( 0 );
  if ( data.infoStream.getStepMeta() == null ) {
    logError( BaseMessages.getString( PKG, "FuzzyMatch.Log.NoLookupStepSpecified" ) );
    return false;
  }
  if ( isDetailed() ) {
    // NOTE(review): this appends a closing "]" — presumably the i18n message ends with
    // an opening "[" — TODO confirm against the message bundle.
    logDetailed( BaseMessages.getString( PKG, "FuzzyMatch.Log.ReadingFromStream" )
      + data.infoStream.getStepname() + "]" );
  }

  boolean firstRun = true;
  // Which row set do we read from?
  //
  RowSet rowSet = findInputRowSet( data.infoStream.getStepname() );
  Object[] rowData = getRowFrom( rowSet ); // rows are originating from "lookup_from"
  while ( rowData != null ) {
    if ( firstRun ) {
      // First row only: resolve field indexes and build the cache row meta.
      data.infoMeta = rowSet.getRowMeta().clone();
      // Check lookup field
      int indexOfLookupField = data.infoMeta.indexOfValue( environmentSubstitute( meta.getLookupField() ) );
      if ( indexOfLookupField < 0 ) {
        // The field is unreachable !
        throw new KettleException( BaseMessages.getString(
          PKG, "FuzzyMatch.Exception.CouldnotFindLookField", meta.getLookupField() ) );
      }
      data.infoCache = new RowMeta();
      // Cached values are stored in normal (non-binary) storage.
      ValueMetaInterface keyValueMeta = data.infoMeta.getValueMeta( indexOfLookupField );
      keyValueMeta.setStorageType( ValueMetaInterface.STORAGE_TYPE_NORMAL );
      data.infoCache.addValueMeta( keyValueMeta );
      // Add key
      data.indexOfCachedFields[0] = indexOfLookupField;

      // Check additional fields
      if ( data.addAdditionalFields ) {
        ValueMetaInterface additionalFieldValueMeta;
        for ( int i = 0; i < meta.getValue().length; i++ ) {
          // Slot 0 is the key, so additional fields start at index 1.
          int fi = i + 1;
          data.indexOfCachedFields[fi] = data.infoMeta.indexOfValue( meta.getValue()[i] );
          if ( data.indexOfCachedFields[fi] < 0 ) {
            // The field is unreachable !
            throw new KettleException( BaseMessages.getString(
              PKG, "FuzzyMatch.Exception.CouldnotFindLookField", meta.getValue()[i] ) );
          }
          additionalFieldValueMeta = data.infoMeta.getValueMeta( data.indexOfCachedFields[fi] );
          additionalFieldValueMeta.setStorageType( ValueMetaInterface.STORAGE_TYPE_NORMAL );
          data.infoCache.addValueMeta( additionalFieldValueMeta );
        }
        data.nrCachedFields += meta.getValue().length;
      }
    }
    if ( log.isRowLevel() ) {
      logRowlevel( BaseMessages.getString( PKG, "FuzzyMatch.Log.ReadLookupRow" )
        + rowSet.getRowMeta().getString( rowData ) );
    }

    // Look up the keys in the source rows
    // and store values in cache
    Object[] storeData = new Object[data.nrCachedFields];
    // Add key field
    if ( rowData[data.indexOfCachedFields[0]] == null ) {
      // Null keys are cached as the empty string.
      storeData[0] = "";
    } else {
      ValueMetaInterface fromStreamRowMeta = rowSet.getRowMeta().getValueMeta( data.indexOfCachedFields[0] );
      if ( fromStreamRowMeta.isStorageBinaryString() ) {
        storeData[0] = fromStreamRowMeta.convertToNormalStorageType( rowData[data.indexOfCachedFields[0]] );
      } else {
        storeData[0] = rowData[data.indexOfCachedFields[0]];
      }
    }

    // Add additional fields?
    for ( int i = 1; i < data.nrCachedFields; i++ ) {
      ValueMetaInterface fromStreamRowMeta = rowSet.getRowMeta().getValueMeta( data.indexOfCachedFields[i] );
      if ( fromStreamRowMeta.isStorageBinaryString() ) {
        storeData[i] = fromStreamRowMeta.convertToNormalStorageType( rowData[data.indexOfCachedFields[i]] );
      } else {
        storeData[i] = rowData[data.indexOfCachedFields[i]];
      }
    }
    if ( isDebug() ) {
      logDebug( BaseMessages.getString( PKG, "FuzzyMatch.Log.AddingValueToCache", data.infoCache
        .getString( storeData ) ) );
    }

    addToCache( storeData );
    rowData = getRowFrom( rowSet );

    if ( firstRun ) {
      firstRun = false;
    }
  }
  return true;
}
/**
 * Feeds binary-storage rows into both the main and lookup streams and checks that the
 * step normalizes binary-string storage when caching lookup values, so the matched
 * output value equals the original (decoded) lookup row.
 */
@Test
public void testReadLookupValues() throws Exception {
  FuzzyMatchData data = spy( new FuzzyMatchData() );
  data.indexOfCachedFields = new int[2];
  data.minimalDistance = 0;
  data.maximalDistance = 5;
  FuzzyMatchMeta meta = spy( new FuzzyMatchMeta() );
  meta.setOutputMatchField( "I don't want NPE here!" );
  data.readLookupValues = true;
  fuzzyMatch =
    new FuzzyMatchHandler( mockHelper.stepMeta, mockHelper.stepDataInterface, 0, mockHelper.transMeta,
      mockHelper.trans );
  fuzzyMatch.init( meta, data );
  // Lookup stream must be a distinct row set so the handler can pick it by reference.
  RowSet lookupRowSet = mockHelper.getMockInputRowSet( binaryLookupRows );
  fuzzyMatch.addRowSetToInputRowSets( mockHelper.getMockInputRowSet( binaryRows ) );
  fuzzyMatch.addRowSetToInputRowSets( lookupRowSet );
  fuzzyMatch.rowset = lookupRowSet;

  // field1 is declared as BINARY_STRING storage with string metadata, matching the fixture rows.
  RowMetaInterface rowMetaInterface = new RowMeta();
  ValueMetaInterface valueMeta = new ValueMetaString( "field1" );
  valueMeta.setStorageMetadata( new ValueMetaString( "field1" ) );
  valueMeta.setStorageType( ValueMetaInterface.STORAGE_TYPE_BINARY_STRING );
  rowMetaInterface.addValueMeta( valueMeta );
  when( lookupRowSet.getRowMeta() ).thenReturn( rowMetaInterface );
  when( meta.getLookupField() ).thenReturn( "field1" );
  when( meta.getMainStreamField() ).thenReturn( "field1" );
  fuzzyMatch.setInputRowMeta( rowMetaInterface.clone() );
  when( meta.getAlgorithmType() ).thenReturn( 1 );

  // Wire the info stream so readLookupValues() finds a configured lookup step.
  StepIOMetaInterface stepIOMetaInterface = mock( StepIOMetaInterface.class );
  when( meta.getStepIOMeta() ).thenReturn( stepIOMetaInterface );
  StreamInterface streamInterface = mock( StreamInterface.class );
  List<StreamInterface> streamInterfaceList = new ArrayList<StreamInterface>();
  streamInterfaceList.add( streamInterface );
  when( streamInterface.getStepMeta() ).thenReturn( mockHelper.stepMeta );

  when( stepIOMetaInterface.getInfoStreams() ).thenReturn( streamInterfaceList );

  fuzzyMatch.processRow( meta, data );
  Assert.assertEquals( rowMetaInterface.getString( row3B, 0 ),
    data.outputRowMeta.getString( fuzzyMatch.resultRow, 1 ) );
}
/**
 * Loads a (possibly password-protected) private key from a PEM file.
 * Delegates to the three-argument overload with its default flag enabled.
 */
protected static PrivateKey toPrivateKey(File keyFile, String keyPassword) throws NoSuchAlgorithmException,
        NoSuchPaddingException, InvalidKeySpecException,
        InvalidAlgorithmParameterException,
        KeyException, IOException {
    // Presumably the flag enables the BouncyCastle fallback parser — TODO confirm
    // against the three-argument overload's parameter name.
    final boolean tryBouncyCastle = true;
    return toPrivateKey(keyFile, keyPassword, tryBouncyCastle);
}
// An unencrypted key combined with an empty (non-null) password is an invalid
// combination and must be rejected with an IOException.
@Test
public void testUnencryptedEmptyPassword() throws Exception {
    assertThrows(IOException.class, new Executable() {
        @Override
        public void execute() throws Throwable {
            SslContext.toPrivateKey(
                ResourcesUtil.getFile(getClass(), "test2_unencrypted.pem"), "");
        }
    });
}
/**
 * Builds the execution context handed to a pluggable task: stderr and stdout are routed
 * to the publisher, and the combined stream is wrapped so that any secret values from
 * the environment are masked before they reach the console log.
 */
protected TaskExecutionContext buildTaskContext(DefaultGoPublisher publisher,
                                                EnvironmentVariableContext environmentVariableContext,
                                                Charset consoleLogCharset) {
    CompositeConsumer errConsumer = new CompositeConsumer(CompositeConsumer.ERR, publisher);
    CompositeConsumer outConsumer = new CompositeConsumer(CompositeConsumer.OUT, publisher);
    // Mask secrets on both streams before anything is published.
    SafeOutputStreamConsumer safeConsumer =
            new SafeOutputStreamConsumer(new ProcessOutputStreamConsumer<>(errConsumer, outConsumer));
    safeConsumer.addSecrets(environmentVariableContext.secrets());
    return new PluggableTaskContext(safeConsumer, environmentVariableContext, workingDir, consoleLogCharset);
}
// buildTaskContext must produce a PluggableTaskContext bound to the builder's working directory.
@Test
public void shouldReturnPluggableTaskContext() {
    PluggableTask task = mock(PluggableTask.class);
    when(task.getPluginConfiguration()).thenReturn(new PluginConfiguration());

    String workingDir = "test-directory";
    PluggableTaskBuilder taskBuilder = new PluggableTaskBuilder(runIfConfigs, cancelBuilder, task, TEST_PLUGIN_ID, workingDir);
    TaskExecutionContext taskExecutionContext = taskBuilder.buildTaskContext(goPublisher, variableContext, UTF_8);

    assertThat(taskExecutionContext instanceof PluggableTaskContext).isEqualTo(true);
    assertThat(taskExecutionContext.workingDir()).isEqualTo(workingDir);
}
/**
 * Persists the given dashboard notification: every mapper that supports it converts it
 * to metadata, and each mapped result is saved.
 */
public void notify(DashboardNotification e) {
    notificationMappers.stream()
            .filter(mapper -> mapper.supports(e))
            .map(mapper -> mapper.map(e))
            .forEach(this::saveDashboardNotificationAsMetadata);
}
// A StorageException raised while persisting notification metadata must not propagate
// to the caller of notify().
@Test
void noExceptionIsThrownForConcurrentSlowRunNotification() {
    doThrow(new StorageException("a storage exception"))
            .when(storageProviderMock).saveMetadata(any(JobRunrMetadata.class));

    assertThatCode(() -> dashboardNotificationManager.notify(new PollIntervalInSecondsTimeBoxIsTooSmallNotification(1, 5, Instant.now(), 6)))
            .doesNotThrowAnyException();
}
/**
 * Deletes the named resource group if it currently exists; a missing group is a no-op.
 * Any {@link PulsarAdminException} is logged (with stack trace) and swallowed, keeping
 * the operation best-effort.
 *
 * @param rgName name of the resource group to delete
 */
public synchronized void deleteResourceGroup(String rgName) {
    try {
        if (rgService.resourceGroupGet(rgName) != null) {
            LOG.info("Deleting resource group {}", rgName);
            rgService.resourceGroupDelete(rgName);
        }
    } catch (PulsarAdminException e) {
        // Fix: pass the exception as the trailing argument with no placeholder so SLF4J
        // logs the full stack trace; the previous "{}, {}" form formatted the exception
        // into the message and dropped the trace.
        LOG.error("Got exception while deleting resource group {}", rgName, e);
    }
}
// Deleting a resource group that was never created must fail at the admin API level.
@Test
public void testResourceGroupDeleteNonExistent() throws Exception {
    assertThrows(PulsarAdminException.class,
            () -> admin.resourcegroups().deleteResourceGroup(rgName));
}
/**
 * Precondition: the given string must be non-null and non-empty; otherwise the
 * two-argument overload raises its failure with a default message that embeds the
 * offending value.
 */
public static void notNullOrEmpty(String string) {
    final String defaultMessage = String.format("string [%s] is null or empty", string);
    notNullOrEmpty(string, defaultMessage);
}
// A whitespace-padded but non-empty string must pass the precondition without throwing.
@Test
public void testNotNull1NotEmpty2() {
    Precondition.notNullOrEmpty(" test ");
}
/**
 * Splits an oversized batch into smaller batches of at most {@code batchSize} and
 * re-enqueues them at the FRONT of the partition's deque so they are sent next.
 * When a transaction manager is present, the split batches (which already carry
 * assigned sequences) are tracked in-flight and inserted in sequence order.
 *
 * @param bigBatch the batch that was rejected as too large
 * @return the number of batches the big batch was split into
 */
public int splitAndReenqueue(ProducerBatch bigBatch) {
    // Reset the estimated compression ratio to the initial value or the big batch compression ratio, whichever
    // is bigger. There are several different ways to do the reset. We chose the most conservative one to ensure
    // the split doesn't happen too often.
    CompressionRatioEstimator.setEstimation(bigBatch.topicPartition.topic(), compression.type(),
                                            Math.max(1.0f, (float) bigBatch.compressionRatio()));
    Deque<ProducerBatch> dq = bigBatch.split(this.batchSize);
    int numSplitBatches = dq.size();
    Deque<ProducerBatch> partitionDequeue = getOrCreateDeque(bigBatch.topicPartition);
    // Drain from the tail and addFirst so the split batches keep their original order
    // at the head of the partition deque.
    while (!dq.isEmpty()) {
        ProducerBatch batch = dq.pollLast();
        incomplete.add(batch);
        // We treat the newly split batches as if they are not even tried.
        synchronized (partitionDequeue) {
            if (transactionManager != null) {
                // We should track the newly created batches since they already have assigned sequences.
                transactionManager.addInFlightBatch(batch);
                insertInSequenceOrder(partitionDequeue, batch);
            } else {
                partitionDequeue.addFirst(batch);
            }
        }
    }
    return numSplitBatches;
}
/**
 * Builds one oversized two-record batch, re-enqueues it, splits it via the accumulator,
 * and verifies the split produces two drainable batches whose records are acked in the
 * original order with sequential offsets.
 */
@Test
public void testSplitAndReenqueue() throws ExecutionException, InterruptedException {
    long now = time.milliseconds();
    RecordAccumulator accum = createTestRecordAccumulator(1024, 10 * 1024, Compression.gzip().build(), 10);

    // Create a big batch
    ByteBuffer buffer = ByteBuffer.allocate(4096);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE, TimestampType.CREATE_TIME, 0L);
    ProducerBatch batch = new ProducerBatch(tp1, builder, now, true);

    byte[] value = new byte[1024];
    final AtomicInteger acked = new AtomicInteger(0);
    Callback cb = (metadata, exception) -> acked.incrementAndGet();
    // Append two messages so the batch is too big.
    Future<RecordMetadata> future1 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now);
    Future<RecordMetadata> future2 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now);
    assertNotNull(future1);
    assertNotNull(future2);
    batch.close();
    // Enqueue the batch to the accumulator as if the batch was created by the accumulator.
    accum.reenqueue(batch, now);
    // Re-enqueuing counts as a second attempt, so the delay with jitter is 100 * (1 + 0.2) + 1
    time.sleep(121L);
    // Drain the batch.
    RecordAccumulator.ReadyCheckResult result = accum.ready(metadataCache, time.milliseconds());
    assertFalse(result.readyNodes.isEmpty(), "The batch should be ready");
    Map<Integer, List<ProducerBatch>> drained = accum.drain(metadataCache, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertEquals(1, drained.size(), "Only node1 should be drained");
    assertEquals(1, drained.get(node1.id()).size(), "Only one batch should be drained");
    // Split and reenqueue the batch.
    accum.splitAndReenqueue(drained.get(node1.id()).get(0));
    time.sleep(101L);

    // First split batch: completing it acks message one at offset 0.
    drained = accum.drain(metadataCache, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertFalse(drained.isEmpty());
    assertFalse(drained.get(node1.id()).isEmpty());
    drained.get(node1.id()).get(0).complete(acked.get(), 100L);
    assertEquals(1, acked.get(), "The first message should have been acked.");
    assertTrue(future1.isDone());
    assertEquals(0, future1.get().offset());

    // Second split batch: completing it acks message two at offset 1.
    drained = accum.drain(metadataCache, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertFalse(drained.isEmpty());
    assertFalse(drained.get(node1.id()).isEmpty());
    drained.get(node1.id()).get(0).complete(acked.get(), 100L);
    assertEquals(2, acked.get(), "Both message should have been acked.");
    assertTrue(future2.isDone());
    assertEquals(1, future2.get().offset());
}
/**
 * Streams the core extensions recorded in this repository.
 * Fails fast (via {@code checkInitialized}) if the repository has not been
 * initialized with its extensions yet.
 */
@Override
public Stream<CoreExtension> loadedCoreExtensions() {
  checkInitialized();
  Stream<CoreExtension> extensions = coreExtensions.stream();
  return extensions;
}
// Calling loadedCoreExtensions() before initialization must fail fast with an ISE.
@Test
public void loadedCoreExtensions_fails_with_ISE_if_called_before_setLoadedCoreExtensions() {
  assertThatThrownBy(() -> underTest.loadedCoreExtensions())
    .isInstanceOf(IllegalStateException.class)
    .hasMessage("Repository has not been initialized yet");
}
/**
 * Returns {@code true} when {@code left} and {@code right} differ by strictly more than
 * {@code |tolerance|}. Returns {@code false} whenever either operand is non-finite
 * (NaN or infinite) — non-finite values are considered neither equal nor not-equal
 * within a tolerance.
 *
 * @param left      first value
 * @param right     second value
 * @param tolerance maximum allowed difference; its absolute value is used, so a
 *                  negative tolerance behaves like its magnitude
 */
public static boolean notEqualWithinTolerance(double left, double right, double tolerance) {
  // Use the JDK's Double.isFinite (Java 8+) instead of Guava's Doubles.isFinite:
  // identical semantics, one less third-party dependency in this method.
  if (Double.isFinite(left) && Double.isFinite(right)) {
    return Math.abs(left - right) > Math.abs(tolerance);
  } else {
    return false;
  }
}
/**
 * notEqualWithinTolerance: equal values and values within tolerance are "not not-equal";
 * differences strictly above tolerance are not-equal; any non-finite operand (infinity,
 * NaN) always yields false.
 */
@Test
public void doubleNotEquals() {
  assertThat(notEqualWithinTolerance(1.3d, 1.3d, 0.00000000000001d)).isFalse();
  assertThat(notEqualWithinTolerance(1.3d, 1.3d, 0.0d)).isFalse();
  // Floating-point round-off stays within the (tiny) tolerance.
  assertThat(
          notEqualWithinTolerance(0.0d, 1.0d + 2.0d - 3.0d, 0.00000000000000000000000000000001d))
      .isFalse();
  assertThat(notEqualWithinTolerance(1.3d, 1.303d, 0.004d)).isFalse();
  assertThat(notEqualWithinTolerance(1.3d, 1.303d, 0.002d)).isTrue();
  // Non-finite operands are never reported as not-equal.
  assertThat(notEqualWithinTolerance(Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY, 0.01d))
      .isFalse();
  assertThat(notEqualWithinTolerance(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0.01d))
      .isFalse();
  assertThat(notEqualWithinTolerance(Double.NEGATIVE_INFINITY, Double.NEGATIVE_INFINITY, 0.01d))
      .isFalse();
  assertThat(notEqualWithinTolerance(Double.NaN, Double.NaN, 0.01d)).isFalse();
}
/**
 * Dispatches a failure batch to every suitable handler, then acknowledges all failed
 * messages that require acknowledgement. A handler exception is logged and does not
 * prevent the remaining handlers from running or the acknowledgement from happening.
 */
private void handle(FailureBatch failureBatch) {
    suitableHandlers(failureBatch)
            .forEach(handler -> {
                try {
                    handler.handle(failureBatch);
                } catch (Exception e) {
                    // Fix: include the caught exception as the last argument so its
                    // stack trace is logged; previously `e` was silently dropped.
                    logger.error("Error occurred while handling failures by {}",
                            handler.getClass().getName(), e);
                }
            });

    // Collect the failed Messages that must be acknowledged back to their source.
    final List<Message> requiresAcknowledgement = failureBatch.getFailures().stream()
            .filter(Failure::requiresAcknowledgement)
            .map(Failure::failedMessage)
            .filter(Message.class::isInstance)
            .map(Message.class::cast)
            .collect(Collectors.toList());

    if (!requiresAcknowledgement.isEmpty()) {
        acknowledger.acknowledge(requiresAcknowledgement);
    }
}
// A handler that throws on one batch must not stop the service: the next batch is
// still consumed and handled.
@Test
public void run_serviceNotInterruptedUponHandlerException() throws Exception {
    // given
    final FailureBatch indexingFailureBatch1 = indexingFailureBatch(createIndexingFailure());
    final FailureBatch indexingFailureBatch2 = indexingFailureBatch(createIndexingFailure());

    final FailureHandler fallbackIndexingFailureHandler = enabledFailureHandler(indexingFailureBatch1, indexingFailureBatch2);
    // Handler fails on batch 2 only.
    doThrow(new RuntimeException()).when(fallbackIndexingFailureHandler).handle(indexingFailureBatch2);

    final FailureHandlingService underTest = new FailureHandlingService(fallbackIndexingFailureHandler,
            ImmutableSet.of(), failureSubmissionQueue, configuration, acknowledger);

    underTest.startAsync();
    underTest.awaitRunning();

    // when
    failureSubmissionQueue.submitBlocking(indexingFailureBatch2);
    failureSubmissionQueue.submitBlocking(indexingFailureBatch1);
    Awaitility.waitAtMost(Durations.ONE_SECOND)
            .until(() -> failureSubmissionQueue.queueSize() == 0);

    // then: both batches were handed to the handler despite the exception on the first.
    verify(fallbackIndexingFailureHandler).handle(indexingFailureBatch2);
    verify(fallbackIndexingFailureHandler).handle(indexingFailureBatch1);
}
/**
 * Builds the coordinator record that persists a consumer group member's current
 * assignment: the key identifies (groupId, memberId), the value captures epochs, state,
 * assigned partitions and partitions pending revocation.
 */
public static CoordinatorRecord newConsumerGroupCurrentAssignmentRecord(
    String groupId,
    ConsumerGroupMember member
) {
    ConsumerGroupCurrentMemberAssignmentKey key =
        new ConsumerGroupCurrentMemberAssignmentKey()
            .setGroupId(groupId)
            .setMemberId(member.memberId());

    ConsumerGroupCurrentMemberAssignmentValue value =
        new ConsumerGroupCurrentMemberAssignmentValue()
            .setMemberEpoch(member.memberEpoch())
            .setPreviousMemberEpoch(member.previousMemberEpoch())
            .setState(member.state().value())
            .setAssignedPartitions(toTopicPartitions(member.assignedPartitions()))
            .setPartitionsPendingRevocation(toTopicPartitions(member.partitionsPendingRevocation()));

    // Key "version" 8 selects the record type; the value uses schema version 0.
    return new CoordinatorRecord(
        new ApiMessageAndVersion(key, (short) 8),
        new ApiMessageAndVersion(value, (short) 0)
    );
}
/**
 * The generated record must carry key version 8 with (groupId, memberId), and a
 * version-0 value containing state, both epochs, and the assigned/pending-revocation
 * partitions in their original (ordered) form.
 */
@Test
public void testNewConsumerGroupCurrentAssignmentRecord() {
    Uuid topicId1 = Uuid.randomUuid();
    Uuid topicId2 = Uuid.randomUuid();

    Map<Uuid, Set<Integer>> assigned = mkOrderedAssignment(
        mkOrderedTopicAssignment(topicId1, 11, 12, 13),
        mkOrderedTopicAssignment(topicId2, 21, 22, 23)
    );

    Map<Uuid, Set<Integer>> revoking = mkOrderedAssignment(
        mkOrderedTopicAssignment(topicId1, 14, 15, 16),
        mkOrderedTopicAssignment(topicId2, 24, 25, 26)
    );

    CoordinatorRecord expectedRecord = new CoordinatorRecord(
        new ApiMessageAndVersion(
            new ConsumerGroupCurrentMemberAssignmentKey()
                .setGroupId("group-id")
                .setMemberId("member-id"),
            (short) 8),
        new ApiMessageAndVersion(
            new ConsumerGroupCurrentMemberAssignmentValue()
                .setState(MemberState.UNREVOKED_PARTITIONS.value())
                .setMemberEpoch(22)
                .setPreviousMemberEpoch(21)
                .setAssignedPartitions(Arrays.asList(
                    new ConsumerGroupCurrentMemberAssignmentValue.TopicPartitions()
                        .setTopicId(topicId1)
                        .setPartitions(Arrays.asList(11, 12, 13)),
                    new ConsumerGroupCurrentMemberAssignmentValue.TopicPartitions()
                        .setTopicId(topicId2)
                        .setPartitions(Arrays.asList(21, 22, 23))))
                .setPartitionsPendingRevocation(Arrays.asList(
                    new ConsumerGroupCurrentMemberAssignmentValue.TopicPartitions()
                        .setTopicId(topicId1)
                        .setPartitions(Arrays.asList(14, 15, 16)),
                    new ConsumerGroupCurrentMemberAssignmentValue.TopicPartitions()
                        .setTopicId(topicId2)
                        .setPartitions(Arrays.asList(24, 25, 26)))),
            (short) 0));

    assertEquals(expectedRecord, newConsumerGroupCurrentAssignmentRecord(
        "group-id",
        new ConsumerGroupMember.Builder("member-id")
            .setState(MemberState.UNREVOKED_PARTITIONS)
            .setMemberEpoch(22)
            .setPreviousMemberEpoch(21)
            .setAssignedPartitions(assigned)
            .setPartitionsPendingRevocation(revoking)
            .build()
    ));
}
/**
 * Whether the namespace has un-released changes. With no active release, any "normal"
 * item (as judged by {@code hasNormalItems}) counts as a modification; otherwise the
 * latest released configuration is diffed against the configuration derived from the
 * current items.
 */
boolean isModified(Namespace namespace) {
  Release latestRelease = releaseService.findLatestActiveRelease(namespace);
  List<Item> currentItems = itemService.findItemsWithoutOrdered(namespace.getId());

  if (latestRelease == null) {
    // Never released yet: modified as soon as there is any normal item.
    return hasNormalItems(currentItems);
  }

  Map<String, String> releasedConfig = GSON.fromJson(latestRelease.getConfigurations(), GsonType.CONFIG);
  Map<String, String> currentConfig = generateConfigurationFromItems(namespace, currentItems);

  return !Maps.difference(releasedConfig, currentConfig).areEqual();
}
// Released config has k1 but current items only have k2 — the released key was
// effectively deleted, so the namespace must be reported as modified.
@Test
public void testNamespaceDeleteItem() {
  long namespaceId = 1;
  Namespace namespace = createNamespace(namespaceId);
  Release release = createRelease("{\"k1\":\"v1\"}");
  List<Item> items = Collections.singletonList(createItem("k2", "v2"));

  when(releaseService.findLatestActiveRelease(namespace)).thenReturn(release);
  when(itemService.findItemsWithoutOrdered(namespaceId)).thenReturn(items);
  when(namespaceService.findParentNamespace(namespace)).thenReturn(null);

  boolean isModified = namespaceUnlockAspect.isModified(namespace);

  Assert.assertTrue(isModified);
}
@Override public void bindTo(MeterRegistry registry) { List<BufferPoolMXBean> bufferPoolBeans = ManagementFactory.getPlatformMXBeans(BufferPoolMXBean.class); for (BufferPoolMXBean bufferPoolBean : bufferPoolBeans) { String name = bufferPoolBean.getName(); // avoid illegal characters due to beans named for example: "mapped - 'non-volatile memory'" String metricName = normalizeMetricName(name); Gauge.builder(PREFIX + "BufferPool.used.memory." + metricName, bufferPoolBean, BufferPoolMXBean::getMemoryUsed) .baseUnit(BaseUnits.BYTES) .description("The memory used by the NIO pool:" + name) .register(registry); } List<MemoryPoolMXBean> memoryPoolBeans = ManagementFactory.getMemoryPoolMXBeans(); for (MemoryPoolMXBean memoryPoolBean : memoryPoolBeans) { String name = memoryPoolBean.getName(); // avoid illegal characters due to beans named for example: "CodeHeap 'non-nmethods'" String metricName = normalizeMetricName(name); Gauge.builder(PREFIX + "memoryPool." + metricName + ".usage", memoryPoolBean, (mem) -> mem.getUsage().getUsed()) .baseUnit(BaseUnits.BYTES) .description("Current usage of the " + name + " memory pool") .register(registry); Gauge.builder(PREFIX + "memoryPool." + metricName + ".usage.max", memoryPoolBean, (mem) -> mem.getPeakUsage().getUsed()) .baseUnit(BaseUnits.BYTES) .description("Peak usage of the " + name + " memory pool") .register(registry); } }
// Verifies every metric name registered by bindTo() contains only [.\w] characters and neither begins nor ends with '_'.
@Test public void testMetricNamesContainOnlyValidCharacters() { SimpleMeterRegistry registry = new SimpleMeterRegistry(); new VendorAdditionalMetrics().bindTo(registry); SoftAssert asserts = new SoftAssert(); registry.getMeters().forEach(meter -> { asserts.assertTrue(meter.getId().getName().matches("[.\\w]+"), "metric name contains invalid characters: " + meter.getId().getName()); asserts.assertTrue(meter.getId().getName().matches("^[^_].*[^_]$"), "metric name should not begin or end with an underscore: " + meter.getId().getName()); }); asserts.assertAll(); registry.close(); }
public Optional<UfsStatus[]> listFromUfs(String path, boolean isRecursive) throws IOException { ListOptions ufsListOptions = ListOptions.defaults().setRecursive(isRecursive); UnderFileSystem ufs = getUfsInstance(path); try { UfsStatus[] listResults = ufs.listStatus(path, ufsListOptions); if (listResults != null) { return Optional.of(listResults); } } catch (IOException e) { if (!(e instanceof FileNotFoundException)) { throw e; } } // TODO(yimin) put the ufs status into the metastore // If list does not give a result, // the request path might either be a regular file/object or not exist. // Try getStatus() instead. try { UfsStatus status = ufs.getStatus(path); if (status == null) { return Optional.empty(); } // Success. Create an array with only one element. status.setName(""); // listStatus() expects relative name to the @path. return Optional.of(new UfsStatus[] {status}); } catch (FileNotFoundException e) { return Optional.empty(); } }
// Verifies listFromUfs() propagates a non-FileNotFound IOException thrown by the UFS listStatus call.
@Test public void testListFromUfsListUfsWhenFail() throws IOException { UnderFileSystem system = mock(UnderFileSystem.class); doThrow(new IOException()).when(system).listStatus(anyString(), any()); doReturn(system).when(mDoraUfsManager).getOrAdd(any(), any()); assertThrows(IOException.class, () -> { mManager.listFromUfs("/test", false); }); }
/**
 * Reconciles all Entity Operator resources strictly in sequence: service account,
 * roles, network policy, role bindings, logging config maps, the Cruise Control API
 * secret, deletion of the old combined entity-operator secret, the per-operator
 * certificate secrets, the Deployment itself, and finally waiting for the Deployment
 * to become ready. Each step starts only after the previous future completes.
 *
 * @param isOpenShift      whether the cluster is OpenShift
 * @param imagePullPolicy  image pull policy for the operator containers
 * @param imagePullSecrets image pull secrets for the operator containers
 * @param clock            clock passed to the certificate-secret generation steps
 * @return future that completes when every step above has completed
 */
public Future<Void> reconcile(boolean isOpenShift, ImagePullPolicy imagePullPolicy, List<LocalObjectReference> imagePullSecrets, Clock clock) {
    return serviceAccount()
            .compose(i -> entityOperatorRole())
            .compose(i -> topicOperatorRole())
            .compose(i -> userOperatorRole())
            .compose(i -> networkPolicy())
            .compose(i -> topicOperatorRoleBindings())
            .compose(i -> userOperatorRoleBindings())
            .compose(i -> topicOperatorConfigMap())
            .compose(i -> userOperatorConfigMap())
            .compose(i -> topicOperatorCruiseControlApiSecret())
            .compose(i -> deleteOldEntityOperatorSecret())
            .compose(i -> topicOperatorSecret(clock))
            .compose(i -> userOperatorSecret(clock))
            .compose(i -> deployment(isOpenShift, imagePullPolicy, imagePullSecrets))
            .compose(i -> waitForDeploymentReadiness());
}
// Verifies reconcile() with the Entity Operator disabled: every managed resource
// (service account, secrets, role, role bindings, network policy, config maps,
// deployment) is reconciled exactly once with a null desired state, i.e. deleted.
@Test public void reconcileWithoutEo(VertxTestContext context) { ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); DeploymentOperator mockDepOps = supplier.deploymentOperations; SecretOperator mockSecretOps = supplier.secretOperations; ServiceAccountOperator mockSaOps = supplier.serviceAccountOperations; RoleOperator mockRoleOps = supplier.roleOperations; RoleBindingOperator mockRoleBindingOps = supplier.roleBindingOperations; NetworkPolicyOperator mockNetPolicyOps = supplier.networkPolicyOperator; ConfigMapOperator mockCmOps = supplier.configMapOperations; ArgumentCaptor<ServiceAccount> saCaptor = ArgumentCaptor.forClass(ServiceAccount.class); when(mockSaOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorDeploymentName(NAME)), saCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor<Secret> operatorSecretCaptor = ArgumentCaptor.forClass(Secret.class); when(mockSecretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorSecretName(NAME)), operatorSecretCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor<Secret> toSecretCaptor = ArgumentCaptor.forClass(Secret.class); when(mockSecretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityTopicOperatorSecretName(NAME)), toSecretCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor<Secret> uoSecretCaptor = ArgumentCaptor.forClass(Secret.class); when(mockSecretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityUserOperatorSecretName(NAME)), uoSecretCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor<Role> operatorRoleCaptor = ArgumentCaptor.forClass(Role.class); when(mockRoleOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorDeploymentName(NAME)), operatorRoleCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor<RoleBinding> toRoleBindingCaptor = ArgumentCaptor.forClass(RoleBinding.class); when(mockRoleBindingOps.reconcile(any(), 
eq(NAMESPACE), eq(KafkaResources.entityTopicOperatorRoleBinding(NAME)), toRoleBindingCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor<RoleBinding> uoRoleBindingCaptor = ArgumentCaptor.forClass(RoleBinding.class); when(mockRoleBindingOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityUserOperatorRoleBinding(NAME)), uoRoleBindingCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor<NetworkPolicy> netPolicyCaptor = ArgumentCaptor.forClass(NetworkPolicy.class); when(mockNetPolicyOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorDeploymentName(NAME)), netPolicyCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor<ConfigMap> toCmCaptor = ArgumentCaptor.forClass(ConfigMap.class); when(mockCmOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityTopicOperatorLoggingConfigMapName(NAME)), toCmCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor<ConfigMap> uoCmCaptor = ArgumentCaptor.forClass(ConfigMap.class); when(mockCmOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityUserOperatorLoggingConfigMapName(NAME)), uoCmCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor<Deployment> depCaptor = ArgumentCaptor.forClass(Deployment.class); when(mockDepOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorDeploymentName(NAME)), depCaptor.capture())).thenReturn(Future.succeededFuture()); when(mockDepOps.waitForObserved(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorDeploymentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); when(mockDepOps.readiness(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorDeploymentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); EntityOperatorReconciler rcnclr = new EntityOperatorReconciler( Reconciliation.DUMMY_RECONCILIATION, ResourceUtils.dummyClusterOperatorConfig(), supplier, KAFKA, CLUSTER_CA ); Checkpoint async = context.checkpoint(); 
rcnclr.reconcile(false, null, null, Clock.systemUTC()) .onComplete(context.succeeding(v -> context.verify(() -> { assertThat(saCaptor.getAllValues().size(), is(1)); assertThat(saCaptor.getValue(), is(nullValue())); assertThat(operatorSecretCaptor.getAllValues().size(), is(1)); assertThat(operatorSecretCaptor.getAllValues().get(0), is(nullValue())); assertThat(toSecretCaptor.getAllValues().size(), is(1)); assertThat(toSecretCaptor.getAllValues().get(0), is(nullValue())); assertThat(uoSecretCaptor.getAllValues().size(), is(1)); assertThat(uoSecretCaptor.getAllValues().get(0), is(nullValue())); assertThat(netPolicyCaptor.getAllValues().size(), is(1)); assertThat(netPolicyCaptor.getValue(), is(nullValue())); assertThat(operatorRoleCaptor.getAllValues().size(), is(1)); assertThat(operatorRoleCaptor.getValue(), is(nullValue())); assertThat(toRoleBindingCaptor.getAllValues().size(), is(1)); assertThat(toRoleBindingCaptor.getAllValues().get(0), is(nullValue())); assertThat(uoRoleBindingCaptor.getAllValues().size(), is(1)); assertThat(uoRoleBindingCaptor.getAllValues().get(0), is(nullValue())); assertThat(toCmCaptor.getAllValues().size(), is(1)); assertThat(toCmCaptor.getValue(), is(nullValue())); assertThat(uoCmCaptor.getAllValues().size(), is(1)); assertThat(uoCmCaptor.getValue(), is(nullValue())); assertThat(depCaptor.getAllValues().size(), is(1)); assertThat(depCaptor.getValue(), is(nullValue())); async.flag(); }))); }
/**
 * Validates a configured log URL pattern. Blank values yield empty; a pattern with
 * no scheme is prefixed with "http://"; http/https patterns pass through unchanged;
 * any other scheme is rejected with a warning.
 *
 * @param config the configuration to read from
 * @param option the option holding the pattern
 * @return the normalized pattern, or empty when unset or using an unsupported scheme
 */
public static Optional<String> getValidLogUrlPattern(
        final Configuration config, final ConfigOption<String> option) {
    String pattern = config.get(option);
    if (StringUtils.isNullOrWhitespaceOnly(pattern)) {
        return Optional.empty();
    }
    pattern = pattern.trim();

    // indexOf() == -1 (no separator) is clamped to 0, yielding an empty scheme.
    final int schemeEnd = Math.max(pattern.indexOf(SCHEME_SEPARATOR), 0);
    final String scheme = pattern.substring(0, schemeEnd);
    if (scheme.isEmpty()) {
        // No scheme configured; default to http.
        return Optional.of(HTTP_SCHEME + SCHEME_SEPARATOR + pattern);
    }
    if (HTTP_SCHEME.equalsIgnoreCase(scheme) || HTTPS_SCHEME.equalsIgnoreCase(scheme)) {
        return Optional.of(pattern);
    }
    LOG.warn("Ignore configured value for '{}': unsupported scheme {}", option.key(), scheme);
    return Optional.empty();
}
// Verifies getValidLogUrlPattern() returns empty for both JM and TM history-server options when unset.
@Test void testGetValidLogUrlPatternNotConfigured() { Configuration config = new Configuration(); assertThat( LogUrlUtil.getValidLogUrlPattern( config, HISTORY_SERVER_JOBMANAGER_LOG_URL_PATTERN)) .isNotPresent(); assertThat( LogUrlUtil.getValidLogUrlPattern( config, HISTORY_SERVER_TASKMANAGER_LOG_URL_PATTERN)) .isNotPresent(); }
/**
 * Recomputes this node's self IP and publishes an {@code IPChangeEvent} when it
 * changed. Resolution order: explicitly configured Nacos IP, then the
 * hostname-preferring lookup, then the first non-loopback address.
 */
private static void refreshIp() {
    // 1) explicit configuration, 2) hostname preference, 3) first non-loopback address.
    String tmpSelfIp = getNacosIp();
    if (StringUtils.isBlank(tmpSelfIp)) {
        tmpSelfIp = getPreferHostnameOverIP();
    }
    if (StringUtils.isBlank(tmpSelfIp)) {
        tmpSelfIp = Objects.requireNonNull(findFirstNonLoopbackAddress()).getHostAddress();
    }
    // Wrap the address in IPv6 brackets when IPv6 is preferred and it is not wrapped yet.
    // NOTE(review): only the bracket markers are checked, not whether the value is
    // actually an IPv6 address — presumably guaranteed upstream; confirm.
    if (InternetAddressUtil.PREFER_IPV6_ADDRESSES && !tmpSelfIp.startsWith(InternetAddressUtil.IPV6_START_MARK) && !tmpSelfIp.endsWith(InternetAddressUtil.IPV6_END_MARK)) {
        tmpSelfIp = InternetAddressUtil.IPV6_START_MARK + tmpSelfIp + InternetAddressUtil.IPV6_END_MARK;
        // Drop the zone/scope suffix ("%eth0") that getHostAddress() may append.
        if (StringUtils.contains(tmpSelfIp, InternetAddressUtil.PERCENT_SIGN_IN_IPV6)) {
            tmpSelfIp = tmpSelfIp.substring(0, tmpSelfIp.indexOf(InternetAddressUtil.PERCENT_SIGN_IN_IPV6)) + InternetAddressUtil.IPV6_END_MARK;
        }
    }
    // Publish a change event only when the IP actually changed after the first resolution.
    if (!Objects.equals(selfIP, tmpSelfIp) && Objects.nonNull(selfIP)) {
        IPChangeEvent event = new IPChangeEvent();
        event.setOldIP(selfIP);
        event.setNewIP(tmpSelfIp);
        NotifyCenter.publishEvent(event);
    }
    selfIP = tmpSelfIp;
}
// Verifies a changed nacos.server.ip system property is picked up by the periodic refresh.
@Test void testRefreshIp() throws InterruptedException { assertEquals("1.1.1.1", InetUtils.getSelfIP()); System.setProperty(NACOS_SERVER_IP, "1.1.1.2"); TimeUnit.MILLISECONDS.sleep(300L); assertTrue(StringUtils.equalsIgnoreCase(InetUtils.getSelfIP(), "1.1.1.2")); }
/**
 * Returns the virtual port of the given device with the given port number, or null
 * when no such port exists (per the DeviceService contract of this implementation).
 *
 * @param deviceId   the device to look up; must not be null
 * @param portNumber the port number to match
 * @return the matching virtual port, or null when absent
 */
@Override
public Port getPort(DeviceId deviceId, PortNumber portNumber) {
    checkNotNull(deviceId, DEVICE_NULL);
    // Replaced the Optional.isPresent()/get() dance with the idiomatic orElse(null).
    return manager.getVirtualPorts(this.networkId, deviceId)
            .stream()
            .filter(port -> port.number().equals(portNumber))
            .findFirst()
            .orElse(null);
}
// Verifies getPort() returns the registered virtual port for its number, and not that port for an unknown number.
@Test
public void testGetPort() {
    manager.registerTenantId(TenantId.tenantId(tenantIdValue1));
    VirtualNetwork virtualNetwork = manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue1));
    VirtualDevice virtualDevice = manager.createVirtualDevice(virtualNetwork.id(), DID1);
    manager.createVirtualDevice(virtualNetwork.id(), DID2);
    DeviceService deviceService = manager.get(virtualNetwork.id(), DeviceService.class);
    ConnectPoint cp = new ConnectPoint(virtualDevice.id(), PortNumber.portNumber(1));
    VirtualPort virtualPort1 = manager.createVirtualPort(virtualNetwork.id(), virtualDevice.id(), PortNumber.portNumber(1), cp);
    manager.createVirtualPort(virtualNetwork.id(), virtualDevice.id(), PortNumber.portNumber(2), cp);
    // test the getPort() method
    assertEquals("The port did not match as expected.", virtualPort1, deviceService.getPort(DID1, PortNumber.portNumber(1)));
    assertNotEquals("The port did not match as expected.", virtualPort1, deviceService.getPort(DID1, PortNumber.portNumber(3)));
}
public ErrorResponse buildErrorResponse(RestLiServiceException result) { // In some cases, people use 3XX to signal client a redirection. This falls into the category of blurred boundary // whether this should be an error or not, in order to not disrupt change the behavior of existing code // Thus excluding logging errors for 3XX if (result.getStatus() != null && result.getStatus().getCode() < HttpStatus.S_300_MULTIPLE_CHOICES.getCode()) { // Invalid to send an error response with success status codes. This should be converted to 500 errors. // Logging an error message now to detect and fix current use cases before we start converting to 500. LOGGER.error("Incorrect use of success status code with error response", result); } if (result.getStatus() == HttpStatus.S_204_NO_CONTENT) { // HTTP Spec requires the response body to be empty for HTTP status 204. return new ErrorResponse(); } return buildErrorResponse(result, result.hasOverridingErrorResponseFormat() ? result.getOverridingFormat() : _errorResponseFormat); }
// Verifies FULL format keeps exception class/stack/doc-url/request-id, while an overriding
// MESSAGE_AND_SERVICECODE format strips them down to status/message/code.
@Test public void testErrorDetailsFromDataMap() { RestLiServiceException exception = new RestLiServiceException(HttpStatus.S_200_OK, "Some message", new IllegalStateException("Some other message")); exception.setCode("INVALID_SOMETHING"); exception.setDocUrl("www.documentation.com"); exception.setRequestId("id123"); exception.setErrorDetails((DataMap)null); Assert.assertFalse(exception.hasErrorDetails()); ErrorResponseBuilder builder = new ErrorResponseBuilder(ErrorResponseFormat.FULL); ErrorResponse errorResponse = builder.buildErrorResponse(exception); Assert.assertFalse(errorResponse.hasErrorDetails()); Assert.assertTrue(errorResponse.hasExceptionClass()); Assert.assertTrue(errorResponse.hasStatus()); Assert.assertTrue(errorResponse.hasMessage()); Assert.assertTrue(errorResponse.hasCode()); Assert.assertTrue(errorResponse.hasStackTrace()); Assert.assertTrue(errorResponse.hasDocUrl()); Assert.assertTrue(errorResponse.hasRequestId()); exception.setOverridingFormat(ErrorResponseFormat.MESSAGE_AND_SERVICECODE); errorResponse = builder.buildErrorResponse(exception); Assert.assertFalse(errorResponse.hasErrorDetails()); Assert.assertFalse(errorResponse.hasExceptionClass()); Assert.assertTrue(errorResponse.hasStatus()); Assert.assertTrue(errorResponse.hasMessage()); Assert.assertTrue(errorResponse.hasCode()); Assert.assertFalse(errorResponse.hasStackTrace()); Assert.assertFalse(errorResponse.hasDocUrl()); Assert.assertFalse(errorResponse.hasRequestId()); }
/**
 * Looks up the registered client for the given server, keyed by its issuer URL.
 * Returns null when no client is registered for that issuer.
 */
@Override
public RegisteredClient getClientConfiguration(ServerConfiguration issuer) {
    final String issuerUrl = issuer.getIssuer();
    return clients.get(issuerUrl);
}
// Verifies getClientConfiguration() returns null for an issuer that has no registered client.
@Test public void getClientConfiguration_noIssuer() { Mockito.when(mockServerConfig.getIssuer()).thenReturn("www.badexample.net"); RegisteredClient actualClient = service.getClientConfiguration(mockServerConfig); assertThat(actualClient, is(nullValue())); }
/**
 * Returns an aggregate operation that groups items by the key extracted with
 * {@code keyFn}, accumulating the items of each group into a list. Delegates to
 * the two-argument {@code groupingBy} with a {@code toList()} downstream operation.
 *
 * @param keyFn the grouping key function; must be serializable
 */
public static <T, K> AggregateOperation1<T, Map<K, List<T>>, Map<K, List<T>>> groupingBy(
        FunctionEx<? super T, ? extends K> keyFn
) {
    // Fail fast if the key function cannot be serialized for distribution.
    checkSerializable(keyFn, "keyFn");
    return groupingBy(keyFn, toList());
}
// Verifies groupingBy(keyFn) accumulates entries with different keys into separate single-element lists.
@Test public void when_groupingBy_withDifferentKey() { Entry<String, Integer> entryA = entry("a", 1); Entry<String, Integer> entryB = entry("b", 1); validateOpWithoutDeduct( groupingBy(entryKey()), identity(), entryA, entryB, asMap("a", singletonList(entryA)), asMap("a", singletonList(entryA), "b", singletonList(entryB)), asMap("a", singletonList(entryA), "b", singletonList(entryB)) ); }
/**
 * Lists the file paths under {@code fileUri} (host = bucket), optionally recursing.
 * The listed path itself and directory placeholders (keys ending with the delimiter)
 * are excluded.
 *
 * @param fileUri   the S3 URI whose contents are listed
 * @param recursive whether to list transitively
 * @return fully-qualified {@code s3://bucket/key} paths of the listed files
 * @throws IOException if the underlying S3 listing fails
 */
@Override
public String[] listFiles(URI fileUri, boolean recursive)
    throws IOException {
  ImmutableList.Builder<String> builder = ImmutableList.builder();
  visitFiles(fileUri, recursive, s3Object -> {
    // TODO: Looks like S3PinotFS filters out directories, inconsistent with the other implementations.
    // Only add files and not directories
    if (!s3Object.key().equals(fileUri.getPath()) && !s3Object.key().endsWith(DELIMITER)) {
      builder.add(S3_SCHEME + fileUri.getHost() + DELIMITER + getNormalizedFileKey(s3Object));
    }
  });
  String[] listedFiles = builder.build().toArray(new String[0]);
  LOGGER.info("Listed {} files from URI: {}, is recursive: {}", listedFiles.length, fileUri, recursive);
  return listedFiles;
}
// Verifies non-recursive listFiles() on a bucket returns the fully-qualified URIs of the top-level files.
@Test public void testListFilesInBucketNonRecursive() throws Exception { String[] originalFiles = new String[]{"a-list.txt", "b-list.txt", "c-list.txt"}; List<String> expectedFileNames = new ArrayList<>(); for (String fileName : originalFiles) { createEmptyFile("", fileName); expectedFileNames.add(String.format(FILE_FORMAT, SCHEME, BUCKET, fileName)); } String[] actualFiles = _s3PinotFS.listFiles(URI.create(String.format(DIR_FORMAT, SCHEME, BUCKET)), false); actualFiles = Arrays.stream(actualFiles).filter(x -> x.contains("list")).toArray(String[]::new); Assert.assertEquals(actualFiles.length, originalFiles.length); Assert.assertTrue(Arrays.equals(actualFiles, expectedFileNames.toArray())); }
void populateContainedMediaList(UUID jobId, TokensAndUrlAuthData authData) throws IOException, InvalidTokenException, PermissionDeniedException, UploadErrorException, FailedToListAlbumsException, FailedToListMediaItemsException { // This method is only called once at the beginning of the transfer, so we can start by // initializing a new TempMediaData to be store in the job store. TempMediaData tempMediaData = new TempMediaData(jobId); String albumToken = null; AlbumListResponse albumListResponse; MediaItemSearchResponse containedMediaSearchResponse; do { albumListResponse = listAlbums(jobId, authData, Optional.ofNullable(albumToken)); albumToken = albumListResponse.getNextPageToken(); if (albumListResponse.getAlbums() == null) { continue; } for (GoogleAlbum album : albumListResponse.getAlbums()) { String albumId = album.getId(); String photoToken = null; do { containedMediaSearchResponse = listMediaItems(jobId, authData, Optional.of(albumId), Optional.ofNullable(photoToken)); if (containedMediaSearchResponse.getMediaItems() != null) { for (GoogleMediaItem mediaItem : containedMediaSearchResponse.getMediaItems()) { tempMediaData.addContainedPhotoId(mediaItem.getId()); } } photoToken = containedMediaSearchResponse.getNextPageToken(); } while (photoToken != null); } albumToken = albumListResponse.getNextPageToken(); } while (albumToken != null); // TODO: if we see complaints about objects being too large for JobStore in other places, we // should consider putting logic in JobStore itself to handle it InputStream stream = convertJsonToInputStream(tempMediaData); jobStore.create(jobId, createCacheKey(), stream); }
// Verifies populateContainedMediaList() stores the IDs of both album-contained photos in the job store.
@Test
public void populateContainedMediaList() throws IOException, InvalidTokenException, PermissionDeniedException, UploadErrorException, FailedToListAlbumsException, FailedToListMediaItemsException {
    // Set up an album with two photos
    setUpSingleAlbum();
    when(albumListResponse.getNextPageToken()).thenReturn(null);
    MediaItemSearchResponse albumMediaResponse = mock(MediaItemSearchResponse.class);
    GoogleMediaItem firstPhoto = setUpSinglePhoto("some://fake/gphotoapi/uri", "some-upstream-generated-photo-id");
    String secondUri = "second uri";
    String secondId = "second id";
    GoogleMediaItem secondPhoto = setUpSinglePhoto(secondUri, secondId);
    when(photosInterface.listMediaItems(eq(Optional.of(ALBUM_ID)), any(Optional.class))) .thenReturn(albumMediaResponse);
    when(albumMediaResponse.getMediaItems()) .thenReturn(new GoogleMediaItem[] {firstPhoto, secondPhoto});
    when(albumMediaResponse.getNextPageToken()).thenReturn(null);
    // Run test
    googleMediaExporter.populateContainedMediaList(uuid, null);
    // Check contents of job store
    ArgumentCaptor<InputStream> inputStreamArgumentCaptor = ArgumentCaptor.forClass(InputStream.class);
    verify(jobStore).create(eq(uuid), eq("tempMediaData"), inputStreamArgumentCaptor.capture());
    TempMediaData tempMediaData = new ObjectMapper().readValue(inputStreamArgumentCaptor.getValue(), TempMediaData.class);
    assertThat(tempMediaData.lookupContainedPhotoIds()).containsExactly("some-upstream-generated-photo-id", secondId);
}
/**
 * Creates a positioned row of the given kind whose fields are set, in order, to the
 * given values.
 *
 * @param kind   the row's change kind (e.g. INSERT)
 * @param values field values, assigned to positions 0..n-1
 */
public static Row ofKind(RowKind kind, Object... values) {
    final Row row = new Row(kind, values.length);
    int position = 0;
    for (Object value : values) {
        row.setField(position++, value);
    }
    return row;
}
// Verifies positioned Row deep equals/hashCode: equal for structurally identical nested
// content, unequal when an Integer-array element, a map value byte, or the 2-D array's
// component type differs.
@Test
void testDeepEqualsAndHashCodePositioned() {
    final Map<String, byte[]> originalMap = new HashMap<>();
    originalMap.put("k1", new byte[] {1, 2, 3});
    originalMap.put("k2", new byte[] {3, 4, 6});
    final Row originalRow = Row.ofKind(
            RowKind.INSERT,
            true,
            new Integer[] {1, null, 3},
            Arrays.asList(1, null, 3),
            originalMap,
            Collections.emptyMap(),
            new int[][] {{1, 2, 3}, {}, {4, 5}},
            1.44);
    assertThat(originalRow).isEqualTo(originalRow);
    assertThat(originalRow).hasSameHashCodeAs(originalRow);
    {
        // no diff
        final Row row = Row.ofKind(
                RowKind.INSERT,
                true,
                new Integer[] {1, null, 3},
                Arrays.asList(1, null, 3),
                originalMap,
                Collections.emptyMap(),
                new int[][] {{1, 2, 3}, {}, {4, 5}},
                1.44);
        assertThat(originalRow).isEqualTo(row);
        assertThat(originalRow).hasSameHashCodeAs(row);
    }
    {
        final Map<String, byte[]> map = new HashMap<>();
        map.put("k1", new byte[] {1, 2, 3});
        map.put("k2", new byte[] {3, 4, 6});
        final Row row = Row.ofKind(
                RowKind.INSERT,
                true,
                new Integer[] {1, null, 3, 99}, // diff here
                Arrays.asList(1, null, 3),
                map,
                Collections.emptyMap(),
                new int[][] {{1, 2, 3}, {}, {4, 5}},
                1.44);
        assertThat(originalRow).isNotEqualTo(row);
        assertThat(originalRow).doesNotHaveSameHashCodeAs(row);
    }
    {
        final Map<String, byte[]> map = new HashMap<>();
        map.put("k1", new byte[] {1, 2, 2}); // diff here
        map.put("k2", new byte[] {3, 4, 6});
        final Row row = Row.ofKind(
                RowKind.INSERT,
                true,
                new Integer[] {1, null, 3},
                Arrays.asList(1, null, 3),
                map,
                Collections.emptyMap(),
                new int[][] {{1, 2, 3}, {}, {4, 5}},
                1.44);
        assertThat(originalRow).isNotEqualTo(row);
        assertThat(originalRow).doesNotHaveSameHashCodeAs(row);
    }
    {
        final Map<String, byte[]> map = new HashMap<>();
        map.put("k1", new byte[] {1, 2, 3});
        map.put("k2", new byte[] {3, 4, 6});
        final Row row = Row.ofKind(
                RowKind.INSERT,
                true,
                new Integer[] {1, null, 3},
                Arrays.asList(1, null, 3),
                map,
                Collections.emptyMap(),
                new Integer[][] {{1, 2, 3}, {}, {4, 5}}, // diff here
                1.44);
        assertThat(originalRow).isNotEqualTo(row);
        assertThat(originalRow).doesNotHaveSameHashCodeAs(row);
    }
}
/**
 * Rolls over to a new file segment when the current write segment is both old enough
 * and large enough: its minimum timestamp must be at least {@code interval} ms in the
 * past and its append position must reach the configured commit-log rolling minimum
 * size. A segment whose minimum timestamp is {@code Long.MAX_VALUE} (presumably the
 * "nothing appended yet" sentinel — TODO confirm) is never rolled.
 *
 * @param interval minimum segment age in milliseconds before rolling is allowed
 * @return true when a new file was rolled, false otherwise
 */
public boolean tryRollingFile(long interval) {
    FileSegment fileSegment = this.getFileToWrite();
    long timestamp = fileSegment.getMinTimestamp();
    if (timestamp != Long.MAX_VALUE && timestamp + interval < System.currentTimeMillis() && fileSegment.getAppendPosition() >= fileSegmentFactory.getStoreConfig().getCommitLogRollingMinimumSize()) {
        this.rollingNewFile(this.getAppendOffset());
        return true;
    }
    return false;
}
// Verifies tryRollingFile() rolls when both the age interval and the minimum size are met,
// and refuses to roll when the interval has not yet elapsed.
@Test public void tryRollingFileTest() throws InterruptedException { String filePath = MessageStoreUtil.toFilePath(queue); FlatCommitLogFile flatFile = flatFileFactory.createFlatFileForCommitLog(filePath); for (int i = 0; i < 3; i++) { ByteBuffer byteBuffer = MessageFormatUtilTest.buildMockedMessageBuffer(); byteBuffer.putLong(MessageFormatUtil.QUEUE_OFFSET_POSITION, i); Assert.assertEquals(AppendResult.SUCCESS, flatFile.append(byteBuffer, i)); TimeUnit.MILLISECONDS.sleep(2); storeConfig.setCommitLogRollingMinimumSize(byteBuffer.remaining()); Assert.assertTrue(flatFile.tryRollingFile(1)); } Assert.assertEquals(4, flatFile.fileSegmentTable.size()); Assert.assertFalse(flatFile.tryRollingFile(1000)); flatFile.destroy(); }
/**
 * Parses a single {@code MetadataUpdate} from its JSON representation by delegating
 * to the node-based {@code fromJson} overload via {@code JsonUtil.parse}.
 *
 * @param json the JSON string to parse
 * @return the parsed metadata update
 */
public static MetadataUpdate fromJson(String json) {
    return JsonUtil.parse(json, MetadataUpdateParser::fromJson);
}
// Verifies a set-snapshot-ref tag parses correctly when the optional retention fields are explicit JSON nulls.
@Test public void testSetSnapshotRefTagFromJsonAllFields_ExplicitNullValues() { String action = MetadataUpdateParser.SET_SNAPSHOT_REF; long snapshotId = 1L; SnapshotRefType type = SnapshotRefType.TAG; String refName = "hank"; Integer minSnapshotsToKeep = null; Long maxSnapshotAgeMs = null; Long maxRefAgeMs = 1L; String json = "{\"action\":\"set-snapshot-ref\",\"ref-name\":\"hank\",\"snapshot-id\":1,\"type\":\"tag\"," + "\"max-ref-age-ms\":1,\"min-snapshots-to-keep\":null,\"max-snapshot-age-ms\":null}"; MetadataUpdate expected = new MetadataUpdate.SetSnapshotRef( refName, snapshotId, type, minSnapshotsToKeep, maxSnapshotAgeMs, maxRefAgeMs); assertEquals(action, expected, MetadataUpdateParser.fromJson(json)); }
/**
 * Throws {@code SectionNameDuplicateException} when the given list contains the same
 * name more than once; completes normally otherwise.
 *
 * @param names the section names to check
 */
public void checkIsDuplicateName(List<String> names) {
    // HashSet.add() returns false for an element that is already present.
    Set<String> seen = new HashSet<>();
    for (String name : names) {
        if (!seen.add(name)) {
            throw new SectionNameDuplicateException();
        }
    }
}
// Verifies a duplicated section name in the request triggers SectionNameDuplicateException.
// (Method name is Korean: "returns an error when the request contains duplicate names".)
@Test
void 요청에_중복된_이름이_있다면_에러를_반환한다() {
    // given
    List<String> names = List.of("Section1", "Section1", "Section2");
    // when
    // then
    assertThrows( SectionNameDuplicateException.class, () -> createSectionService.checkIsDuplicateName(names));
}
/**
 * Returns this instance's registration status by delegating to the underlying
 * service-center client.
 */
@Override
public String getInstanceStatus() {
    return client.getInstanceStatus();
}
// Verifies getInstanceStatus() returns the status reported by the underlying client.
@Test public void getInstanceStatus() { Assert.assertEquals(status, scRegister.getInstanceStatus()); }
public static String toCloudDuration(ReadableDuration duration) { // Note that since Joda objects use millisecond resolution, we always // produce either no fractional seconds or fractional seconds with // millisecond resolution. long millis = duration.getMillis(); long seconds = millis / 1000; millis = millis % 1000; if (millis == 0) { return String.format("%ds", seconds); } else { return String.format("%d.%03ds", seconds, millis); } }
// Verifies toCloudDuration() prints whole seconds without a fraction and milliseconds as a 3-digit fraction.
@Test public void toCloudDurationShouldPrintDurationStrings() { assertEquals("0s", toCloudDuration(Duration.ZERO)); assertEquals("4s", toCloudDuration(Duration.millis(4000))); assertEquals("4.001s", toCloudDuration(Duration.millis(4001))); }
/**
 * Reads the char at the current cursor position and advances the cursor past the
 * bytes consumed.
 *
 * @throws EOFException when fewer than CHAR_SIZE_IN_BYTES bytes remain
 */
@Override
public final char readChar() throws EOFException {
    final char value = readChar(pos);
    pos += CHAR_SIZE_IN_BYTES;
    return value;
}
// Verifies positioned readChar(0) matches Bits.readChar on the backing data for the configured byte order.
@Test public void testReadCharPosition() throws Exception { char c = in.readChar(0); char expected = Bits.readChar(INIT_DATA, 0, byteOrder == BIG_ENDIAN); assertEquals(expected, c); }
/**
 * Invokes the target invoker with mock behavior controlled by the method-level
 * "mock" URL parameter:
 * - empty value: plain invocation, no mocking;
 * - value starting with "force": skip the real invocation and return the mock result;
 * - any other value (fail-mock): invoke first, falling back to the mock result when
 *   the call fails with a non-business RpcException; business exceptions are rethrown.
 */
@Override
public Result invoke(Invocation invocation) throws RpcException {
    Result result;
    String value = getUrl().getMethodParameter( RpcUtils.getMethodName(invocation), MOCK_KEY, Boolean.FALSE.toString()) .trim();
    if (ConfigUtils.isEmpty(value)) {
        // no mock
        result = this.invoker.invoke(invocation);
    } else if (value.startsWith(FORCE_KEY)) {
        if (logger.isWarnEnabled()) {
            logger.warn( CLUSTER_FAILED_MOCK_REQUEST, "force mock", "", "force-mock: " + RpcUtils.getMethodName(invocation) + " force-mock enabled , url : " + getUrl());
        }
        // force:direct mock
        result = doMockInvoke(invocation, null);
    } else {
        // fail-mock
        try {
            result = this.invoker.invoke(invocation);
            // fix:#4585
            // The Result may carry the exception instead of throwing it; treat a
            // non-business RpcException inside the result the same as a thrown one.
            if (result.getException() != null && result.getException() instanceof RpcException) {
                RpcException rpcException = (RpcException) result.getException();
                if (rpcException.isBiz()) {
                    throw rpcException;
                } else {
                    result = doMockInvoke(invocation, rpcException);
                }
            }
        } catch (RpcException e) {
            if (e.isBiz()) {
                throw e;
            }
            if (logger.isWarnEnabled()) {
                logger.warn( CLUSTER_FAILED_MOCK_REQUEST, "failed to mock invoke", "", "fail-mock: " + RpcUtils.getMethodName(invocation) + " fail-mock enabled , url : " + getUrl(), e);
            }
            result = doMockInvoke(invocation, e);
        }
    }
    return result;
}
// Verifies "mock=force" in the refer URL forces the mock implementation ("somethingmock")
// without invoking the remote service.
@Test
void testMockInvokerFromOverride_Invoke_checkCompatible_ImplMock3() {
    URL url = URL.valueOf("remote://1.2.3.4/" + IHelloService.class.getName()) .addParameter( REFER_KEY, URL.encode(PATH_KEY + "=" + IHelloService.class.getName() + "&" + "mock=force"));
    Invoker<IHelloService> cluster = getClusterInvoker(url);
    // Configured with mock
    RpcInvocation invocation = new RpcInvocation();
    invocation.setMethodName("getSomething");
    Result ret = cluster.invoke(invocation);
    Assertions.assertEquals("somethingmock", ret.getValue());
}
/**
 * Computes a replica assignment for every partition of the requested topic.
 * Validates the replication factor against the available/unfenced brokers first,
 * then asks the RackList for one placement per partition, in partition order.
 *
 * @throws InvalidReplicationFactorException when the replication factor is
 *         non-positive, no brokers are unfenced, or there are too few brokers
 */
@Override
public TopicAssignment place(
    PlacementSpec placement,
    ClusterDescriber cluster
) throws InvalidReplicationFactorException {
    RackList rackList = new RackList(random, cluster.usableBrokers());
    throwInvalidReplicationFactorIfNonPositive(placement.numReplicas());
    throwInvalidReplicationFactorIfZero(rackList.numUnfencedBrokers());
    throwInvalidReplicationFactorIfTooFewBrokers(placement.numReplicas(), rackList.numTotalBrokers());
    // One rackList.place() call per partition, in the same order as before.
    List<PartitionAssignment> assignments = new ArrayList<>(placement.numPartitions());
    for (int partition = 0; partition < placement.numPartitions(); partition++) {
        assignments.add(new PartitionAssignment(rackList.place(placement.numReplicas()), cluster));
    }
    return new TopicAssignment(assignments);
}
// Verifies RackList broker counts and successive rack-aware placements across three racks with one fenced broker.
@Test public void testRackListWithMultipleRacks() { MockRandom random = new MockRandom(); RackList rackList = new RackList(random, Arrays.asList( new UsableBroker(11, Optional.of("1"), false), new UsableBroker(10, Optional.of("1"), false), new UsableBroker(30, Optional.of("3"), false), new UsableBroker(31, Optional.of("3"), false), new UsableBroker(21, Optional.of("2"), false), new UsableBroker(20, Optional.of("2"), true)).iterator()); assertEquals(6, rackList.numTotalBrokers()); assertEquals(5, rackList.numUnfencedBrokers()); assertEquals(Arrays.asList(Optional.of("1"), Optional.of("2"), Optional.of("3")), rackList.rackNames()); assertEquals(Arrays.asList(11, 21, 31, 10), rackList.place(4)); assertEquals(Arrays.asList(21, 30, 10, 20), rackList.place(4)); assertEquals(Arrays.asList(31, 11, 21, 30), rackList.place(4)); }
/**
 * Decodes a raw GELF payload into a {@code Message}: parse the (possibly compressed)
 * JSON, validate the mandatory GELF fields, derive the timestamp (falling back to the
 * raw message's receive time when the GELF timestamp is absent or non-positive), build
 * the message from "short_message"/"host", then copy the optional standard fields and
 * all remaining additional fields.
 *
 * @throws IllegalStateException    when the payload cannot be parsed as JSON
 * @throws IllegalArgumentException when mandatory GELF fields are missing or invalid
 */
@Nullable
@Override
public Message decode(@Nonnull final RawMessage rawMessage) {
    final GELFMessage gelfMessage = new GELFMessage(rawMessage.getPayload(), rawMessage.getRemoteAddress());
    final String json = gelfMessage.getJSON(decompressSizeLimit, charset);
    final JsonNode node;
    try {
        node = objectMapper.readTree(json);
        if (node == null) {
            throw new IOException("null result");
        }
    } catch (final Exception e) {
        log.error("Could not parse JSON, first 400 characters: " + StringUtils.abbreviate(json, 403), e);
        throw new IllegalStateException("JSON is null/could not be parsed (invalid JSON)", e);
    }
    try {
        validateGELFMessage(node, rawMessage.getId(), rawMessage.getRemoteAddress());
    } catch (IllegalArgumentException e) {
        log.trace("Invalid GELF message <{}>", node);
        throw e;
    }
    // Timestamp.
    final double messageTimestamp = timestampValue(node);
    final DateTime timestamp;
    if (messageTimestamp <= 0) {
        // No (valid) GELF timestamp: fall back to the receive time.
        timestamp = rawMessage.getTimestamp();
    } else {
        // we treat this as a unix timestamp
        timestamp = Tools.dateTimeFromDouble(messageTimestamp);
    }
    final Message message = messageFactory.createMessage( stringValue(node, "short_message"), stringValue(node, "host"), timestamp );
    message.addField(Message.FIELD_FULL_MESSAGE, stringValue(node, "full_message"));
    final String file = stringValue(node, "file");
    if (file != null && !file.isEmpty()) {
        message.addField("file", file);
    }
    final long line = longValue(node, "line");
    if (line > -1) {
        message.addField("line", line);
    }
    // Level is set by server if not specified by client.
    final int level = intValue(node, "level");
    if (level > -1) {
        message.addField("level", level);
    }
    // Facility is set by server if not specified by client.
    final String facility = stringValue(node, "facility");
    if (facility != null && !facility.isEmpty()) {
        message.addField("facility", facility);
    }
    // Add additional data if there is some.
    final Iterator<Map.Entry<String, JsonNode>> fields = node.fields();
    while (fields.hasNext()) {
        final Map.Entry<String, JsonNode> entry = fields.next();
        String key = entry.getKey();
        // Do not index useless GELF "version" field.
        if ("version".equals(key)) {
            continue;
        }
        // Don't include GELF syntax underscore in message field key.
        if (key.startsWith("_") && key.length() > 1) {
            key = key.substring(1);
        }
        // We already set short_message and host as message and source. Do not add as fields again.
        if ("short_message".equals(key) || "host".equals(key)) {
            continue;
        }
        // Skip standard or already set fields.
        if (message.getField(key) != null || Message.RESERVED_FIELDS.contains(key) && !Message.RESERVED_SETTABLE_FIELDS.contains(key)) {
            continue;
        }
        // Convert JSON containers to Strings, and pick a suitable number representation.
        final JsonNode value = entry.getValue();
        final Object fieldValue;
        if (value.isContainerNode()) {
            fieldValue = value.toString();
        } else if (value.isFloatingPointNumber()) {
            fieldValue = value.asDouble();
        } else if (value.isIntegralNumber()) {
            fieldValue = value.asLong();
        } else if (value.isNull()) {
            log.debug("Field [{}] is NULL. Skipping.", key);
            continue;
        } else if (value.isTextual()) {
            fieldValue = value.asText();
        } else {
            log.debug("Field [{}] has unknown value type. Skipping.", key);
            continue;
        }
        message.addField(key, fieldValue);
    }
    return message;
}
// Verifies that the validation error for a GELF message missing "short_message"/"message"
// includes the remote source address of the sender in its message text.
@Test
public void decodeIncludesSourceAddressIfItFails() throws Exception {
    final String json = "{"
            + "\"version\": \"1.1\","
            + "\"host\": \"example.org\""
            + "}";
    final RawMessage rawMessage = new RawMessage(json.getBytes(StandardCharsets.UTF_8), new InetSocketAddress("198.51.100.42", 24783));
    assertThatIllegalArgumentException().isThrownBy(() -> codec.decode(rawMessage))
            .withNoCause()
            .withMessageMatching("GELF message <[0-9a-f-]+> \\(received from <198\\.51\\.100\\.42:24783>\\) is missing mandatory \"short_message\" or \"message\" field.");
}
/**
 * Runs this computation step for the root of the component tree.
 * The {@code context} parameter is not used by this implementation.
 */
@Override
public void execute(ComputationStep.Context context) {
    executeForBranch(treeRootHolder.getRoot());
}
// Verifies that an exception thrown while resolving the QP rule-change text is logged
// as an error and swallowed: no event is added and the step completes normally.
@Test
public void givenRuleTextResolverException_whenEventStep_thenLogAndContinue() {
    // given
    logTester.setLevel(Level.ERROR);
    QualityProfile existingQP = qp(QP_NAME_1, LANGUAGE_KEY_1, BEFORE_DATE);
    QualityProfile newQP = qp(QP_NAME_1, LANGUAGE_KEY_1, AFTER_DATE);
    // mock updated profile
    qProfileStatusRepository.register(newQP.getQpKey(), UPDATED);
    mockQualityProfileMeasures(treeRootHolder.getRoot(), arrayOf(existingQP), arrayOf(newQP));
    when(qualityProfileRuleChangeTextResolver.mapChangeToNumberOfRules(newQP, treeRootHolder.getRoot().getUuid())).thenThrow(new RuntimeException("error"));
    var context = new TestComputationStepContext();

    // when
    underTest.execute(context);

    // then
    assertThat(logTester.logs(Level.ERROR)).containsExactly("Failed to generate 'change' event for Quality Profile " + newQP.getQpKey());
    verify(eventRepository, never()).add(any(Event.class));
}
/**
 * Creates a {@code Read} transform producing {@code JmsRecord}s with default settings:
 * unlimited records, a serializable coder, the default close timeout, deduping disabled,
 * and a mapper that converts JMS {@code TextMessage}s (including all message properties
 * and standard JMS headers) into {@code JmsRecord}s.
 *
 * <p>Note: the mapper casts every incoming {@code Message} to {@code TextMessage};
 * non-text messages will fail with a {@code ClassCastException} at read time.
 */
public static Read<JmsRecord> read() {
    return new AutoValue_JmsIO_Read.Builder<JmsRecord>()
            .setMaxNumRecords(Long.MAX_VALUE)
            .setCoder(SerializableCoder.of(JmsRecord.class))
            .setCloseTimeout(DEFAULT_CLOSE_TIMEOUT)
            .setRequiresDeduping(false)
            .setMessageMapper(
                    new MessageMapper<JmsRecord>() {
                        @Override
                        public JmsRecord mapMessage(Message message) throws Exception {
                            TextMessage textMessage = (TextMessage) message;
                            Map<String, Object> properties = new HashMap<>();
                            @SuppressWarnings("rawtypes")
                            Enumeration propertyNames = textMessage.getPropertyNames();
                            // Copy every JMS property onto the record
                            while (propertyNames.hasMoreElements()) {
                                String propertyName = (String) propertyNames.nextElement();
                                properties.put(propertyName, textMessage.getObjectProperty(propertyName));
                            }
                            return new JmsRecord(
                                    textMessage.getJMSMessageID(),
                                    textMessage.getJMSTimestamp(),
                                    textMessage.getJMSCorrelationID(),
                                    textMessage.getJMSReplyTo(),
                                    textMessage.getJMSDestination(),
                                    textMessage.getJMSDeliveryMode(),
                                    textMessage.getJMSRedelivered(),
                                    textMessage.getJMSType(),
                                    textMessage.getJMSExpiration(),
                                    textMessage.getJMSPriority(),
                                    properties,
                                    textMessage.getText());
                        }
                    })
            .build();
}
// Verifies that discarding a pending checkpoint prevents the subsequent
// finalizeCheckpoint() from ACKing the messages consumed since the last finalize.
@Test
public void testDiscardCheckpointMark() throws Exception {
    Connection connection =
            connectionFactoryWithSyncAcksAndWithoutPrefetch.createConnection(USERNAME, PASSWORD);
    connection.start();
    Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
    MessageProducer producer = session.createProducer(session.createQueue(QUEUE));
    for (int i = 0; i < 10; i++) {
        producer.send(session.createTextMessage("test " + i));
    }
    producer.close();
    session.close();
    connection.close();

    JmsIO.Read spec =
            JmsIO.read()
                    .withConnectionFactory(connectionFactoryWithSyncAcksAndWithoutPrefetch)
                    .withUsername(USERNAME)
                    .withPassword(PASSWORD)
                    .withQueue(QUEUE);
    JmsIO.UnboundedJmsSource source = new JmsIO.UnboundedJmsSource(spec);
    JmsIO.UnboundedJmsReader reader = source.createReader(PipelineOptionsFactory.create(), null);

    // start the reader and move to the first record
    assertTrue(reader.start());

    // consume 3 more messages (NB: start already consumed the first message)
    for (int i = 0; i < 3; i++) {
        assertTrue(reader.advance());
    }

    // the messages are still pending in the queue (no ACK yet)
    assertEquals(10, count(QUEUE));

    // we finalize the checkpoint
    reader.getCheckpointMark().finalizeCheckpoint();

    // the checkpoint finalize ack the messages, and so they are not pending in the queue anymore
    assertEquals(6, count(QUEUE));

    // we read the 6 pending messages
    for (int i = 0; i < 6; i++) {
        assertTrue(reader.advance());
    }

    // still 6 pending messages as we didn't finalize the checkpoint
    assertEquals(6, count(QUEUE));

    // But here we discard the pending checkpoint
    reader.checkpointMarkPreparer.discard();
    // we finalize the checkpoint: no messages should be acked
    reader.getCheckpointMark().finalizeCheckpoint();

    assertEquals(6, count(QUEUE));
}
/**
 * Prepares an assertion that the actual value is NOT within {@code tolerance}
 * of a value supplied later via {@code of(expected)}.
 * Fails if the actual value is null or if the two values are within tolerance.
 */
public TolerantDoubleComparison isNotWithin(double tolerance) {
    return new TolerantDoubleComparison() {
        @Override
        public void of(double expected) {
            Double actual = DoubleSubject.this.actual;
            checkNotNull(
                    actual, "actual value cannot be null. tolerance=%s expected=%s", tolerance, expected);
            checkTolerance(tolerance);
            if (!notEqualWithinTolerance(actual, expected, tolerance)) {
                failWithoutActual(
                        fact("expected not to be", doubleToString(expected)),
                        butWas(),
                        fact("within tolerance", doubleToString(tolerance)));
            }
        }
    };
}
// Exercises isNotWithin().of() on equal values, near/far values, infinities and NaN.
@Test
public void isNotWithinOf() {
    assertThatIsNotWithinFails(2.0, 0.0, 2.0);
    assertThatIsNotWithinFails(2.0, 0.00001, 2.0);
    assertThatIsNotWithinFails(2.0, 1000.0, 2.0);
    assertThatIsNotWithinFails(2.0, 1.00001, 3.0);
    assertThat(2.0).isNotWithin(0.99999).of(3.0);
    assertThat(2.0).isNotWithin(1000.0).of(1003.0);
    assertThatIsNotWithinFails(2.0, 0.0, Double.POSITIVE_INFINITY);
    assertThatIsNotWithinFails(2.0, 0.0, Double.NaN);
    assertThatIsNotWithinFails(Double.NEGATIVE_INFINITY, 1000.0, 2.0);
    assertThatIsNotWithinFails(Double.NaN, 1000.0, 2.0);
}
/**
 * Returns a mock {@code Throwable} instance for the given exception class name,
 * constructed via its {@code (String)} constructor with a fixed message.
 * Results are cached (cache bounded at 1000 entries; a cache miss past that size
 * simply rebuilds the instance each time).
 *
 * <p>NOTE(review): the check-then-put on THROWABLE_MAP is not atomic — presumably
 * the map is concurrent and a duplicate put is harmless; confirm.
 *
 * @throws RpcException if the class cannot be loaded or instantiated
 */
public static Throwable getThrowable(String throwstr) {
    Throwable throwable = THROWABLE_MAP.get(throwstr);
    if (throwable != null) {
        return throwable;
    }
    try {
        Throwable t;
        Class<?> bizException = ReflectUtils.forName(throwstr);
        Constructor<?> constructor;
        constructor = ReflectUtils.findConstructor(bizException, String.class);
        t = (Throwable) constructor.newInstance(new Object[] {"mocked exception for service degradation."});
        if (THROWABLE_MAP.size() < 1000) {
            THROWABLE_MAP.put(throwstr, t);
        }
        return t;
    } catch (Exception e) {
        throw new RpcException("mock throw error :" + throwstr + " argument error.", e);
    }
}
// "Exception.class" is not a loadable class name, so getThrowable must wrap the
// failure in an RpcException.
@Test
void testGetThrowable() {
    Assertions.assertThrows(RpcException.class, () -> MockInvoker.getThrowable("Exception.class"));
}
/**
 * Resolves the inter-node listener URL, delegating to the overload that accepts
 * an explicit logger (here the class-level LOGGER is used).
 */
public URL getInterNodeListener(
        final Function<URL, Integer> portResolver
) {
    return getInterNodeListener(portResolver, LOGGER);
}
// An explicitly configured IPv4 loopback inter-node listener must be used as-is,
// while logging both the chosen listener and a loopback warning.
@Test
public void shouldUseExplicitInterNodeListenerIfSetToIpv4Loopback() {
    // Given:
    final URL expected = url("https://127.0.0.2:12345");
    final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
            .putAll(MIN_VALID_CONFIGS)
            .put(ADVERTISED_LISTENER_CONFIG, expected.toString())
            .build()
    );

    // When:
    final URL actual = config.getInterNodeListener(portResolver, logger);

    // Then:
    assertThat(actual, is(expected));
    verifyLogsInterNodeListener(expected, QUOTED_INTER_NODE_LISTENER_CONFIG);
    verifyLogsLoopBackWarning(expected, QUOTED_INTER_NODE_LISTENER_CONFIG);
    verifyNoMoreInteractions(logger);
}
/**
 * Deletes this file and all of its descendants by delegating to the resolved
 * file object; resolution failure surfaces as a FileSystemException.
 *
 * @return the number of deleted objects
 */
@Override
public int deleteAll() throws FileSystemException {
    return requireResolvedFileObject().deleteAll();
}
// deleteAll() must delegate exactly once to the resolved file object.
@Test
public void testDelegatesDeleteAll() throws FileSystemException {
    fileObject.deleteAll();
    verify( resolvedFileObject, times( 1 ) ).deleteAll();
}
/**
 * Applies this filter to the given metadata in place.
 *
 * @param metadata metadata to mutate
 * @throws TikaException if filtering fails
 */
public abstract void filter(Metadata metadata) throws TikaException;
// Loads a config combining an include-field filter with an upper-casing filter and
// checks that only included fields survive, all upper-cased (multi-valued included).
@Test
public void testConfigIncludeAndUCFilter() throws Exception {
    TikaConfig config = getConfig("TIKA-3137-include-uc.xml");
    String[] expectedTitles = new String[]{"TITLE1", "TITLE2", "TITLE3"};
    Metadata metadata = new Metadata();
    metadata.add("title", "title1");
    metadata.add("title", "title2");
    metadata.add("title", "title3");
    metadata.set("author", "author");
    metadata.set("content", "content");
    config.getMetadataFilter().filter(metadata);
    assertEquals(2, metadata.size());
    assertArrayEquals(expectedTitles, metadata.getValues("title"));
    assertEquals("AUTHOR", metadata.get("author"));
}
/**
 * Deserializes JSON (optionally JSON-Schema framed) bytes from a Kafka topic into
 * the target type, coercing the parsed tree to the configured schema.
 * Returns null for null input (tombstones).
 *
 * @throws SerializationException on any parse/coercion failure; the JSON parse
 *         location is cleared first so payload data is not leaked into logs
 */
@Override
public T deserialize(final String topic, final byte[] bytes) {
    try {
        if (bytes == null) {
            return null;
        }

        // don't use the JsonSchemaConverter to read this data because
        // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS,
        // which is not currently available in the standard converters
        final JsonNode value = isJsonSchema
                ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class)
                : MAPPER.readTree(bytes);

        final Object coerced = enforceFieldType(
                "$",
                new JsonValueContext(value, schema)
        );

        if (LOG.isTraceEnabled()) {
            LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced);
        }

        return SerdeUtils.castToTargetType(coerced, targetType);
    } catch (final Exception e) {
        // Clear location in order to avoid logging data, for security reasons
        if (e instanceof JsonParseException) {
            ((JsonParseException) e).clearLocation();
        }
        throw new SerializationException(
                "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e);
    }
}
// A boolean JSON value cannot be coerced to a TIME column; the deserializer
// must fail with a SerializationException carrying a descriptive cause.
@Test
public void shouldThrowIfCanNotCoerceToTime() {
    // Given:
    final KsqlJsonDeserializer<java.sql.Time> deserializer =
            givenDeserializerForSchema(Time.SCHEMA, java.sql.Time.class);
    final byte[] bytes = serializeJson(BooleanNode.valueOf(true));

    // When:
    final Exception e = assertThrows(
            SerializationException.class,
            () -> deserializer.deserialize(SOME_TOPIC, bytes)
    );

    // Then:
    assertThat(e.getCause(), (hasMessage(startsWith(
            "Can't convert type. sourceType: BooleanNode, requiredType: TIME"))));
}
/**
 * Computes the route context for a query.
 * Resolution order:
 * 1. An explicit data source hint short-circuits routing to that unit.
 * 2. Otherwise each registered SQL router contributes: the first EntranceSQLRouter
 *    creates the context (only while it is still empty), DecorateSQLRouters refine it.
 * 3. If still empty and exactly one storage unit exists, route to that single source.
 */
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
public RouteContext route(final ConnectionContext connectionContext, final QueryContext queryContext,
                          final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database) {
    RouteContext result = new RouteContext();
    Optional<String> dataSourceName = findDataSourceByHint(queryContext.getHintValueContext(), database.getResourceMetaData().getStorageUnits());
    if (dataSourceName.isPresent()) {
        result.getRouteUnits().add(new RouteUnit(new RouteMapper(dataSourceName.get(), dataSourceName.get()), Collections.emptyList()));
        return result;
    }
    for (Entry<ShardingSphereRule, SQLRouter> entry : routers.entrySet()) {
        if (result.getRouteUnits().isEmpty() && entry.getValue() instanceof EntranceSQLRouter) {
            result = ((EntranceSQLRouter) entry.getValue()).createRouteContext(queryContext, globalRuleMetaData, database, entry.getKey(), props, connectionContext);
        } else if (entry.getValue() instanceof DecorateSQLRouter) {
            ((DecorateSQLRouter) entry.getValue()).decorateRouteContext(result, queryContext, database, entry.getKey(), props, connectionContext);
        }
    }
    if (result.getRouteUnits().isEmpty() && 1 == database.getResourceMetaData().getStorageUnits().size()) {
        // Single-datasource fallback
        String singleDataSourceName = database.getResourceMetaData().getStorageUnits().keySet().iterator().next();
        result.getRouteUnits().add(new RouteUnit(new RouteMapper(singleDataSourceName, singleDataSourceName), Collections.emptyList()));
    }
    return result;
}
// A SQL-comment data source hint must short-circuit routing to the hinted source.
@Test
void assertRouteBySQLCommentHint() {
    when(hintValueContext.findHintDataSourceName()).thenReturn(Optional.of("ds_1"));
    QueryContext queryContext = new QueryContext(commonSQLStatementContext, "", Collections.emptyList(), hintValueContext, connectionContext, metaData);
    RouteContext routeContext = partialSQLRouteExecutor.route(connectionContext, queryContext, mock(RuleMetaData.class), database);
    assertThat(routeContext.getRouteUnits().size(), is(1));
    assertThat(routeContext.getRouteUnits().iterator().next().getDataSourceMapper().getActualName(), is("ds_1"));
}
/**
 * Deserializes a {@code ViewFn} from its proto representation.
 *
 * @param viewFn the function spec; its URN must be the custom Java ViewFn URN
 * @return the deserialized {@code ViewFn}
 * @throws IllegalArgumentException if the spec's URN is not the custom Java ViewFn URN
 * @throws InvalidProtocolBufferException declared for API compatibility with callers
 */
public static ViewFn<?, ?> viewFnFromProto(RunnerApi.FunctionSpec viewFn)
    throws InvalidProtocolBufferException {
  // Note: the original body aliased the parameter into a redundant local
  // (`FunctionSpec spec = viewFn`); the parameter is used directly here.
  checkArgument(
      viewFn.getUrn().equals(ParDoTranslation.CUSTOM_JAVA_VIEW_FN_URN),
      "Can't deserialize unknown %s type %s",
      ViewFn.class.getSimpleName(),
      viewFn.getUrn());
  return (ViewFn<?, ?>)
      SerializableUtils.deserializeFromByteArray(
          viewFn.getPayload().toByteArray(), "Custom ViewFn");
}
// Round-trips a custom ViewFn through proto translation and back.
@Test
public void testViewFnTranslation() throws Exception {
    SdkComponents sdkComponents = SdkComponents.create();
    sdkComponents.registerEnvironment(Environments.createDockerEnvironment("java"));
    assertEquals(
            new TestViewFn(),
            PCollectionViewTranslation.viewFnFromProto(
                    ParDoTranslation.translateViewFn(new TestViewFn(), sdkComponents)));
}
/**
 * Returns the shared {@code Gson} instance held by {@code SingletonHolder}
 * (defined elsewhere in this class).
 */
public static Gson instance() {
    return SingletonHolder.INSTANCE;
}
// Deserializing a GoCipher must be rejected with a descriptive IllegalArgumentException.
@Test
void rejectsDeserializationOfGoCipher() {
    final IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () ->
            Serialization.instance().fromJson("{}", GoCipher.class));
    assertEquals(format("Refusing to deserialize a %s in the JSON stream!", GoCipher.class.getName()), e.getMessage());
}
/**
 * Delegates to the underlying database metadata, translating logical catalog and
 * schema names to their actual counterparts, and wraps the result set.
 */
@Override
public ResultSet getFunctionColumns(final String catalog, final String schemaPattern, final String functionNamePattern, final String columnNamePattern) throws SQLException {
    return createDatabaseMetaDataResultSet(getDatabaseMetaData().getFunctionColumns(getActualCatalog(catalog), getActualSchema(schemaPattern), functionNamePattern, columnNamePattern));
}
// getFunctionColumns must wrap the delegate's result in a DatabaseMetaDataResultSet.
@Test
void assertGetFunctionColumns() throws SQLException {
    when(databaseMetaData.getFunctionColumns("test", null, null, null)).thenReturn(resultSet);
    assertThat(shardingSphereDatabaseMetaData.getFunctionColumns("test", null, null, null), instanceOf(DatabaseMetaDataResultSet.class));
}
/**
 * Adds an integer measure for the given metric on the current component.
 * The metric is validated first (must be an allowed output metric), then
 * the measure is recorded in the repository.
 */
@Override
public void addMeasure(String metricKey, int value) {
    Metric metric = metricRepository.getByKey(metricKey);
    validateAddMeasure(metric);
    measureRepository.add(internalComponent, metric, newMeasureBuilder().create(value));
}
// Adding a measure for a metric that is not in the declared output metrics must fail.
@Test
public void fail_with_IAE_when_add_measure_is_called_on_metric_not_in_output_list() {
    assertThatThrownBy(() -> {
        MeasureComputerContextImpl underTest = newContext(PROJECT_REF, NCLOC_KEY, INT_METRIC_KEY);
        underTest.addMeasure(DOUBLE_METRIC_KEY, 10);
    })
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessage("Only metrics in [int_metric_key] can be used to add measures. Metric 'double_metric_key' is not allowed.");
}
/**
 * Renders a color as an 8-digit lowercase hex string in AARRGGBB order,
 * zero-padded on the left (e.g. opaque red -> "ffff0000").
 *
 * @param color the color to encode; its full 32-bit ARGB value is used
 * @return the 8-character lowercase hex representation
 */
public static String colorToAlphaHexCode(final Color color) {
    final String digits = Integer.toHexString(color.getRGB());
    final StringBuilder padded = new StringBuilder(8);
    for (int i = digits.length(); i < 8; i++) {
        padded.append('0');
    }
    return padded.append(digits).toString();
}
// Each known color must map to its expected 8-digit alpha hex string.
@Test
public void colorToAlphaHexCode() {
    COLOR_ALPHA_HEXSTRING_MAP.forEach((color, hex) -> {
        assertEquals(hex, ColorUtil.colorToAlphaHexCode(color));
    });
}
/**
 * Builds a KDB tree over the given rectangles, splitting nodes that exceed
 * {@code maxItemsPerNode}, rooted at the universe rectangle.
 *
 * @throws IllegalArgumentException if maxItemsPerNode is not positive
 * @throws NullPointerException     if items is null
 */
public static KdbTree buildKdbTree(int maxItemsPerNode, List<Rectangle> items) {
    checkArgument(maxItemsPerNode > 0, "maxItemsPerNode must be > 0");
    requireNonNull(items, "items is null");
    return new KdbTree(buildKdbTreeNode(maxItemsPerNode, 0, Rectangle.getUniverseRectangle(), items, new LeafIdAllocator()));
}
// Serialization round-trip of KDB trees built with varying leaf capacities.
@Test
public void testSerde() {
    ImmutableList.Builder<Rectangle> rectangles = ImmutableList.builder();
    for (double x = 0; x < 10; x += 1) {
        for (double y = 0; y < 5; y += 1) {
            rectangles.add(new Rectangle(x, y, x + 0.1, y + 0.2));
        }
    }

    testSerializationRoundtrip(buildKdbTree(100, rectangles.build()));
    testSerializationRoundtrip(buildKdbTree(20, rectangles.build()));
    testSerializationRoundtrip(buildKdbTree(10, rectangles.build()));
}
/**
 * Converts the data table to the requested type, delegating to the three-argument
 * overload with {@code transposed = false}.
 */
@Override
public <T> T convert(DataTable dataTable, Type type) {
    return convert(dataTable, type, false);
}
// Converting to Optional<ChessBoard> without an entry/row transformer must fail
// with the full multi-suggestion error message.
@Test
void convert_to_optional_of_object__must_have_optional_converter() {
    DataTable table = parse("",
            " |   | 1 | 2 | 3 |",
            " | A | ♘ |   | ♝ |",
            " | B |   |   |   |",
            " | C |   | ♝ |   |");

    registry.defineDataTableType(new DataTableType(ChessBoard.class, CHESS_BOARD_TABLE_TRANSFORMER));
    UndefinedDataTableTypeException exception = assertThrows(
            UndefinedDataTableTypeException.class,
            () -> converter.convert(table, OPTIONAL_CHESS_BOARD_TYPE));
    assertThat(exception.getMessage(), is("" +
            "Can't convert DataTable to io.cucumber.datatable.DataTableTypeRegistryTableConverterTest$ChessBoard.\n" +
            "Please review these problems:\n" +
            "\n" +
            " - There was no table entry or table row transformer registered for io.cucumber.datatable.DataTableTypeRegistryTableConverterTest$ChessBoard.\n" +
            "   Please consider registering a table entry or row transformer.\n" +
            "\n" +
            " - There was no default table entry transformer registered to transform io.cucumber.datatable.DataTableTypeRegistryTableConverterTest$ChessBoard.\n" +
            "   Please consider registering a default table entry transformer.\n" +
            "\n" +
            "Note: Usually solving one is enough"));
}
/**
 * Field-by-field equality for index metadata.
 * All fields are compared null-safely; note that {@code indextype} is compared by
 * its {@code value()} rather than by object identity, preserving the original
 * value-based semantics.
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (!(o instanceof IndexMeta)) {
        return false;
    }
    IndexMeta indexMeta = (IndexMeta)o;
    if (!ArrayUtils.isEquals(indexMeta.values, this.values)) {
        return false;
    }
    if (!Objects.equals(indexMeta.nonUnique, this.nonUnique)) {
        return false;
    }
    if (!Objects.equals(indexMeta.indexQualifier, this.indexQualifier)) {
        return false;
    }
    if (!Objects.equals(indexMeta.indexName, this.indexName)) {
        return false;
    }
    if (!Objects.equals(indexMeta.type, this.type)) {
        return false;
    }
    // Bug fix: the original called indextype.value() unconditionally and threw a
    // NullPointerException when either side's indextype was null. Compare the
    // values null-safely instead.
    final Object thisIndexTypeValue = this.indextype == null ? null : this.indextype.value();
    final Object thatIndexTypeValue = indexMeta.indextype == null ? null : indexMeta.indextype.value();
    if (!Objects.equals(thatIndexTypeValue, thisIndexTypeValue)) {
        return false;
    }
    if (!Objects.equals(indexMeta.ascOrDesc, this.ascOrDesc)) {
        return false;
    }
    if (!Objects.equals(indexMeta.ordinalPosition, this.ordinalPosition)) {
        return false;
    }
    return true;
}
// Reflexivity: an IndexMeta must equal itself.
@Test
public void testEqualsSameObject() {
    IndexMeta indexMeta = new IndexMeta();
    assertTrue(indexMeta.equals(indexMeta), "An object should be equal to itself");
}
/**
 * Builds the physical index name as {@code <prefix><SEPARATOR><number>},
 * e.g. "graylog_0".
 */
@VisibleForTesting
String buildIndexName(final int number) {
    return config.indexPrefix() + SEPARATOR + number;
}
// Index names are the prefix, an underscore separator, and the counter.
@Test
public void testBuildIndexName() {
    assertEquals("graylog_0", mongoIndexSet.buildIndexName(0));
    assertEquals("graylog_1", mongoIndexSet.buildIndexName(1));
    assertEquals("graylog_9001", mongoIndexSet.buildIndexName(9001));
}
/**
 * Sets the value of the named field on the given target.
 * The target may be an instance (instance field access) or a {@code Class}
 * (static field access).
 *
 * @param obj       target instance, or a {@code Class} for static fields; must not be null
 * @param fieldName field name; must not be blank
 * @param value     the value to assign
 * @throws RuntimeException if the field does not exist or cannot be set
 */
public static void setFieldValue(Object obj, String fieldName, Object value) throws RuntimeException {
    Assert.notNull(obj);
    Assert.notBlank(fieldName);

    // Resolve the declaring class once: a Class argument means static access.
    final Class<?> clazz = (obj instanceof Class) ? (Class<?>) obj : obj.getClass();
    final Field field = getField(clazz, fieldName);
    // Bug fix: the original message always used obj.getClass().getName(), which
    // reported "java.lang.Class" instead of the target class when obj was a Class.
    Assert.notNull(field, "Field [" + fieldName + "] is not exist in [" + clazz.getName() + "]");
    setFieldValue(obj, field, value);
}
// Sets both a field by name and an inherited private field via its Field handle.
@Test
public void setFieldValueTest() {
    TestClass testClass = new TestClass();
    ReflectUtil.setFieldValue(testClass, "field", "fieldVal");
    Assert.assertEquals("fieldVal", testClass.getField());

    Field privateField = ReflectUtil.getField(TestSubClass.class, "privateField");
    ReflectUtil.setFieldValue(testClass, privateField, "privateFieldVal");
    Assert.assertEquals("privateFieldVal", testClass.getPrivateField());
}
/**
 * Entry point for composing multiple CombineFns into one; returns a fresh builder.
 */
public static ComposeCombineFnBuilder compose() {
    return new ComposeCombineFnBuilder();
}
// Display data of a composed combine must include each component CombineFn's
// class and its own display data.
@Test
public void testComposedCombineDisplayData() {
    SimpleFunction<String, String> extractFn =
            new SimpleFunction<String, String>() {
                @Override
                public String apply(String input) {
                    return input;
                }
            };

    DisplayDataCombineFn combineFn1 = new DisplayDataCombineFn("value1");
    DisplayDataCombineFn combineFn2 = new DisplayDataCombineFn("value2");

    CombineFns.ComposedCombineFn<String> composedCombine =
            CombineFns.compose()
                    .with(extractFn, combineFn1, new TupleTag<>())
                    .with(extractFn, combineFn2, new TupleTag<>());

    DisplayData displayData = DisplayData.from(composedCombine);
    assertThat(displayData, hasDisplayItem("combineFn1", combineFn1.getClass()));
    assertThat(displayData, hasDisplayItem("combineFn2", combineFn2.getClass()));
    assertThat(displayData, includesDisplayDataFor("combineFn1", combineFn1));
    assertThat(displayData, includesDisplayDataFor("combineFn2", combineFn2));
}
/**
 * Loads an existing Logstash keystore described by the config.
 * <p>
 * Failure modes are distinguished deliberately:
 * missing file -> LoadException; wrong password/permissions -> AccessException;
 * a file that is not a valid keystore -> LoadException; a valid keystore without
 * the Logstash marker secret -> LoadException. The lock is always released and
 * the config's sensitive values are always cleared in the finally block.
 */
@Override
public JavaKeyStore load(SecureConfig config) {
    if (!exists(config)) {
        throw new SecretStoreException.LoadException(
                String.format("Can not find Logstash keystore at %s. Please verify this file exists and is a valid Logstash keystore.",
                        config.getPlainText("keystore.file") == null ? "<undefined>" : new String(config.getPlainText("keystore.file"))));
    }
    try {
        init(config);
        lock.lock();
        try (final InputStream is = Files.newInputStream(keyStorePath)) {
            try {
                keyStore.load(is, this.keyStorePass);
            } catch (IOException ioe) {
                if (ioe.getCause() instanceof UnrecoverableKeyException) {
                    throw new SecretStoreException.AccessException(
                            String.format("Can not access Logstash keystore at %s. Please verify correct file permissions and keystore password.",
                                    keyStorePath.toAbsolutePath()), ioe);
                } else {
                    throw new SecretStoreException.LoadException(String.format("Found a file at %s, but it is not a valid Logstash keystore.",
                            keyStorePath.toAbsolutePath().toString()), ioe);
                }
            }
            // The marker secret distinguishes a Logstash keystore from any other JKS
            byte[] marker = retrieveSecret(LOGSTASH_MARKER);
            if (marker == null) {
                throw new SecretStoreException.LoadException(String.format("Found a keystore at %s, but it is not a Logstash keystore.",
                        keyStorePath.toAbsolutePath().toString()));
            }
            LOGGER.debug("Using existing keystore at {}", keyStorePath.toAbsolutePath());
            return this;
        }
    } catch (SecretStoreException sse) {
        throw sse;
    } catch (Exception e) { //should never happen
        throw new SecretStoreException.UnknownException("Error while trying to load the Logstash keystore", e);
    } finally {
        releaseLock(lock);
        config.clearValues();
    }
}
// Flipping a byte in the middle of the keystore file must make load() fail
// with a SecretStoreException.
@Test
public void tamperedKeystore() throws Exception {
    byte[] keyStoreAsBytes = Files.readAllBytes(Paths.get(new String(keyStorePath)));
    //bump the middle byte by 1
    int tamperLocation = keyStoreAsBytes.length / 2;
    keyStoreAsBytes[tamperLocation] = (byte) (keyStoreAsBytes[tamperLocation] + 1);
    Path tamperedPath = folder.newFolder().toPath().resolve("tampered.logstash.keystore");
    Files.write(tamperedPath, keyStoreAsBytes);
    SecureConfig sc = new SecureConfig();
    sc.add("keystore.file", tamperedPath.toString().toCharArray());
    assertThrows(SecretStoreException.class, () -> {
        new JavaKeyStore().load(sc);
    });
}
/**
 * Parses a Record Access Control Point (RACP) indication.
 * Layout: [0] op code, [1] operator (must be NULL), then op-code-specific payload.
 * "Number of Stored Records" responses carry the count as UINT8/UINT16/UINT32
 * depending on payload size; "Response Code" carries request code + status.
 * Any structural violation is routed to onInvalidDataReceived.
 */
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    super.onDataReceived(device, data);

    if (data.size() < 3) {
        onInvalidDataReceived(device, data);
        return;
    }

    final int opCode = data.getIntValue(Data.FORMAT_UINT8, 0);
    if (opCode != OP_CODE_NUMBER_OF_STORED_RECORDS_RESPONSE && opCode != OP_CODE_RESPONSE_CODE) {
        onInvalidDataReceived(device, data);
        return;
    }

    final int operator = data.getIntValue(Data.FORMAT_UINT8, 1);
    if (operator != OPERATOR_NULL) {
        onInvalidDataReceived(device, data);
        return;
    }

    switch (opCode) {
        case OP_CODE_NUMBER_OF_STORED_RECORDS_RESPONSE -> {
            // Field size is defined per service
            int numberOfRecords;
            switch (data.size() - 2) {
                case 1 -> numberOfRecords = data.getIntValue(Data.FORMAT_UINT8, 2);
                case 2 -> numberOfRecords = data.getIntValue(Data.FORMAT_UINT16_LE, 2);
                case 4 -> numberOfRecords = data.getIntValue(Data.FORMAT_UINT32_LE, 2);
                default -> {
                    // Other field sizes are not supported
                    onInvalidDataReceived(device, data);
                    return;
                }
            }
            onNumberOfRecordsReceived(device, numberOfRecords);
        }
        case OP_CODE_RESPONSE_CODE -> {
            if (data.size() != 4) {
                onInvalidDataReceived(device, data);
                return;
            }

            final int requestCode = data.getIntValue(Data.FORMAT_UINT8, 2);
            final int responseCode = data.getIntValue(Data.FORMAT_UINT8, 3);
            if (responseCode == RACP_RESPONSE_SUCCESS) {
                onRecordAccessOperationCompleted(device, requestCode);
            } else if (responseCode == RACP_ERROR_NO_RECORDS_FOUND) {
                onRecordAccessOperationCompletedWithNoRecordsFound(device, requestCode);
            } else {
                onRecordAccessOperationError(device, requestCode, responseCode);
            }
        }
    }
}
// A 3-byte RACP packet carries the record count as a single UINT8 (here: 1).
@Test
public void onNumberOfRecordsReceived_uint8() {
    final Data data = new Data(new byte[] { 5, 0, 1 });
    callback.onDataReceived(null, data);
    assertEquals(numberOfRecords, 1);
}
/**
 * Signs the given claims into a compact JWS (RS256) using the key configured in
 * the security config.
 * <p>
 * The key id header is prefixed with a two-digit provider id from config:
 * a one-character id is left-padded with '0'; an id longer than two characters
 * is logged as an error and truncated to its first two characters; absence of a
 * provider id yields no prefix (NOTE(review): not "00" as one might expect from
 * the surrounding comment — confirm intended behavior).
 *
 * @deprecated retained for backward compatibility; prefer the overload taking an
 *             explicit kid and private key.
 */
@Deprecated
public static String getJwt(JwtClaims claims) throws JoseException {
    String jwt;
    RSAPrivateKey privateKey = (RSAPrivateKey) getPrivateKey(
            jwtConfig.getKey().getFilename(), jwtConfig.getKey().getPassword(), jwtConfig.getKey().getKeyName());

    // A JWT is a JWS and/or a JWE with JSON claims as the payload.
    // In this example it is a JWS nested inside a JWE
    // So we first create a JsonWebSignature object.
    JsonWebSignature jws = new JsonWebSignature();

    // The payload of the JWS is JSON content of the JWT Claims
    jws.setPayload(claims.toJson());

    // The JWT is signed using the sender's private key
    jws.setKey(privateKey);

    // Get provider from security config file, it should be two digit
    // And the provider id will set as prefix for keyid in the token header, for example: 05100
    // if there is no provider id, we use "00" for the default value
    String provider_id = "";
    if (jwtConfig.getProviderId() != null) {
        provider_id = jwtConfig.getProviderId();
        if (provider_id.length() == 1) {
            provider_id = "0" + provider_id;
        } else if (provider_id.length() > 2) {
            logger.error("provider_id defined in the security.yml file is invalid; the length should be 2");
            provider_id = provider_id.substring(0, 2);
        }
    }
    jws.setKeyIdHeaderValue(provider_id + jwtConfig.getKey().getKid());

    // Set the signature algorithm on the JWT/JWS that will integrity protect the claims
    jws.setAlgorithmHeaderValue(AlgorithmIdentifiers.RSA_USING_SHA256);

    // Sign the JWS and produce the compact serialization, which will be the inner JWT/JWS
    // representation, which is a string consisting of three dot ('.') separated
    // base64url-encoded parts in the form Header.Payload.Signature
    jwt = jws.getCompactSerialization();
    return jwt;
}
// Utility "test" that prints a long-lived (10-year) pet-store JWT for manual use.
@Test
public void longLivedPetStoreJwt() throws Exception {
    JwtClaims claims = ClaimsUtil.getTestClaims("steve", "EMPLOYEE", "f7d42348-c647-4efb-a52d-4c5787421e72", Arrays.asList("write:pets", "read:pets"), "user");
    claims.setExpirationTimeMinutesInTheFuture(5256000);
    String jwt = JwtIssuer.getJwt(claims, long_kid, KeyUtil.deserializePrivateKey(long_key, KeyUtil.RSA));
    System.out.println("***LongLived PetStore JWT***: " + jwt);
}
/**
 * Registers the CORS filter for API, management, and API-docs endpoints,
 * but only when allowed origins or origin patterns are configured;
 * otherwise the filter is effectively a no-op.
 */
@Bean
public CorsFilter corsFilter() {
    UrlBasedCorsConfigurationSource source = new UrlBasedCorsConfigurationSource();
    CorsConfiguration config = jHipsterProperties.getCors();
    if (!CollectionUtils.isEmpty(config.getAllowedOrigins()) || !CollectionUtils.isEmpty(config.getAllowedOriginPatterns())) {
        log.debug("Registering CORS filter");
        source.registerCorsConfiguration("/api/**", config);
        source.registerCorsConfiguration("/management/**", config);
        source.registerCorsConfiguration("/v3/api-docs", config);
        source.registerCorsConfiguration("/swagger-ui/**", config);
    }
    return new CorsFilter(source);
}
// With CORS configured, preflight and simple requests on /api/** must carry the
// expected CORS response headers.
@Test
void shouldCorsFilterOnApiPath() throws Exception {
    props.getCors().setAllowedOrigins(Collections.singletonList("other.domain.com"));
    props.getCors().setAllowedMethods(Arrays.asList("GET", "POST", "PUT", "DELETE"));
    props.getCors().setAllowedHeaders(Collections.singletonList("*"));
    props.getCors().setMaxAge(1800L);
    props.getCors().setAllowCredentials(true);

    MockMvc mockMvc = MockMvcBuilders.standaloneSetup(new WebConfigurerTestController()).addFilters(webConfigurer.corsFilter()).build();

    mockMvc
            .perform(
                    options("/api/test-cors")
                            .header(HttpHeaders.ORIGIN, "other.domain.com")
                            .header(HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD, "POST")
            )
            .andExpect(status().isOk())
            .andExpect(header().string(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN, "other.domain.com"))
            .andExpect(header().string(HttpHeaders.VARY, "Origin"))
            .andExpect(header().string(HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS, "GET,POST,PUT,DELETE"))
            .andExpect(header().string(HttpHeaders.ACCESS_CONTROL_ALLOW_CREDENTIALS, "true"))
            .andExpect(header().string(HttpHeaders.ACCESS_CONTROL_MAX_AGE, "1800"));

    mockMvc
            .perform(get("/api/test-cors").header(HttpHeaders.ORIGIN, "other.domain.com"))
            .andExpect(status().isOk())
            .andExpect(header().string(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN, "other.domain.com"));
}
/**
 * Converts an Arrow schema into the equivalent Parquet message type (named "root")
 * and returns the mapping between the two.
 */
public SchemaMapping fromArrow(Schema arrowSchema) {
    List<Field> fields = arrowSchema.getFields();
    List<TypeMapping> parquetFields = fromArrow(fields);
    MessageType parquetType = addToBuilder(parquetFields, Types.buildMessage()).named("root");
    return new SchemaMapping(arrowSchema, parquetType, parquetFields);
}
// An Arrow millisecond UTC timestamp must map to an optional INT64 with a
// UTC-adjusted MILLIS timestamp logical type in Parquet.
@Test
public void testArrowTimestampMillisecondToParquet() {
    MessageType expected = converter
            .fromArrow(new Schema(asList(field("a", new ArrowType.Timestamp(TimeUnit.MILLISECOND, "UTC")))))
            .getParquetSchema();
    Assert.assertEquals(
            expected,
            Types.buildMessage()
                    .addField(Types.optional(INT64)
                            .as(LogicalTypeAnnotation.timestampType(true, MILLIS))
                            .named("a"))
                    .named("root"));
}
/**
 * UDAF factory: creates a COLLECT_LIST aggregator that gathers the values of a
 * field into a single list.
 */
@UdafFactory(description = "collect values of a field into a single Array")
public static <T> TableUdaf<T, List<T>, List<T>> createCollectListT() {
    return new Collect<>();
}
// merge() must concatenate the two aggregates, preserving order and nulls.
@Test
public void shouldMergeIntLists() {
    final TableUdaf<Integer, List<Integer>, List<Integer>> udaf = CollectListUdaf.createCollectListT();

    List<Integer> lhs = udaf.initialize();
    final Integer[] lhsValues = new Integer[] {1, 2, null, 3};
    for (final Integer i : lhsValues) {
        lhs = udaf.aggregate(i, lhs);
    }
    assertThat(lhs, contains(1, 2, null, 3));

    List<Integer> rhs = udaf.initialize();
    final Integer[] rhsValues = new Integer[] {2, null, 3, 4, 5, 6};
    for (final Integer i : rhsValues) {
        rhs = udaf.aggregate(i, rhs);
    }
    assertThat(rhs, contains(2, null, 3, 4, 5, 6));

    final List<Integer> merged = udaf.merge(lhs, rhs);
    assertThat(merged, contains(1, 2, null, 3, 2, null, 3, 4, 5, 6));
}
/**
 * Locates the PodSecurityProvider implementation whose canonical class name
 * matches {@code providerClass} via the ServiceLoader mechanism.
 * Logs the set of available providers and throws if no match is found.
 *
 * @throws InvalidConfigurationException if the provider is not on the classpath
 */
static PodSecurityProvider findProviderOrThrow(String providerClass) {
    ServiceLoader<PodSecurityProvider> loader = ServiceLoader.load(PodSecurityProvider.class);

    for (PodSecurityProvider provider : loader) {
        if (providerClass.equals(provider.getClass().getCanonicalName())) {
            LOGGER.info("Found PodSecurityProvider {}", providerClass);
            return provider;
        }
    }

    // The provider was not found
    LOGGER.warn("PodSecurityProvider {} was not found. Available providers are {}", providerClass, loader.stream().map(p -> p.getClass().getCanonicalName()).collect(Collectors.toSet()));
    throw new InvalidConfigurationException("PodSecurityProvider " + providerClass + " was not found.");
}
// A provider class that is not on the classpath must produce the documented error.
@Test
public void testMissingClass() {
    Exception ex = assertThrows(InvalidConfigurationException.class, () -> PodSecurityProviderFactory.findProviderOrThrow("my.package.MyCustomPodSecurityProvider"));
    assertThat(ex.getMessage(), is("PodSecurityProvider my.package.MyCustomPodSecurityProvider was not found."));
}
/**
 * Builds a single {@code KiePMMLFieldOperatorValue} for a field from a list of
 * simple predicates, joining each predicate's (operator, formatted value) pair
 * under the given boolean container operator (e.g. OR).
 */
public static KiePMMLFieldOperatorValue getConstraintEntryFromSimplePredicates(final String fieldName,
                                                                               final BOOLEAN_OPERATOR containerOperator,
                                                                               final List<SimplePredicate> simplePredicates,
                                                                               final Map<String, KiePMMLOriginalTypeGeneratedType> fieldTypeMap) {
    List<KiePMMLOperatorValue> kiePMMLOperatorValues = simplePredicates
            .stream()
            .map(simplePredicate -> new KiePMMLOperatorValue(OPERATOR.byName(simplePredicate.getOperator().value()),
                                                             getCorrectlyFormattedObject(simplePredicate, fieldTypeMap)))
            .collect(Collectors.toList());
    return new KiePMMLFieldOperatorValue(fieldName, containerOperator, kiePMMLOperatorValues, null);
}
// Two LESS_THAN predicates on the same field combined with OR must render as
// 'value < "VALUE-0" || value < "VALUE-1"'.
@Test
void getConstraintEntryFromSimplePredicates() {
    final Map<String, KiePMMLOriginalTypeGeneratedType> fieldTypeMap = new HashMap<>();
    String fieldName = "FIELD_NAME";
    List<SimplePredicate> simplePredicates = IntStream.range(0, 2)
            .mapToObj(index -> {
                fieldTypeMap.put(fieldName,
                        new KiePMMLOriginalTypeGeneratedType(DataType.STRING.value(),
                                getSanitizedClassName(fieldName.toUpperCase())));
                return PMMLModelTestUtils
                        .getSimplePredicate(fieldName,
                                "VALUE-" + index,
                                SimplePredicate.Operator.LESS_THAN);
            })
            .collect(Collectors.toList());
    final KiePMMLFieldOperatorValue retrieved = KiePMMLASTFactoryUtils
            .getConstraintEntryFromSimplePredicates(fieldName,
                    BOOLEAN_OPERATOR.OR,
                    simplePredicates,
                    fieldTypeMap);
    assertThat(retrieved.getName()).isEqualTo(fieldName);
    assertThat(retrieved.getConstraintsAsString()).isNotNull();
    String expected = "value < \"VALUE-0\" || value < \"VALUE-1\"";
    assertThat(retrieved.getConstraintsAsString()).isEqualTo(expected);
}
/**
 * Resolves the dependency graph of a grok pattern entity: the pattern itself plus
 * an edge to every other grok pattern referenced by name inside its expression.
 * A missing pattern yields a single-node graph (logged at debug level).
 */
@Override
public Graph<EntityDescriptor> resolveNativeEntity(EntityDescriptor entityDescriptor) {
    final MutableGraph<EntityDescriptor> mutableGraph = GraphBuilder.directed().build();
    mutableGraph.addNode(entityDescriptor);

    final ModelId modelId = entityDescriptor.id();
    try {
        final GrokPattern grokPattern = grokPatternService.load(modelId.id());
        final String namedPattern = grokPattern.pattern();
        final Set<String> patterns = GrokPatternService.extractPatternNames(namedPattern);
        patterns.stream().forEach(patternName -> {
            grokPatternService.loadByName(patternName).ifPresent(depPattern -> {
                final EntityDescriptor depEntityDescriptor = EntityDescriptor.create(
                        depPattern.id(), ModelTypes.GROK_PATTERN_V1);
                mutableGraph.putEdge(entityDescriptor, depEntityDescriptor);
            });
        });
    } catch (NotFoundException e) {
        LOG.debug("Couldn't find grok pattern {}", entityDescriptor, e);
    }
    return mutableGraph;
}
// A grok pattern with no references to other named patterns resolves to a graph
// containing only its own descriptor (no dependency edges).
@Test
public void resolveEntityDescriptor() throws ValidationException {
    final GrokPattern grokPattern = grokPatternService.save(GrokPattern.create("Test1", "[a-z]+"));
    final EntityDescriptor descriptor = EntityDescriptor.create(grokPattern.id(), ModelTypes.GROK_PATTERN_V1);

    final Graph<EntityDescriptor> graph = facade.resolveNativeEntity(descriptor);
    assertThat(graph.nodes()).containsOnly(descriptor);
}
/**
 * Projects this table through the given select expressions, producing a new
 * {@code SchemaKTable} backed by a table map-values execution step.
 *
 * @param keyColumnNames    names of the key columns to carry through the projection
 * @param selectExpressions expressions to evaluate for the projected values
 * @param contextStacker    query-context stacker for the new step
 * @param buildContext      plan build context (unused here but part of the contract)
 * @param valueFormat       value format for the projected result
 * @return a new table whose schema is resolved from the created step
 */
@Override
public SchemaKTable<K> select(
    final List<ColumnName> keyColumnNames,
    final List<SelectExpression> selectExpressions,
    final Stacker contextStacker,
    final PlanBuildContext buildContext,
    final FormatInfo valueFormat
) {
    // The step pairs the table's existing key format with the requested value format.
    final TableSelect<K> selectStep = ExecutionStepFactory.tableMapValues(
        contextStacker,
        sourceTableStep,
        keyColumnNames,
        selectExpressions,
        InternalFormats.of(keyFormat, valueFormat)
    );

    return new SchemaKTable<>(
        selectStep,
        resolveSchema(selectStep),
        keyFormat,
        ksqlConfig,
        functionRegistry
    );
}
// Projecting COL0 and LEN(UCASE(col2)) keeps COL0 as the key column and adds a
// synthetic value column (KSQL_COL_0) typed by the outermost function (LEN -> INTEGER).
@Test
public void testSelectWithFunctions() {
    // Given:
    final String selectQuery = "SELECT col0, LEN(UCASE(col2)) FROM test2 WHERE col0 > 100 EMIT CHANGES;";
    final PlanNode logicalPlan = buildLogicalPlan(selectQuery);
    final ProjectNode projectNode = (ProjectNode) logicalPlan.getSources().get(0);
    initialSchemaKTable = buildSchemaKTableFromPlan(logicalPlan);

    // When:
    final SchemaKTable<?> projectedSchemaKStream = initialSchemaKTable.select(
        ImmutableList.of(),
        projectNode.getSelectExpressions(),
        childContextStacker,
        buildContext,
        internalFormats
    );

    // Then:
    assertThat(projectedSchemaKStream.getSchema(), is(LogicalSchema.builder()
        .keyColumn(ColumnName.of("COL0"), SqlTypes.BIGINT)
        .valueColumn(ColumnName.of("COL0"), SqlTypes.BIGINT)
        .valueColumn(ColumnName.of("KSQL_COL_0"), SqlTypes.INTEGER)
        .build()
    ));
}
/**
 * Handles a ConsumerGroupHeartbeat request from a dynamic or static consumer group member.
 * The method is purely functional with respect to persistence: all state changes are
 * returned as {@link CoordinatorRecord}s in the result, alongside the response to send.
 *
 * High-level flow: (1) create/update the member and possibly bump the group epoch,
 * (2) recompute the target assignment if the epoch advanced, (3) reconcile the member's
 * current assignment toward the target.
 *
 * @param memberEpoch 0 means the member is (re)joining; see LEAVE_GROUP handling elsewhere
 * @throws ApiException on validation failures (e.g. group full, fenced member)
 */
private CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> consumerGroupHeartbeat(
    String groupId,
    String memberId,
    int memberEpoch,
    String instanceId,
    String rackId,
    int rebalanceTimeoutMs,
    String clientId,
    String clientHost,
    List<String> subscribedTopicNames,
    String assignorName,
    List<ConsumerGroupHeartbeatRequestData.TopicPartitions> ownedTopicPartitions
) throws ApiException {
    final long currentTimeMs = time.milliseconds();
    final List<CoordinatorRecord> records = new ArrayList<>();

    // Get or create the consumer group. Creation is only allowed on a join (epoch == 0).
    boolean createIfNotExists = memberEpoch == 0;
    final ConsumerGroup group = getOrMaybeCreateConsumerGroup(groupId, createIfNotExists, records);
    throwIfConsumerGroupIsFull(group, memberId);

    // Get or create the member. A joining member without an id gets a server-generated one.
    if (memberId.isEmpty()) memberId = Uuid.randomUuid().toString();
    final ConsumerGroupMember member;
    if (instanceId == null) {
        member = getOrMaybeSubscribeDynamicConsumerGroupMember(
            group,
            memberId,
            memberEpoch,
            ownedTopicPartitions,
            createIfNotExists,
            false
        );
    } else {
        member = getOrMaybeSubscribeStaticConsumerGroupMember(
            group,
            memberId,
            memberEpoch,
            instanceId,
            ownedTopicPartitions,
            createIfNotExists,
            false,
            records
        );
    }

    // 1. Create or update the member. If the member is new or has changed, a ConsumerGroupMemberMetadataValue
    // record is written to the __consumer_offsets partition to persist the change. If the subscriptions have
    // changed, the subscription metadata is updated and persisted by writing a ConsumerGroupPartitionMetadataValue
    // record to the __consumer_offsets partition. Finally, the group epoch is bumped if the subscriptions have
    // changed, and persisted by writing a ConsumerGroupMetadataValue record to the partition.
    ConsumerGroupMember updatedMember = new ConsumerGroupMember.Builder(member)
        .maybeUpdateInstanceId(Optional.ofNullable(instanceId))
        .maybeUpdateRackId(Optional.ofNullable(rackId))
        .maybeUpdateRebalanceTimeoutMs(ofSentinel(rebalanceTimeoutMs))
        .maybeUpdateServerAssignorName(Optional.ofNullable(assignorName))
        .maybeUpdateSubscribedTopicNames(Optional.ofNullable(subscribedTopicNames))
        .setClientId(clientId)
        .setClientHost(clientHost)
        .setClassicMemberMetadata(null)
        .build();

    // Appends a member-metadata record to `records` if the member changed.
    boolean bumpGroupEpoch = hasMemberSubscriptionChanged(
        groupId,
        member,
        updatedMember,
        records
    );

    int groupEpoch = group.groupEpoch();
    Map<String, TopicMetadata> subscriptionMetadata = group.subscriptionMetadata();
    Map<String, Integer> subscribedTopicNamesMap = group.subscribedTopicNames();
    SubscriptionType subscriptionType = group.subscriptionType();

    if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) {
        // The subscription metadata is updated in two cases:
        // 1) The member has updated its subscriptions;
        // 2) The refresh deadline has been reached.
        subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember);
        subscriptionMetadata = group.computeSubscriptionMetadata(
            subscribedTopicNamesMap,
            metadataImage.topics(),
            metadataImage.cluster()
        );

        // Count a joining member that is not yet materialized in the group
        // (neither as a dynamic nor as a static member) exactly once.
        int numMembers = group.numMembers();
        if (!group.hasMember(updatedMember.memberId()) && !group.hasStaticMember(updatedMember.instanceId())) {
            numMembers++;
        }

        subscriptionType = ModernGroup.subscriptionType(
            subscribedTopicNamesMap,
            numMembers
        );

        if (!subscriptionMetadata.equals(group.subscriptionMetadata())) {
            log.info("[GroupId {}] Computed new subscription metadata: {}.",
                groupId, subscriptionMetadata);
            bumpGroupEpoch = true;
            records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata));
        }

        if (bumpGroupEpoch) {
            groupEpoch += 1;
            records.add(newConsumerGroupEpochRecord(groupId, groupEpoch));
            log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch);
            metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME);
        }

        group.setMetadataRefreshDeadline(currentTimeMs + consumerGroupMetadataRefreshIntervalMs, groupEpoch);
    }

    // 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between
    // the existing and the new target assignment is persisted to the partition.
    final int targetAssignmentEpoch;
    final Assignment targetAssignment;

    if (groupEpoch > group.assignmentEpoch()) {
        targetAssignment = updateTargetAssignment(
            group,
            groupEpoch,
            member,
            updatedMember,
            subscriptionMetadata,
            subscriptionType,
            records
        );
        targetAssignmentEpoch = groupEpoch;
    } else {
        targetAssignmentEpoch = group.assignmentEpoch();
        targetAssignment = group.targetAssignment(updatedMember.memberId(), updatedMember.instanceId());
    }

    // 3. Reconcile the member's assignment with the target assignment if the member is not
    // fully reconciled yet.
    updatedMember = maybeReconcile(
        groupId,
        updatedMember,
        group::currentPartitionEpoch,
        targetAssignmentEpoch,
        targetAssignment,
        ownedTopicPartitions,
        records
    );

    scheduleConsumerGroupSessionTimeout(groupId, memberId);

    // Prepare the response.
    ConsumerGroupHeartbeatResponseData response = new ConsumerGroupHeartbeatResponseData()
        .setMemberId(updatedMember.memberId())
        .setMemberEpoch(updatedMember.memberEpoch())
        .setHeartbeatIntervalMs(consumerGroupHeartbeatIntervalMs(groupId));

    // The assignment is only provided in the following cases:
    // 1. The member sent a full request. It does so when joining or rejoining the group with zero
    //    as the member epoch; or on any errors (e.g. timeout). We use all the non-optional fields
    //    (rebalanceTimeoutMs, subscribedTopicNames and ownedTopicPartitions) to detect a full request
    //    as those must be set in a full request.
    // 2. The member's assignment has been updated.
    boolean isFullRequest = memberEpoch == 0 || (rebalanceTimeoutMs != -1 && subscribedTopicNames != null && ownedTopicPartitions != null);
    if (isFullRequest || hasAssignedPartitionsChanged(member, updatedMember)) {
        response.setAssignment(createConsumerGroupResponseAssignment(updatedMember));
    }

    return new CoordinatorResult<>(records, response);
}
// When a static member leaves, its records are tombstoned; because it was the only
// subscriber of "zar", the subscription metadata shrinks and the group epoch is bumped.
@Test
public void testLeavingStaticMemberBumpsGroupEpoch() {
    String groupId = "fooup";
    // Use a static member id as it makes the test easier.
    String memberId1 = Uuid.randomUuid().toString();
    String memberId2 = Uuid.randomUuid().toString();

    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";
    Uuid zarTopicId = Uuid.randomUuid();
    String zarTopicName = "zar";

    MockPartitionAssignor assignor = new MockPartitionAssignor("range");

    // Consumer group with two static members.
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
        .withConsumerGroupAssignors(Collections.singletonList(assignor))
        .withMetadataImage(new MetadataImageBuilder()
            .addTopic(fooTopicId, fooTopicName, 6)
            .addTopic(barTopicId, barTopicName, 3)
            .addTopic(zarTopicId, zarTopicName, 1)
            .addRacks()
            .build())
        .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10)
            .withMember(new ConsumerGroupMember.Builder(memberId1)
                .setState(MemberState.STABLE)
                .setInstanceId(memberId1)
                .setMemberEpoch(10)
                .setPreviousMemberEpoch(9)
                .setClientId(DEFAULT_CLIENT_ID)
                .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
                .setSubscribedTopicNames(Arrays.asList("foo", "bar"))
                .setServerAssignorName("range")
                .setAssignedPartitions(mkAssignment(
                    mkTopicAssignment(fooTopicId, 0, 1, 2),
                    mkTopicAssignment(barTopicId, 0, 1)))
                .build())
            .withMember(new ConsumerGroupMember.Builder(memberId2)
                .setState(MemberState.STABLE)
                .setInstanceId(memberId2)
                .setMemberEpoch(10)
                .setPreviousMemberEpoch(9)
                .setClientId(DEFAULT_CLIENT_ID)
                .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
                // Use zar only here to ensure that metadata needs to be recomputed.
                .setSubscribedTopicNames(Arrays.asList("foo", "bar", "zar"))
                .setServerAssignorName("range")
                .setAssignedPartitions(mkAssignment(
                    mkTopicAssignment(fooTopicId, 3, 4, 5),
                    mkTopicAssignment(barTopicId, 2)))
                .build())
            .withAssignment(memberId1, mkAssignment(
                mkTopicAssignment(fooTopicId, 0, 1, 2),
                mkTopicAssignment(barTopicId, 0, 1)))
            .withAssignment(memberId2, mkAssignment(
                mkTopicAssignment(fooTopicId, 3, 4, 5),
                mkTopicAssignment(barTopicId, 2)))
            .withAssignmentEpoch(10))
        .build();

    // Member 2 leaves the consumer group.
    CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result = context.consumerGroupHeartbeat(
        new ConsumerGroupHeartbeatRequestData()
            .setGroupId(groupId)
            .setInstanceId(memberId2)
            .setMemberId(memberId2)
            .setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH)
            .setRebalanceTimeoutMs(5000)
            .setSubscribedTopicNames(Arrays.asList("foo", "bar"))
            .setTopicPartitions(Collections.emptyList()));

    // The response echoes the leave epoch back to the member.
    assertResponseEquals(
        new ConsumerGroupHeartbeatResponseData()
            .setMemberId(memberId2)
            .setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH),
        result.response()
    );

    // Expected persistence: tombstones for the leaving member, recomputed subscription
    // metadata (zar removed), and the epoch bump record.
    List<CoordinatorRecord> expectedRecords = Arrays.asList(
        GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId2),
        GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId2),
        GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId2),
        // Subscription metadata is recomputed because zar is no longer there.
        GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap<String, TopicMetadata>() {
            {
                put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6, mkMapOfPartitionRacks(6)));
                put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3, mkMapOfPartitionRacks(3)));
            }
        }),
        GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11)
    );

    assertRecordsEquals(expectedRecords, result.records());
}
/**
 * Hands the received metadata update results to the bounded executor so they are
 * applied asynchronously, keeping the caller's thread free.
 *
 * @param results metadata update results to apply
 */
@Override
public void setMetadataUpdateResults(List<ConnectorMetadataUpdateHandle> results)
{
    final Runnable applyResults = () -> updateResultAsync(results);
    boundedExecutor.execute(applyResults);
}
// End-to-end: a pending request is matched by a result with the same request id,
// the future for the writer index completes with the file name, and the pending
// queue drains to empty.
@Test
public void testSetMetadataUpdateResults()
{
    HiveMetadataUpdater hiveMetadataUpdater = new HiveMetadataUpdater(EXECUTOR);

    // add Request
    hiveMetadataUpdater.addMetadataUpdateRequest(TEST_SCHEMA_NAME, TEST_TABLE_NAME, Optional.of(TEST_PARTITION_NAME), TEST_WRITER_INDEX);
    List<ConnectorMetadataUpdateHandle> hiveMetadataUpdateRequests = hiveMetadataUpdater.getPendingMetadataUpdateRequests();
    assertEquals(hiveMetadataUpdateRequests.size(), 1);

    HiveMetadataUpdateHandle request = (HiveMetadataUpdateHandle) hiveMetadataUpdateRequests.get(0);

    // create Result
    HiveMetadataUpdateHandle hiveMetadataUpdateResult = new HiveMetadataUpdateHandle(request.getRequestId(), request.getSchemaTableName(), request.getPartitionName(), Optional.of(TEST_FILE_NAME));

    // set the result
    hiveMetadataUpdater.setMetadataUpdateResults(ImmutableList.of(hiveMetadataUpdateResult));

    try {
        // get the fileName (blocks until the async update completes)
        String fileName = hiveMetadataUpdater.getMetadataResult(TEST_WRITER_INDEX).get();

        // assert the fileName
        assertEquals(fileName, TEST_FILE_NAME);

        // assert the pending request queue size is zero
        assertEquals(hiveMetadataUpdater.getPendingMetadataUpdateRequests().size(), 0);
    }
    catch (InterruptedException | ExecutionException e) {
        fail("Expected to succeed and get the fileName metadata result");
    }
}
/**
 * Primitive-boolean comparison is unsupported on this comparator type; this default
 * implementation always throws. Subclasses for BOOLEAN columns are expected to
 * override it.
 *
 * @throws UnsupportedOperationException always
 */
public int compare(boolean b1, boolean b2) {
    final String message =
        "compare(boolean, boolean) was called on a non-boolean comparator: " + toString();
    throw new UnsupportedOperationException(message);
}
// The signed-integer binary comparator must treat values that differ only in
// leading sign-extension bytes as equal: every pair below encodes -108.
@Test
public void testBinaryAsSignedIntegerComparatorWithEquals() {
    List<Binary> valuesToCompare = new ArrayList<>();
    valuesToCompare.add(Binary.fromConstantByteBuffer(ByteBuffer.wrap(new byte[] {0, 0, -108})));
    valuesToCompare.add(Binary.fromConstantByteBuffer(ByteBuffer.wrap(new byte[] {0, 0, 0, 0, 0, -108})));
    valuesToCompare.add(Binary.fromConstantByteBuffer(ByteBuffer.wrap(new byte[] {0, 0, 0, -108})));
    valuesToCompare.add(Binary.fromConstantByteBuffer(ByteBuffer.wrap(new byte[] {0, 0, 0, 0, -108})));
    valuesToCompare.add(Binary.fromConstantByteBuffer(ByteBuffer.wrap(new byte[] {0, -108})));

    // Compare every value against every other (including itself); all must be equal.
    for (Binary v1 : valuesToCompare) {
        for (Binary v2 : valuesToCompare) {
            assertEquals(
                String.format("Wrong result of comparison %s and %s", v1, v2),
                0,
                BINARY_AS_SIGNED_INTEGER_COMPARATOR.compare(v1, v2));
        }
    }
}
@Override public Credentials configure(final Host host) { if(WinHttpClients.isWinAuthAvailable()) { if(!host.getCredentials().validate(host.getProtocol(), new LoginOptions(host.getProtocol()).password(false))) { final String nameSamCompatible = CurrentWindowsCredentials.INSTANCE.getName(); final Credentials credentials = new Credentials(host.getCredentials()) .withPassword(CurrentWindowsCredentials.INSTANCE.getPassword()); if(!includeDomain && StringUtils.contains(nameSamCompatible, '\\')) { credentials.setUsername(StringUtils.split(nameSamCompatible, '\\')[1]); } else { credentials.setUsername(nameSamCompatible); } if(log.isDebugEnabled()) { log.debug(String.format("Configure %s with username %s", host, credentials)); } return credentials; } } return CredentialsConfigurator.DISABLED.configure(host); }
// On Linux Windows-integrated auth is unavailable, so the configurator must fall
// through and return the bookmark's own credentials unchanged (same instance).
@Test
public void testConfigure() {
    assumeTrue(Factory.Platform.getDefault().equals(Factory.Platform.Name.linux));
    final Host bookmark = new Host(new TestProtocol());
    assertSame(bookmark.getCredentials(), new WindowsIntegratedCredentialsConfigurator().configure(bookmark));
}
/**
 * Determines schema compatibility between this serializer snapshot and a previous one.
 * Any snapshot that is not an Avro snapshot is immediately incompatible; otherwise the
 * decision is delegated to Avro schema resolution between the old and current schemas.
 *
 * @param oldSerializerSnapshot snapshot taken with the previous serializer
 * @return the resolved compatibility result
 */
@Override
public TypeSerializerSchemaCompatibility<T> resolveSchemaCompatibility(
        TypeSerializerSnapshot<T> oldSerializerSnapshot) {
    if (oldSerializerSnapshot instanceof AvroSerializerSnapshot) {
        final AvroSerializerSnapshot<?> previousSnapshot =
                (AvroSerializerSnapshot<?>) oldSerializerSnapshot;
        return resolveSchemaCompatibility(previousSnapshot.schema, schema);
    }
    return TypeSerializerSchemaCompatibility.incompatible();
}
// Adding an optional field is a valid Avro schema evolution: reading data written
// with the old (first-name-only) schema requires migration but stays compatible.
// NOTE(review): the method name says "RequiresMigration" while the assertion uses
// isCompatibleAfterMigration() — these describe the same outcome.
@Test
void validSchemaEvaluationShouldResultInCRequiresMigration() {
    final AvroSerializer<GenericRecord> originalSerializer =
            new AvroSerializer<>(GenericRecord.class, FIRST_NAME);
    final AvroSerializer<GenericRecord> newSerializer =
            new AvroSerializer<>(GenericRecord.class, FIRST_REQUIRED_LAST_OPTIONAL);

    TypeSerializerSnapshot<GenericRecord> originalSnapshot =
            originalSerializer.snapshotConfiguration();

    assertThat(
            newSerializer
                    .snapshotConfiguration()
                    .resolveSchemaCompatibility(originalSnapshot))
            .is(isCompatibleAfterMigration());
}
/**
 * Decorates the full text with HTML highlighting markup, one output entry per line.
 * Convenience overload of the four-argument variant with no line range restriction
 * (null from/to decorate everything).
 */
List<String> decorateTextWithHtml(String text, DecorationDataHolder decorationDataHolder) {
    return decorateTextWithHtml(text, decorationDataHolder, null, null);
}
// Trailing newlines must each yield an empty output line — the decorator must not
// drop empty lines at the end of the file.
@Test
public void should_support_multiple_empty_lines_at_end_of_file() {
    String classDeclarationSample = "/*" + LF_END_OF_LINE +
        " * Header" + LF_END_OF_LINE +
        " */" + LF_END_OF_LINE +
        LF_END_OF_LINE +
        "public class HelloWorld {" + LF_END_OF_LINE +
        "}" + LF_END_OF_LINE +
        LF_END_OF_LINE +
        LF_END_OF_LINE;
    DecorationDataHolder decorationData = new DecorationDataHolder();
    // Offsets cover the header comment (cppd) and the two keywords (k).
    decorationData.loadSyntaxHighlightingData("0,16,cppd;18,25,k;25,31,k;");
    HtmlTextDecorator htmlTextDecorator = new HtmlTextDecorator();

    List<String> htmlOutput = htmlTextDecorator.decorateTextWithHtml(classDeclarationSample, decorationData);

    assertThat(htmlOutput).containsExactly(
        "<span class=\"cppd\">/*</span>",
        "<span class=\"cppd\"> * Header</span>",
        "<span class=\"cppd\"> */</span>",
        "",
        "<span class=\"k\">public </span><span class=\"k\">class </span>HelloWorld {",
        "}",
        "",
        "",
        "" // the final trailing newline also produces an empty line
    );
}
/**
 * Interprets common truthy strings as {@code true}: the value is trimmed, lower-cased,
 * and looked up in the configured truthy-value set ({@code TREE_SET}).
 *
 * @param valueStr candidate value; blank or {@code null} yields {@code false}
 * @return {@code true} if the normalized value is in the truthy set
 */
public static boolean toBoolean(String valueStr) {
    if (StringUtil.isNotBlank(valueStr)) {
        // Lower-case with a fixed locale so parsing is stable regardless of the JVM
        // default locale (e.g. the Turkish dotless-i mapping of 'I').
        valueStr = valueStr.trim().toLowerCase(java.util.Locale.ROOT);
        return TREE_SET.contains(valueStr);
    }
    return false;
}
// Spot-checks representative truthy spellings; each must parse to true.
@Test
public void assertToBoolean() {
    Assert.assertTrue(BooleanUtil.toBoolean("true"));
    Assert.assertTrue(BooleanUtil.toBoolean("yes"));
    Assert.assertTrue(BooleanUtil.toBoolean("1"));
}
/**
 * Clears loss-measurement history statistics for the given MEP.
 * Not yet implemented — this stub unconditionally rejects the operation.
 *
 * @throws UnsupportedOperationException always, until an implementation is provided
 */
@Override
public void clearLossHistoryStats(MdId mdName, MaIdShort maName, MepId mepId)
        throws CfmConfigException {
    throw new UnsupportedOperationException("Not yet implemented");
}
// Pins the current stub behavior: clearing loss history must throw
// UnsupportedOperationException until the operation is implemented.
@Test
public void testClearLossHistoryStatsOnLm() throws CfmConfigException {
    //TODO: Implement underlying method
    try {
        soamManager.clearLossHistoryStats(MDNAME1, MANAME1, MEPID1, LMID101);
        fail("Expecting UnsupportedOperationException");
    } catch (UnsupportedOperationException e) {
        // expected — intentionally ignored
    }
}
@Override public boolean shouldHandle(OutOfMemoryError oome) { try { if (GC_OVERHEAD_LIMIT_EXCEEDED.equals(oome.getMessage())) { return true; } long maxMemory = memoryInfoAccessor.getMaxMemory(); long totalMemory = memoryInfoAccessor.getTotalMemory(); // if total-memory has not reached to max-memory // then no need to handle this if (totalMemory < maxMemory - MAX_TOTAL_DELTA) { return false; } // since previous total vs max memory comparison // freeMemory should return the same result // with = (maxMemory - totalMemory + freeMemory) long freeMemory = memoryInfoAccessor.getFreeMemory(); if (freeMemory > maxMemory * freeVersusMaxRatio) { return false; } } catch (Throwable ignored) { ignore(ignored); } return true; }
// The "GC overhead limit exceeded" message must always be handled, regardless of
// the current heap occupancy.
@Test
public void testShouldHandle() {
    assertTrue(outOfMemoryHandler.shouldHandle(new OutOfMemoryError(DefaultOutOfMemoryHandler.GC_OVERHEAD_LIMIT_EXCEEDED)));
}
/**
 * Resolves a method for the given call expression, coercing empty-collection
 * arguments (List/Map are ambiguous when empty) between collection types until a
 * matching method is found. Arguments at the indexes in
 * {@code emptyCollectionArgumentsIndexes} are flipped (e.g. List &lt;-&gt; Map) in a
 * combinatorial search; when a match is found, {@code arguments} is modified in
 * place to reflect the coerced types.
 *
 * @param methodExpression               call expression to resolve; must not be null
 * @param mvelCompilerContext            compiler context; must not be null
 * @param scope                          optional scope (receiver) of the call
 * @param arguments                      argument expressions; mutated on successful coercion; must not be null
 * @param emptyCollectionArgumentsIndexes indexes of arguments that are empty collections; must not be null
 * @return pair of (resolved method if any, scope)
 * @throws NullPointerException     if any required parameter is null
 * @throws IllegalArgumentException if there are more empty-collection indexes than arguments
 */
public static Pair<Optional<Method>, Optional<TypedExpression>> resolveMethodWithEmptyCollectionArguments(
        final MethodCallExpr methodExpression,
        final MvelCompilerContext mvelCompilerContext,
        final Optional<TypedExpression> scope,
        List<TypedExpression> arguments,
        List<Integer> emptyCollectionArgumentsIndexes) {
    Objects.requireNonNull(methodExpression, "MethodExpression parameter cannot be null as the method searches methods based on this expression!");
    Objects.requireNonNull(mvelCompilerContext, "MvelCompilerContext parameter cannot be null!");
    Objects.requireNonNull(arguments, "Arguments parameter cannot be null! Use an empty list instance if needed instead.");
    Objects.requireNonNull(emptyCollectionArgumentsIndexes, "EmptyListArgumentIndexes parameter cannot be null! Use an empty list instance if needed instead.");
    if (emptyCollectionArgumentsIndexes.size() > arguments.size()) {
        throw new IllegalArgumentException("There cannot be more empty collection arguments than all arguments! emptyCollectionArgumentsIndexes parameter has more items than arguments parameter. "
                                                   + "(" + emptyCollectionArgumentsIndexes.size() + " > " + arguments.size() + ")");
    } else {
        // Work on a copy so the caller's argument list is only mutated on success.
        final List<TypedExpression> coercedArgumentsTypesList = new ArrayList<>(arguments);
        Pair<Optional<Method>, Optional<TypedExpression>> resolveMethodResult =
                MethodResolutionUtils.resolveMethod(methodExpression, mvelCompilerContext, scope, coercedArgumentsTypesList);
        if (resolveMethodResult.a.isPresent()) {
            // The uncoerced argument types already match a method.
            return resolveMethodResult;
        } else {
            // Rather work only with the argumentsType and when a method is resolved, flip the arguments list based on it.
            // This needs to go through all possible combinations.
            final int indexesListSize = emptyCollectionArgumentsIndexes.size();
            for (int numberOfProcessedIndexes = 0; numberOfProcessedIndexes < indexesListSize; numberOfProcessedIndexes++) {
                for (int indexOfEmptyListIndex = numberOfProcessedIndexes; indexOfEmptyListIndex < indexesListSize; indexOfEmptyListIndex++) {
                    // Flip the collection type at this index and retry resolution.
                    switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
                    resolveMethodResult =
                            MethodResolutionUtils.resolveMethod(methodExpression, mvelCompilerContext, scope, coercedArgumentsTypesList);
                    if (resolveMethodResult.a.isPresent()) {
                        // Found a match: propagate the coerced collection types back
                        // into the caller's argument list.
                        modifyArgumentsBasedOnCoercedCollectionArguments(arguments, coercedArgumentsTypesList);
                        return resolveMethodResult;
                    }
                    // No match: flip back before trying the next combination.
                    switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
                }
                // Advance the "fixed" prefix by flipping the index at the outer position.
                switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(numberOfProcessedIndexes));
            }
            // No method found, return empty.
            return new Pair<>(Optional.empty(), scope);
        }
    }
}
// A null arguments list (and null indexes list) must be rejected eagerly with an NPE
// from the requireNonNull precondition checks.
@Test
public void resolveMethodWithEmptyCollectionArgumentsArgumentsAreNull() {
    Assertions.assertThatThrownBy(
            () -> MethodResolutionUtils.resolveMethodWithEmptyCollectionArguments(
                    new MethodCallExpr(),
                    new MvelCompilerContext(null),
                    Optional.empty(),
                    null,
                    null))
            .isInstanceOf(NullPointerException.class);
}
/**
 * Sends a JSON-RPC request over the web socket and returns a future for the typed
 * response. The pending future is registered under the request id before the frame
 * is sent so that even an immediate reply finds it; a send failure completes the
 * future exceptionally via {@code closeRequest}.
 *
 * @param request      request to send (its id keys the pending-reply map)
 * @param responseType expected response class
 * @return future completed when the matching reply arrives or sending fails
 */
@Override
public <T extends Response> CompletableFuture<T> sendAsync(
        Request request, Class<T> responseType) {

    final long requestId = request.getId();
    final CompletableFuture<T> result = new CompletableFuture<>();

    // Register first: a fast reply must always find its pending future.
    requestForId.put(requestId, new WebSocketRequest<>(result, responseType));
    try {
        sendRequest(request, requestId);
    } catch (IOException e) {
        // Unregister and propagate the failure through the future.
        closeRequest(requestId, e);
    }

    return result;
}
// A reply carrying the pending request's id must complete the future returned by
// sendAsync with the deserialized response.
@Test
public void testReceiveReply() throws Exception {
    CompletableFuture<Web3ClientVersion> reply = service.sendAsync(request, Web3ClientVersion.class);
    sendGethVersionReply();

    assertTrue(reply.isDone());
    assertEquals("geth-version", reply.get().getWeb3ClientVersion());
}
public String manifest(final String container, final List<StorageObject> objects) { JsonArray manifestSLO = new JsonArray(); for(StorageObject s : objects) { JsonObject segmentJSON = new JsonObject(); // this is the container and object name in the format {container-name}/{object-name} segmentJSON.addProperty("path", String.format("/%s/%s", container, s.getName())); // MD5 checksum of the content of the segment object segmentJSON.addProperty("etag", s.getMd5sum()); segmentJSON.addProperty("size_bytes", s.getSize()); manifestSLO.add(segmentJSON); } return manifestSLO.toString(); }
// The manifest must serialize segments in order with path, etag and size_bytes keys
// in this exact JSON shape.
@Test
public void testManifest() {
    final SwiftSegmentService service = new SwiftSegmentService(session);
    final Path container = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume));
    container.attributes().setRegion("IAD");
    final StorageObject a = new StorageObject("a");
    a.setMd5sum("m1");
    a.setSize(1L);
    final StorageObject b = new StorageObject("b");
    b.setMd5sum("m2");
    b.setSize(1L);
    final String manifest = service.manifest(container.getName(), Arrays.asList(a, b));
    assertEquals("[{\"path\":\"/test.cyberduck.ch/a\",\"etag\":\"m1\",\"size_bytes\":1},{\"path\":\"/test.cyberduck.ch/b\",\"etag\":\"m2\",\"size_bytes\":1}]", manifest);
}
/**
 * Returns a fluent assertion whose proto-value comparisons ignore the order of
 * repeated fields. Delegates to a copy of the current config with
 * repeated-field-order checking disabled; this instance is unchanged.
 */
public MultimapWithProtoValuesFluentAssertion<M> ignoringRepeatedFieldOrderForValues() {
    return usingConfig(config.ignoringRepeatedFieldOrder());
}
// Multimaps mixing different proto message types under different keys must still
// compare as equal when only repeated-field order differs within each message.
@Test
public void testCompareMultipleMessageTypes() {
    // Don't run this test twice.
    if (!testIsRunOnce()) {
        return;
    }

    expectThat(
            ImmutableMultimap.of(
                2, TestMessage2.newBuilder().addRString("foo").addRString("bar").build(),
                2, TestMessage2.newBuilder().addRString("quibble").addRString("frozzit").build(),
                3, TestMessage3.newBuilder().addRString("baz").addRString("qux").build()))
        .ignoringRepeatedFieldOrderForValues()
        .containsExactlyEntriesIn(
            ImmutableMultimap.of(
                2, TestMessage2.newBuilder().addRString("frozzit").addRString("quibble").build(),
                3, TestMessage3.newBuilder().addRString("qux").addRString("baz").build(),
                2, TestMessage2.newBuilder().addRString("bar").addRString("foo").build()));
}
/**
 * Removes drain-status entries for workers that are no longer part of the current
 * cluster membership, preventing the drain map from accumulating stale worker ids.
 * Logs timing only when something was actually removed.
 *
 * @return the number of stale worker ids removed from the drain map
 */
protected synchronized int updateWorkerDrainMap() {
    long startTime = System.nanoTime();
    int numRemovedWorkerIds = 0;
    if (!drainOpStatusMap.isEmpty()) {
        val currentMembership = membershipManager.getCurrentMembership()
                .stream().map(WorkerInfo::getWorkerId).collect(Collectors.toSet());
        // removeIf on the key-set view removes the corresponding map entries directly,
        // replacing the previous collect-then-remove two-pass loop.
        final int sizeBefore = drainOpStatusMap.size();
        drainOpStatusMap.keySet().removeIf(workerId -> !currentMembership.contains(workerId));
        numRemovedWorkerIds = sizeBefore - drainOpStatusMap.size();
    }
    if (numRemovedWorkerIds > 0) {
        log.info("cleanupWorkerDrainMap removed {} stale workerIds in {} sec",
                numRemovedWorkerIds,
                (System.nanoTime() - startTime) / Math.pow(10, 9));
    }
    return numRemovedWorkerIds;
}
// Seeds drain statuses for 5 workers, shrinks membership to the first 3, and checks
// that exactly the 2 stale entries are removed while surviving entries match
// both the old map and the current membership.
@Test
public void testUpdateWorkerDrainMap() throws Exception {
    final int numWorkersInDrainMap = 5;
    String workerId;

    // Set up drain status for some of those workers.
    SchedulerManager.DrainOpStatus drainOp;
    for (int ix = 0; ix < numWorkersInDrainMap; ix++) {
        workerId = "worker-" + ix;
        drainOp = SchedulerManager.DrainOpStatus.DrainCompleted;
        if (ix % 2 == 0) {
            drainOp = SchedulerManager.DrainOpStatus.DrainInProgress;
        }
        callGetDrainStatus(workerId, DrainOps.SetDrainStatus, drainOp);
    }
    val oldDrainMap = schedulerManager.getDrainOpsStatusMap();

    // Current membership only contains the first 3 workers.
    final int numWorkersInCurrentMembership = 3;
    List<WorkerInfo> workerInfoList = new LinkedList<>();
    final String workerHostName = "workerHostName";
    final int workerPort = 5000;
    for (int ix = 0; ix < numWorkersInCurrentMembership; ix++) {
        workerId = "worker-" + ix;
        workerInfoList.add(WorkerInfo.of(workerId, workerHostName, workerPort));
    }
    doReturn(workerInfoList).when(membershipManager).getCurrentMembership();

    val numWorkersCleanedUp = schedulerManager.updateWorkerDrainMap();
    Assert.assertEquals(numWorkersCleanedUp, numWorkersInDrainMap - numWorkersInCurrentMembership);

    // Every surviving entry existed before and corresponds to a current member.
    val newDrainMap = schedulerManager.getDrainOpsStatusMap();
    for (val worker : newDrainMap.keySet()) {
        Assert.assertTrue(oldDrainMap.get(worker) != null);

        WorkerInfo matchedWorker = workerInfoList.stream()
                .filter(winfo -> worker.equals(winfo.getWorkerId()))
                .findAny()
                .orElse(null);
        Assert.assertTrue(matchedWorker != null);
    }
}
/**
 * Produces one request per GET (query) parameter with that parameter's value replaced
 * by {@code payload}. Convenience overload of the four-argument variant with no
 * default-parameter injection and no skipped parameter names.
 */
public static ImmutableList<HttpRequest> fuzzGetParameters(HttpRequest request, String payload) {
    return fuzzGetParameters(request, payload, Optional.empty(), ImmutableSet.of());
}
// Each query parameter must be fuzzed individually: one output request per parameter,
// with only that parameter's value replaced by the payload.
@Test
public void fuzzGetParameters_whenGetParameters_fuzzesAllParameters() {
    ImmutableList<HttpRequest> requestsWithFuzzedGetParameters =
        ImmutableList.of(
            HttpRequest.get("https://google.com?key=<payload>&other=test")
                .withEmptyHeaders()
                .build(),
            HttpRequest.get("https://google.com?key=value&other=<payload>")
                .withEmptyHeaders()
                .build());

    assertThat(FuzzingUtils.fuzzGetParameters(REQUEST_WITH_GET_PARAMETERS, "<payload>"))
        .containsAtLeastElementsIn(requestsWithFuzzedGetParameters);
}