focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Creates a new RDA session from the supplied card information and returns
 * everything the caller needs to continue: public URL, session id, confirm
 * secret and expiration time.
 *
 * @param request validated body carrying card details plus callback information
 * @return response describing the freshly persisted session
 * @throws BadRequestException when neither driving licences nor travel documents are supplied
 */
@Operation(summary= "Create new session and return information")
@PostMapping(value = "new", consumes = "application/json", produces = "application/json")
public CreateResponse create(@Valid @RequestBody CreateRequest request) {
    final boolean noCards = request.getDrivingLicences().isEmpty()
            && request.getTravelDocuments().isEmpty();
    if (noCards) {
        throw new BadRequestException("No card information specified");
    }

    // Build and persist the session before answering the caller.
    final RdaSession newSession = RdaSession.create(
            request.getReturnUrl(),
            request.getConfirmId(),
            request.getClientIpAddress(),
            timeout);
    newSession.getApp().setDrivingLicences(request.getDrivingLicences());
    newSession.getApp().setTravelDocuments(request.getTravelDocuments());
    sessionRepo.save(newSession);

    final CreateResponse result = new CreateResponse();
    result.setUrl(publicUrl);
    result.setSessionId(newSession.getId());
    result.setConfirmSecret(newSession.getConfirmSecret());
    result.setExpiration(newSession.getExpiration());
    return result;
}
/**
 * Verifies that {@code create} rejects a request carrying no card
 * information at all with a {@link BadRequestException}.
 */
@Test
public void testCreateRestServiceWithError() {
    // Request with both card collections empty — invalid by contract.
    CreateRequest request = new CreateRequest();
    request.setDrivingLicences(new ArrayList<>());
    request.setTravelDocuments(new ArrayList<>());

    // Fixed: removed the stray double semicolon after the controller call.
    Exception exception = assertThrows(BadRequestException.class, () -> {
        controller.create(request);
    });
    assertEquals("No card information specified", exception.getMessage());
}
@VisibleForTesting RoleDO validateRoleForUpdate(Long id) { RoleDO role = roleMapper.selectById(id); if (role == null) { throw exception(ROLE_NOT_EXISTS); } // 内置角色,不允许删除 if (RoleTypeEnum.SYSTEM.getType().equals(role.getType())) { throw exception(ROLE_CAN_NOT_UPDATE_SYSTEM_TYPE_ROLE); } return role; }
@Test public void testValidateUpdateRole_systemRoleCanNotBeUpdate() { RoleDO roleDO = randomPojo(RoleDO.class, o -> o.setType(RoleTypeEnum.SYSTEM.getType())); roleMapper.insert(roleDO); // 准备参数 Long id = roleDO.getId(); assertServiceException(() -> roleService.validateRoleForUpdate(id), ROLE_CAN_NOT_UPDATE_SYSTEM_TYPE_ROLE); }
/**
 * Runs the Louvain-style community optimisation loop on {@code theStructure}
 * and returns the resulting scores in a map with keys {@code "modularity"}
 * (resolution 1.0) and {@code "modularityResolution"} (the requested resolution).
 * Also fills {@code comStructure} with the final community index per node.
 * Honors cancellation via {@code isCanceled}, returning whatever has been
 * accumulated so far (possibly an empty map).
 *
 * @param currentResolution resolution parameter of the modularity measure
 * @param randomized        if true, node traversal starts at a random offset
 * @param weighted          if true, edge weights are used in the final Q computation
 */
protected HashMap<String, Double> computeModularity(Graph graph, CommunityStructure theStructure,
        int[] comStructure, double currentResolution, boolean randomized, boolean weighted) {
    isCanceled = false;
    Progress.start(progress);
    Random rand = new Random();
    double totalWeight = theStructure.graphWeightSum;
    // Snapshot of the node degrees; zoomOut() mutates the structure's weights.
    double[] nodeDegrees = theStructure.weights.clone();
    HashMap<String, Double> results = new HashMap<>();
    if (isCanceled) {
        return results;
    }
    boolean someChange = true;
    // Outer loop: repeat local moving + graph coarsening until nothing improves.
    while (someChange) {
        someChange = false;
        boolean localChange = true;
        // Inner loop: move single nodes to their best neighbouring community.
        while (localChange) {
            localChange = false;
            int start = 0;
            if (randomized) {
                // Random starting node to decorrelate successive passes.
                start = Math.abs(rand.nextInt()) % theStructure.N;
            }
            int step = 0;
            // Circular sweep over all N nodes beginning at `start`.
            for (int i = start; step < theStructure.N; i = (i + 1) % theStructure.N) {
                step++;
                Community bestCommunity = updateBestCommunity(theStructure, i, currentResolution);
                if ((theStructure.nodeCommunities[i] != bestCommunity) && (bestCommunity != null)) {
                    theStructure.moveNodeTo(i, bestCommunity);
                    localChange = true;
                }
                if (isCanceled) {
                    return results;
                }
            }
            someChange = localChange || someChange;
            if (isCanceled) {
                return results;
            }
        }
        if (someChange) {
            // Collapse communities into super-nodes and optimise again.
            theStructure.zoomOut();
        }
    }
    fillComStructure(graph, theStructure, comStructure);
    double[] degreeCount = fillDegreeCount(graph, theStructure, comStructure, nodeDegrees, weighted);
    // Q at resolution 1.0 and at the caller-supplied resolution.
    double computedModularity = finalQ(comStructure, degreeCount, graph, theStructure, totalWeight, 1., weighted);
    double computedModularityResolution = finalQ(comStructure, degreeCount, graph, theStructure, totalWeight, currentResolution, weighted);
    results.put("modularity", computedModularity);
    results.put("modularityResolution", computedModularityResolution);
    return results;
}
/**
 * Builds a barbell graph (two complete K4 cliques joined by a single bridge
 * edge) where the bridge carries a very large weight (100), and checks that
 * weighted modularity pulls the two bridge endpoints into the same community.
 */
@Test
public void testComputeBarbellGraphModularityHasHighWeight() {
    // First clique: complete undirected graph on 4 nodes ("0".."3").
    GraphModel graphModel = GraphGenerator.generateCompleteUndirectedGraph(4);
    UndirectedGraph undirectedGraph = graphModel.getUndirectedGraph();
    // Second clique: nodes "4".."7", fully connected below.
    Node[] nodes = new Node[4];
    for (int i = 0; i < 4; i++) {
        Node currentNode = graphModel.factory().newNode(((Integer) (i + 4)).toString());
        nodes[i] = currentNode;
        undirectedGraph.addNode(currentNode);
    }
    for (int i = 0; i < 3; i++) {
        for (int j = i + 1; j < 4; j++) {
            Edge currentEdge = graphModel.factory().newEdge(nodes[i], nodes[j], false);
            undirectedGraph.addEdge(currentEdge);
        }
    }
    // Bridge edge "0"–"5" with weight 100 — dominates the clique edges.
    Edge currentEdge = graphModel.factory().newEdge(undirectedGraph.getNode("0"),
        undirectedGraph.getNode("5"), 0, 100.f, false);
    undirectedGraph.addEdge(currentEdge);
    UndirectedGraph graph = graphModel.getUndirectedGraph();
    Modularity mod = new Modularity();
    Modularity.CommunityStructure theStructure = mod.new CommunityStructure(graph);
    int[] comStructure = new int[graph.getNodeCount()];
    HashMap<String, Double> modularityValues =
        mod.computeModularity(graph, theStructure, comStructure, 1., true, true);
    // The heavily weighted bridge endpoints should share a community.
    int class4 = comStructure[0];
    int class5 = comStructure[5];
    assertEquals(class4, class5);
}
/** Wraps the given XA topic connection so that its JMS operations are traced. */
public XATopicConnection xaTopicConnection(XATopicConnection connection) {
    final XATopicConnection traced = TracingXAConnection.create(connection, this);
    return traced;
}
/** The returned connection must be the tracing decorator, not the raw mock. */
@Test
void xaTopicConnection_wrapsInput() {
    XATopicConnection wrapped = jmsTracing.xaTopicConnection(mock(XATopicConnection.class));
    assertThat(wrapped).isInstanceOf(TracingXAConnection.class);
}
/**
 * Installs the configuration, augmenting any non-null value with the
 * security settings first.
 */
@Override
public void setConf(Configuration conf) {
    Configuration effective = conf;
    if (effective != null) {
        effective = addSecurityConfiguration(effective);
    }
    super.setConf(effective);
}
/** The --forcefence flag must be accepted when listed before positional args. */
@Test
public void testForceFenceOptionListedBeforeArgs() throws Exception {
    Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
    // HA configuration with a fencer that always succeeds.
    HdfsConfiguration haConf = getHAConf();
    haConf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
    tool.setConf(haConf);
    assertEquals(0, runTool("-failover", "--forcefence", "nn1", "nn2"));
}
/**
 * Serializes this message into the given buffer by delegating to the
 * shared static WRITER instance.
 *
 * @param byteBuf destination buffer
 * @throws LispWriterException if serialization fails
 */
@Override
public void writeTo(ByteBuf byteBuf) throws LispWriterException {
    WRITER.writeTo(byteBuf, this);
}
/** Round-trips notify1 through the writer/reader pair and checks equality. */
@Test
public void testSerialization() throws LispReaderException, LispWriterException, LispParseError {
    ByteBuf buffer = Unpooled.buffer();
    new NotifyWriter().writeTo(buffer, notify1);
    // Deserialize from the same buffer and compare with the original.
    LispMapNotify roundTripped = new NotifyReader().readFrom(buffer);
    new EqualsTester().addEqualityGroup(notify1, roundTripped).testEquals();
}
/**
 * Copies the properties of {@code source} into a new instance of {@code tClass}.
 *
 * @param source           source bean; {@code null} yields {@code null}
 * @param tClass           target type to instantiate
 * @param ignoreProperties property names to skip during copying
 * @return populated new instance, or {@code null} when source is {@code null}
 */
public static <T> T copyProperties(Object source, Class<T> tClass, String... ignoreProperties) {
    if (source == null) {
        return null;
    }
    final T target = ReflectUtil.newInstanceIfPossible(tClass);
    final CopyOptions options = CopyOptions.create().setIgnoreProperties(ignoreProperties);
    copyProperties(source, target, options);
    return target;
}
/**
 * Copies a snake_case bean into a camelCase bean, relying on the
 * field-name editor to bridge the naming conventions.
 */
@Test
public void beanToBeanCopyOptionsTest() {
    final ChildVo1 childVo1 = new ChildVo1();
    childVo1.setChild_address("中国北京五道口");
    childVo1.setChild_name("张三");
    childVo1.setChild_father_name("张无忌");
    childVo1.setChild_mother_name("赵敏敏");

    // Map source field names (child_address) to target names (childAddress).
    final CopyOptions copyOptions = CopyOptions.create().
        //setIgnoreNullValue(true).
        //setIgnoreCase(false).
        setFieldNameEditor(StrUtil::toCamelCase);

    final ChildVo2 childVo2 = new ChildVo2();
    BeanUtil.copyProperties(childVo1, childVo2, copyOptions);

    assertEquals(childVo1.getChild_address(), childVo2.getChildAddress());
    assertEquals(childVo1.getChild_name(), childVo2.getChildName());
    assertEquals(childVo1.getChild_father_name(), childVo2.getChildFatherName());
    assertEquals(childVo1.getChild_mother_name(), childVo2.getChildMotherName());
}
/**
 * Renders the schema as SQL text, optionally stripping the outermost
 * STRUCT wrapper when formatting as a column list.
 */
@Override
public String format(final Schema schema) {
    final String sql = SchemaWalker.visit(schema, new Converter()) + typePostFix(schema);
    if (options.contains(Option.AS_COLUMN_LIST)) {
        return stripTopLevelStruct(sql);
    }
    return sql;
}
/** Both formatter configurations render an optional FLOAT64 schema as DOUBLE. */
@Test
public void shouldFormatOptionalDouble() {
    assertThat(DEFAULT.format(Schema.OPTIONAL_FLOAT64_SCHEMA), is("DOUBLE"));
    assertThat(STRICT.format(Schema.OPTIONAL_FLOAT64_SCHEMA), is("DOUBLE"));
}
/**
 * Resets the tracker: starts asynchronous cleanup of the sealed buckets
 * and synchronously empties all in-memory state.
 *
 * @return future completing once the immutable buckets are cleaned
 */
@Override
public synchronized CompletableFuture<Void> clear() {
    CompletableFuture<Void> cleanup = cleanImmutableBuckets();
    // In-memory state is cleared immediately; only bucket cleanup is async.
    sharedBucketPriorityQueue.clear();
    lastMutableBucket.clear();
    snapshotSegmentLastIndexTable.clear();
    numberDelayedMessages = 0;
    return cleanup;
}
/** After clear(), every internal collection and the message count must be empty. */
@Test(dataProvider = "delayedTracker")
public void testClear(BucketDelayedDeliveryTracker tracker)
        throws ExecutionException, InterruptedException, TimeoutException {
    // Load enough messages to force at least one immutable bucket.
    for (int seq = 1; seq <= 1001; seq++) {
        tracker.addMessage(seq, seq, seq * 10);
    }
    assertEquals(tracker.getNumberOfDelayedMessages(), 1001);
    assertTrue(tracker.getImmutableBuckets().asMapOfRanges().size() > 0);
    assertEquals(tracker.getLastMutableBucket().size(), 1);

    tracker.clear().get(1, TimeUnit.MINUTES);

    assertEquals(tracker.getNumberOfDelayedMessages(), 0);
    assertEquals(tracker.getImmutableBuckets().asMapOfRanges().size(), 0);
    assertEquals(tracker.getLastMutableBucket().size(), 0);
    assertEquals(tracker.getSharedBucketPriorityQueue().size(), 0);
    tracker.close();
}
/**
 * Fetches CLUSTER INFO from Redis and exposes the key/value pairs as a
 * Spring Data {@code ClusterInfo}.
 */
@Override
public ClusterInfo clusterGetClusterInfo() {
    RFuture<Map<String, String>> infoFuture =
        executorService.readAsync((String) null, StringCodec.INSTANCE, RedisCommands.CLUSTER_INFO);
    Properties props = new Properties();
    // Copy every reported field into the Properties object ClusterInfo expects.
    syncFuture(infoFuture).forEach(props::setProperty);
    return new ClusterInfo(props);
}
/** Sanity-checks slot accounting reported by CLUSTER INFO. */
@Test
public void testClusterGetClusterInfo() {
    ClusterInfo clusterInfo = connection.clusterGetClusterInfo();
    assertThat(clusterInfo.getSlotsFail()).isEqualTo(0);
    // All slots should be assigned and healthy.
    assertThat(clusterInfo.getSlotsOk()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT);
    assertThat(clusterInfo.getSlotsAssigned()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT);
}
/** Exposes the parameter-checking servlet filter as a Spring bean. */
@Bean
public ParamCheckerFilter checkerFilter(ControllerMethodsCache methodsCache) {
    final ParamCheckerFilter filter = new ParamCheckerFilter(methodsCache);
    return filter;
}
/** The configuration must produce a non-null filter from a methods cache. */
@Test
void testCheckerFilter() {
    ControllerMethodsCache cache = Mockito.mock(ControllerMethodsCache.class);
    ParamCheckerFilter filter = new CheckConfiguration().checkerFilter(cache);
    assertNotNull(filter);
}
/**
 * Static factory building a {@code ValueFormat} from format info plus
 * serde features.
 */
public static ValueFormat of(
    final FormatInfo format,
    final SerdeFeatures features
) {
    final ValueFormat valueFormat = new ValueFormat(format, features);
    return valueFormat;
}
/**
 * Guava's NullPointerTester probes every public static method of
 * ValueFormat with nulls and asserts each rejects them.
 */
@Test
public void shouldThrowNPEs() {
    new NullPointerTester()
        .setDefault(SerdeFeatures.class, SerdeFeatures.of())
        .setDefault(FormatInfo.class, FormatInfo.of("AVRO"))
        .testAllPublicStaticMethods(ValueFormat.class);
}
/**
 * Validates an index name, rejecting names that are empty, too long,
 * contain illegal characters, or start with '-', '_' or '+'.
 *
 * @param indexName candidate index name
 * @throws IllegalArgumentException when the name violates any rule
 */
static void checkValidIndexName(String indexName) {
    // Fixed: an empty name previously fell through to charAt(0) and threw
    // StringIndexOutOfBoundsException instead of the documented IAE.
    if (indexName.isEmpty()) {
        throw new IllegalArgumentException("Index name must not be empty.");
    }
    if (indexName.length() > MAX_INDEX_NAME_LENGTH) {
        throw new IllegalArgumentException(
            "Index name " + indexName
                + " cannot be longer than " + MAX_INDEX_NAME_LENGTH + " characters.");
    }
    Matcher matcher = ILLEGAL_INDEX_NAME_CHARS.matcher(indexName);
    if (matcher.find()) {
        throw new IllegalArgumentException(
            "Index name " + indexName + " is not a valid name. Character \""
                + matcher.group() + "\" is not allowed.");
    }
    if (indexName.charAt(0) == '-' || indexName.charAt(0) == '_' || indexName.charAt(0) == '+') {
        throw new IllegalArgumentException(
            "Index name " + indexName + " can not start with -, _ or +.");
    }
}
/** A NUL character embedded in the name must be rejected. */
@Test
public void testCheckValidIndexNameThrowsErrorWhenNameContainsNull() {
    assertThrows(IllegalArgumentException.class, () -> checkValidIndexName("test\0collection"));
}
/**
 * Appends MySQL's "ON DUPLICATE KEY UPDATE col = VALUES(col), ..." clause
 * for every column of the table, with identifiers properly quoted.
 */
void appendOnDuplicateClause(StringBuilder sb) {
    sb.append("ON DUPLICATE KEY UPDATE ");
    Iterator<String> fields = jdbcTable.dbFieldNames().iterator();
    while (fields.hasNext()) {
        String column = fields.next();
        // <quoted> = VALUES(<quoted>)
        dialect.quoteIdentifier(sb, column);
        sb.append(" = VALUES(");
        dialect.quoteIdentifier(sb, column);
        sb.append(')');
        // Comma-separate all but the last assignment.
        if (fields.hasNext()) {
            sb.append(',');
        }
    }
}
/** The generated clause must quote columns and comma-separate assignments. */
@Test
void appendOnDuplicateClause() {
    MySQLUpsertQueryBuilder builder = new MySQLUpsertQueryBuilder(jdbcTable, dialect);
    StringBuilder sb = new StringBuilder();
    builder.appendOnDuplicateClause(sb);
    String valuesClause = sb.toString();
    assertThat(valuesClause).isEqualTo(
        "ON DUPLICATE KEY UPDATE "
            + "`field1` = VALUES(`field1`),"
            + "`field2` = VALUES(`field2`)");
}
/**
 * Converts a delimiter-separated filter component into an alternation
 * regex: literal dots are escaped and '*' becomes the wildcard ".*".
 * E.g. "a.b*,c" -> "(a\.b.*|c)".
 *
 * @param scopeOrNameComponent raw filter component (may contain '*' wildcards)
 * @return compiled alternation pattern
 */
@VisibleForTesting
static Pattern convertToPattern(String scopeOrNameComponent) {
    final String[] split = scopeOrNameComponent.split(LIST_DELIMITER);
    final String rawPattern = Arrays.stream(split)
        // Fixed: the replacement must be the two characters '\' '.'; the old
        // replacement "\\." unescaped to a plain '.' (a no-op), leaving dots
        // as regex wildcards. "\\\\." yields the literal "\." replacement.
        .map(s -> s.replaceAll("\\.", "\\\\."))
        .map(s -> s.replaceAll("\\*", ".*"))
        .collect(Collectors.joining("|", "(", ")"));
    return Pattern.compile(rawPattern);
}
/** A wildcard-free component becomes a plain alternation matching only itself. */
@Test
void testConvertToPatternWithoutWildcards() {
    final Pattern compiled = DefaultMetricFilter.convertToPattern("numRecordsIn");
    assertThat(compiled.toString()).isEqualTo("(numRecordsIn)");
    // Exact name matches; unrelated names do not.
    assertThat(compiled.matcher("numRecordsIn").matches()).isTrue();
    assertThat(compiled.matcher("numBytesOut").matches()).isFalse();
}
/**
 * Validates that {@code obj} is non-null.
 *
 * @param obj     value to check
 * @param argName argument name used in the error message
 * @throws IllegalArgumentException (via checkArgument) when {@code obj} is null
 */
public static void checkNotNull(Object obj, String argName) {
    checkArgument(obj != null, "'%s' must not be null.", argName);
}
@Test public void testCheckNotNull() throws Exception { String nonNullArg = "nonNullArg"; String nullArg = null; // Should not throw. Validate.checkNotNull(nonNullArg, "nonNullArg"); // Verify it throws. intercept(IllegalArgumentException.class, "'nullArg' must not be null", () -> Validate.checkNotNull(nullArg, "nullArg")); }
/**
 * Reduces with an adder and subtractor using an unnamed operation;
 * delegates to the named variant with an empty name.
 */
@Override
public KTable<K, V> reduce(final Reducer<V> adder,
                           final Reducer<V> subtractor,
                           final Materialized<K, V, KeyValueStore<Bytes, byte[]>> materialized) {
    return reduce(adder, subtractor, NamedInternal.empty(), materialized);
}
/** A null Materialized argument must be rejected with NullPointerException. */
@Test
public void shouldThrowNullPointerOnReduceWhenMaterializedIsNull() {
    assertThrows(
        NullPointerException.class,
        () -> groupedTable.reduce(
            MockReducer.STRING_ADDER,
            MockReducer.STRING_REMOVER,
            null));
}
/**
 * Generates the source code of a static field-accessor class for
 * {@code beanClass}: one static getter per non-private field (record
 * components use accessor-method calls) and one static setter per field
 * whose raw type is package-level accessible from the generated source.
 *
 * @param beanClass bean or record type to generate accessors for
 * @return generated Java source as a string
 */
public static String genCode(Class<?> beanClass) {
    CodegenContext ctx = new CodegenContext();
    ctx.setPackage(CodeGenerator.getPackage(beanClass));
    String className = accessorClassName(beanClass);
    ctx.setClassName(className);
    boolean isRecord = RecordUtils.isRecord(beanClass);
    // filter out super classes
    Collection<Descriptor> descriptors = Descriptor.getAllDescriptorsMap(beanClass, false).values();
    for (Descriptor descriptor : descriptors) {
        // Private fields cannot be touched from generated code at all.
        if (Modifier.isPrivate(descriptor.getModifiers())) {
            continue;
        }
        // Whether the field's raw type is visible from the generated package;
        // if not, the getter falls back to returning Object.
        boolean accessible = sourcePkgLevelAccessible(descriptor.getRawType());
        { // getter
            String methodName = descriptor.getName();
            String codeBody;
            Class<?> returnType = accessible ? descriptor.getRawType() : Object.class;
            if (isRecord) {
                // Records expose components via accessor methods: obj.field()
                codeBody = StringUtils.format(
                    "return ${obj}.${fieldName}();",
                    "obj", OBJ_NAME,
                    "fieldName", descriptor.getName());
            } else {
                // Plain beans: direct field read obj.field
                codeBody = StringUtils.format(
                    "return ${obj}.${fieldName};",
                    "obj", OBJ_NAME,
                    "fieldName", descriptor.getName());
            }
            ctx.addStaticMethod(methodName, codeBody, returnType, beanClass, OBJ_NAME);
        }
        // Setter is only generated when the field type is accessible
        // (records are immutable, but this also skips them since their
        // fields are private and filtered above).
        if (accessible) {
            String methodName = descriptor.getName();
            String codeBody = StringUtils.format(
                "${obj}.${fieldName} = ${fieldValue};",
                "obj", OBJ_NAME,
                "fieldName", descriptor.getName(),
                "fieldValue", FIELD_VALUE);
            ctx.addStaticMethod(
                methodName, codeBody, void.class, beanClass, OBJ_NAME,
                descriptor.getRawType(), FIELD_VALUE);
        }
        // getter/setter may lose some inner state of an object, so we set them to null to avoid
        // creating getter/setter accessor.
    }
    return ctx.genCode();
}
/** Smoke test: generating accessor code for A must not throw. */
@Test
public void genCode() {
    // Fixed: removed the stray empty statement (lone semicolon).
    System.out.println(AccessorHelper.genCode(A.class));
}
/**
 * Derives a predicate Domain for a column from its ORC column statistics.
 * Falls back progressively: no rows -> none; no stats -> all; zero values
 * -> only-null; otherwise dispatches on the column type to build a range
 * domain from the matching statistics kind, defaulting to "all values,
 * possibly null" when no statistics apply.
 *
 * @param type             the column's SQL type
 * @param rowCount         total number of rows in the stripe/file
 * @param columnStatistics ORC statistics for the column (may be null)
 * @return domain describing which values can appear in the column
 */
@VisibleForTesting
public static Domain getDomain(Type type, long rowCount, ColumnStatistics columnStatistics) {
    if (rowCount == 0) {
        return Domain.none(type);
    }
    if (columnStatistics == null) {
        return Domain.all(type);
    }
    if (columnStatistics.hasNumberOfValues() && columnStatistics.getNumberOfValues() == 0) {
        return Domain.onlyNull(type);
    }
    // Fewer non-null values than rows implies at least one null.
    boolean hasNullValue = columnStatistics.getNumberOfValues() != rowCount;
    if (type.getJavaType() == boolean.class && columnStatistics.getBooleanStatistics() != null) {
        BooleanStatistics booleanStatistics = columnStatistics.getBooleanStatistics();
        boolean hasTrueValues = (booleanStatistics.getTrueValueCount() != 0);
        boolean hasFalseValues = (columnStatistics.getNumberOfValues() != booleanStatistics.getTrueValueCount());
        if (hasTrueValues && hasFalseValues) {
            return Domain.all(BOOLEAN);
        }
        if (hasTrueValues) {
            return Domain.create(ValueSet.of(BOOLEAN, true), hasNullValue);
        }
        if (hasFalseValues) {
            return Domain.create(ValueSet.of(BOOLEAN, false), hasNullValue);
        }
    } else if (isShortDecimal(type) && columnStatistics.getDecimalStatistics() != null) {
        // Short decimals are stored as unscaled longs after rescaling.
        return createDomain(type, hasNullValue, columnStatistics.getDecimalStatistics(),
            value -> rescale(value, (DecimalType) type).unscaledValue().longValue());
    } else if (isLongDecimal(type) && columnStatistics.getDecimalStatistics() != null) {
        // Long decimals are encoded from the rescaled unscaled BigInteger.
        return createDomain(type, hasNullValue, columnStatistics.getDecimalStatistics(),
            value -> encodeUnscaledValue(rescale(value, (DecimalType) type).unscaledValue()));
    } else if (isCharType(type) && columnStatistics.getStringStatistics() != null) {
        // CHAR comparisons ignore trailing spaces and fixed length.
        return createDomain(type, hasNullValue, columnStatistics.getStringStatistics(),
            value -> truncateToLengthAndTrimSpaces(value, type));
    } else if (isVarcharType(type) && columnStatistics.getStringStatistics() != null) {
        return createDomain(type, hasNullValue, columnStatistics.getStringStatistics());
    } else if (type.getTypeSignature().getBase().equals(StandardTypes.DATE)
            && columnStatistics.getDateStatistics() != null) {
        // Dates are represented as days-since-epoch longs.
        return createDomain(type, hasNullValue, columnStatistics.getDateStatistics(), value -> (long) value);
    } else if (type.getJavaType() == long.class && columnStatistics.getIntegerStatistics() != null) {
        return createDomain(type, hasNullValue, columnStatistics.getIntegerStatistics());
    } else if (type.getJavaType() == double.class && columnStatistics.getDoubleStatistics() != null) {
        return createDomain(type, hasNullValue, columnStatistics.getDoubleStatistics());
    } else if (REAL.equals(type) && columnStatistics.getDoubleStatistics() != null) {
        // REAL values are stored as the int bits of the float, widened to long.
        return createDomain(type, hasNullValue, columnStatistics.getDoubleStatistics(),
            value -> (long) floatToRawIntBits(value.floatValue()));
    }
    return Domain.create(ValueSet.all(type), hasNullValue);
}
/**
 * Exercises getDomain for DATE columns across every statistics shape:
 * empty files, missing stats, null-only, bounded/half-bounded ranges,
 * and ranges that additionally admit nulls.
 */
@Test
public void testDate() {
    // No rows / no usable statistics.
    assertEquals(getDomain(DATE, 0, null), Domain.none(DATE));
    assertEquals(getDomain(DATE, 10, null), Domain.all(DATE));
    assertEquals(getDomain(DATE, 0, dateColumnStats(null, null, null)), Domain.none(DATE));
    assertEquals(getDomain(DATE, 0, dateColumnStats(0L, null, null)), Domain.none(DATE));
    assertEquals(getDomain(DATE, 0, dateColumnStats(0L, 100, 100)), Domain.none(DATE));
    // All-null and no-null columns.
    assertEquals(getDomain(DATE, 10, dateColumnStats(0L, null, null)), onlyNull(DATE));
    assertEquals(getDomain(DATE, 10, dateColumnStats(10L, null, null)), notNull(DATE));
    // Fully populated: point value and closed/half-open ranges (no nulls).
    assertEquals(getDomain(DATE, 10, dateColumnStats(10L, 100, 100)), singleValue(DATE, 100L));
    assertEquals(getDomain(DATE, 10, dateColumnStats(10L, 0, 100)),
        create(ValueSet.ofRanges(range(DATE, 0L, true, 100L, true)), false));
    assertEquals(getDomain(DATE, 10, dateColumnStats(10L, null, 100)),
        create(ValueSet.ofRanges(lessThanOrEqual(DATE, 100L)), false));
    assertEquals(getDomain(DATE, 10, dateColumnStats(10L, 0, null)),
        create(ValueSet.ofRanges(greaterThanOrEqual(DATE, 0L)), false));
    // Partially populated (5 of 10 values): same ranges but nulls allowed.
    assertEquals(getDomain(DATE, 10, dateColumnStats(5L, 0, 100)),
        create(ValueSet.ofRanges(range(DATE, 0L, true, 100L, true)), true));
    assertEquals(getDomain(DATE, 10, dateColumnStats(5L, null, 100)),
        create(ValueSet.ofRanges(lessThanOrEqual(DATE, 100L)), true));
    assertEquals(getDomain(DATE, 10, dateColumnStats(5L, 0, null)),
        create(ValueSet.ofRanges(greaterThanOrEqual(DATE, 0L)), true));
}
/**
 * REST endpoint returning application information as JSON or XML
 * (both advertised with an explicit UTF-8 charset).
 */
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public AppInfo get() {
    return getAppInfo();
}
/** The blacklistednodes endpoint must answer XML with UTF-8 charset. */
@Test
public void testBlacklistedNodesXML() throws Exception {
    WebResource root = resource();
    ClientResponse response = root.path("ws").path("v1").path("mapreduce")
        .path("blacklistednodes").accept(MediaType.APPLICATION_XML)
        .get(ClientResponse.class);
    // Content type must advertise XML with an explicit UTF-8 charset.
    assertEquals(MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8,
        response.getType().toString());
    String xmlBody = response.getEntity(String.class);
    verifyBlacklistedNodesInfoXML(xmlBody, appContext);
}
/**
 * Returns the keys of all dispatchers whose metadata defines the given
 * property, optionally restricted to a specific property value.
 *
 * @param propertyKey   metadata property to look up
 * @param propertyValue required value, or {@code null} to accept any value
 * @return immutable list of matching dispatcher keys
 */
public List<String> getDispatcherKeysForProperty(String propertyKey, @Nullable String propertyValue) {
    ImmutableList.Builder<String> matching = ImmutableList.builder();
    for (NotificationDispatcherMetadata meta : dispatchersMetadata) {
        String value = meta.getProperty(propertyKey);
        boolean matches = value != null && (propertyValue == null || value.equals(propertyValue));
        if (matches) {
            matching.add(meta.getDispatcherKey());
        }
    }
    return matching.build();
}
/** Filtering by property value "true" must select exactly the two matching dispatchers. */
@Test
public void shouldReturnDispatcherKeysForSpecificPropertyValue() {
    assertThat(underTest.getDispatcherKeysForProperty("global", "true"))
        .containsOnly("Dispatcher1", "Dispatcher2");
}
/**
 * REST endpoint returning node information as JSON or XML
 * (both advertised with an explicit UTF-8 charset).
 */
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public NodeInfo get() {
    return getNodeInfo();
}
/** Without an Accept header, the node endpoint defaults to JSON + UTF-8. */
@Test
public void testNodeDefault() throws JSONException, Exception {
    WebResource root = resource();
    ClientResponse response = root.path("ws").path("v1").path("node")
        .get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
        response.getType().toString());
    JSONObject body = response.getEntity(JSONObject.class);
    verifyNodeInfo(body);
}
/**
 * Looks up the storage node hosted on {@code hostName} for the given
 * application and returns its node index derived from its config id.
 *
 * @throws IllegalArgumentException when no storage node runs on that host
 */
public static int getStorageNodeIndex(ApplicationInstance application, HostName hostName) {
    ServiceInstance storageNode = getStorageNodeAtHost(application, hostName)
        .orElseThrow(() -> new IllegalArgumentException(
            "Failed to find a storage node for application "
                + application.applicationInstanceId() + " at host " + hostName));
    return getStorageNodeIndex(storageNode.configId());
}
/** The node index embedded in a storage-node config id must be extracted. */
@Test
public void testGetStorageNodeIndex() {
    ConfigId storageConfigId = TestUtil.storageNodeConfigId(CONTENT_CLUSTER_ID.toString(), 3);
    assertEquals(3, VespaModelUtil.getStorageNodeIndex(storageConfigId));
}
/**
 * Encodes a DNS domain name into the buffer using standard label encoding
 * (length-prefixed labels, terminated by a zero byte).
 */
protected void encodeName(String name, ByteBuf buf) throws Exception {
    DnsCodecUtil.encodeDomainName(name, buf);
}
/** "netty.io." encodes as [5]netty[2]io[0] — length-prefixed labels plus root. */
@Test
public void testEncodeName() throws Exception {
    testEncodeName(new byte[] { 5, 'n', 'e', 't', 't', 'y', 2, 'i', 'o', 0 }, "netty.io.");
}
/**
 * Builds a JTS polygon (no holes) from the way's coordinate sequence.
 */
static Polygon buildPolygon(TDWay way) {
    // Outer shell from the way's coordinates; second argument (holes) is none.
    return GEOMETRY_FACTORY.createPolygon(
        GEOMETRY_FACTORY.createLinearRing(JTSUtils.toCoordinates(way)), null);
}
/** An invalid (self-intersecting) WKT polygon must round-trip but stay invalid. */
@Test
public void testBuildInvalidPolygon() {
    String testfile = "invalid-polygon.wkt";
    List<TDWay> parsedWays = MockingUtils.wktPolygonToWays(testfile);
    Polygon built = JTSUtils.buildPolygon(parsedWays.get(0));
    Geometry expected = MockingUtils.readWKTFile(testfile);
    // The geometry is deliberately invalid, yet must equal the WKT source.
    Assert.isTrue(!built.isValid());
    Assert.equals(expected, built);
}
/**
 * Creates or updates a metadata entry; requires either the add or edit
 * permission.
 *
 * @param metaDataDTO validated metadata payload
 * @return success result wrapping the service's status message
 */
@PostMapping("/createOrUpdate")
@RequiresPermissions(value = {"system:meta:add", "system:meta:edit"}, logical = Logical.OR)
public ShenyuAdminResult createOrUpdate(@Valid @RequestBody final MetaDataDTO metaDataDTO) {
    return ShenyuAdminResult.success(metaDataService.createOrUpdate(metaDataDTO));
}
/**
 * Posts a metadata DTO to /meta-data/createOrUpdate and asserts the
 * mocked service's success message is surfaced in the JSON response.
 */
@Test
public void testCreateOrUpdate() throws Exception {
    final MetaDataDTO metaDataDTO = new MetaDataDTO();
    metaDataDTO.setId("0001");
    metaDataDTO.setAppName("aname-01");
    metaDataDTO.setContextPath("path");
    metaDataDTO.setPath("/path");
    metaDataDTO.setRpcType("rpcType");
    metaDataDTO.setMethodName("methodName");
    metaDataDTO.setServiceName("serviceName");
    metaDataDTO.setRuleName("ruleName");
    metaDataDTO.setEnabled(false);
    // Wire the static SpringBeanUtils lookup to our mocked mapper.
    SpringBeanUtils.getInstance().setApplicationContext(mock(ConfigurableApplicationContext.class));
    when(SpringBeanUtils.getInstance().getBean(MetaDataMapper.class)).thenReturn(metaDataMapper);
    when(metaDataMapper.existed(metaDataDTO.getId())).thenReturn(true);
    given(this.metaDataService.createOrUpdate(metaDataDTO)).willReturn(ShenyuResultMessage.UPDATE_SUCCESS);
    this.mockMvc.perform(MockMvcRequestBuilders.post("/meta-data/createOrUpdate")
            .contentType(MediaType.APPLICATION_JSON)
            .content(GsonUtils.getInstance().toJson(metaDataDTO)))
        .andExpect(status().isOk())
        .andExpect(jsonPath("$.message", is(ShenyuResultMessage.UPDATE_SUCCESS)))
        .andReturn();
}
/**
 * Convenience factory for a Kafka read whose keys and values are raw
 * byte arrays.
 */
public static Read<byte[], byte[]> readBytes() {
    final Read<byte[], byte[]> rawRead = KafkaIO.<byte[], byte[]>read();
    return rawRead
        .withKeyDeserializer(ByteArrayDeserializer.class)
        .withValueDeserializer(ByteArrayDeserializer.class);
}
/**
 * Verifies the display data emitted for a bytes source configured with
 * explicit topic partitions and a custom consumer factory.
 */
@Test
public void testSourceWithExplicitPartitionsDisplayData() {
    KafkaIO.Read<byte[], byte[]> read =
        KafkaIO.readBytes()
            .withBootstrapServers("myServer1:9092,myServer2:9092")
            .withTopicPartitions(
                ImmutableList.of(new TopicPartition("test", 5), new TopicPartition("test", 6)))
            .withConsumerFactoryFn(
                new ConsumerFactoryFn(
                    Lists.newArrayList("test"), 10, 10, OffsetResetStrategy.EARLIEST)); // 10 partitions

    DisplayData displayData = DisplayData.from(read);

    assertThat(displayData, hasDisplayItem("topicPartitions", "test-5,test-6"));
    assertThat(displayData, hasDisplayItem("enable.auto.commit", false));
    assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServer1:9092,myServer2:9092"));
    assertThat(displayData, hasDisplayItem("auto.offset.reset", "latest"));
    assertThat(displayData, hasDisplayItem("receive.buffer.bytes", 524288));
}
public static Map<String, Object> beanToMap(Object bean, String... properties) { int mapSize = 16; Editor<String> keyEditor = null; if (ArrayUtil.isNotEmpty(properties)) { mapSize = properties.length; final Set<String> propertiesSet = CollUtil.set(false, properties); keyEditor = property -> propertiesSet.contains(property) ? property : null; } // 指明了要复制的属性 所以不忽略null值 return beanToMap(bean, new LinkedHashMap<>(mapSize, 1), false, keyEditor); }
/** Alias annotations must rename keys when a bean is converted to a map. */
@Test
public void beanToMapWithAliasTest() {
    final SubPersonWithAlias aliasedPerson = new SubPersonWithAlias();
    aliasedPerson.setAge(14);
    aliasedPerson.setOpenid("11213232");
    aliasedPerson.setName("测试A11");
    aliasedPerson.setSubName("sub名字");
    aliasedPerson.setSlow(true);
    aliasedPerson.setBooleana(true);
    aliasedPerson.setBooleanb(true);

    final Map<String, Object> result = BeanUtil.beanToMap(aliasedPerson);
    // subName is exposed under its alias key.
    assertEquals("sub名字", result.get("aliasSubName"));
}
/**
 * Classifies authorization failures (direct or wrapped in a
 * StreamsException) as USER errors; everything else is UNKNOWN.
 */
@Override
public Type classify(final Throwable e) {
    final boolean accessDenied = e instanceof AuthorizationException
        || (e instanceof StreamsException && e.getCause() instanceof AuthorizationException);
    final Type type = accessDenied ? Type.USER : Type.UNKNOWN;
    if (type == Type.USER) {
        LOG.info(
            "Classified error as USER error based on missing access rights."
                + " Query ID: {} Exception: {}",
            queryId,
            e);
    }
    return type;
}
@Test public void shouldClassifyAuthorizationExceptionAsUserError() { // Given: final Exception e = new AuthorizationException("foo"); // When: final Type type = new AuthorizationClassifier("").classify(e); // Then: assertThat(type, is(Type.USER)); }
/**
 * Creates a bundle for {@code output}, wrapped so that elements are
 * cloned via the coder on add/commit.
 */
@Override
public <T> UncommittedBundle<T> createBundle(PCollection<T> output) {
    UncommittedBundle<T> delegate = underlying.createBundle(output);
    return new CloningBundle<>(delegate);
}
/**
 * With a working coder, committed elements must be equal to — but not the
 * same instances as — the added elements, proving they were cloned.
 */
@Test
public void bundleWorkingCoderSucceedsClonesOutput() {
    PCollection<Integer> created = p.apply(Create.of(1, 3).withCoder(VarIntCoder.of()));
    PCollection<KV<String, Integer>> kvs =
        created
            .apply(WithKeys.of("foo"))
            .setCoder(KvCoder.of(StringUtf8Coder.of(), VarIntCoder.of()));
    WindowedValue<KV<String, Integer>> fooOne = WindowedValue.valueInGlobalWindow(KV.of("foo", 1));
    WindowedValue<KV<String, Integer>> fooThree = WindowedValue.valueInGlobalWindow(KV.of("foo", 3));
    CommittedBundle<KV<String, Integer>> bundle =
        factory.createBundle(kvs).add(fooOne).add(fooThree).commit(Instant.now());

    // Equal by value...
    assertThat(bundle.getElements(), containsInAnyOrder(fooOne, fooThree));
    // ...but not identical instances (the bundle cloned them).
    assertThat(
        bundle.getElements(),
        not(containsInAnyOrder(theInstance(fooOne), theInstance(fooThree))));
    for (WindowedValue<KV<String, Integer>> foo : bundle.getElements()) {
        // Payloads were cloned as well, not just the windowed wrappers.
        assertThat(
            foo.getValue(),
            not(anyOf(theInstance(fooOne.getValue()), theInstance(fooThree.getValue()))));
    }
    assertThat(bundle.getPCollection(), equalTo(kvs));
}
/**
 * Reads the optional Jetty tuning options from the config XML node.
 *
 * @param node parent configuration node
 * @return map of Kettle Jetty property keys to values, or {@code null}
 *         when the jetty options element is absent
 */
protected Map<String, String> parseJettyOptions( Node node ) {
  Map<String, String> jettyOptions = null;
  Node jettyOptionsNode = XMLHandler.getSubNode( node, XML_TAG_JETTY_OPTIONS );
  if ( jettyOptionsNode != null ) {
    jettyOptions = new HashMap<String, String>();
    // Each tag value is fetched once and reused (previously fetched twice).
    String acceptors = XMLHandler.getTagValue( jettyOptionsNode, XML_TAG_ACCEPTORS );
    if ( acceptors != null ) {
      jettyOptions.put( Const.KETTLE_CARTE_JETTY_ACCEPTORS, acceptors );
    }
    String acceptQueueSize = XMLHandler.getTagValue( jettyOptionsNode, XML_TAG_ACCEPT_QUEUE_SIZE );
    if ( acceptQueueSize != null ) {
      jettyOptions.put( Const.KETTLE_CARTE_JETTY_ACCEPT_QUEUE_SIZE, acceptQueueSize );
    }
    String lowResMaxIdleTime = XMLHandler.getTagValue( jettyOptionsNode, XML_TAG_LOW_RES_MAX_IDLE_TIME );
    if ( lowResMaxIdleTime != null ) {
      jettyOptions.put( Const.KETTLE_CARTE_JETTY_RES_MAX_IDLE_TIME, lowResMaxIdleTime );
    }
  }
  return jettyOptions;
}
/** A config containing only the acceptors option yields a one-entry map. */
@Test
public void testParseJettyOption_Acceptors() throws KettleXMLException {
    Node configNode = getConfigNode( getConfigWithAcceptorsOnlyOption() );
    Map<String, String> options = slServerConfig.parseJettyOptions( configNode );
    assertNotNull( options );
    assertEquals( 1, options.size() );
    assertTrue( "Expected containing key=" + EXPECTED_ACCEPTORS_KEY,
        options.containsKey( EXPECTED_ACCEPTORS_KEY ) );
    assertEquals( EXPECTED_ACCEPTORS_VALUE, options.get( EXPECTED_ACCEPTORS_KEY ) );
}
/**
 * Returns true when the plugin either declares no target operating
 * systems (runs anywhere) or lists the given OS (case-insensitively).
 */
public boolean isCurrentOSValidForThisPlugin(String currentOS) {
    // No metadata or an empty target list means the plugin is OS-agnostic.
    if (about == null || about.targetOperatingSystems.isEmpty()) {
        return true;
    }
    return about.targetOperatingSystems.stream()
        .anyMatch(targetOS -> targetOS.equalsIgnoreCase(currentOS));
}
/**
 * Covers all target-OS matching cases: no targets (always valid), exact
 * match, mismatch, and multi-target lists with and without the current OS.
 */
@Test
void shouldMatchValidOSesAgainstCurrentOS() {
    assertThat(descriptorWithTargetOSes().isCurrentOSValidForThisPlugin("Linux")).isTrue();
    assertThat(descriptorWithTargetOSes().isCurrentOSValidForThisPlugin("Windows")).isTrue();
    assertThat(descriptorWithTargetOSes("Linux").isCurrentOSValidForThisPlugin("Linux")).isTrue();
    assertThat(descriptorWithTargetOSes("Windows").isCurrentOSValidForThisPlugin("Linux")).isFalse();
    assertThat(descriptorWithTargetOSes("Windows", "Linux").isCurrentOSValidForThisPlugin("Linux")).isTrue();
    assertThat(descriptorWithTargetOSes("Windows", "SunOS", "Mac OS X").isCurrentOSValidForThisPlugin("Linux")).isFalse();
}
/**
 * URL-decodes the given string using UTF-8; null passes through.
 *
 * @param input the value to decode, possibly {@code null}
 * @return the decoded value, or {@code null} for null input
 * @throws KsqlFunctionException when decoding fails
 */
@Udf
public String decodeParam(
    @UdfParameter(description = "the value to decode") final String input) {
    if (input == null) {
        return null;
    }
    try {
        return URLDecoder.decode(input, UTF_8.name());
    } catch (final UnsupportedEncodingException e) {
        // UTF-8 is guaranteed by the JLS, so this branch should be unreachable.
        throw new KsqlFunctionException(
            "url_decode udf encountered an encoding exception while decoding: " + input, e);
    }
}
/** A null input must pass straight through as null. */
@Test
public void shouldReturnForNullValue() {
    assertThat(decodeUdf.decodeParam(null), is(nullValue()));
}
@Override public void handleRequest(RestRequest request, RequestContext requestContext, Callback<RestResponse> callback) { //This code path cannot accept content types or accept types that contain //multipart/related. This is because these types of requests will usually have very large payloads and therefore //would degrade server performance since RestRequest reads everything into memory. if (!isMultipart(request, requestContext, callback)) { _restRestLiServer.handleRequest(request, requestContext, callback); } }
/**
 * A RestRequest whose Accept header requests multipart/related must be
 * rejected with a 406 RestException carrying an explanatory body.
 */
@Test
public void testRestRequestResponseAttachmentsDesired() throws Exception {
    //This test verifies that a RestRequest sent to the RestLiServer throws an exception if the accept type
    //includes multipart related
    RestRequest acceptTypeMultiPartRelated = new RestRequestBuilder(new URI("/statuses/abcd"))
        .setHeader(RestConstants.HEADER_ACCEPT, RestConstants.HEADER_VALUE_MULTIPART_RELATED).build();
    Callback<RestResponse> callback = new Callback<RestResponse>() {
        @Override
        public void onSuccess(RestResponse restResponse) {
            // Success would mean the invalid accept type was not rejected.
            fail();
        }

        @Override
        public void onError(Throwable e) {
            // Expect a 406 Not Acceptable with a descriptive message.
            assertTrue(e instanceof RestException);
            RestException restException = (RestException) e;
            RestResponse restResponse = restException.getResponse();
            assertEquals(restResponse.getStatus(), 406);
            assertTrue(restResponse.getEntity().length() > 0);
            assertEquals(restResponse.getEntity().asString(Charset.defaultCharset()),
                "This server cannot handle requests with an accept type of multipart/related");
        }
    };
    _server.handleRequest(acceptTypeMultiPartRelated, new RequestContext(), callback);
}
public String send() throws MailException { try { return doSend(); } catch (MessagingException e) { if (e instanceof SendFailedException) { // 当地址无效时,显示更加详细的无效地址信息 final Address[] invalidAddresses = ((SendFailedException) e).getInvalidAddresses(); final String msg = StrUtil.format("Invalid Addresses: {}", ArrayUtil.toString(invalidAddresses)); throw new MailException(msg, e); } throw new MailException(e); } }
// Manual/integration test (disabled): sends a real HTML mail through a
// hard-coded yeah.net SMTP account; not part of the automated suite.
@Test
@Disabled
public void sendByAccountTest() {
    MailAccount account = new MailAccount();
    account.setHost("smtp.yeah.net");
    account.setPort(465);
    account.setSslEnable(true);
    account.setFrom("hutool@yeah.net");
    account.setUser("hutool");
    account.setPass("q1w2e3");
    MailUtil.send(account, "hutool@foxmail.com", "测试", "<h1>邮件来自Hutool测试</h1>", true);
}
/**
 * Closes this producer, blocking without a practical time limit.
 * Delegates to {@link #close(Duration)} with an effectively infinite timeout.
 */
@Override
public void close() {
    final Duration effectivelyInfinite = Duration.ofMillis(Long.MAX_VALUE);
    close(effectivelyInfinite);
}
// close(Duration) must reject a negative timeout with IllegalArgumentException.
@Test
public void closeWithNegativeTimestampShouldThrow() {
    Properties producerProps = new Properties();
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    try (Producer<byte[], byte[]> producer = new KafkaProducer<>(producerProps, new ByteArraySerializer(), new ByteArraySerializer())) {
        assertThrows(IllegalArgumentException.class, () -> producer.close(Duration.ofMillis(-100)));
    }
}
/**
 * Formats the given type descriptor and returns it as a string.
 *
 * @param type the type descriptor to format
 * @return the formatted type text
 * @throws AssertionError if the writer unexpectedly throws an IOException
 *         (StringWriter never does, so this indicates a programming error)
 */
public String getType(CharSequence type) {
    StringWriter writer = new StringWriter();
    try {
        getWriter(writer).writeType(type);
    } catch (IOException e) {
        // Fix: preserve the cause instead of discarding it (was AssertionError(String)).
        throw new AssertionError("Unexpected IOException", e);
    }
    return writer.toString();
}
// getType should delegate to the writer's writeType and return what it wrote.
@Test
public void testGetType() throws IOException {
    TestDexFormatter formatter = new TestDexFormatter();
    Assert.assertEquals(
        "type",
        formatter.getType("mock type"));
}
/**
 * Renders this SCM's configuration for display, limited to the properties
 * the metadata store allows to be shown. When the backing plugin is not
 * installed, the rendered text is prefixed with a warning.
 */
public String getConfigForDisplay() {
    final String pluginId = getPluginId();
    final SCMMetadataStore metadataStore = SCMMetadataStore.getInstance();
    final List<ConfigurationProperty> displayProperties =
            ConfigurationDisplayUtil.getConfigurationPropertiesToBeUsedForDisplay(metadataStore, pluginId, configuration);
    final String rendered = configuration.forDisplay(displayProperties);
    if (metadataStore.hasPlugin(pluginId)) {
        return rendered;
    }
    // Make it obvious to the viewer that the plugin backing this SCM is missing.
    return "WARNING! Plugin missing. " + rendered;
}
// Only properties that are part of the SCM's identity AND non-secure should
// appear in the display string when the plugin is installed.
@Test
void shouldOnlyDisplayFieldsWhichAreNonSecureAndPartOfIdentityInGetConfigForDisplayWhenPluginExists() {
    SCMConfigurations scmConfiguration = new SCMConfigurations();
    scmConfiguration.add(new SCMConfiguration("key1").with(PART_OF_IDENTITY, true).with(SECURE, false));
    scmConfiguration.add(new SCMConfiguration("key2").with(PART_OF_IDENTITY, false).with(SECURE, false));
    scmConfiguration.add(new SCMConfiguration("key3").with(PART_OF_IDENTITY, true).with(SECURE, true));
    scmConfiguration.add(new SCMConfiguration("key4").with(PART_OF_IDENTITY, false).with(SECURE, true));
    scmConfiguration.add(new SCMConfiguration("key5").with(PART_OF_IDENTITY, true).with(SECURE, false));
    SCMMetadataStore.getInstance().addMetadataFor("plugin-id", scmConfiguration, null);

    Configuration configuration = new Configuration(create("key1", false, "value1"), create("key2", false, "value2"),
        create("key3", true, "value3"), create("key4", true, "value4"), create("key5", false, "value5"));
    SCM scm = SCMMother.create("scm", "scm-name", "plugin-id", "1.0", configuration);

    // key2/key4 are not part of identity, key3 is secure -> only key1 and key5 shown.
    assertThat(scm.getConfigForDisplay()).isEqualTo("[key1=value1, key5=value5]");
}
// Returns the pre-built filter scope for this nested-field top aggregation.
// The same instance is handed out on every call (it is a stored field).
@Override
public NestedFieldFilterScope<T> getFilterScope() {
    return filterScope;
}
// getFilterScope must hand out one shared instance across many calls.
@Test
public void getFilterScope_always_returns_the_same_instance() {
    String fieldName = randomAlphabetic(5);
    String nestedFieldName = randomAlphabetic(6);
    String value = randomAlphabetic(7);
    boolean sticky = RANDOM.nextBoolean();
    NestedFieldTopAggregationDefinition<String> underTest =
        new NestedFieldTopAggregationDefinition<>(fieldName + "." + nestedFieldName, value, sticky);

    // Collect the result of many calls into a Set: a single distinct instance expected.
    Set<TopAggregationDefinition.FilterScope> filterScopes = IntStream.range(0, 2 + RANDOM.nextInt(200))
        .mapToObj(i -> underTest.getFilterScope())
        .collect(Collectors.toSet());

    assertThat(filterScopes).hasSize(1);
}
/**
 * Parses the supplied schema text into table elements, resolving custom types
 * through the given type registry.
 */
public static TableElements parse(final String schema, final TypeRegistry typeRegistry) {
    final SchemaParser schemaParser = new SchemaParser(typeRegistry);
    return schemaParser.parse(schema);
}
// Back-quoted identifiers keep their exact (mixed) casing.
@Test
public void shouldParseQuotedMixedCase() {
    // Given:
    final String schema = "`End` VARCHAR";

    // When:
    final TableElements elements = parser.parse(schema);

    // Then:
    assertThat(elements, hasItem(
        new TableElement(ColumnName.of("End"), new Type(SqlTypes.STRING))
    ));
}
/**
 * @return the recorded content hash of the written object, or empty when no
 *         hash has been captured
 */
@Override
public Optional<String> getContentHash() {
    return mContentHash == null ? Optional.empty() : Optional.of(mContentHash);
}
// Closing an (empty) stream must not start/complete a multipart upload, and
// the content hash of the empty object should still be exposed.
@Test
public void close() throws Exception {
    mStream.close();
    Mockito.verify(mMockOssClient, never())
        .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
    Mockito.verify(mMockOssClient, never())
        .completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
    assertTrue(mStream.getContentHash().isPresent());
    assertEquals("emptyTag", mStream.getContentHash().get());
}
/**
 * Returns the hedge registered under {@code name}, creating it with this
 * registry's default configuration and no extra tags when absent.
 */
@Override
public Hedge hedge(final String name) {
    // Delegate to the full factory overload with registry defaults.
    return this.hedge(name, this.getDefaultConfig(), emptyMap());
}
// A null name must be rejected with an NPE even when a config supplier is given.
@Test
public void hedgeNewWithNullNameAndConfigSupplier() {
    exception.expect(NullPointerException.class);
    exception.expectMessage(NAME_MUST_NOT_BE_NULL);
    HedgeRegistry registry = HedgeRegistry.builder().withDefaultConfig(config).build();
    registry.hedge(null, () -> config);
}
/**
 * @return a windowed table view that wraps the inner materialization so reads
 *         pass through KSQL's filtering/projection pipeline
 */
@Override
public MaterializedWindowedTable windowed() {
    return new KsqlMaterializedWindowedTable(this.inner.windowed());
}
// Rows from the inner windowed table must come back in the same order.
@Test
public void shouldMaintainResultOrdering() {
    // Given:
    final MaterializedWindowedTable table = materialization.windowed();
    givenNoopFilter();
    when(project.apply(any(), any(), any())).thenReturn(Optional.of(transformed));

    final Instant now = Instant.now();
    // Deliberately not in chronological order, so ordering by time would differ
    // from insertion order.
    final TimeWindow window1 =
        new TimeWindow(now.plusMillis(10).toEpochMilli(), now.plusMillis(11).toEpochMilli());
    final SessionWindow window2 =
        new SessionWindow(now.toEpochMilli(), now.plusMillis(2).toEpochMilli());
    final TimeWindow window3 =
        new TimeWindow(now.toEpochMilli(), now.plusMillis(3).toEpochMilli());

    final ImmutableList<WindowedRow> rows = ImmutableList.of(
        WindowedRow.of(schema, new Windowed<>(aKey, window1), aValue, aRowtime),
        WindowedRow.of(schema, new Windowed<>(aKey, window2), aValue, aRowtime),
        WindowedRow.of(schema, new Windowed<>(aKey, window3), aValue, aRowtime)
    );

    when(innerWindowed.get(any(), anyInt(), any(), any(), any())).thenReturn(
        KsMaterializedQueryResult.rowIteratorWithPosition(rows.iterator(), position));

    // When:
    final Iterator<WindowedRow> result =
        table.get(aKey, partition, windowStartBounds, windowEndBounds);

    // Then:
    assertThat(result.hasNext(), is(true));
    final List<WindowedRow> resultList = Lists.newArrayList(result);
    assertThat(resultList, hasSize(rows.size()));
    assertThat(resultList.get(0).windowedKey().window(), is(window1));
    assertThat(resultList.get(1).windowedKey().window(), is(window2));
    assertThat(resultList.get(2).windowedKey().window(), is(window3));
}
/**
 * Looks up a counter by its name.
 *
 * @param counterName the name to search for
 * @return the first counter with that name, or null when none matches
 */
public Counter getCounterByName(String counterName) {
    // A linear scan is fine: a collector holds only a handful of counters.
    return counters.stream()
            .filter(counter -> counter.getName().equals(counterName))
            .findFirst()
            .orElse(null);
}
// Lookup by name returns the counter when present and null otherwise.
@Test
public void testGetCounterByName() {
    final Counter counter = createCounter();
    final Collector collector = new Collector("test collector2", Collections.singletonList(counter));
    assertNotNull("getCounterByName", collector.getCounterByName(counter.getName()));
    assertNull("getCounterByName", collector.getCounterByName("unknown"));
}
/**
 * Handles the user choosing RDA as activation method: logs the choice,
 * rejects accounts without a BSN, and otherwise records RDA as the
 * activation method on the session.
 */
@Override
public AppResponse process(Flow flow, AppRequest body) {
    // Remote-log code 1218: RDA activation method chosen.
    digidClient.remoteLog("1218", getAppDetails());

    if (!appSession.getWithBsn()) {
        // Accounts without a BSN cannot use RDA; log code 1345 and reject.
        digidClient.remoteLog("1345", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId()));
        return new NokResponse("no_bsn_on_account");
    }
    appSession.setActivationMethod(ActivationMethod.RDA);
    // Options: kiosk, upgrade_rda_widchecker, app?
    // For logging in iapi /confirm from rda server
    appSession.setRdaAction("app");

    return new OkResponse();
}
// An account without a BSN must be rejected with 'no_bsn_on_account' after the
// relevant remote-log entries (1218 choice log, 1345 rejection log).
@Test
void processWithoutBsn() {
    mockedAppSession.setWithBsn(false);

    AppResponse appResponse = rdaChosen.process(mockedFlow, mockedAbstractAppRequest);

    verify(digidClientMock, times(1)).remoteLog("1218",
        Map.of(lowerUnderscore(ACCOUNT_ID), mockedAppSession.getAccountId(),
            lowerUnderscore(APP_CODE), "2B5A2",
            lowerUnderscore(DEVICE_NAME), mockedAppAuthenticator.getDeviceName()));
    verify(digidClientMock, times(1)).remoteLog("1345",
        Map.of(lowerUnderscore(ACCOUNT_ID), mockedAppSession.getAccountId()));
    assertTrue(appResponse instanceof NokResponse);
    assertEquals("no_bsn_on_account", ((NokResponse) appResponse).getError());
}
/**
 * Best-effort release of the underlying reactive Redis connections.
 * This method must never throw; failures while closing are swallowed.
 */
@Override
public void close() {
    if (Objects.isNull(this.redisTemplate)) {
        return;
    }
    final ReactiveRedisConnectionFactory connectionFactory = this.redisTemplate.getConnectionFactory();
    // Fix: close each connection in its own try block so a failure closing the
    // plain connection no longer prevents the cluster connection from being
    // closed (the original single try/catch skipped it on any earlier error).
    try {
        connectionFactory.getReactiveConnection().close();
    } catch (Exception ignored) {
        // best-effort cleanup
    }
    try {
        connectionFactory.getReactiveClusterConnection().close();
    } catch (Exception ignored) {
        // best-effort cleanup
    }
}
// close() must be a no-op on a null template, and must close both the plain
// and cluster reactive connections when a template is present.
@Test
public void closeCache() throws NoSuchFieldException, IllegalAccessException {
    final ICache cache = new RedisCache(getConfig());
    cache.close();
    // Force the template to null via reflection: close() must not NPE.
    final Field redisTemplate = RedisCache.class.getDeclaredField("redisTemplate");
    redisTemplate.setAccessible(true);
    redisTemplate.set(cache, null);
    cache.close();
    // Inject a mocked template and verify close() walks the connection factory.
    final ReactiveRedisTemplate reactiveRedisTemplate = mock(ReactiveRedisTemplate.class);
    final ReactiveRedisConnectionFactory redisConnectionFactory = mock(ReactiveRedisConnectionFactory.class);
    redisTemplate.set(cache, reactiveRedisTemplate);
    when(reactiveRedisTemplate.getConnectionFactory()).thenReturn(redisConnectionFactory);
    when(redisConnectionFactory.getReactiveClusterConnection()).thenReturn(mock(ReactiveRedisClusterConnection.class));
    when(redisConnectionFactory.getReactiveConnection()).thenReturn(mock(ReactiveRedisConnection.class));
    cache.close();
}
/**
 * @return true when this version is unknown, or when both versions are known
 *         and this one compares strictly greater than {@code version}
 */
public boolean isUnknownOrGreaterThan(Version version) {
    if (isUnknown()) {
        return true;
    }
    if (version.isUnknown()) {
        // A known version is never considered greater than an unknown one.
        return false;
    }
    return compareTo(version) > 0;
}
// Unknown always qualifies; equal or lesser known versions do not.
@Test
public void isUnknownOrGreaterThan() {
    assertTrue(V3_0.isUnknownOrGreaterThan(of(2, 0)));
    assertFalse(V3_0.isUnknownOrGreaterThan(of(3, 0)));
    assertFalse(V3_0.isUnknownOrGreaterThan(of(4, 0)));
    assertTrue(UNKNOWN.isUnknownOrGreaterThan(of(4, 0)));
}
/**
 * Builds a Connect client for the current request, combining the configured
 * Connect URL, the resolved auth header, any extension-provided headers and
 * the SSL settings derived from ksql.connect.-prefixed config overrides.
 */
@Override
public synchronized DefaultConnectClient get(
    final Optional<String> ksqlAuthHeader,
    final List<Entry<String, String>> incomingRequestHeaders,
    final Optional<KsqlPrincipal> userPrincipal
) {
    // Lazily compute the default auth header once; guarded by 'synchronized'.
    if (defaultConnectAuthHeader == null) {
        defaultConnectAuthHeader = buildDefaultAuthHeader();
    }

    final Map<String, Object> configWithPrefixOverrides =
        ksqlConfig.valuesWithPrefixOverride(KsqlConfig.KSQL_CONNECT_PREFIX);

    return new DefaultConnectClient(
        ksqlConfig.getString(KsqlConfig.CONNECT_URL_PROPERTY),
        buildAuthHeader(ksqlAuthHeader, incomingRequestHeaders),
        // Extra headers from the optional request-headers extension, keyed on the caller.
        requestHeadersExtension
            .map(extension -> extension.getHeaders(userPrincipal))
            .orElse(Collections.emptyMap()),
        Optional.ofNullable(newSslContext(configWithPrefixOverrides)),
        shouldVerifySslHostname(configWithPrefixOverrides),
        ksqlConfig.getLong(KsqlConfig.CONNECT_REQUEST_TIMEOUT_MS)
    );
}
// A custom auth header from the request-headers extension must win over the
// basic-auth header built from the credentials file.
@Test
public void shouldFavorCustomAuthHeaderOverBasicAuthHeader() throws Exception {
    // Given:
    givenCustomBasicAuthHeader();
    givenValidCredentialsFile();
    when(config.getConfiguredInstance(
        KsqlConfig.CONNECT_REQUEST_HEADERS_PLUGIN,
        ConnectRequestHeadersExtension.class))
        .thenReturn(requestHeadersExtension);
    // re-initialize client factory since request headers extension is configured in constructor
    connectClientFactory = new DefaultConnectClientFactory(config);
    when(requestHeadersExtension.getHeaders(Optional.of(userPrincipal)))
        .thenReturn(ImmutableMap.of("header", "value"));
    when(requestHeadersExtension.shouldUseCustomAuthHeader()).thenReturn(true);
    when(requestHeadersExtension.getAuthHeader(incomingRequestHeaders)).thenReturn(Optional.of("some custom auth"));

    // When:
    final DefaultConnectClient connectClient =
        connectClientFactory.get(Optional.empty(), incomingRequestHeaders, Optional.of(userPrincipal));

    // Then:
    assertThat(connectClient.getRequestHeaders(),
        arrayContaining(header(AUTH_HEADER_NAME, "some custom auth"), header("header", "value")));
}
/**
 * Reconstructs an ECKeyPair from its flat byte serialization: the private key
 * bytes immediately followed by the public key bytes.
 *
 * @param input concatenated private + public key bytes
 * @return the deserialized key pair
 * @throws IllegalArgumentException if {@code input} is not exactly
 *         PRIVATE_KEY_SIZE + PUBLIC_KEY_SIZE bytes long
 */
public static ECKeyPair deserialize(byte[] input) {
    if (input.length != PRIVATE_KEY_SIZE + PUBLIC_KEY_SIZE) {
        // Fix: use IllegalArgumentException (still a RuntimeException, so existing
        // callers are unaffected) and report the expected vs. actual length.
        throw new IllegalArgumentException(
                "Invalid input key size: expected "
                        + (PRIVATE_KEY_SIZE + PUBLIC_KEY_SIZE)
                        + " bytes but got "
                        + input.length);
    }

    BigInteger privateKey = Numeric.toBigInt(input, 0, PRIVATE_KEY_SIZE);
    BigInteger publicKey = Numeric.toBigInt(input, PRIVATE_KEY_SIZE, PUBLIC_KEY_SIZE);

    return new ECKeyPair(privateKey, publicKey);
}
// Deserializing from a wrong-sized byte array must throw a RuntimeException.
@Test
public void testDeserializeInvalidKey() {
    assertThrows(RuntimeException.class, () -> Keys.deserialize(new byte[0]));
}
/**
 * Reads the session status stored in ZooKeeper.
 *
 * @return the parsed status; {@code Session.Status.UNKNOWN} when the node is
 *         absent or the read/parse fails
 */
public Session.Status readStatus() {
    try {
        Optional<byte[]> data = curator.getData(sessionStatusPath);
        return data.map(d -> Session.Status.parse(Utf8.toString(d))).orElse(Session.Status.UNKNOWN);
    } catch (Exception e) {
        // Fix: log the exception itself so the failure cause is not lost.
        log.log(Level.INFO, "Failed to read session status from " + sessionStatusPath.getAbsolute() +
                ", returning session status 'unknown'", e);
        return Session.Status.UNKNOWN;
    }
}
// A status written under the session's state node must be read back as the enum.
@Test
public void require_that_status_is_read_from_zk() {
    int sessionId = 3;
    SessionZooKeeperClient zkc = createSessionZKClient(sessionId);
    curator.set(sessionPath(sessionId).append(SESSIONSTATE_ZK_SUBPATH), Utf8.toBytes("PREPARE"));
    assertEquals(Session.Status.PREPARE, zkc.readStatus());
}
/**
 * Converts a collection of Curator service instances into Dubbo service
 * instances, preserving iteration order.
 */
public static List<ServiceInstance> build(
        URL registryUrl,
        Collection<org.apache.curator.x.discovery.ServiceInstance<ZookeeperInstance>> instances) {
    return instances.stream()
            .map(curatorInstance -> build(registryUrl, curatorInstance))
            .collect(Collectors.toList());
}
// Round-trips a Dubbo ServiceInstance through the Curator representation and back.
@Test
void testBuild() {
    ServiceInstance dubboServiceInstance =
        new DefaultServiceInstance("A", "127.0.0.1", 8888, ApplicationModel.defaultModel());
    Map<String, String> metadata = dubboServiceInstance.getMetadata();
    metadata.put(METADATA_STORAGE_TYPE_PROPERTY_NAME, "remote");
    metadata.put(EXPORTED_SERVICES_REVISION_PROPERTY_NAME, "111");
    metadata.put("site", "dubbo");

    // convert {org.apache.dubbo.registry.client.ServiceInstance} to
    // {org.apache.curator.x.discovery.ServiceInstance<ZookeeperInstance>}
    org.apache.curator.x.discovery.ServiceInstance<ZookeeperInstance> curatorServiceInstance =
        CuratorFrameworkUtils.build(dubboServiceInstance);
    Assertions.assertEquals(curatorServiceInstance.getId(), dubboServiceInstance.getAddress());
    Assertions.assertEquals(curatorServiceInstance.getName(), dubboServiceInstance.getServiceName());
    Assertions.assertEquals(curatorServiceInstance.getAddress(), dubboServiceInstance.getHost());
    Assertions.assertEquals(curatorServiceInstance.getPort(), dubboServiceInstance.getPort());
    ZookeeperInstance payload = curatorServiceInstance.getPayload();
    Assertions.assertNotNull(payload);
    Assertions.assertEquals(payload.getMetadata(), metadata);
    Assertions.assertEquals(payload.getName(), dubboServiceInstance.getServiceName());

    // convert {org.apache.curator.x.discovery.ServiceInstance<ZookeeperInstance>} to
    // {org.apache.dubbo.registry.client.ServiceInstance}
    ServiceInstance serviceInstance = CuratorFrameworkUtils.build(registryUrl, curatorServiceInstance);
    Assertions.assertEquals(serviceInstance, dubboServiceInstance);

    // convert {Collection<org.apache.curator.x.discovery.ServiceInstance<ZookeeperInstance>>} to
    // {List<org.apache.dubbo.registry.client.ServiceInstance>}
    List<ServiceInstance> serviceInstances =
        CuratorFrameworkUtils.build(registryUrl, Arrays.asList(curatorServiceInstance));
    Assertions.assertNotNull(serviceInstances);
    Assertions.assertEquals(serviceInstances.get(0), dubboServiceInstance);
}
// Clears the locally persisted UTM (channel attribution) data by storing an
// empty string; any storage failure is logged and otherwise ignored.
public static void clearLocalUtm() {
    try {
        SAStoreManager.getInstance().setString(SHARED_PREF_UTM, "");
    } catch (Exception e) {
        SALog.printStackTrace(e);
    }
}
// Smoke test: clearing local UTM data must not throw.
@Test
public void clearLocalUtm() {
    ChannelUtils.clearLocalUtm();
}
// Returns the deserialized 'result' payload of the response (may be null).
public T getResult() {
    return result;
}
// eth_getStorageAt responses deserialize into the raw hex storage value.
@Test
public void testEthStorageAt() {
    buildResponse(
        "{\n"
            + " \"jsonrpc\":\"2.0\","
            + " \"id\":1,"
            + " \"result\":"
            + "\"0x000000000000000000000000000000000000000000000000000000000000162e\""
            + "}");

    EthGetStorageAt ethGetStorageAt = deserialiseResponse(EthGetStorageAt.class);
    assertEquals(
        ethGetStorageAt.getResult(),
        ("0x000000000000000000000000000000000000000000000000000000000000162e"));
}
/**
 * @return true when {@code name} matches any of the known Dubbo proxy class
 *         naming schemes (Alibaba Dubbo prefix, Apache Dubbo prefix, or the
 *         Dubbo 3.x partial-proxy infix)
 */
public static boolean isDubboProxyName(String name) {
    if (name.startsWith(ALIBABA_DUBBO_PROXY_NAME_PREFIX)) {
        return true;
    }
    if (name.startsWith(APACHE_DUBBO_PROXY_NAME_PREFIX)) {
        return true;
    }
    return name.contains(DUBBO_3_X_PARTIAL_PROXY_NAME);
}
// Dubbo 3.x partial-proxy class names must be recognized.
@Test
public void testIsDubbo3XPartialProxyName() {
    assertTrue(DubboUtil.isDubboProxyName(SimpleDubboProxy.class.getName()));
}
// Merges one file's metadata into the running global metadata. The trailing
// 'true' flag is presumably strict merging -- see the 3-arg overload; TODO confirm.
static GlobalMetaData mergeInto(FileMetaData toMerge, GlobalMetaData mergedMetadata) {
    return mergeInto(toMerge, mergedMetadata, true);
}
// Merging two file schemas unions their fields under the first root's name.
@Test
public void testMergeMetadata() {
    FileMetaData md1 = new FileMetaData(
        new MessageType(
            "root1", new PrimitiveType(REPEATED, BINARY, "a"), new PrimitiveType(OPTIONAL, BINARY, "b")),
        new HashMap<String, String>(),
        "test");
    FileMetaData md2 = new FileMetaData(
        new MessageType("root2", new PrimitiveType(REQUIRED, BINARY, "c")),
        new HashMap<String, String>(),
        "test2");
    GlobalMetaData merged = ParquetFileWriter.mergeInto(md2, ParquetFileWriter.mergeInto(md1, null));
    assertEquals(
        merged.getSchema(),
        new MessageType(
            "root1",
            new PrimitiveType(REPEATED, BINARY, "a"),
            new PrimitiveType(OPTIONAL, BINARY, "b"),
            new PrimitiveType(REQUIRED, BINARY, "c")));
}
/**
 * Computes a single int hash covering the TLS trusted certificates and any
 * client-authentication secrets, so callers can detect when the referenced
 * Secrets change. Certificate hashes are summed, and auth-related hashes are
 * added on top of the TLS sum.
 *
 * @param secretOperations operator used to read Secrets asynchronously
 * @param namespace        namespace holding the Secrets
 * @param auth             client authentication configuration (may be null)
 * @param certSecretSources TLS trusted certificate sources (may be null or empty)
 * @return future completing with the combined hash (0 when nothing to hash)
 */
public static Future<Integer> authTlsHash(SecretOperator secretOperations, String namespace, KafkaClientAuthentication auth, List<CertSecretSource> certSecretSources) {
    Future<Integer> tlsFuture;
    if (certSecretSources == null || certSecretSources.isEmpty()) {
        tlsFuture = Future.succeededFuture(0);
    } else {
        // get all TLS trusted certs, compute hash from each of them, sum hashes
        tlsFuture = Future.join(certSecretSources.stream().map(certSecretSource ->
                getCertificateAsync(secretOperations, namespace, certSecretSource)
                        .compose(cert -> Future.succeededFuture(cert.hashCode()))).collect(Collectors.toList()))
                .compose(hashes -> Future.succeededFuture(hashes.list().stream().mapToInt(e -> (int) e).sum()));
    }

    if (auth == null) {
        return tlsFuture;
    } else {
        // compute hash from Auth
        if (auth instanceof KafkaClientAuthenticationScram) {
            // only passwordSecret can be changed
            return tlsFuture.compose(tlsHash -> getPasswordAsync(secretOperations, namespace, auth)
                    .compose(password -> Future.succeededFuture(password.hashCode() + tlsHash)));
        } else if (auth instanceof KafkaClientAuthenticationPlain) {
            // only passwordSecret can be changed
            return tlsFuture.compose(tlsHash -> getPasswordAsync(secretOperations, namespace, auth)
                    .compose(password -> Future.succeededFuture(password.hashCode() + tlsHash)));
        } else if (auth instanceof KafkaClientAuthenticationTls) {
            // custom cert can be used (and changed)
            return ((KafkaClientAuthenticationTls) auth).getCertificateAndKey() == null ? tlsFuture :
                    tlsFuture.compose(tlsHash -> getCertificateAndKeyAsync(secretOperations, namespace, (KafkaClientAuthenticationTls) auth)
                            .compose(crtAndKey -> Future.succeededFuture(crtAndKey.certAsBase64String().hashCode() + crtAndKey.keyAsBase64String().hashCode() + tlsHash)));
        } else if (auth instanceof KafkaClientAuthenticationOAuth) {
            // OAuth: hash its own trusted certs (if any) plus each token/secret reference.
            List<Future<Integer>> futureList = ((KafkaClientAuthenticationOAuth) auth).getTlsTrustedCertificates() == null ?
                    new ArrayList<>() :
                    ((KafkaClientAuthenticationOAuth) auth).getTlsTrustedCertificates().stream().map(certSecretSource ->
                            getCertificateAsync(secretOperations, namespace, certSecretSource)
                                    .compose(cert -> Future.succeededFuture(cert.hashCode()))).collect(Collectors.toList());
            futureList.add(tlsFuture);
            futureList.add(addSecretHash(secretOperations, namespace, ((KafkaClientAuthenticationOAuth) auth).getAccessToken()));
            futureList.add(addSecretHash(secretOperations, namespace, ((KafkaClientAuthenticationOAuth) auth).getClientSecret()));
            futureList.add(addSecretHash(secretOperations, namespace, ((KafkaClientAuthenticationOAuth) auth).getRefreshToken()));
            return Future.join(futureList)
                    .compose(hashes -> Future.succeededFuture(hashes.list().stream().mapToInt(e -> (int) e).sum()));
        } else {
            // unknown Auth type
            return tlsFuture;
        }
    }
}
// With SCRAM-SHA-512 auth and no TLS certs, the hash is just the password's hashCode.
@Test
void testAuthTlsHashScramSha512SecretAndPasswordFound() {
    SecretOperator secretOperator = mock(SecretOperator.class);
    Map<String, String> data = new HashMap<>();
    data.put("passwordKey", "my-password");
    Secret secret = new Secret();
    secret.setData(data);
    CompletionStage<Secret> cf = CompletableFuture.supplyAsync(() -> secret);
    when(secretOperator.getAsync(anyString(), anyString())).thenReturn(Future.fromCompletionStage(cf));
    KafkaClientAuthenticationScramSha512 auth = new KafkaClientAuthenticationScramSha512();
    PasswordSecretSource passwordSecretSource = new PasswordSecretSource();
    passwordSecretSource.setSecretName("my-secret");
    passwordSecretSource.setPassword("passwordKey");
    auth.setPasswordSecret(passwordSecretSource);
    Future<Integer> result = VertxUtil.authTlsHash(secretOperator, "anyNamespace", auth, List.of());
    result.onComplete(handler -> {
        assertTrue(handler.succeeded());
        assertEquals("my-password".hashCode(), handler.result());
    });
}
// Returns the duplicates as stored; the ordering is whatever was established
// at construction time (tests expect Inner, then InProject, then CrossProject).
public Duplicate[] getDuplicates() {
    return this.duplicates;
}
// Duplicates must be ordered Inner, then InProject, then CrossProject,
// regardless of construction order.
@Test
public void getDuplicates_sorts_duplicates_by_Inner_then_InProject_then_CrossProject() {
    CrossProjectDuplicate crossProjectDuplicate = new CrossProjectDuplicate("some key", TEXT_BLOCK_1);
    InProjectDuplicate inProjectDuplicate = new InProjectDuplicate(FILE_COMPONENT_1, TEXT_BLOCK_1);
    InnerDuplicate innerDuplicate = new InnerDuplicate(TEXT_BLOCK_1);

    // shuffledList randomizes input order so the assertion proves sorting.
    Duplication duplication = new Duplication(
        SOME_ORIGINAL_TEXTBLOCK,
        shuffledList(crossProjectDuplicate, inProjectDuplicate, innerDuplicate));

    assertThat(duplication.getDuplicates()).containsExactly(innerDuplicate, inProjectDuplicate, crossProjectDuplicate);
}
/**
 * Removes every element not contained in {@code elementsToRetain}.
 *
 * @param elementsToRetain elements to keep
 * @return true when at least one element was removed
 */
@Override
public boolean retainAll(Collection<?> elementsToRetain) {
    boolean setChanged = false;

    // Snapshot the size so 'visited' can short-circuit the table scan once
    // every element that was live at the start has been examined.
    final int sizeBeforeRemovals = size;
    int visited = 0;
    for (int index = 0; index < table.length && visited < sizeBeforeRemovals; index++) {
        final Object storedElement = table[index];
        if (storedElement != null) {
            visited++;
            if (!elementsToRetain.contains(storedElement)) {
                // NOTE(review): assumes removeFromIndex does not relocate a
                // not-yet-visited element to a slot at or before 'index' in
                // this open-addressing table -- TODO confirm.
                removeFromIndex(index);
                setChanged = true;
            }
        }
    }

    return setChanged;
}
// Retaining a superset of the set's elements must report no change.
@Test
public void testRetainAll() {
    final OAHashSet<Integer> set = new OAHashSet<>(8);
    populateSet(set, 10);

    Collection<Integer> elementsToRetain = new ArrayList<>(10);
    for (int i = 0; i < 10; i++) {
        elementsToRetain.add(i);
    }

    final boolean retainedAll = set.retainAll(elementsToRetain);

    assertFalse("No elements should be removed", retainedAll);
    assertFalse(set.isEmpty());
}
/**
 * Creates the backend and restores it from the first restore alternative that
 * succeeds, trying each alternative in order. Failed attempts are collected
 * and chained onto the final exception.
 *
 * @param restoreOptions prioritized restore alternatives; empty means "create with empty state"
 * @param stats          collector receiving size stats for the state objects actually used
 * @return the successfully created (and possibly restored) backend
 * @throws Exception a FlinkException when all alternatives fail or the task was cancelled
 */
@Nonnull
public T createAndRestore(
        @Nonnull List<? extends Collection<S>> restoreOptions,
        @Nonnull StateObject.StateObjectSizeStatsCollector stats)
        throws Exception {
    // No alternatives degenerates to a single "empty state" attempt.
    if (restoreOptions.isEmpty()) {
        restoreOptions = Collections.singletonList(Collections.emptyList());
    }

    int alternativeIdx = 0;

    // Exceptions from failed attempts are chained via firstOrSuppressed.
    Exception collectedException = null;

    while (alternativeIdx < restoreOptions.size()) {
        Collection<S> restoreState = restoreOptions.get(alternativeIdx);
        ++alternativeIdx;

        // IMPORTANT: please be careful when modifying the log statements because they are used
        // for validation in
        // the automatic end-to-end tests. Those tests might fail if they are not aligned with
        // the log message!
        if (restoreState.isEmpty()) {
            LOG.debug("Creating {} with empty state.", logDescription);
        } else {
            if (LOG.isTraceEnabled()) {
                LOG.trace(
                        "Creating {} and restoring with state {} from alternative ({}/{}).",
                        logDescription,
                        restoreState,
                        alternativeIdx,
                        restoreOptions.size());
            } else {
                LOG.debug(
                        "Creating {} and restoring with state from alternative ({}/{}).",
                        logDescription,
                        alternativeIdx,
                        restoreOptions.size());
            }
        }

        try {
            T successfullyRestored = attemptCreateAndRestore(restoreState);

            // Obtain and report stats for the state objects used in our successful restore
            restoreState.forEach(handle -> handle.collectSizeStats(stats));

            return successfullyRestored;
        } catch (Exception ex) {
            collectedException = ExceptionUtils.firstOrSuppressed(ex, collectedException);

            // A closed registry means the task was cancelled: stop retrying immediately.
            if (backendCloseableRegistry.isClosed()) {
                throw new FlinkException(
                        "Stopping restore attempts for already cancelled task.",
                        collectedException);
            }

            LOG.warn(
                    "Exception while restoring {} from alternative ({}/{}), will retry while more "
                            + "alternatives are available.",
                    logDescription,
                    alternativeIdx,
                    restoreOptions.size(),
                    ex);
        }
    }

    throw new FlinkException(
            "Could not restore "
                    + logDescription
                    + " from any of the "
                    + restoreOptions.size()
                    + " provided restore options.",
            collectedException);
}
// When every restore alternative fails, a FlinkException must be thrown and
// every alternative must have been attempted.
@Test
void testExceptionThrownIfAllRestoresFailed() throws Exception {
    CloseableRegistry closeableRegistry = new CloseableRegistry();

    OperatorStateHandle firstFailHandle = mock(OperatorStateHandle.class);
    OperatorStateHandle secondFailHandle = mock(OperatorStateHandle.class);
    OperatorStateHandle thirdFailHandle = mock(OperatorStateHandle.class);

    List<StateObjectCollection<OperatorStateHandle>> sortedRestoreOptions =
        Arrays.asList(
            new StateObjectCollection<>(Collections.singletonList(firstFailHandle)),
            new StateObjectCollection<>(Collections.singletonList(secondFailHandle)),
            new StateObjectCollection<>(Collections.singletonList(thirdFailHandle)));

    BackendRestorerProcedure<OperatorStateBackend, OperatorStateHandle> restorerProcedure =
        new BackendRestorerProcedure<>(
            backendSupplier, closeableRegistry, "test op state backend");

    assertThatThrownBy(
            () ->
                restorerProcedure.createAndRestore(
                    sortedRestoreOptions,
                    StateObject.StateObjectSizeStatsCollector.create()))
        .isInstanceOf(FlinkException.class);

    // Each handle was opened, proving all three alternatives were tried.
    verify(firstFailHandle).openInputStream();
    verify(secondFailHandle).openInputStream();
    verify(thirdFailHandle).openInputStream();
}
/**
 * Matches raw (current analysis) issues against base (previous analysis)
 * issues using progressively looser keys. Each pass only sees issues still
 * unmatched by the earlier, stricter passes, so ordering is significant.
 */
public NonClosedTracking<RAW, BASE> trackNonClosed(Input<RAW> rawInput, Input<BASE> baseInput) {
    NonClosedTracking<RAW, BASE> tracking = NonClosedTracking.of(rawInput, baseInput);

    // 1. match by rule, line, line hash and message
    match(tracking, LineAndLineHashAndMessage::new);

    // 2. match issues with same rule, same line and same line hash, but not necessarily with same message
    match(tracking, LineAndLineHashKey::new);

    // 3. detect code moves by comparing blocks of codes
    detectCodeMoves(rawInput, baseInput, tracking);

    // 4. match issues with same rule, same message and same line hash
    match(tracking, LineHashAndMessageKey::new);

    // 5. match issues with same rule, same line and same message
    match(tracking, LineAndMessageKey::new);

    // 6. match issues with same rule and same line hash but different line and different message.
    // See SONAR-2812
    match(tracking, LineHashKey::new);

    return tracking;
}
// A base issue without a line must not break tracking of an unrelated raw issue.
@Test
public void do_not_fail_if_base_issue_without_line() {
    FakeInput baseInput = new FakeInput("H1", "H2");
    Issue base = baseInput.createIssueOnLine(1, RULE_SYSTEM_PRINT, "msg1");

    FakeInput rawInput = new FakeInput("H3", "H4", "H5");
    Issue raw = rawInput.createIssue(RULE_UNUSED_LOCAL_VARIABLE, "msg2");

    Tracking<Issue, Issue> tracking = tracker.trackNonClosed(rawInput, baseInput);

    assertThat(tracking.baseFor(raw)).isNull();
    assertThat(tracking.getUnmatchedBases()).containsOnly(base);
}
/**
 * Downloads a remote artifact and imports it as one or more service definitions.
 *
 * @param url          remote artifact location; an empty value yields 204
 * @param mainArtifact whether the artifact is the service's primary definition
 * @param secretName   optional name of a stored secret used for the download
 * @return 201 with the first imported service name/version, 400 on import
 *         errors, 500 on download errors, 204 when nothing was imported
 */
@PostMapping(value = "/artifact/download")
public ResponseEntity<String> importArtifact(@RequestParam(value = "url", required = true) String url,
        @RequestParam(value = "mainArtifact", defaultValue = "true") boolean mainArtifact,
        @RequestParam(value = "secretName", required = false) String secretName) {
    if (!url.isEmpty()) {
        List<Service> services = null;
        Secret secret = null;
        if (secretName != null) {
            // Only the first secret matching the name is used, if any.
            secret = secretRepository.findByName(secretName).stream().findFirst().orElse(null);
            log.debug("Secret {} was requested. Have we found it? {}", secretName, (secret != null));
        }
        try {
            // Download remote to local file before import.
            HTTPDownloader.FileAndHeaders fileAndHeaders =
                    HTTPDownloader.handleHTTPDownloadToFileAndHeaders(url, secret, true);
            File localFile = fileAndHeaders.getLocalFile();
            // Now try importing services.
            services = serviceService.importServiceDefinition(localFile,
                    new ReferenceResolver(url, secret, true,
                            RelativeReferenceURLBuilderFactory
                                    .getRelativeReferenceURLBuilder(fileAndHeaders.getResponseHeaders())),
                    new ArtifactInfo(url, mainArtifact));
        } catch (IOException ioe) {
            log.error("Exception while retrieving remote item " + url, ioe);
            return new ResponseEntity<>("Exception while retrieving remote item", HttpStatus.INTERNAL_SERVER_ERROR);
        } catch (MockRepositoryImportException mrie) {
            return new ResponseEntity<>(mrie.getMessage(), HttpStatus.BAD_REQUEST);
        }
        if (services != null && !services.isEmpty()) {
            return new ResponseEntity<>(
                    "{\"name\": \"" + services.get(0).getName() + ":" + services.get(0).getVersion() + "\"}",
                    HttpStatus.CREATED);
        }
    }
    return new ResponseEntity<>(HttpStatus.NO_CONTENT);
}
// When the importer yields no services, the endpoint must answer 204 No Content.
@Test
void shouldReturnNoContentWhenTheServiceHasNotBeenCreatedNullValue() throws MockRepositoryImportException {
    // arrange
    Mockito.when(serviceService.importServiceDefinition(Mockito.any(File.class), Mockito.any(ReferenceResolver.class),
        Mockito.any(ArtifactInfo.class))).thenReturn(null);
    String wrongUrl = "https://raw.githubusercontent.com/microcks/microcks/master/samples/APIPastry-openapi.yaml";

    // act
    ResponseEntity<String> responseEntity = sut.importArtifact(wrongUrl, false, null);

    // assert
    Assertions.assertThat(responseEntity.getStatusCode()).isEqualTo(HttpStatus.NO_CONTENT);
}
/**
 * AES-encrypts {@code text} with the Base64-encoded {@code key}.
 * The returned Base64 string encodes the cipher IV followed by the ciphertext
 * (the buffer is sized IV_LENGTH + plaintext length + LENGTH, where LENGTH is
 * presumably the cipher's tag/padding overhead -- see the class constants).
 *
 * @return the Base64 result, or empty on any crypto/encoding failure
 */
public static Optional<String> encrypt(String key, String text) {
    try {
        final Cipher cipher = Cipher.getInstance(AES_PADDING);
        final byte[] keyBytes = Base64.getDecoder().decode(key.getBytes(DEFAULT_ENCODE));
        final SecretKey secretKey = new SecretKeySpec(keyBytes, ALGORITHM);
        cipher.init(Cipher.ENCRYPT_MODE, secretKey);
        // Encode the plaintext once and reuse it for both encryption and sizing.
        final byte[] plainBytes = text.getBytes(DEFAULT_ENCODE);
        final byte[] encryptBytes = cipher.doFinal(plainBytes);
        final byte[] bytes = new byte[IV_LENGTH + plainBytes.length + LENGTH];
        System.arraycopy(cipher.getIV(), 0, bytes, 0, IV_LENGTH);
        System.arraycopy(encryptBytes, 0, bytes, IV_LENGTH, encryptBytes.length);
        return Optional.of(new String(Base64.getEncoder().encode(bytes), DEFAULT_ENCODE));
    } catch (IOException | GeneralSecurityException e) {
        return Optional.empty();
    }
}
// Round-trip: text encrypted with a freshly generated key must decrypt back.
@Test
void encrypt() {
    Optional<String> optional = AesUtil.generateKey();
    Assertions.assertTrue(optional.isPresent());
    String key = optional.get();
    Optional<String> encryptTextOptional = AesUtil.encrypt(key, TEXT);
    Assertions.assertTrue(encryptTextOptional.isPresent());
    Optional<String> decryptTextOptional = AesUtil.decrypt(key, encryptTextOptional.get());
    Assertions.assertTrue(decryptTextOptional.isPresent());
    Assertions.assertEquals(decryptTextOptional.get(), TEXT);
}
// Factory for the COUNT_IF combiner; CountIfFn keeps its running count in a
// long[] accumulator (see the corresponding tests).
public static Combine.CombineFn<Boolean, ?, Long> combineFn() {
    return new CountIfFn();
}
// A fresh accumulator must start counting at zero.
@Test
public void testCreatesEmptyAccumulator() {
    long[] accumulator = (long[]) CountIf.combineFn().createAccumulator();

    assertEquals(0, accumulator[0]);
}
// Dispatches the SHOW statement to the global state's show-executor visitor.
public static ShowResultSet execute(ShowStmt statement, ConnectContext context) {
    return GlobalStateMgr.getCurrentState().getShowExecutor().showExecutorVisitor.visit(statement, context);
}
// 'SHOW GRANTS' for root must include the built-in root role privileges.
@Test
public void testShowGrants() throws Exception {
    ShowGrantsStmt stmt = new ShowGrantsStmt("root");
    ShowResultSet resultSet = ShowExecutor.execute(stmt, ctx);
    resultSet.getResultRows().forEach(System.out::println);
    String expectString1 = "root, null, GRANT CREATE TABLE, DROP, ALTER, CREATE VIEW, CREATE FUNCTION, " +
        "CREATE MATERIALIZED VIEW, CREATE PIPE ON ALL DATABASES TO ROLE 'root'";
    Assert.assertTrue(resultSet.getResultRows().stream().anyMatch(l -> l.toString().contains(expectString1)));
    String expectString2 = "root, null, GRANT DELETE, DROP, INSERT, SELECT, ALTER, EXPORT, " +
        "UPDATE ON ALL TABLES IN ALL DATABASES TO ROLE 'root'";
    Assert.assertTrue(resultSet.getResultRows().stream().anyMatch(l -> l.toString().contains(expectString2)));
}
/**
 * Null-safe interning: returns the canonical (interned) representation of
 * {@code sample}, or null when {@code sample} is null.
 */
public static String weakIntern(String sample) {
    return sample == null ? null : sample.intern();
}
// Equal-content strings (literal, substring, heap copy) must intern to the
// same canonical instance.
@Test
public void testWeakIntern() {
    String weakInternLiteralABC = weakIntern("ABC");
    String weakInternSubstringABC = weakIntern("ABCDE".substring(0, 3));
    String weakInternHeapABC = weakIntern(new String("ABC"));
    assertSame(weakInternLiteralABC, weakInternSubstringABC);
    assertSame(weakInternLiteralABC, weakInternHeapABC);
    assertSame(weakInternSubstringABC, weakInternHeapABC);
}
/**
 * Computes the output row metadata for the web service step: optionally clears
 * the incoming fields, then appends one value meta per configured output field.
 *
 * @throws KettleStepException when a value meta cannot be created
 */
@Override
public void getFields( RowMetaInterface r, String name, RowMetaInterface[] info, StepMeta nextStep,
    VariableSpace space, Repository repository, IMetaStore metaStore ) throws KettleStepException {
    // Input rows and output rows are different in the webservice step
    //
    if ( !isPassingInputData() ) {
        r.clear();
    }

    // Add the output fields...
    //
    for ( WebServiceField field : getFieldsOut() ) {
        int valueType = field.getType();
        // If the type is unrecognized we give back the XML as a String...
        //
        if ( field.getType() == ValueMetaInterface.TYPE_NONE ) {
            valueType = ValueMetaInterface.TYPE_STRING;
        }
        try {
            ValueMetaInterface vValue = ValueMetaFactory.createValueMeta( field.getName(), valueType );
            vValue.setOrigin( name );
            r.addValueMeta( vValue );
        } catch ( Exception e ) {
            throw new KettleStepException( e );
        }
    }
}
// Test: getFields() appends one String value meta per configured output field
// (field1..field3), verified via mocked RowMetaInterface interactions.
@Test public void testGetFields() throws Exception { WebServiceMeta webServiceMeta = new WebServiceMeta(); webServiceMeta.setDefault(); RowMetaInterface rmi = mock( RowMetaInterface.class ); RowMetaInterface rmi2 = mock( RowMetaInterface.class ); StepMeta nextStep = mock( StepMeta.class ); IMetaStore metastore = mock( IMetaStore.class ); Repository rep = mock( Repository.class ); WebServiceField field1 = new WebServiceField(); field1.setName( "field1" ); field1.setWsName( "field1WS" ); field1.setXsdType( "string" ); WebServiceField field2 = new WebServiceField(); field2.setName( "field2" ); field2.setWsName( "field2WS" ); field2.setXsdType( "string" ); WebServiceField field3 = new WebServiceField(); field3.setName( "field3" ); field3.setWsName( "field3WS" ); field3.setXsdType( "string" ); webServiceMeta.setFieldsOut( Arrays.asList( field1, field2, field3 ) ); webServiceMeta.getFields( rmi, "idk", new RowMetaInterface[]{ rmi2 }, nextStep, new Variables(), rep, metastore ); verify( rmi ).addValueMeta( argThat( matchValueMetaString( "field1" ) ) ); verify( rmi ).addValueMeta( argThat( matchValueMetaString( "field2" ) ) ); verify( rmi ).addValueMeta( argThat( matchValueMetaString( "field3" ) ) ); }
// Expands an "@N" sharding wildcard in a filepattern into N concrete shard names of the
// form "-%0Wd-of-%05d" (W derived from getShardWidth). A pattern without "@N" is returned
// unchanged as a single element; more than one "@N" occurrence is rejected.
// Note the second find() deliberately runs after expansion to detect duplicates.
public static Iterable<String> expandAtNFilepattern(String filepattern) { ImmutableList.Builder<String> builder = ImmutableList.builder(); Matcher match = AT_N_SPEC.matcher(filepattern); if (!match.find()) { builder.add(filepattern); } else { int numShards = Integer.parseInt(match.group("N")); String formatString = "-%0" + getShardWidth(numShards, filepattern) + "d-of-%05d"; for (int i = 0; i < numShards; ++i) { builder.add( AT_N_SPEC.matcher(filepattern).replaceAll(String.format(formatString, i, numShards))); } if (match.find()) { throw new IllegalArgumentException( "More than one @N wildcard found in filepattern: " + filepattern); } } return builder.build(); }
// Test: "@2" expands into exactly two shard names with 5-digit zero-padded indices.
@Test public void testExpandAtNFilepatternSmall() throws Exception { assertThat( Filepatterns.expandAtNFilepattern("gs://bucket/file@2.ism"), contains("gs://bucket/file-00000-of-00002.ism", "gs://bucket/file-00001-of-00002.ism")); }
// Validates user-supplied offsets for this connector. Tombstone (null) offsets are always
// accepted so garbage can be cleaned up via the REST API; non-null entries must carry a
// well-formed source partition (group id, topic, partition number) and offset. The offsets
// are only validated, never stored — the task does not consume them.
@Override public boolean alterOffsets(Map<String, String> connectorConfig, Map<Map<String, ?>, Map<String, ?>> offsets) { for (Map.Entry<Map<String, ?>, Map<String, ?>> offsetEntry : offsets.entrySet()) { Map<String, ?> sourceOffset = offsetEntry.getValue(); if (sourceOffset == null) { // We allow tombstones for anything; if there's garbage in the offsets for the connector, we don't // want to prevent users from being able to clean it up using the REST API continue; } Map<String, ?> sourcePartition = offsetEntry.getKey(); if (sourcePartition == null) { throw new ConnectException("Source partitions may not be null"); } MirrorUtils.validateSourcePartitionString(sourcePartition, CONSUMER_GROUP_ID_KEY); MirrorUtils.validateSourcePartitionString(sourcePartition, TOPIC_KEY); MirrorUtils.validateSourcePartitionPartition(sourcePartition); MirrorUtils.validateSourceOffset(sourcePartition, sourceOffset, true); } // We don't actually use these offsets in the task class, so no additional effort is required beyond just validating // the format of the user-supplied offsets return true; }
// Test: a non-numeric "partition" value in the source partition map is rejected
// with a ConnectException.
@Test public void testAlterOffsetsInvalidPartitionPartition() { MirrorCheckpointConnector connector = new MirrorCheckpointConnector(); Map<String, Object> partition = sourcePartition("consumer-app-2", "t", 3); partition.put(PARTITION_KEY, "a string"); assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Collections.singletonMap( partition, SOURCE_OFFSET ))); }
// REST endpoint: returns the mapped field types for the requested streams (restricted to
// the streams readable by the calling user), over the requested time range or all time
// when no range is given.
@POST @ApiOperation(value = "Retrieve the field list of a given set of streams") @NoAuditEvent("This is not changing any data") public Set<MappedFieldTypeDTO> byStreams(@ApiParam(name = "JSON body", required = true) @Valid @NotNull FieldTypesForStreamsRequest request, @Context SearchUser searchUser) { final ImmutableSet<String> streams = searchUser.streams().readableOrAllIfEmpty(request.streams()); return mappedFieldTypesService.fieldTypesByStreamIds(streams, request.timerange().orElse(RelativeRange.allTime())); }
// Test: the time range supplied in the request (relative 250s) is forwarded unchanged to
// the MappedFieldTypesService; verified with a stub service that only answers for that range.
@Test public void passesRequestedTimeRangeToMappedFieldTypesService() throws Exception { final SearchUser searchUser = TestSearchUser.builder() .allowStream("2323") .allowStream("4242") .build(); final FieldTypesForStreamsRequest request = FieldTypesForStreamsRequest.Builder.builder() .streams(ImmutableSet.of("2323", "4242")) .timerange(RelativeRange.create(250)) .build(); final MappedFieldTypesService mappedFieldTypesService = (streamIds, timeRange) -> { if (timeRange.equals(RelativeRange.create(250))) { final FieldTypes.Type fieldType = FieldTypes.Type.createType("long", ImmutableSet.of("numeric", "enumerable")); final MappedFieldTypeDTO field = MappedFieldTypeDTO.create("foobar", fieldType); return Collections.singleton(field); } else { throw new AssertionError("Expected relative range of 250"); } }; final FieldTypesResource resource = new FieldTypesResource(mappedFieldTypesService, mock(IndexFieldTypePollerPeriodical.class)); final Set<MappedFieldTypeDTO> result = resource.byStreams(request, searchUser); assertThat(result) .hasSize(1) .hasOnlyOneElementSatisfying(type -> assertThat(type.name()).isEqualTo("foobar")); }
/**
 * Returns the configuration path from the second CLI argument
 * (slash-normalized), or the default path when no argument was supplied.
 */
public String getConfigurationPath() {
    if (args.length < 2) {
        return DEFAULT_CONFIG_PATH;
    }
    return paddingWithSlash(args[1]);
}
// Test: with no CLI arguments the default configuration path "/conf/" is returned.
@Test void assertGetConfigurationPathWithEmptyArgument() { assertThat(new BootstrapArguments(new String[]{}).getConfigurationPath(), is("/conf/")); }
/**
 * Chooses the blob-input implementation for the given memory mode.
 *
 * @param mode memory mode the consumer is configured with
 * @param blob blob to read
 * @return a stream-backed input for ON_HEAP, a random-access (file-backed)
 *         input for SHARED_MEMORY_LAZY
 * @throws IOException if the underlying stream or file cannot be opened
 * @throws UnsupportedOperationException for any other memory mode
 */
public static HollowBlobInput modeBasedSelector(MemoryMode mode, HollowConsumer.Blob blob) throws IOException {
    if (mode.equals(ON_HEAP)) {
        return serial(blob.getInputStream());
    } else if (mode.equals(SHARED_MEMORY_LAZY)) {
        return randomAccess(blob.getFile());
    } else {
        // Name the offending mode instead of throwing a message-less exception.
        throw new UnsupportedOperationException("Unsupported memory mode: " + mode);
    }
}
// Test: ON_HEAP yields a DataInputStream-backed input; SHARED_MEMORY_LAZY yields a
// RandomAccessFile-backed input with a non-null buffer.
@Test public void testModeBasedSelector() throws IOException { assertTrue((HollowBlobInput.modeBasedSelector(MemoryMode.ON_HEAP, mockBlob)).getInput() instanceof DataInputStream); assertTrue((HollowBlobInput.modeBasedSelector(MemoryMode.SHARED_MEMORY_LAZY, mockBlob)).getInput() instanceof RandomAccessFile); assertNotNull((HollowBlobInput.modeBasedSelector(MemoryMode.SHARED_MEMORY_LAZY, mockBlob)).getBuffer()); }
// Normalizes a mail date string before parsing: upper-cases, strips commas, canonicalizes
// UTC/localized offsets (inserting a colon between hours and minutes to keep JDK 8 and
// JDK 11+ parsers in agreement), separates AM/PM, drops day-of-week tokens (unused by the
// resolvers and known to confuse the lenient RFC parser), removes " at ", and collapses
// whitespace. Only the first offset match is rewritten (break after the rewrite).
protected static String normalize(String text) { text = text.toUpperCase(Locale.US); //strip out commas text = text.replaceAll(",", ""); //1) strip off extra stuff after +0800, e.g. "Mon, 9 May 2016 7:32:00 UTC+0600 (BST)", //2) insert a colon btwn hrs and minutes to avoid a difference in behavior // between jdk 8 and jdk 11+17 Matcher matcher = OFFSET_PATTERN.matcher(text); while (matcher.find()) { if (matcher.group(1) != null) { text = text.substring(0, matcher.start()); text += matcher.group(1) + StringUtils.leftPad(matcher.group(2), 2, '0') + ":" + matcher.group(3); break; } } matcher = LOCALIZED_OFFSET_PATTERN.matcher(text); if (matcher.find()) { text = buildLocalizedOffset(matcher, text); } matcher = AM_PM.matcher(text); if (matcher.find()) { text = matcher.replaceFirst("$1 $2"); } //The rfc_lenient parser had a problem parsing dates //with days of week missing and a timezone: 9 May 2016 01:32:00 UTC //The day of week is not used in the resolvers, so we may as well throw //out that info matcher = DAYS_OF_WEEK.matcher(text); if (matcher.find()) { text = matcher.replaceAll(" "); } //16 May 2016 at 09:30:32 GMT+1 text = text.replaceAll("(?i) at ", " "); //just cause text = text.replaceAll("\\s+", " ").trim(); return text; }
// Test: a plain numeric date is left untouched — in particular no ":" must be
// inserted into the year by the offset-rewriting logic.
@Test public void testNormalization() throws Exception { String s = "10-10-2022"; //make sure that the year does not have ":" inserted assertEquals(s, MailDateParser.normalize(s)); }
// Runs a single-entity operation for each requested id, collecting per-id failures instead
// of aborting the batch. Audit events are written only when audit params are provided, and
// audit-storage failures are logged but never turn a successful operation into a failure.
// Returns the count of successes plus the captured failure list.
@Override public BulkOperationResponse executeBulkOperation(final BulkOperationRequest bulkOperationRequest, final C userContext, final AuditParams params) { if (bulkOperationRequest.entityIds() == null || bulkOperationRequest.entityIds().isEmpty()) { throw new BadRequestException(NO_ENTITY_IDS_ERROR); } List<BulkOperationFailure> capturedFailures = new LinkedList<>(); for (String entityId : bulkOperationRequest.entityIds()) { try { T entityModel = singleEntityOperationExecutor.execute(entityId, userContext); try { if (params != null) { auditEventSender.success(getAuditActor(userContext), params.eventType(), successAuditLogContextCreator.create(entityModel, params.entityClass())); } } catch (Exception auditLogStoreException) { //exception on audit log storing should not result in failure report, as the operation itself is successful LOG.error("Failed to store in the audit log information about successful entity removal via bulk action ", auditLogStoreException); } } catch (Exception ex) { capturedFailures.add(new BulkOperationFailure(entityId, ex.getMessage())); try { if (params != null) { auditEventSender.failure(getAuditActor(userContext), params.eventType(), failureAuditLogContextCreator.create(params.entityIdInPathParam(), entityId)); } } catch (Exception auditLogStoreException) { //exception on audit log storing should not result in failure report, as the operation itself is successful LOG.error("Failed to store in the audit log information about failed entity removal via bulk action ", auditLogStoreException); } } } return new BulkOperationResponse( bulkOperationRequest.entityIds().size() - capturedFailures.size(), capturedFailures); }
// Test: with null audit params all three entities are processed successfully and no
// audit-log machinery (sender or context creators) is touched.
@Test void doesNotCreateAuditLogIfAuditParamsAreNull() throws Exception { final BulkOperationResponse bulkOperationResponse = toTest.executeBulkOperation(new BulkOperationRequest(List.of("1", "2", "3")), context, null); assertThat(bulkOperationResponse.successfullyPerformed()).isEqualTo(3); assertThat(bulkOperationResponse.failures()).isEmpty(); verify(singleEntityOperationExecutor).execute("1", context); verify(singleEntityOperationExecutor).execute("2", context); verify(singleEntityOperationExecutor).execute("3", context); verifyNoMoreInteractions(singleEntityOperationExecutor); verifyNoInteractions(auditEventSender); verifyNoInteractions(failureAuditLogContextCreator); verifyNoInteractions(successAuditLogContextCreator); }
// Fetches a single Azure DevOps git repository via the REST API, joining the non-blank
// URL segments with "/" and deserializing the JSON response body into GsonAzureRepo.
public GsonAzureRepo getRepo(String serverUrl, String token, String projectName, String repositoryName) { String url = Stream.of(getTrimmedUrl(serverUrl), projectName, "_apis/git/repositories", repositoryName + "?" + API_VERSION_3) .filter(StringUtils::isNotBlank) .collect(joining("/")); return doGet(token, url, r -> buildGson().fromJson(r.body().charStream(), GsonAzureRepo.class)); }
// Test: a 200 response with a non-JSON body is surfaced as an IllegalArgumentException
// with the generic "unable to contact Azure" message.
@Test public void get_repo_non_json_payload() { enqueueResponse(200, NON_JSON_PAYLOAD); assertThatThrownBy(() -> underTest.getRepo(server.url("").toString(), "token", "projectName", "repoName")) .isInstanceOf(IllegalArgumentException.class) .hasMessage(UNABLE_TO_CONTACT_AZURE); }
/**
 * Rejects illegal global-status transitions: a session may not move between
 * the commit family and either the timeout or rollback family, in either
 * direction. All other transitions are allowed.
 */
public static boolean validateUpdateStatus(GlobalStatus before, GlobalStatus after) {
    boolean commitVsTimeout = (isTimeoutGlobalStatus(before) && isCommitGlobalStatus(after))
            || (isCommitGlobalStatus(before) && isTimeoutGlobalStatus(after));
    boolean commitVsRollback = (isRollbackGlobalStatus(before) && isCommitGlobalStatus(after))
            || (isCommitGlobalStatus(before) && isRollbackGlobalStatus(after));
    return !(commitVsTimeout || commitVsRollback);
}
// Test: transitions within the commit family are allowed; any transition between the
// commit family and the rollback/timeout families (either direction) is rejected.
@Test public void testValidateUpdateStatus(){ Assertions.assertTrue(SessionStatusValidator.validateUpdateStatus(GlobalStatus.Begin, GlobalStatus.Committing)); Assertions.assertTrue( SessionStatusValidator.validateUpdateStatus(GlobalStatus.Committing, GlobalStatus.Committed)); Assertions.assertFalse( SessionStatusValidator.validateUpdateStatus(GlobalStatus.Committing, GlobalStatus.TimeoutRollbacking)); Assertions.assertFalse( SessionStatusValidator.validateUpdateStatus(GlobalStatus.TimeoutRollbacking, GlobalStatus.Committing)); Assertions.assertFalse( SessionStatusValidator.validateUpdateStatus(GlobalStatus.Committing, GlobalStatus.Rollbacking)); Assertions.assertFalse( SessionStatusValidator.validateUpdateStatus(GlobalStatus.Rollbacking, GlobalStatus.Committing)); Assertions.assertFalse( SessionStatusValidator.validateUpdateStatus(GlobalStatus.Committed, GlobalStatus.Rollbacked)); Assertions.assertFalse( SessionStatusValidator.validateUpdateStatus(GlobalStatus.Committed, GlobalStatus.TimeoutRollbacking)); }
// Compiles (or returns a cached) Class for the given source code. The cache key combines
// the class-loader hash with the code text — the code (which embeds the class name) makes
// the key unique, while avoiding holding a strong reference to the loader itself and thus
// preventing class-loader leaks. Compilation failures surface as FlinkRuntimeException.
@SuppressWarnings("unchecked") public static <T> Class<T> compile(ClassLoader cl, String name, String code) { try { // The class name is part of the "code" and makes the string unique, // to prevent class leaks we don't cache the class loader directly // but only its hash code final ClassKey classKey = new ClassKey(cl.hashCode(), code); return (Class<T>) COMPILED_CLASS_CACHE.get(classKey, () -> doCompile(cl, name, code)); } catch (Exception e) { throw new FlinkRuntimeException(e.getMessage(), e); } }
// Test: identical (loader, code) pairs hit the cache and yield the same Class instance,
// while a different class loader produces a distinct compiled Class.
@Test public void testCacheReuse() { String code = "public class Main {\n" + " int i;\n" + " int j;\n" + "}"; Class<?> class1 = CompileUtils.compile(this.getClass().getClassLoader(), "Main", code); Class<?> class2 = CompileUtils.compile(this.getClass().getClassLoader(), "Main", code); Class<?> class3 = CompileUtils.compile(new TestClassLoader(), "Main", code); assertThat(class2).isSameAs(class1); assertThat(class3).isNotSameAs(class1); }
/**
 * Records the end of one invocation of the given service: decrements the
 * active-call gauge and increments the total-call counter.
 */
public static void endCount(String service) {
    final RpcStatus status = getStatus(service);
    status.active.decrementAndGet();
    status.total.increment();
}
// Test: after endCount the service's active count is back to 0 and total is 1.
@Test public void endCount() { RpcStatus.endCount(SERVICE); Assertions.assertEquals(RpcStatus.getStatus(SERVICE).getActive(), 0); Assertions.assertEquals(RpcStatus.getStatus(SERVICE).getTotal(), 1); }
@Override public int compare(Object o1, Object o2) { if (o1 == null && o2 == null) { return 0; // o1 == o2 } if (o1 == null) { return -1; // o1 < o2 } if (o2 == null) { return 1; // o1 > o2 } return nonNullCompare(o1, o2); }
// Test: a null first argument sorts before any non-null second argument.
@Test public void nullArgOne() { assertTrue("null one", cmp.compare(null, 1) < 0); }
// Removes the inbound half of this combined duplex handler from the pipeline.
// checkAdded() guards against calling this before the handler was added
// (throws IllegalStateException in that case).
public final void removeInboundHandler() { checkAdded(); inboundCtx.remove(); }
// Test: removing the inbound handler before the combined handler was added to a
// pipeline throws IllegalStateException.
@Test public void testInboundRemoveBeforeAdded() { final CombinedChannelDuplexHandler<ChannelInboundHandler, ChannelOutboundHandler> handler = new CombinedChannelDuplexHandler<ChannelInboundHandler, ChannelOutboundHandler>( new ChannelInboundHandlerAdapter(), new ChannelOutboundHandlerAdapter()); assertThrows(IllegalStateException.class, new Executable() { @Override public void execute() { handler.removeInboundHandler(); } }); }
/**
 * Split-brain protection predicate: passes only when at least
 * {@code minimumClusterSize} members are ICMP-reachable AND either local or
 * considered alive by the failure detector at the current timestamp.
 */
@Override
public boolean apply(Collection<Member> members) {
    if (members.size() < minimumClusterSize) {
        return false;
    }
    final long now = Clock.currentTimeMillis();
    int aliveCount = 0;
    for (Member member : members) {
        // The local member is always trusted; remote members must pass the failure detector.
        boolean alive = isAlivePerIcmp(member)
                && (member.localMember() || failureDetector.isAlive(member, now));
        if (alive) {
            aliveCount++;
        }
    }
    return aliveCount >= minimumClusterSize;
}
// Test: with fewer live members than the protection threshold the predicate returns false.
@Test public void testSplitBrainProtectionAbsent_whenFewerThanSplitBrainProtectionPresent() { splitBrainProtectionFunction = new ProbabilisticSplitBrainProtectionFunction(splitBrainProtectionSize, 10000, 10000, 200, 100, 10); heartbeat(5, 1000); // cluster membership manager considers fewer than split brain protection members live assertFalse(splitBrainProtectionFunction.apply(subsetOfMembers(splitBrainProtectionSize - 1))); }
// Convenience overload: formats the expression with default FormatOptions
// (no identifiers escaped — the predicate always returns false).
public static String formatExpression(final Expression expression) { return formatExpression(expression, FormatOptions.of(s -> false)); }
// Test: a boolean literal formats to its plain text value.
@Test public void shouldFormatBooleanLiteral() { assertThat(ExpressionFormatter.formatExpression(new BooleanLiteral("true")), equalTo("true")); }
// Assembles the Kafka producer config for a Streams client: starts from the EOS or default
// override map, layers custom client props and producer-prefixed user props on top (user
// props win), rejects user overrides of non-configurable EOS settings, and — for EOS alpha
// only — enables auto-downgrade of the transactional commit protocol for older brokers.
// Bootstrap servers and the streams-derived client id are always forced last.
@SuppressWarnings("WeakerAccess") public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); checkIfUnexpectedUserSpecifiedConsumerConfig(clientProvidedProps, NON_CONFIGURABLE_PRODUCER_EOS_CONFIGS); // generate producer configs from original properties and overridden maps final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(getClientCustomProps()); props.putAll(clientProvidedProps); // When using EOS alpha, stream should auto-downgrade the transactional commit protocol to be compatible with older brokers. if (StreamsConfigUtils.processingMode(this) == StreamsConfigUtils.ProcessingMode.EXACTLY_ONCE_ALPHA) { props.put("internal.auto.downgrade.txn.commit", true); } props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); // add client id with stream client id prefix props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId); return props; }
// Test: under EOS alpha the producer config carries internal.auto.downgrade.txn.commit=true.
@SuppressWarnings("deprecation") @Test public void shouldSetInternalAutoDowngradeTxnCommitToTrueInProducerForEosAlpha() { props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, EXACTLY_ONCE); final StreamsConfig streamsConfig = new StreamsConfig(props); final Map<String, Object> producerConfigs = streamsConfig.getProducerConfigs(clientId); assertThat(producerConfigs.get("internal.auto.downgrade.txn.commit"), is(true)); }
/**
 * Returns the router registered for the given IP address, creating and
 * registering a new one (wired to the shared agent) on first access.
 *
 * Uses {@link java.util.Map#computeIfAbsent} instead of the original
 * containsKey/put pair, which could race under concurrent callers and
 * create duplicate router instances for the same id.
 *
 * @param ipAddress router IP address; used to derive the router id
 * @return the existing or newly created router
 */
public LispRouter getRouterInstance(IpAddress ipAddress) {
    LispRouterId routerId = new LispRouterId(ipAddress);
    return routerMap.computeIfAbsent(routerId, id -> {
        LispRouter router = new DefaultLispRouter(id);
        router.setAgent(agent);
        return router;
    });
}
// Test: requesting a router twice for the same IP address returns the same cached instance.
@Test public void testGetRouterInstance() throws Exception { IpAddress ipAddress = IpAddress.valueOf("192.168.1.1"); LispRouter router1 = routerFactory.getRouterInstance(ipAddress); LispRouter router2 = routerFactory.getRouterInstance(ipAddress); assertThat(router1, is(router2)); }
// Static factory wrapping a user-provided jar and an optional job entry-class name.
// NOTE(review): null validation is not visible here — the paired test expects an NPE for
// (null, null), presumably thrown by the constructor; confirm against its implementation.
public static FromJarEntryClassInformationProvider createFromCustomJar( File jarFile, @Nullable String jobClassName) { return new FromJarEntryClassInformationProvider(jarFile, jobClassName); }
// Test: passing null for both the jar and the job class name is rejected with an NPE.
@Test void testEitherJobClassNameOrJarHasToBeSet() { assertThatThrownBy( () -> FromJarEntryClassInformationProvider.createFromCustomJar(null, null)) .isInstanceOf(NullPointerException.class) .hasMessage(NULL_JAR_AND_CLASS_MESSAGE); }
// Handles only the global-router configuration key, on top of the superclass's own filter.
@Override public boolean shouldHandle(String key) { return super.shouldHandle(key) && RouterConstant.GLOBAL_ROUTER_KEY.equals(key); }
// Test: the handler accepts the global route-rule configuration key.
@Test public void testShouldHandle() { Assert.assertTrue(handler.shouldHandle("servicecomb.globalRouteRule")); }
// Tokenizes English text: optionally expands contractions (won't/shan't/ain't/n't and the
// two- and three-part contraction patterns), then pads punctuation delimiters with spaces,
// splits on whitespace, re-attaches a trailing period to a known abbreviation (so "etc ."
// becomes "etc."), and drops empty tokens.
@Override public String[] split(String text) { if (splitContraction) { text = WONT_CONTRACTION.matcher(text).replaceAll("$1ill not"); text = SHANT_CONTRACTION.matcher(text).replaceAll("$1ll not"); text = AINT_CONTRACTION.matcher(text).replaceAll("$1m not"); for (Pattern regexp : NOT_CONTRACTIONS) { text = regexp.matcher(text).replaceAll("$1 not"); } for (Pattern regexp : CONTRACTIONS2) { text = regexp.matcher(text).replaceAll("$1 $2"); } for (Pattern regexp : CONTRACTIONS3) { text = regexp.matcher(text).replaceAll("$1 $2 $3"); } } text = DELIMITERS[0].matcher(text).replaceAll(" $1 "); text = DELIMITERS[1].matcher(text).replaceAll(" $1"); text = DELIMITERS[2].matcher(text).replaceAll(" $1"); text = DELIMITERS[3].matcher(text).replaceAll(" . "); text = DELIMITERS[4].matcher(text).replaceAll(" $1 "); String[] words = WHITESPACE.split(text); if (words.length > 1 && words[words.length-1].equals(".")) { if (EnglishAbbreviations.contains(words[words.length-2])) { words[words.length-2] = words[words.length-2] + "."; } } ArrayList<String> result = new ArrayList<>(); for (String token : words) { if (!token.isEmpty()) { result.add(token); } } return result.toArray(new String[0]); }
// Test: tokenization of company names mixing digits, letters, hyphens, slashes,
// apostrophes and periods matches the expected token sequence.
@Test public void testTokenizeMixedAlphanumWords() { System.out.println("tokenize words with mixed numbers, letters, and punctuation"); String text = "3M, L-3, BB&T, AutoZone, O'Reilly, Harley-Davidson, CH2M, A-Mark, " + "Quad/Graphics, Bloomin' Brands, B/E Aerospace, J.Crew, E*Trade."; // Note: would be very hard to get "Bloomin'" and "E*Trade" correct String[] expResult = {"3M", ",", "L-3", ",", "BB&T", ",", "AutoZone", ",", "O'Reilly", ",", "Harley-Davidson", ",", "CH2M", ",", "A-Mark", ",", "Quad/Graphics", ",", "Bloomin", "'", "Brands", ",", "B/E", "Aerospace", ",", "J.Crew", ",", "E", "*", "Trade", "."}; SimpleTokenizer instance = new SimpleTokenizer(); String[] result = instance.split(text); assertEquals(expResult.length, result.length); for (int i = 0; i < result.length; i++) { assertEquals(expResult[i], result[i]); } }
// Exports Kafka Connect connectors of the cluster as ODD data entities: for each connect
// instance, resolves its connector names (errors suppressed), fetches each connector and
// its topics, maps them to data entities, and emits them in batches of up to 100 under the
// connect instance's data-source oddrn.
Flux<DataEntityList> export(KafkaCluster cluster) { return kafkaConnectService.getConnects(cluster) .flatMap(connect -> kafkaConnectService.getConnectorNamesWithErrorsSuppress(cluster, connect.getName()) .flatMap(connectorName -> kafkaConnectService.getConnector(cluster, connect.getName(), connectorName)) .flatMap(connectorDTO -> kafkaConnectService.getConnectorTopics(cluster, connect.getName(), connectorDTO.getName()) .map(topics -> createConnectorDataEntity(cluster, connect, connectorDTO, topics))) .buffer(100) .map(connectDataEntities -> { String dsOddrn = Oddrn.connectDataSourceOddrn(connect.getAddress()); return new DataEntityList() .dataSourceOddrn(dsOddrn) .items(connectDataEntities); }) ); }
// Test: a sink and a source connector are exported as two data-transformer entities under
// the connect data-source oddrn, with inputs/outputs pointing at the right cluster topics
// and metadata limited to the connector config keys plus "type".
@Test void exportsConnectorsAsDataTransformers() { ConnectDTO connect = new ConnectDTO(); connect.setName("testConnect"); connect.setAddress("http://kconnect:8083"); ConnectorDTO sinkConnector = new ConnectorDTO(); sinkConnector.setName("testSink"); sinkConnector.setType(ConnectorTypeDTO.SINK); sinkConnector.setConnect(connect.getName()); sinkConnector.setConfig( Map.of( "connector.class", "FileStreamSink", "file", "filePathHere", "topic", "inputTopic" ) ); ConnectorDTO sourceConnector = new ConnectorDTO(); sourceConnector.setName("testSource"); sourceConnector.setConnect(connect.getName()); sourceConnector.setType(ConnectorTypeDTO.SOURCE); sourceConnector.setConfig( Map.of( "connector.class", "FileStreamSource", "file", "filePathHere", "topic", "outputTopic" ) ); when(kafkaConnectService.getConnects(CLUSTER)) .thenReturn(Flux.just(connect)); when(kafkaConnectService.getConnectorNamesWithErrorsSuppress(CLUSTER, connect.getName())) .thenReturn(Flux.just(sinkConnector.getName(), sourceConnector.getName())); when(kafkaConnectService.getConnector(CLUSTER, connect.getName(), sinkConnector.getName())) .thenReturn(Mono.just(sinkConnector)); when(kafkaConnectService.getConnector(CLUSTER, connect.getName(), sourceConnector.getName())) .thenReturn(Mono.just(sourceConnector)); when(kafkaConnectService.getConnectorTopics(CLUSTER, connect.getName(), sourceConnector.getName())) .thenReturn(Mono.just(new ConnectorTopics().topics(List.of("outputTopic")))); when(kafkaConnectService.getConnectorTopics(CLUSTER, connect.getName(), sinkConnector.getName())) .thenReturn(Mono.just(new ConnectorTopics().topics(List.of("inputTopic")))); StepVerifier.create(exporter.export(CLUSTER)) .assertNext(dataEntityList -> { assertThat(dataEntityList.getDataSourceOddrn()) .isEqualTo("//kafkaconnect/host/kconnect:8083"); assertThat(dataEntityList.getItems()) .hasSize(2); assertThat(dataEntityList.getItems()) .filteredOn(DataEntity::getOddrn, "//kafkaconnect/host/kconnect:8083/connectors/testSink") 
.singleElement() .satisfies(sink -> { assertThat(sink.getMetadata().get(0).getMetadata()) .containsOnlyKeys("type", "connector.class", "file", "topic"); assertThat(sink.getDataTransformer().getInputs()).contains( "//kafka/cluster/localhost:9092/topics/inputTopic"); }); assertThat(dataEntityList.getItems()) .filteredOn(DataEntity::getOddrn, "//kafkaconnect/host/kconnect:8083/connectors/testSource") .singleElement() .satisfies(source -> { assertThat(source.getMetadata().get(0).getMetadata()) .containsOnlyKeys("type", "connector.class", "file", "topic"); assertThat(source.getDataTransformer().getOutputs()).contains( "//kafka/cluster/localhost:9092/topics/outputTopic"); }); }) .verifyComplete(); }
/**
 * Strips a class-name-like string down to the characters legal in such names:
 * Java identifier parts plus '.', '[', ']' and '-' (e.g. for "package-info"
 * and array types). Every other character is silently dropped.
 */
public static String normalizeClassName(String name) {
    final StringBuilder normalized = new StringBuilder(name.length());
    for (int i = 0; i < name.length(); i++) {
        final char ch = name.charAt(i);
        final boolean keep = ch == '.' || ch == '[' || ch == ']' || ch == '-'
                || Character.isJavaIdentifierPart(ch);
        if (keep) {
            normalized.append(ch);
        }
    }
    return normalized.toString();
}
// Test: hyphens, brackets and identifier characters survive normalization; whitespace
// and other illegal characters (e.g. '/') are stripped.
@Test public void testNormalizeClassName() { assertEquals("my.package-info", StringHelper.normalizeClassName("my.package-info"), "Should get the right class name"); assertEquals("Integer[]", StringHelper.normalizeClassName("Integer[] \r"), "Should get the right class name"); assertEquals("Hello_World", StringHelper.normalizeClassName("Hello_World"), "Should get the right class name"); assertEquals("", StringHelper.normalizeClassName("////"), "Should get the right class name"); }
// Convenience overload: parses the decimal string and delegates to the
// BigDecimal-based fromWei(BigDecimal, Unit) conversion.
public static BigDecimal fromWei(String number, Unit unit) { return fromWei(new BigDecimal(number), unit); }
// Test: converting the same wei amount to each denomination scales the decimal point
// by the unit's factor (WEI through GETHER).
@Test public void testFromWei() { assertEquals( Convert.fromWei("21000000000000", Convert.Unit.WEI), (new BigDecimal("21000000000000"))); assertEquals( Convert.fromWei("21000000000000", Convert.Unit.KWEI), (new BigDecimal("21000000000"))); assertEquals( Convert.fromWei("21000000000000", Convert.Unit.MWEI), (new BigDecimal("21000000"))); assertEquals( Convert.fromWei("21000000000000", Convert.Unit.GWEI), (new BigDecimal("21000"))); assertEquals(Convert.fromWei("21000000000000", Convert.Unit.SZABO), (new BigDecimal("21"))); assertEquals( Convert.fromWei("21000000000000", Convert.Unit.FINNEY), (new BigDecimal("0.021"))); assertEquals( Convert.fromWei("21000000000000", Convert.Unit.ETHER), (new BigDecimal("0.000021"))); assertEquals( Convert.fromWei("21000000000000", Convert.Unit.KETHER), (new BigDecimal("0.000000021"))); assertEquals( Convert.fromWei("21000000000000", Convert.Unit.METHER), (new BigDecimal("0.000000000021"))); assertEquals( Convert.fromWei("21000000000000", Convert.Unit.GETHER), (new BigDecimal("0.000000000000021"))); }
// Serializes a value with the configured value serde. On a ClassCastException (serde does
// not match the actual value type) the error message is enriched with the real serializer
// and value classes — unwrapping ValueAndTimestampSerializer so the inner serializer and
// inner value type are reported instead of the wrapper.
public byte[] rawValue(final V value) { try { return valueSerde.serializer().serialize(topic, value); } catch (final ClassCastException e) { final String valueClass; final Class<? extends Serializer> serializerClass; if (valueSerializer() instanceof ValueAndTimestampSerializer) { serializerClass = ((ValueAndTimestampSerializer) valueSerializer()).valueSerializer.getClass(); valueClass = value == null ? "unknown because value is null" : ((ValueAndTimestamp) value).value().getClass().getName(); } else { serializerClass = valueSerializer().getClass(); valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); } throw new StreamsException( String.format("A serializer (%s) is not compatible to the actual value type " + "(value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", serializerClass.getName(), valueClass), e); } }
// Test: serializing a mismatched ValueAndTimestamp reports the inner serializer and inner
// value type in the StreamsException message, not the wrapper classes.
@Test public void shouldSkipValueAndTimestampeInformationForErrorOnTimestampAndValueSerialization() throws ClassNotFoundException { final Class myClass = Class.forName("java.lang.String"); final StateSerdes<Object, Object> stateSerdes = new StateSerdes<Object, Object>("anyName", Serdes.serdeFrom(myClass), new ValueAndTimestampSerde(Serdes.serdeFrom(myClass))); final Integer myInt = 123; final Exception e = assertThrows(StreamsException.class, () -> stateSerdes.rawValue(ValueAndTimestamp.make(myInt, 0L))); assertThat( e.getMessage(), equalTo( "A serializer (org.apache.kafka.common.serialization.StringSerializer) " + "is not compatible to the actual value type (value type: java.lang.Integer). " + "Change the default Serdes in StreamConfig or provide correct Serdes via method parameters.")); }
// Converts a hostname to its IDNA/Punycode ASCII form when the IDN preference is enabled;
// otherwise (or on conversion failure, or for a blank result) returns the stripped input.
// Null input flows through StringUtils.strip and is returned as null.
public String convert(final String hostname) { if(!PreferencesFactory.get().getBoolean("connection.hostname.idn")) { return StringUtils.strip(hostname); } if(StringUtils.isNotEmpty(hostname)) { try { // Convenience function that implements the IDNToASCII operation as defined in // the IDNA RFC. This operation is done on complete domain names, e.g: "www.example.com". // It is important to note that this operation can fail. If it fails, then the input // domain name cannot be used as an Internationalized Domain Name and the application // should have methods defined to deal with the failure. // IDNA.DEFAULT Use default options, i.e., do not process unassigned code points // and do not use STD3 ASCII rules If unassigned code points are found // the operation fails with ParseException final String idn = IDN.toASCII(StringUtils.strip(hostname)); if(log.isDebugEnabled()) { if(!StringUtils.equals(StringUtils.strip(hostname), idn)) { log.debug(String.format("IDN hostname for %s is %s", hostname, idn)); } } if(StringUtils.isNotEmpty(idn)) { return idn; } } catch(IllegalArgumentException e) { log.warn(String.format("Failed to convert hostname %s to IDNA", hostname), e); } } return StringUtils.strip(hostname); }
// Test: ASCII hostnames, null and empty pass through; a non-ASCII name converts to Punycode.
@Test public void testConvert() { assertEquals("host.localdomain", new PunycodeConverter().convert("host.localdomain")); assertNull(new PunycodeConverter().convert(null)); assertEquals("", new PunycodeConverter().convert("")); assertEquals("xn--4ca", new PunycodeConverter().convert("ä")); }
// Returns the current value of the "mapAttributesToNodes failed" metric counter.
public int getMapAttributesToNodesFailedRetrieved() { return numMapAttributesToNodesFailedRetrieved.value(); }
// Test: reporting one failure increments the failed-retrieved metric by exactly one.
@Test public void testGetMapAttributesToNodesFailedRetrieved() { long totalBadBefore = metrics.getMapAttributesToNodesFailedRetrieved(); badSubCluster.getMapAttributesToNodesFailed(); Assert.assertEquals(totalBadBefore + 1, metrics.getMapAttributesToNodesFailedRetrieved()); }
/**
 * Builds the unique connection-pool name by concatenating the connection
 * name, database name, hostname, port and partition id, with absent parts
 * contributing the empty string.
 */
protected static String buildPoolName( DatabaseMeta dbMeta, String partitionId ) {
  // append( getName() ) mirrors the original string concatenation, including
  // its behavior for a null name.
  return new StringBuilder()
    .append( dbMeta.getName() )
    .append( Const.NVL( dbMeta.getDatabaseName(), "" ) )
    .append( Const.NVL( dbMeta.getHostname(), "" ) )
    .append( Const.NVL( dbMeta.getDatabasePortNumberString(), "" ) )
    .append( Const.NVL( partitionId, "" ) )
    .toString();
}
// Test: the pool name grows incrementally as database name, host and port become set,
// concatenated in that fixed order.
@Test public void testGetConnectionName() { when( dbMeta.getName() ).thenReturn( "CP2" ); String connectionName = ConnectionPoolUtil.buildPoolName( dbMeta, "" ); assertEquals( "CP2", connectionName ); assertNotEquals( "CP2pentaho", connectionName ); when( dbMeta.getDatabaseName() ).thenReturn( "pentaho" ); connectionName = ConnectionPoolUtil.buildPoolName( dbMeta, "" ); assertEquals( "CP2pentaho", connectionName ); assertNotEquals( "CP2pentaholocal", connectionName ); when( dbMeta.getHostname() ).thenReturn( "local" ); connectionName = ConnectionPoolUtil.buildPoolName( dbMeta, "" ); assertEquals( "CP2pentaholocal", connectionName ); assertNotEquals( "CP2pentaholocal3306", connectionName ); when( dbMeta.getDatabasePortNumberString() ).thenReturn( "3306" ); connectionName = ConnectionPoolUtil.buildPoolName( dbMeta, "" ); assertEquals( "CP2pentaholocal3306", connectionName ); }
/**
 * Handles the broker's notification that the consumer-id list of a group
 * changed: logs the origin broker and group, then triggers an immediate
 * rebalance on the client factory.
 *
 * @return always {@code null} — this is a one-way notification with no
 *         response body
 */
public RemotingCommand notifyConsumerIdsChanged(ChannelHandlerContext ctx,
    RemotingCommand request) throws RemotingCommandException {
    try {
        final NotifyConsumerIdsChangedRequestHeader requestHeader =
            (NotifyConsumerIdsChangedRequestHeader) request.decodeCommandCustomHeader(NotifyConsumerIdsChangedRequestHeader.class);
        logger.info("receive broker's notification[{}], the consumer group: {} changed, rebalance immediately",
            RemotingHelper.parseChannelRemoteAddr(ctx.channel()), requestHeader.getConsumerGroup());
        this.mqClientFactory.rebalanceImmediately();
    } catch (Exception e) {
        // Bug fix: the original format string had no "{}" placeholder, so the
        // exception description argument was silently dropped by the logger.
        logger.error("notifyConsumerIdsChanged exception: {}", UtilAll.exceptionSimpleDesc(e));
    }
    return null;
}
// Test: processing a NOTIFY_CONSUMER_IDS_CHANGED request returns null (one-way request).
@Test public void testNotifyConsumerIdsChanged() throws Exception { ChannelHandlerContext ctx = mock(ChannelHandlerContext.class); RemotingCommand request = mock(RemotingCommand.class); when(request.getCode()).thenReturn(RequestCode.NOTIFY_CONSUMER_IDS_CHANGED); NotifyConsumerIdsChangedRequestHeader requestHeader = new NotifyConsumerIdsChangedRequestHeader(); when(request.decodeCommandCustomHeader(NotifyConsumerIdsChangedRequestHeader.class)).thenReturn(requestHeader); assertNull(processor.processRequest(ctx, request)); }
// Static factory: wraps the given Retry instance in a RetryTransformer.
public static <T> RetryTransformer<T> of(Retry retry) { return new RetryTransformer<>(retry); }
// Test: two failing Observable subscriptions through the retry transformer each exhaust
// their retries (3 calls apiece → 6 total) and both count as failed-with-retry in metrics.
@Test public void returnOnErrorUsingObservable() throws InterruptedException { RetryConfig config = retryConfig(); Retry retry = Retry.of("testName", config); RetryTransformer<Object> retryTransformer = RetryTransformer.of(retry); given(helloWorldService.returnHelloWorld()) .willThrow(new HelloWorldException()); Observable.fromCallable(helloWorldService::returnHelloWorld) .compose(retryTransformer) .test() .await() .assertError(HelloWorldException.class) .assertNotComplete() .assertSubscribed(); Observable.fromCallable(helloWorldService::returnHelloWorld) .compose(retryTransformer) .test() .await() .assertError(HelloWorldException.class) .assertNotComplete() .assertSubscribed(); then(helloWorldService).should(times(6)).returnHelloWorld(); Retry.Metrics metrics = retry.getMetrics(); assertThat(metrics.getNumberOfFailedCallsWithRetryAttempt()).isEqualTo(2); assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isZero(); }
/**
 * Runs a complete validation of the given GitLab ALM setting: URL plus the
 * decrypted personal access token.
 *
 * @param almSettingDto the stored GitLab configuration to validate
 */
public void validate(AlmSettingDto almSettingDto) {
    // Argument order matches the original locals: URL first, then the token
    // decrypted with the instance's encryption helper.
    validate(ValidationMode.COMPLETE,
        almSettingDto.getUrl(),
        almSettingDto.getDecryptedPersonalAccessToken(encryption));
}
/**
 * AUTH_ONLY validation with a null URL must fail fast with an
 * IllegalArgumentException whose message points the administrator at the
 * incomplete global GitLab configuration.
 */
@Test
public void validate_forAuthOnlyWhenUrlIsNull_throwsException() {
    assertThatIllegalArgumentException()
        .isThrownBy(() -> underTest.validate(AUTH_ONLY, null, null))
        .withMessage("Your Gitlab global configuration is incomplete. The GitLab URL must be set.");
}
/**
 * Root resource of the history web service: returns general history-server
 * information as JSON or XML (both served with an explicit UTF-8 charset).
 *
 * @return the current {@link HistoryInfo} snapshot
 */
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public HistoryInfo get() {
    final HistoryInfo info = getHistoryInfo();
    return info;
}
/**
 * GET /ws/v1/history/ (trailing slash) must return the history info as JSON
 * with a UTF-8 content type and exactly one top-level element.
 */
@Test
public void testHSSlash() throws JSONException, Exception {
    final WebResource webResource = resource();
    final ClientResponse response = webResource
        .path("ws").path("v1").path("history/")
        .accept(MediaType.APPLICATION_JSON)
        .get(ClientResponse.class);

    final String expectedContentType = MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8;
    assertEquals(expectedContentType, response.getType().toString());

    final JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    // Delegate field-by-field verification to the shared helper.
    verifyHSInfo(json.getJSONObject("historyInfo"), appContext);
}
/**
 * Fetches books from the library client and records an audit entry for the
 * outcome (success payload or error payload).
 *
 * <p>Best-effort semantics are preserved from the original: any exception
 * (network failure, audit persistence failure, ...) is logged and whatever
 * was fetched so far (possibly {@code null}) is returned.
 *
 * @param bookRequest identifier/criteria forwarded to the library client
 * @return the fetched {@link BookDto}, or {@code null} when the call failed
 */
public BookDto getBooks(String bookRequest) {
    BookDto books = null;
    try {
        AuditDto audit = null;
        final Response<BookDto> allBooksResponse =
            libraryClient.getAllBooksWithHeaders(bookRequest).execute();
        if (allBooksResponse.isSuccessful()) {
            books = allBooksResponse.body();
            log.info("Get All Books : {}", books);
            audit = auditMapper.populateAuditLogForGetBook(books);
        } else if (Objects.nonNull(allBooksResponse.errorBody())) {
            // BUG FIX: read the error body exactly once. ResponseBody.string()
            // consumes the body, and the original logged the ResponseBody
            // object (its toString()) rather than its content.
            final String errorPayload = allBooksResponse.errorBody().string();
            log.error("Error calling library client: {}", errorPayload);
            audit = auditMapper.populateAuditLogForException(null, HttpMethod.GET, errorPayload);
        } else {
            // BUG FIX: a non-successful response with no error body previously
            // fell through with only the raw errorBody() (null) logged.
            log.error("Error calling library client: no error body, HTTP status {}",
                allBooksResponse.code());
        }
        if (Objects.nonNull(audit)) {
            final AuditLog savedObj = auditRepository.save(libraryMapper.auditDtoToAuditLog(audit));
            log.info("Saved into audit successfully: {}", savedObj);
        }
        return books;
    } catch (Exception ex) {
        // Deliberate best-effort: never propagate; return what we have.
        log.error("Error handling retrofit call for getAllBooks", ex);
        return books;
    }
}
/**
 * Happy path: the client returns one book from the canned JSON fixture, the
 * audit save is stubbed out, and the service returns that book unchanged.
 */
@Test
@DisplayName("Successful call to get a specific book")
public void getRequestBookTest() throws Exception {
    // Load the expected payload from the test fixture and deserialize it.
    final String booksResponse = getBooksResponse("/response/getOneBook.json");
    final BookDto expectedBook = new ObjectMapper().readValue(booksResponse, BookDto.class);

    when(libraryClient.getAllBooksWithHeaders("1")).thenReturn(Calls.response(expectedBook));
    // Audit persistence is irrelevant here — stub it to a no-op.
    doReturn(null).when(auditRepository).save(any());

    final BookDto actualBook = libraryAuditService.getBooks("1");

    assertAll(
        () -> assertNotNull(actualBook),
        () -> assertTrue(actualBook.getId() == 1)
    );
}
/**
 * Writes {@code data} to the stream followed by the protocol's
 * null-terminated-string delimiter byte.
 *
 * @param data the raw bytes to write (written in full, unmodified)
 * @param out  the destination buffer
 * @throws IOException declared for interface compatibility with other writers
 */
public static void writeNullTerminated(byte[] data, ByteArrayOutputStream out) throws IOException {
    out.write(data, 0, data.length);
    // Terminator byte, as defined by the wire protocol constants.
    out.write(MSC.NULL_TERMINATED_STRING_DELIMITER);
}
/**
 * writeNullTerminated must append the payload followed by a single
 * terminating 0 byte.
 */
@Test
public void testWriteNullTerminated() throws IOException {
    final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    final byte[] payload = {3};

    ByteHelper.writeNullTerminated(payload, buffer);

    final byte[] expected = {3, 0};
    Assert.assertArrayEquals(expected, buffer.toByteArray());
}
/**
 * Triggers a thread-info sampling request against the given task executors
 * and returns a future completed with the aggregated per-vertex statistics.
 *
 * @param executionsWithGateways running executions grouped per task manager,
 *        each mapped to (a future of) the gateway that can sample them
 * @param numSamples           number of samples to take per task (>= 1)
 * @param delayBetweenSamples  pause between consecutive samples
 * @param maxStackTraceDepth   maximum stack depth to record (>= 0)
 * @return future with the collected {@code VertexThreadInfoStats}; completes
 *         exceptionally with {@code IllegalStateException} if shut down
 */
public CompletableFuture<VertexThreadInfoStats> triggerThreadInfoRequest(
        Map<ImmutableSet<ExecutionAttemptID>, CompletableFuture<TaskExecutorThreadInfoGateway>>
                executionsWithGateways,
        int numSamples,
        Duration delayBetweenSamples,
        int maxStackTraceDepth) {
    // Validate arguments before taking the lock.
    checkNotNull(executionsWithGateways, "Tasks to sample");
    checkArgument(executionsWithGateways.size() > 0, "No tasks to sample");
    checkArgument(numSamples >= 1, "No number of samples");
    checkArgument(maxStackTraceDepth >= 0, "Negative maximum stack trace depth");

    // Execution IDs of running tasks grouped by the task manager
    Collection<ImmutableSet<ExecutionAttemptID>> runningSubtasksIds =
            executionsWithGateways.keySet();

    // The lock guards isShutDown, the request-id counter, and pendingRequests.
    synchronized (lock) {
        if (isShutDown) {
            return FutureUtils.completedExceptionally(new IllegalStateException("Shut down"));
        }

        // Request ids are assigned monotonically under the lock, so each
        // pending request is uniquely identifiable when responses arrive.
        final int requestId = requestIdCounter++;
        log.debug("Triggering thread info request {}", requestId);

        final PendingThreadInfoRequest pending =
                new PendingThreadInfoRequest(requestId, runningSubtasksIds);

        // requestTimeout is treated as the time on top of the expected sampling duration.
        // Discard the request if it takes too long. We don't send cancel
        // messages to the task managers, but only wait for the responses
        // and then ignore them.
        long expectedDuration = numSamples * delayBetweenSamples.toMillis();
        Time timeout = Time.milliseconds(expectedDuration + requestTimeout.toMillis());

        // Add the pending request before scheduling the discard task to
        // prevent races with removing it again.
        pendingRequests.put(requestId, pending);

        ThreadInfoSamplesRequest requestParams =
                new ThreadInfoSamplesRequest(
                        requestId, numSamples, delayBetweenSamples, maxStackTraceDepth);

        // Fan out the sampling RPC to all gateways; responses complete the
        // pending request's future.
        requestThreadInfo(executionsWithGateways, requestParams, timeout);

        return pending.getStatsFuture();
    }
}
/**
 * When all gateways respond successfully, the coordinator must complete the
 * stats future with the first request id (0 on a fresh coordinator) and a
 * non-empty stack trace for every sampled subtask.
 */
@Test
void testSuccessfulThreadInfoRequest() throws Exception {
    // Two task managers, both configured to answer the sampling RPC.
    Map<ImmutableSet<ExecutionAttemptID>, CompletableFuture<TaskExecutorThreadInfoGateway>>
            executionWithGateways =
                    createMockSubtaskWithGateways(
                            CompletionType.SUCCESSFULLY, CompletionType.SUCCESSFULLY);

    CompletableFuture<VertexThreadInfoStats> requestFuture =
            coordinator.triggerThreadInfoRequest(
                    executionWithGateways,
                    DEFAULT_NUMBER_OF_SAMPLES,
                    DEFAULT_DELAY_BETWEEN_SAMPLES,
                    DEFAULT_MAX_STACK_TRACE_DEPTH);

    VertexThreadInfoStats threadInfoStats = requestFuture.get();

    // verify the request result — ids start at 0 on a fresh coordinator
    assertThat(threadInfoStats.getRequestId()).isEqualTo(0);

    Map<ExecutionAttemptID, Collection<ThreadInfoSample>> samplesBySubtask =
            threadInfoStats.getSamplesBySubtask();

    // Every subtask must have produced at least one sample with a stack trace.
    for (Collection<ThreadInfoSample> result : samplesBySubtask.values()) {
        StackTraceElement[] stackTrace = result.iterator().next().getStackTrace();
        assertThat(stackTrace).isNotEmpty();
    }
}