focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Copies the readable properties of {@code source} onto a freshly created
 * instance of {@code tClass}, skipping the named properties.
 *
 * @param source           bean to read from; when {@code null} the result is {@code null}
 * @param tClass           target type, instantiated via ReflectUtil.newInstanceIfPossible
 * @param ignoreProperties property names excluded from the copy
 * @return the populated target instance, or {@code null} for a null source
 */
public static <T> T copyProperties(Object source, Class<T> tClass, String... ignoreProperties) {
    if (null == source) {
        return null;
    }
    final T result = ReflectUtil.newInstanceIfPossible(tClass);
    final CopyOptions options = CopyOptions.create().setIgnoreProperties(ignoreProperties);
    copyProperties(source, result, options);
    return result;
}
/** Verifies that copyProperties clones every property, including subclass fields. */
@Test
public void copyPropertiesTest() {
    // Populate a source bean, including a field declared on the subclass.
    final SubPerson source = new SubPerson();
    source.setAge(14);
    source.setOpenid("11213232");
    source.setName("测试A11");
    source.setSubName("sub名字");

    // Copy into a brand-new instance and verify every property survived.
    final SubPerson copy = BeanUtil.copyProperties(source, SubPerson.class);
    assertEquals(14, copy.getAge());
    assertEquals("11213232", copy.getOpenid());
    assertEquals("测试A11", copy.getName());
    assertEquals("sub名字", copy.getSubName());
}
// Logs the failure with its full stack trace instead of propagating it;
// callers rely on this handler never throwing.
@Override public void handleError(String msg, Exception e) { log.error(msg, e); }
// Smoke test: handleError must log the exception rather than rethrow it.
@Test public void testHandleErrorShouldNotThrowException() { target.handleError("msg", new Exception()); }
public String getDomain(Env env) { String metaServerAddress = getMetaServerAddress(env); // if there is more than one address, need to select one if (metaServerAddress.contains(",")) { return selectMetaServerAddress(metaServerAddress); } return metaServerAddress; }
// Verifies getDomain returns the configured address for a known environment and
// falls back to DEFAULT_META_URL for a freshly added environment with no address.
@Test public void testGetMetaDomain() { // local String localMetaServerAddress = "http://localhost:8080"; mockMetaServerAddress(Env.LOCAL, localMetaServerAddress); assertEquals(localMetaServerAddress, portalMetaDomainService.getDomain(Env.LOCAL)); // add this environment without meta server address String randomEnvironment = "randomEnvironment"; Env.addEnvironment(randomEnvironment); assertEquals(PortalMetaDomainService.DEFAULT_META_URL, portalMetaDomainService.getDomain(Env.valueOf(randomEnvironment))); }
// Counts hosts provisioned inside the sliding WINDOW ending now and throttles
// when that count exceeds the flag-configured budget; the decision is exported
// as a 0/1 gauge metric. NOTE(review): assumes nodeRepository.clock() is the
// same clock used to record provisioning history — confirm.
public boolean throttle(NodeList allNodes, Agent agent) { Instant startOfWindow = nodeRepository.clock().instant().minus(WINDOW); int provisionedRecently = allNodes.matching(host -> host.history().hasEventAfter(History.Event.Type.provisioned, startOfWindow)) .size(); boolean throttle = throttle(provisionedRecently, maxHostsPerHourFlag.value(), agent); metric.set(throttlingActiveMetric, throttle ? 1 : 0, null); return throttle; }
// Exercises the boundary of the throttling threshold: 239/240 recent
// provisionings pass, 241 is throttled (budget of 10 per hour over the window).
@Test void throttling() { Agent agent = Agent.system; assertFalse(throttle(239, 10, agent)); assertFalse(throttle(240, 10, agent)); assertTrue(throttle(241, 10, agent)); }
/**
 * Dispatches an OutOfMemoryError to the registered client handler and then the
 * server handler. Each willing handler receives — and thereby deregisters — its
 * instances. Failures inside a handler are deliberately swallowed so the other
 * handler still runs (we are already in an OOM situation).
 *
 * @param outOfMemoryError the error to dispatch; must not be null
 */
public static void onOutOfMemory(OutOfMemoryError outOfMemoryError) {
    isNotNull(outOfMemoryError, "outOfMemoryError");
    OUT_OF_MEMORY_ERROR_COUNT.incrementAndGet();

    // Clients first, then servers — preserves the original dispatch order.
    dispatch(clientHandler, outOfMemoryError, true);
    dispatch(handler, outOfMemoryError, false);
}

/**
 * Invokes {@code h} for the error if it is set and willing; the registered
 * instances are only removed when the handler will actually run.
 */
private static void dispatch(OutOfMemoryHandler h, OutOfMemoryError error, boolean clients) {
    if (h == null || !h.shouldHandle(error)) {
        return;
    }
    try {
        HazelcastInstance[] instances = clients ? removeRegisteredClients() : removeRegisteredServers();
        h.onOutOfMemory(error, instances);
    } catch (Throwable ignored) {
        // Best-effort: never let a handler failure escape during OOM handling.
        ignore(ignored);
    }
}
// Verifies the server handler receives the registered instances and that the
// dispatcher deregisters them after the callback.
@Test public void onOutOfMemory() { OutOfMemoryError oome = new OutOfMemoryError(); OutOfMemoryHandler handler = mock(OutOfMemoryHandler.class); when(handler.shouldHandle(oome)).thenReturn(Boolean.TRUE); HazelcastInstance hz1 = mock(HazelcastInstance.class); OutOfMemoryErrorDispatcher.registerServer(hz1); OutOfMemoryErrorDispatcher.setServerHandler(handler); HazelcastInstance[] registeredInstances = OutOfMemoryErrorDispatcher.current(); OutOfMemoryErrorDispatcher.onOutOfMemory(oome); //make sure the handler is called verify(handler).onOutOfMemory(oome, registeredInstances); //make sure that the registered instances are removed. assertArrayEquals(new HazelcastInstance[]{}, OutOfMemoryErrorDispatcher.current()); }
// Long-poll endpoint: returns immediately with the latest notification when the
// client's notificationId is stale; otherwise parks a DeferredResult keyed by
// every watched release-message key until a change or TIMEOUT occurs. The
// entity manager is closed by hand because Spring would otherwise hold the DB
// connection for the whole (long) async request.
@GetMapping public DeferredResult<ResponseEntity<ApolloConfigNotification>> pollNotification( @RequestParam(value = "appId") String appId, @RequestParam(value = "cluster") String cluster, @RequestParam(value = "namespace", defaultValue = ConfigConsts.NAMESPACE_APPLICATION) String namespace, @RequestParam(value = "dataCenter", required = false) String dataCenter, @RequestParam(value = "notificationId", defaultValue = "-1") long notificationId, @RequestParam(value = "ip", required = false) String clientIp) { //strip out .properties suffix namespace = namespaceUtil.filterNamespaceName(namespace); Set<String> watchedKeys = watchKeysUtil.assembleAllWatchKeys(appId, cluster, namespace, dataCenter); DeferredResult<ResponseEntity<ApolloConfigNotification>> deferredResult = new DeferredResult<>(TIMEOUT, NOT_MODIFIED_RESPONSE); //check whether client is out-dated ReleaseMessage latest = releaseMessageService.findLatestReleaseMessageForMessages(watchedKeys); /** * Manually close the entity manager. 
* Since for async request, Spring won't do so until the request is finished, * which is unacceptable since we are doing long polling - means the db connection would be hold * for a very long time */ entityManagerUtil.closeEntityManager(); if (latest != null && latest.getId() != notificationId) { deferredResult.setResult(new ResponseEntity<>( new ApolloConfigNotification(namespace, latest.getId()), HttpStatus.OK)); } else { //register all keys for (String key : watchedKeys) { this.deferredResults.put(key, deferredResult); } deferredResult .onTimeout(() -> logWatchedKeys(watchedKeys, "Apollo.LongPoll.TimeOutKeys")); deferredResult.onCompletion(() -> { //unregister all keys for (String key : watchedKeys) { deferredResults.remove(key, deferredResult); } logWatchedKeys(watchedKeys, "Apollo.LongPoll.CompletedKeys"); }); logWatchedKeys(watchedKeys, "Apollo.LongPoll.RegisteredKeys"); logger.debug("Listening {} from appId: {}, cluster: {}, namespace: {}, datacenter: {}", watchedKeys, appId, cluster, namespace, dataCenter); } return deferredResult; }
// Verifies that a namespace with a non-.properties suffix is watched as-is and
// that the DeferredResult is registered under every assembled watch key.
@Test public void testPollNotificationWithSomeNamespaceAsFile() throws Exception { String namespace = "someNamespace.xml"; when(namespaceUtil.filterNamespaceName(namespace)).thenReturn(namespace); String someWatchKey = "someKey"; Set<String> watchKeys = Sets.newHashSet(someWatchKey); when(watchKeysUtil .assembleAllWatchKeys(someAppId, someCluster, namespace, someDataCenter)) .thenReturn( watchKeys); DeferredResult<ResponseEntity<ApolloConfigNotification>> deferredResult = controller .pollNotification(someAppId, someCluster, namespace, someDataCenter, someNotificationId, someClientIp); assertEquals(watchKeys.size(), deferredResults.size()); for (String watchKey : watchKeys) { assertTrue(deferredResults.get(watchKey).contains(deferredResult)); } }
/**
 * Runs every known Rule's pattern against the log text and collects a Result
 * for each rule that matches. (The method name keeps its historical spelling
 * for API stability.)
 */
public static Set<Result> anaylze(String log) {
    Set<Result> matches = new HashSet<>();
    for (Rule candidate : Rule.values()) {
        Matcher probe = candidate.pattern.matcher(log);
        if (!probe.find()) {
            continue;
        }
        matches.add(new Result(candidate, log, probe));
    }
    return matches;
}
// Verifies the FABRIC_WARNINGS rule extracts the full multi-line "reason" group
// from a real crash log; whitespace is stripped so formatting differences don't fail it.
@Test public void fabricWarnings1() throws IOException { CrashReportAnalyzer.Result result = findResultByRule( CrashReportAnalyzer.anaylze(loadLog("/logs/fabric_warnings2.txt")), CrashReportAnalyzer.Rule.FABRIC_WARNINGS); assertEquals(("net.fabricmc.loader.impl.FormattedException: Mod resolution encountered an incompatible mod set!\n" + "A potential solution has been determined:\n" + "\t - Install roughlyenoughitems, version 6.0.2 or later.\n" + "Unmet dependency listing:\n" + "\t - Mod 'Roughly Searchable' (roughlysearchable) 2.2.1+1.17.1 requires version 6.0.2 or later of roughlyenoughitems, which is missing!\n" + "\tat net.fabricmc.loader.impl.FabricLoaderImpl.load(FabricLoaderImpl.java:190) ~").replaceAll("\\s+", ""), result.getMatcher().group("reason").replaceAll("\\s+", "")); }
@VisibleForTesting void validateMobileUnique(Long id, String mobile) { if (StrUtil.isBlank(mobile)) { return; } AdminUserDO user = userMapper.selectByMobile(mobile); if (user == null) { return; } // 如果 id 为空,说明不用比较是否为相同 id 的用户 if (id == null) { throw exception(USER_MOBILE_EXISTS); } if (!user.getId().equals(id)) { throw exception(USER_MOBILE_EXISTS); } }
// Create-path conflict: when another user already owns the mobile and id is
// null, validation must raise USER_MOBILE_EXISTS.
@Test public void testValidateMobileUnique_mobileExistsForCreate() { // 准备参数 String mobile = randomString(); // mock 数据 userMapper.insert(randomAdminUserDO(o -> o.setMobile(mobile))); // 调用,校验异常 assertServiceException(() -> userService.validateMobileUnique(null, mobile), USER_MOBILE_EXISTS); }
public Properties translate(Properties source) { Properties hikariProperties = new Properties(); // Iterate over source Properties and translate from HZ to Hikari source.forEach((key, value) -> { String keyString = (String) key; if (PROPERTY_MAP.containsKey(keyString)) { hikariProperties.put(keyString, value); } else if (keyString.startsWith(HIKARI_PREFIX)) { String keyNoPrefix = keyString.substring(HIKARI_PREFIX.length()); hikariProperties.put(keyNoPrefix, source.get(keyString)); } else { hikariProperties.put("dataSource." + keyString, value); } }); int cnt = poolCounter.getAndIncrement(); hikariProperties.put("poolName", "HikariPool-" + cnt + "-" + name); return hikariProperties; }
// Round-trips every known pool property through the translator and asserts
// HikariConfig parses each translated value back to the original number/URL.
@Test public void testTranslatableProperties() { Properties hzProperties = new Properties(); String jdbcUrl = "jdbcUrl"; String connectionTimeout = "5000"; String idleTimeout = "6000"; String keepAliveTime = "7000"; String maxLifetime = "8000"; String minimumIdle = "8500"; String maximumPoolSize = "10"; hzProperties.setProperty(DataConnectionProperties.JDBC_URL, jdbcUrl); hzProperties.setProperty(DataConnectionProperties.CONNECTION_TIMEOUT, connectionTimeout); hzProperties.setProperty(DataConnectionProperties.IDLE_TIMEOUT, idleTimeout); hzProperties.setProperty(DataConnectionProperties.KEEP_ALIVE_TIME, keepAliveTime); hzProperties.setProperty(DataConnectionProperties.MAX_LIFETIME, maxLifetime); hzProperties.setProperty(DataConnectionProperties.MINIMUM_IDLE, minimumIdle); hzProperties.setProperty(DataConnectionProperties.MAXIMUM_POOL_SIZE, maximumPoolSize); Properties hikariProperties = hikariTranslator.translate(hzProperties); HikariConfig hikariConfig = new HikariConfig(hikariProperties); assertThat(hikariConfig.getJdbcUrl()).isEqualTo(jdbcUrl); assertThat(hikariConfig.getConnectionTimeout()).isEqualTo(Long.parseLong(connectionTimeout)); assertThat(hikariConfig.getIdleTimeout()).isEqualTo(Long.parseLong(idleTimeout)); assertThat(hikariConfig.getKeepaliveTime()).isEqualTo(Long.parseLong(keepAliveTime)); assertThat(hikariConfig.getMaxLifetime()).isEqualTo(Long.parseLong(maxLifetime)); assertThat(hikariConfig.getMinimumIdle()).isEqualTo(Long.parseLong(minimumIdle)); assertThat(hikariConfig.getMaximumPoolSize()).isEqualTo(Long.parseLong(maximumPoolSize)); }
/**
 * Parses the textual schema DSL into a HollowSchema.
 *
 * @param schema schema source text
 * @throws IOException if tokenizing the schema text fails
 */
public static HollowSchema parseSchema(String schema) throws IOException {
    final StreamTokenizer tokens = new StreamTokenizer(new StringReader(schema));
    configureTokenizer(tokens);
    return parseSchema(tokens);
}
// Parses a schema with comments, primitive, string, and reference fields, then
// re-parses schema.toString() to confirm the textual form round-trips.
@Test public void parsesObjectSchema() throws IOException { String objectSchema = "/* This is a comment\n" + " consisting of multiple lines */\n" + " TypeA {\n" + " int a1;\n" + " \tstring a2; //This is a comment\n" + " String a3;\n" + "}\n"; HollowObjectSchema schema = (HollowObjectSchema) HollowSchemaParser.parseSchema(objectSchema); Assert.assertEquals("TypeA", schema.getName()); Assert.assertEquals(3, schema.numFields()); Assert.assertEquals(FieldType.INT, schema.getFieldType(0)); Assert.assertEquals("a1", schema.getFieldName(0)); Assert.assertEquals(FieldType.STRING, schema.getFieldType(1)); Assert.assertEquals("a2", schema.getFieldName(1)); Assert.assertEquals(FieldType.REFERENCE, schema.getFieldType(2)); Assert.assertEquals("String", schema.getReferencedType(2)); Assert.assertEquals("a3", schema.getFieldName(2)); // HollowObjectSchame.toString is parsed properly Assert.assertEquals(schema, HollowSchemaParser.parseSchema(schema.toString())); }
static Tuple<String, Class> splitClassAndName(String classLabel) { String[] stringNameSplit = classLabel.split("@"); // If i don't have a @, then no name is provided, use the class as the name. if (stringNameSplit.length == 1) { try { return new Tuple<>(classLabel, Class.forName(classLabel)); } catch (ClassNotFoundException e) { throw new RuntimeException("Configured class: " + classLabel + " has not been found"); } } else if (stringNameSplit.length > 1) { // Found a @, use that as the name, and try { return new Tuple<>(stringNameSplit[1], Class.forName(stringNameSplit[0])); } catch (ClassNotFoundException e) { throw new RuntimeException("Configured class: " + stringNameSplit[0] + " has not been found. Declared label was: " + stringNameSplit[1]); } } throw new RuntimeException("Invalid format provided for class label: " + classLabel); }
// Verifies the "class@label" form: label after '@', class resolved from the prefix.
@Test public void validClassNameWithAt_split_returnsCorrect() throws Exception { Tuple<String, Class> sample1 = Handler.splitClassAndName("com.networknt.handler.sample.SampleHttpHandler1@Hello"); Assert.assertEquals("Hello", sample1.first); Assert.assertEquals(Class.forName("com.networknt.handler.sample.SampleHttpHandler1"), sample1.second); }
public static String getHttpMethod(Exchange exchange, Endpoint endpoint) { // 1. Use method provided in header. Object method = exchange.getIn().getHeader(Exchange.HTTP_METHOD); if (method instanceof String) { return (String) method; } else if (method instanceof Enum) { return ((Enum<?>) method).name(); } else if (method != null) { return exchange.getContext().getTypeConverter().tryConvertTo(String.class, exchange, method); } // 2. GET if query string is provided in header. if (exchange.getIn().getHeader(Exchange.HTTP_QUERY) != null) { return GET_METHOD; } // 3. GET if endpoint is configured with a query string. if (endpoint.getEndpointUri().indexOf('?') != -1) { return GET_METHOD; } // 4. POST if there is data to send (body is not null). if (exchange.getIn().getBody() != null) { return POST_METHOD; } // 5. GET otherwise. return GET_METHOD; }
// Header takes precedence: a String HTTP_METHOD header is returned as-is,
// even with a null endpoint (which is never touched on this path).
@Test public void testGetMethodFromMethodHeader() { Exchange exchange = Mockito.mock(Exchange.class); Message message = Mockito.mock(Message.class); Mockito.when(exchange.getIn()).thenReturn(message); Mockito.when(message.getHeader(Exchange.HTTP_METHOD)).thenReturn("PUT"); assertEquals("PUT", AbstractHttpSpanDecorator.getHttpMethod(exchange, null)); }
// Forces an immediate controller-side disconnect of the device (the 'true'
// flag requests a hard disconnect).
@Override public void triggerDisconnect(DeviceId deviceId) { log.debug("Forcing disconnect for device {}", deviceId); controller.disconnectDevice(deviceId, true); }
// End-to-end: ports appear after DEVICE_ADDED and are cleared again after a
// forced disconnect; a direct executor makes the async discovery synchronous.
@Test public void testDiscoverPortsAfterDeviceAdded() { provider.connectionExecutor = MoreExecutors.newDirectExecutorService(); prepareMocks(PORT_COUNT); deviceService.listener.event(new DeviceEvent(DeviceEvent.Type.DEVICE_ADDED, netconfDevice)); assertEquals("Ports should be added", PORT_COUNT, providerService.ports.get(netconfDevice.id()).size()); provider.triggerDisconnect(netconfDevice.id()); assertEquals("Ports should be removed", 0, providerService.ports.get(netconfDevice.id()).size()); }
/**
 * Compares an expected digest string against a computed one using a
 * constant-time comparison. Missing sides short-circuit to
 * NOT_PROVIDED / NOT_COMPUTED rather than FAIL.
 */
private static VerificationResult verifyChecksums(String expectedDigest, String actualDigest, boolean caseSensitive) {
    if (expectedDigest == null) {
        return VerificationResult.NOT_PROVIDED;
    }
    if (actualDigest == null) {
        return VerificationResult.NOT_COMPUTED;
    }
    // Normalize case up front when the comparison is case-insensitive.
    String expected = caseSensitive ? expectedDigest : expectedDigest.toLowerCase();
    String actual = caseSensitive ? actualDigest : actualDigest.toLowerCase();
    // MessageDigest.isEqual is constant-time, avoiding timing side channels.
    boolean match = MessageDigest.isEqual(
            expected.getBytes(StandardCharsets.US_ASCII),
            actual.getBytes(StandardCharsets.US_ASCII));
    return match ? VerificationResult.PASS : VerificationResult.FAIL;
}
// A corrupted SHA-1 must produce an IOException whose message names both the
// expected and the actual digest values.
@Test public void sha1Mismatch() { final IOException ex = assertThrows(IOException.class, () -> UpdateCenter.verifyChecksums( new MockDownloadJob(EMPTY_SHA1.replace('k', 'f'), null, null), buildEntryWithExpectedChecksums(EMPTY_SHA1, null, null), new File("example"))); assertTrue(ex.getMessage().contains("does not match expected SHA-1, expected '2jmj7l5rSw0yVb/vlWAYkK/YBwk=', actual '2jmj7l5rSw0yVb/vlWAYfK/YBwf='")); }
/**
 * Renders the template against {@code o} and returns the result as a String.
 * The buffer is pre-sized to the template length as a cheap capacity hint.
 */
public String render(Object o) {
    final StringBuilder out = new StringBuilder(template.length());
    render(o, out);
    return out.toString();
}
// Verifies the {{#notLast}} section emits its separator for every list element
// except the final one, producing "1,2,3".
@Test public void supportsFilteringTheNotLastListValue() { Template template = new Template( "Hello {{#getValues}}{{toString}}{{#notLast}},{{/notLast}}{{/getValues}} "); assertEquals("Hello 1,2,3 ", template.render(foo)); }
/**
 * Extracts the group name from an LDAP search result and records it,
 * optionally capturing the group's distinguished name as well.
 *
 * @throws NamingException when the result lacks the configured name attribute
 */
void getGroupNames(SearchResult groupResult, Collection<String> groups,
    Collection<String> groupDNs, boolean doGetDNs) throws NamingException {
    Attribute nameAttribute = groupResult.getAttributes().get(groupNameAttr);
    if (nameAttribute == null) {
        throw new NamingException("The group object does not have "
            + "attribute '" + groupNameAttr + "'.");
    }
    groups.add(nameAttribute.get().toString());
    if (doGetDNs) {
        groupDNs.add(groupResult.getNameInNamespace());
    }
}
// Resilience test: the first LDAP search throws CommunicationException (dead
// connection); after reconnect the groups must still be retrieved, costing one
// extra search call (1 failure + 2 successful lookups).
@Test public void testGetGroupsWithConnectionClosed() throws NamingException { // The case mocks connection is closed/gc-ed, so the first search call throws CommunicationException, // then after reconnected return the user NamingEnumeration first, and then the group when(getContext().search(anyString(), anyString(), any(Object[].class), any(SearchControls.class))) .thenThrow(new CommunicationException("Connection is closed")) .thenReturn(getUserNames(), getGroupNames()); // Although connection is down but after reconnected // it still should retrieve the result groups // 1 is the first failure call doTestGetGroups(Arrays.asList(getTestGroups()), 1 + 2); }
// Selection results carry no group-by keys, so any group-key lookup here is a
// caller bug — fail fast with an AssertionError.
@Override public String getGroupKeyString(int rowIndex, int groupKeyColumnIndex) { throw new AssertionError("No group key string for selection results"); }
// Confirms the selection result set rejects group-key access with AssertionError.
@Test(expectedExceptions = AssertionError.class) public void testGetGroupKeyString() { // Run the test _selectionResultSetUnderTest.getGroupKeyString(0, 0); }
// Converts a persisted MeasureDto into a domain Measure, dispatching on the
// metric's value type. A null dto maps to Optional.empty(); an unrecognized
// value type is a programming error and throws IllegalArgumentException.
public Optional<Measure> toMeasure(@Nullable MeasureDto measureDto, Metric metric) { requireNonNull(metric); if (measureDto == null) { return Optional.empty(); } Double value = measureDto.getValue(); String data = measureDto.getData(); switch (metric.getType().getValueType()) { case INT: return toIntegerMeasure(measureDto, value, data); case LONG: return toLongMeasure(measureDto, value, data); case DOUBLE: return toDoubleMeasure(measureDto, value, data); case BOOLEAN: return toBooleanMeasure(measureDto, value, data); case STRING: return toStringMeasure(measureDto, data); case LEVEL: return toLevelMeasure(measureDto, data); case NO_VALUE: return toNoValueMeasure(measureDto); default: throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType()); } }
// A LEVEL measure with alert status/text must surface both the level value and
// a populated quality-gate status on the converted Measure.
@Test public void toMeasure_for_LEVEL_Metric_can_have_an_qualityGateStatus() { MeasureDto measureDto = new MeasureDto().setData(Level.OK.name()).setAlertStatus(Level.ERROR.name()).setAlertText(SOME_ALERT_TEXT); Optional<Measure> measure = underTest.toMeasure(measureDto, SOME_LEVEL_METRIC); assertThat(measure).isPresent(); assertThat(measure.get().getValueType()).isEqualTo(Measure.ValueType.LEVEL); assertThat(measure.get().getLevelValue()).isEqualTo(Level.OK); assertThat(measure.get().getQualityGateStatus().getStatus()).isEqualTo(Level.ERROR); assertThat(measure.get().getQualityGateStatus().getText()).isEqualTo(SOME_ALERT_TEXT); }
/**
 * Returns a copy of {@code schema} without the named fields. The order of the
 * remaining fields is preserved; names not present in the schema are ignored.
 *
 * @param schema source schema (not modified)
 * @param fields field names to drop
 */
static Schema removeFields(Schema schema, String... fields) {
    // A hash set makes each per-field membership test O(1) instead of the
    // original List.contains O(k) scan. Fully qualified to avoid touching the
    // file's import block.
    java.util.Set<String> excluded = new java.util.HashSet<>(Arrays.asList(fields));
    Schema.Builder builder = Schema.builder();
    for (Field field : schema.getFields()) {
        if (!excluded.contains(field.getName())) {
            builder.addField(field);
        }
    }
    return builder.build();
}
// Removing every field must yield an empty schema.
@Test public void testRemoveFields() { Schema input = Schema.of(STRING_FIELD, BOOLEAN_FIELD, INT64_FIELD); Schema expected = Schema.builder().build(); Schema actual = removeFields(input, STRING_FIELD.getName(), BOOLEAN_FIELD.getName(), INT64_FIELD.getName()); assertEquals(expected, actual); }
// Deserializes a Kafka record into Connect schema+value. Null payloads are
// tombstones (SchemaAndValue.NULL). With schemas.enable the JSON must be a
// two-field {"schema","payload"} envelope; without it, a synthetic envelope
// with a null schema is fabricated so the downstream conversion path is uniform.
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; // This handles a tombstone message if (value == null) { return SchemaAndValue.NULL; } try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (config.schemasEnabled() && (!jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME) || !jsonValue.has(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); // The deserialized data should either be an envelope object containing the schema and the payload or the schema // was stripped during serialization and we need to fill in an all-encompassing schema. if (!config.schemasEnabled()) { ObjectNode envelope = JSON_NODE_FACTORY.objectNode(); envelope.set(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME, null); envelope.set(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME, jsonValue); jsonValue = envelope; } Schema schema = asConnectSchema(jsonValue.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); return new SchemaAndValue( schema, convertToConnect(schema, jsonValue.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME), config) ); }
// Verifies a logical Timestamp payload (epoch millis 4e9, built as two 2e9
// increments to stay within int math) converts to the expected UTC Date.
@Test public void timestampToConnect() { Schema schema = Timestamp.SCHEMA; GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0); calendar.setTimeZone(TimeZone.getTimeZone("UTC")); calendar.add(Calendar.MILLISECOND, 2000000000); calendar.add(Calendar.MILLISECOND, 2000000000); java.util.Date reference = calendar.getTime(); String msg = "{ \"schema\": { \"type\": \"int64\", \"name\": \"org.apache.kafka.connect.data.Timestamp\", \"version\": 1 }, \"payload\": 4000000000 }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); java.util.Date converted = (java.util.Date) schemaAndValue.value(); assertEquals(schema, schemaAndValue.schema()); assertEquals(reference, converted); }
// Factory for a criterion matching the given UDP destination port.
public static Criterion matchUdpDst(TpPort udpPort) { return new UdpPortCriterion(udpPort, Type.UDP_DST); }
// Factory must produce a UDP_DST UdpPortCriterion carrying the given port.
@Test public void testMatchUdpDstMethod() { Criterion matchUdpDst = Criteria.matchUdpDst(tpPort1); UdpPortCriterion udpPortCriterion = checkAndConvert(matchUdpDst, Criterion.Type.UDP_DST, UdpPortCriterion.class); assertThat(udpPortCriterion.udpPort(), is(equalTo(tpPort1))); }
// Simple accessor exposing the doc-string type built for this definition.
@Override public DocStringType docStringType() { return docStringType; }
// Registers two text/plain doc-string types (simple and generic return types)
// and checks the converter picks the matching one for each target Type.
@Test public void correct_conversion_is_used_for_simple_and_complex_return_types() throws NoSuchMethodException { Method simpleMethod = JavaDocStringTypeDefinitionTest.class.getMethod("converts_string_to_simple_type", String.class); JavaDocStringTypeDefinition simpleDefinition = new JavaDocStringTypeDefinition("text/plain", simpleMethod, lookup); registry.defineDocStringType(simpleDefinition.docStringType()); Method complexMethod = JavaDocStringTypeDefinitionTest.class.getMethod("converts_string_to_complex_type", String.class); JavaDocStringTypeDefinition complexDefinition = new JavaDocStringTypeDefinition("text/plain", complexMethod, lookup); registry.defineDocStringType(complexDefinition.docStringType()); Type simpleType = Map.class; assertThat(converter.convert(docString, simpleType), hasEntry("some_simple_type", Collections.emptyMap())); Type complexType = new TypeReference<Map<String, Map<String, String>>>() { }.getType(); assertThat(converter.convert(docString, complexType), hasEntry("some_complex_type", Collections.emptyMap())); }
// Covariant narrowing: this channel type always binds LocalAddress, so the
// unchecked cast of the superclass value is safe by construction.
@Override public LocalAddress localAddress() { return (LocalAddress) super.localAddress(); }
// Writing to a closed LocalChannel must fail fast with ClosedChannelException
// raised inside AbstractUnsafe (i.e. before any transport-level write attempt).
@Test public void testWriteFailsFastOnClosedChannel() throws Exception { Bootstrap cb = new Bootstrap(); ServerBootstrap sb = new ServerBootstrap(); cb.group(group1) .channel(LocalChannel.class) .handler(new TestHandler()); sb.group(group2) .channel(LocalServerChannel.class) .childHandler(new ChannelInitializer<LocalChannel>() { @Override public void initChannel(LocalChannel ch) throws Exception { ch.pipeline().addLast(new TestHandler()); } }); Channel sc = null; Channel cc = null; try { // Start server sc = sb.bind(TEST_ADDRESS).sync().channel(); // Connect to the server cc = cb.connect(sc.localAddress()).sync().channel(); // Close the channel and write something. cc.close().sync(); try { cc.writeAndFlush(new Object()).sync(); fail("must raise a ClosedChannelException"); } catch (Exception e) { assertThat(e, is(instanceOf(ClosedChannelException.class))); // Ensure that the actual write attempt on a closed channel was never made by asserting that // the ClosedChannelException has been created by AbstractUnsafe rather than transport implementations. if (e.getStackTrace().length > 0) { assertThat( e.getStackTrace()[0].getClassName(), is(AbstractChannel.class.getName() + "$AbstractUnsafe")); e.printStackTrace(); } } } finally { closeChannel(cc); closeChannel(sc); } }
/**
 * Counts aggregated config-info rows for the given dataId/group/tenant.
 * A blank tenant is normalized to the empty string before querying.
 *
 * @throws IllegalArgumentException when the count query yields no row
 */
@Override
public int aggrConfigInfoCount(String dataId, String group, String tenant) {
    final String normalizedTenant = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
    ConfigInfoAggrMapper mapper = mapperManager.findMapper(
            dataSourceService.getDataSourceType(), TableConstant.CONFIG_INFO_AGGR);
    String countSql = mapper.count(Arrays.asList("data_id", "group_id", "tenant_id"));
    Integer count = databaseOperate.queryOne(countSql,
            new Object[] {dataId, group, normalizedTenant}, Integer.class);
    if (count == null) {
        // A count query should always return a row; treat absence as misuse.
        throw new IllegalArgumentException("aggrConfigInfoCount error");
    }
    return count;
}
// Mocks the count query and asserts the value is passed through.
// NOTE(review): `new Integer(101)` is deprecated — Integer.valueOf(101) preferred.
@Test void testAggrConfigInfoCount() { String dataId = "dataId11122"; String group = "group"; String tenant = "tenant"; //mock select count of aggr. Mockito.when(databaseOperate.queryOne(anyString(), eq(new Object[] {dataId, group, tenant}), eq(Integer.class))) .thenReturn(new Integer(101)); int result = embededConfigInfoAggrPersistService.aggrConfigInfoCount(dataId, group, tenant); assertEquals(101, result); }
// Convenience overload: fence with no extra arguments (delegates with null args).
public boolean fence(HAServiceTarget fromSvc) { return fence(fromSvc, null); }
// A shell fencer configured with an always-true command must report success.
@Test public void testShortNameShell() throws BadFencingConfigurationException { NodeFencer fencer = setupFencer(getFencerTrueCommand()); assertTrue(fencer.fence(MOCK_TARGET)); }
// REST endpoint: deletes the model with the given id, replying 204 No Content
// (404 is produced by getModelFromRequest when the model does not exist).
@ApiOperation(value = "Delete a model", tags = { "Models" }, code = 204) @ApiResponses(value = { @ApiResponse(code = 204, message = "Indicates the model was found and has been deleted. Response-body is intentionally empty."), @ApiResponse(code = 404, message = "Indicates the requested model was not found.") }) @DeleteMapping("/repository/models/{modelId}") @ResponseStatus(HttpStatus.NO_CONTENT) public void deleteModel(@ApiParam(name = "modelId") @PathVariable String modelId) { Model model = getModelFromRequest(modelId); repositoryService.deleteModel(model.getId()); }
// Creates a model, deletes it through the REST endpoint (expects 204), then
// verifies it is gone; the finally block cleans up if the delete didn't happen.
@Test public void testDeleteModel() throws Exception { Model model = null; try { Calendar now = Calendar.getInstance(); now.set(Calendar.MILLISECOND, 0); processEngineConfiguration.getClock().setCurrentTime(now.getTime()); model = repositoryService.newModel(); model.setCategory("Model category"); model.setKey("Model key"); model.setMetaInfo("Model metainfo"); model.setName("Model name"); model.setVersion(2); repositoryService.saveModel(model); HttpDelete httpDelete = new HttpDelete(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_MODEL, model.getId())); closeResponse(executeRequest(httpDelete, HttpStatus.SC_NO_CONTENT)); // Check if the model is really gone assertThat(repositoryService.createModelQuery().modelId(model.getId()).singleResult()).isNull(); model = null; } finally { if (model != null) { try { repositoryService.deleteModel(model.getId()); } catch (Throwable ignore) { // Ignore, model might not be created } } } }
/**
 * Creates a RocksDB-backed session store supplier.
 *
 * @param name            store name; must not be null
 * @param retentionPeriod how long sessions are retained; must be non-negative
 *                        and expressible in whole milliseconds
 * @throws IllegalArgumentException if retention is negative or not millisecond-precise
 */
public static SessionBytesStoreSupplier persistentSessionStore(final String name,
                                                               final Duration retentionPeriod) {
    Objects.requireNonNull(name, "name cannot be null");
    final String errorPrefix = prepareMillisCheckFailMsgPrefix(retentionPeriod, "retentionPeriod");
    final long retentionMs = validateMillisecondDuration(retentionPeriod, errorPrefix);
    if (retentionMs < 0) {
        throw new IllegalArgumentException("retentionPeriod cannot be negative");
    }
    return new RocksDbSessionBytesStoreSupplier(name, retentionMs);
}
// Factory must yield a RocksDB-backed session store instance.
@Test public void shouldCreateRocksDbSessionStore() { assertThat(Stores.persistentSessionStore("store", ofMillis(1)).get(), instanceOf(RocksDBSessionStore.class)); }
// Pre-poll interception: looks up the wrapped consumer by hashCode; if a custom
// handler exists it takes over, otherwise a set config-changed flag triggers
// (un)subscription of prohibited topics before poll, letting poll itself drive
// the rebalance — Kafka forbids touching a consumer from another thread.
@Override public ExecuteContext before(ExecuteContext context) { KafkaConsumerWrapper kafkaConsumerWrapper = KafkaConsumerController.getKafkaConsumerCache() .get(context.getObject().hashCode()); if (kafkaConsumerWrapper == null) { return context; } if (handler != null) { handler.doBefore(context); } else { // Kafka does not allow multiple threads to operate consumers simultaneously, so it is not possible to // directly disable consumption when a new configuration is detected through dynamic configuration // monitoring. Considering that only when poll is truly called will rebalancing be triggered, a suitable // approach is to dynamically configure the thread to update the flag bit, check whether it is necessary // to handle the prohibition of consumption before poll, perform subscription addition and subtraction of // topics, and then trigger rebalancing by poll. if (kafkaConsumerWrapper.getIsConfigChanged().get()) { KafkaConsumerController.disableConsumption(kafkaConsumerWrapper, ProhibitionConfigManager.getKafkaProhibitionTopics()); kafkaConsumerWrapper.getIsConfigChanged().set(false); } } return context; }
// After before() runs, the config-changed flag must be cleared.
@Test public void testBefore() { ExecuteContext context = ExecuteContext.forMemberMethod(mockConsumer, null, null, null, null); interceptor.before(context); Assert.assertFalse(kafkaConsumerWrapper.getIsConfigChanged().get()); }
// Extracts a registry ErrorCodes value from the exception's JSON body. Only a
// single-error response with a recognized code is translated; any parse
// failure, unknown code, or multi-error body rethrows the original exception
// so the caller keeps full context.
public static ErrorCodes getErrorCode(ResponseException responseException) throws ResponseException { // Obtain the error response code. String errorContent = responseException.getContent(); if (errorContent == null) { throw responseException; } try { ErrorResponseTemplate errorResponse = JsonTemplateMapper.readJson(errorContent, ErrorResponseTemplate.class); List<ErrorEntryTemplate> errors = errorResponse.getErrors(); // There may be multiple error objects if (errors.size() == 1) { String errorCodeString = errors.get(0).getCode(); // May not get an error code back. if (errorCodeString != null) { // throws IllegalArgumentException if unknown error code return ErrorCodes.valueOf(errorCodeString); } } } catch (IOException | IllegalArgumentException ex) { // Parse exception: either isn't an error object or unknown error code } // rethrow the original exception throw responseException; }
// A single-error body with a known code must map to the matching enum constant.
@Test public void testGetErrorCode_knownErrorCode() throws ResponseException { Mockito.when(responseException.getContent()) .thenReturn( "{\"errors\":[{\"code\":\"MANIFEST_INVALID\",\"message\":\"manifest invalid\",\"detail\":{}}]}"); Assert.assertSame( ErrorCodes.MANIFEST_INVALID, ErrorResponseUtil.getErrorCode(responseException)); }
@Override public MeterStoreResult storeMeterFeatures(MeterFeatures meterfeatures) { // Store meter features, this is done once for each features of every device MeterStoreResult result = MeterStoreResult.success(); MeterTableKey key = MeterTableKey.key(meterfeatures.deviceId(), meterfeatures.scope()); try { metersFeatures.put(key, meterfeatures); } catch (StorageException e) { log.error("{} thrown a storage exception: {}", e.getStackTrace()[0].getMethodName(), e.getMessage(), e); result = MeterStoreResult.fail(TIMEOUT); } return result; }
// After initializing the store, the per-device max-meter counts stored with
// the features must be retrievable by their table keys.
@Test public void testStoreMeterFeatures() { initMeterStore(false); assertThat(meterStore.getMaxMeters(MeterTableKey.key(did1, MeterScope.globalScope())), is(3L)); assertThat(meterStore.getMaxMeters(MeterTableKey.key(did2, MeterScope.globalScope())), is(10L)); }
/**
 * Encodes the string as UTF-8 and wraps the bytes in a heap ByteBuffer.
 * The returned buffer is positioned at 0 with limit == encoded byte length.
 *
 * @param s string to encode; must not be null
 * @return a buffer wrapping the UTF-8 bytes of {@code s}
 */
public static ByteBuffer toByteBuffer(String s) {
    // Removed the commented-out allocateDirect variant: dead code, and it was
    // also buggy — it sized the buffer by char count rather than by the UTF-8
    // encoded byte count, which truncates multi-byte characters.
    return ByteBuffer.wrap(s.getBytes(UTF_8));
}
// Round-trip: a multi-byte UTF-8 string must survive encode-to-buffer and
// decode-from-array unchanged.
@Test public void testToByteBuffer2() { String s = "obufscate thdé alphebat and yolo!!"; ByteBuffer bb = NioUtils.toByteBuffer(s); String n = new String(bb.array(), StandardCharsets.UTF_8); Assert.assertEquals(s, n); }
// Resolves the output Schema for the sink from the configured type class and schema settings:
// no type class -> raw BYTES; Void return type -> null (nothing to write); a GenericRecord
// type forces AUTO_CONSUME regardless of the configured schema type (with a log notice);
// otherwise the configured schema type or serde class is used to look up the topic schema.
@SuppressWarnings("unchecked") @VisibleForTesting Schema<T> initializeSchema() throws ClassNotFoundException { if (StringUtils.isEmpty(this.pulsarSinkConfig.getTypeClassName())) { return (Schema<T>) Schema.BYTES; } Class<?> typeArg = Reflections.loadClass(this.pulsarSinkConfig.getTypeClassName(), functionClassLoader); if (Void.class.equals(typeArg)) { // return type is 'void', so there's no schema to check return null; } ConsumerConfig consumerConfig = new ConsumerConfig(); consumerConfig.setSchemaProperties(pulsarSinkConfig.getSchemaProperties()); if (!StringUtils.isEmpty(pulsarSinkConfig.getSchemaType())) { if (GenericRecord.class.isAssignableFrom(typeArg)) { consumerConfig.setSchemaType(SchemaType.AUTO_CONSUME.toString()); SchemaType configuredSchemaType = SchemaType.valueOf(pulsarSinkConfig.getSchemaType()); if (SchemaType.AUTO_CONSUME != configuredSchemaType) { log.info("The configured schema type {} is not able to write GenericRecords." + " So overwrite the schema type to be {}", configuredSchemaType, SchemaType.AUTO_CONSUME); } } else { consumerConfig.setSchemaType(pulsarSinkConfig.getSchemaType()); } return (Schema<T>) topicSchema.getSchema(pulsarSinkConfig.getTopic(), typeArg, consumerConfig, false); } else { consumerConfig.setSchemaType(pulsarSinkConfig.getSerdeClassName()); return (Schema<T>) topicSchema.getSchema(pulsarSinkConfig.getTopic(), typeArg, consumerConfig, false, functionClassLoader); } }
// Verifies that schema initialization succeeds with the default serde when the
// sink's type class is String (no explicit schema type / serde configured).
@Test public void testDefaultSerDe() throws PulsarClientException { PulsarSinkConfig pulsarConfig = getPulsarConfigs(); // set type to String (the default-serde path, not the Void/no-schema path) pulsarConfig.setTypeClassName(String.class.getName()); PulsarSink pulsarSink = new PulsarSink(getPulsarClient(), pulsarConfig, new HashMap<>(), mock(ComponentStatsManager.class), Thread.currentThread().getContextClassLoader(), producerCache); try { pulsarSink.initializeSchema(); } catch (Exception ex) { ex.printStackTrace(); fail(); } }
/**
 * Returns the secrets of all currently enabled access keys cached for the given appId.
 * Yields an empty (immutable) list when no access keys are cached for the app.
 *
 * @param appId the application id to look up
 * @return the enabled secrets, possibly empty, never null
 */
public List<String> getAvailableSecrets(String appId) {
    final List<AccessKey> cachedKeys = accessKeyCache.get(appId);
    if (CollectionUtils.isEmpty(cachedKeys)) {
        return Collections.emptyList();
    }
    final List<String> enabledSecrets = cachedKeys.stream()
        .filter(accessKey -> accessKey.isEnabled())
        .map(accessKey -> accessKey.getSecret())
        .collect(Collectors.toList());
    return enabledSecrets;
}
// End-to-end cache behaviour test: walks the access-key lifecycle (disabled -> enabled ->
// partially disabled -> deleted -> re-added) by advancing the mocked repository's
// "last modified" scan results, and asserts the visible enabled secrets after each step,
// including case-insensitive appId lookups.
@Test public void testGetAvailableSecrets() throws Exception { String appId = "someAppId"; AccessKey firstAccessKey = assembleAccessKey(1L, appId, "secret-1", false, false, 1577808000000L); AccessKey secondAccessKey = assembleAccessKey(2L, appId, "secret-2", false, false, 1577808001000L); AccessKey thirdAccessKey = assembleAccessKey(3L, appId, "secret-3", true, false, 1577808005000L); // Initialize accessKeyServiceWithCache.afterPropertiesSet(); assertThat(accessKeyServiceWithCache.getAvailableSecrets(appId)).isEmpty(); // Add access key, disable by default when(accessKeyRepository.findFirst500ByDataChangeLastModifiedTimeGreaterThanOrderByDataChangeLastModifiedTimeAsc(new Date(0L))) .thenReturn(Lists.newArrayList(firstAccessKey, secondAccessKey)); when(accessKeyRepository.findAllById(anyList())) .thenReturn(Lists.newArrayList(firstAccessKey, secondAccessKey)); await().untilAsserted(() -> assertThat(accessKeyServiceWithCache.getAvailableSecrets(appId)).isEmpty()); // Update access key, enable both of them firstAccessKey = assembleAccessKey(1L, appId, "secret-1", true, false, 1577808002000L); secondAccessKey = assembleAccessKey(2L, appId, "secret-2", true, false, 1577808003000L); when(accessKeyRepository.findFirst500ByDataChangeLastModifiedTimeGreaterThanOrderByDataChangeLastModifiedTimeAsc(new Date(1577808001000L))) .thenReturn(Lists.newArrayList(firstAccessKey, secondAccessKey)); when(accessKeyRepository.findAllById(anyList())) .thenReturn(Lists.newArrayList(firstAccessKey, secondAccessKey)); await().untilAsserted(() -> assertThat(accessKeyServiceWithCache.getAvailableSecrets(appId)) .containsExactly("secret-1", "secret-2")); // should also work with appid in different case assertThat(accessKeyServiceWithCache.getAvailableSecrets(appId.toUpperCase())) .containsExactly("secret-1", "secret-2"); assertThat(accessKeyServiceWithCache.getAvailableSecrets(appId.toLowerCase())) .containsExactly("secret-1", "secret-2"); // Update access key, disable the first one 
firstAccessKey = assembleAccessKey(1L, appId, "secret-1", false, false, 1577808004000L); when(accessKeyRepository.findFirst500ByDataChangeLastModifiedTimeGreaterThanOrderByDataChangeLastModifiedTimeAsc(new Date(1577808003000L))) .thenReturn(Lists.newArrayList(firstAccessKey)); when(accessKeyRepository.findAllById(anyList())) .thenReturn(Lists.newArrayList(firstAccessKey, secondAccessKey)); await().untilAsserted(() -> assertThat(accessKeyServiceWithCache.getAvailableSecrets(appId)) .containsExactly("secret-2")); // Delete access key, delete the second one when(accessKeyRepository.findAllById(anyList())) .thenReturn(Lists.newArrayList(firstAccessKey)); await().untilAsserted( () -> assertThat(accessKeyServiceWithCache.getAvailableSecrets(appId)).isEmpty()); // Add new access key in runtime, enable by default when(accessKeyRepository.findFirst500ByDataChangeLastModifiedTimeGreaterThanOrderByDataChangeLastModifiedTimeAsc(new Date(1577808004000L))) .thenReturn(Lists.newArrayList(thirdAccessKey)); when(accessKeyRepository.findAllById(anyList())) .thenReturn(Lists.newArrayList(firstAccessKey, thirdAccessKey)); await().untilAsserted(() -> assertThat(accessKeyServiceWithCache.getAvailableSecrets(appId)) .containsExactly("secret-3")); reachabilityFence(accessKeyServiceWithCache); }
// Loads the product for the request path into the model (binding disabled so clients
// cannot overwrite it); an empty client result becomes a NoSuchElementException whose
// message is an i18n key resolved by the error view.
@ModelAttribute(name = "product", binding = false) public Mono<Product> loadProduct(@PathVariable("productId") int id) { return this.productsClient.findProduct(id) .switchIfEmpty(Mono.defer( () -> Mono.error(new NoSuchElementException("customer.products.error.not_found")) )); }
// Happy path: when the products client emits a product, loadProduct relays it unchanged
// and only the products client is consulted.
@Test void loadProduct_ProductExists_ReturnsNotEmptyMono() { // given var product = new Product(1, "Товар №1", "Описание товара №1"); doReturn(Mono.just(product)).when(this.productsClient).findProduct(1); // when StepVerifier.create(this.controller.loadProduct(1)) // then .expectNext(new Product(1, "Товар №1", "Описание товара №1")) .verifyComplete(); verify(this.productsClient).findProduct(1); verifyNoMoreInteractions(this.productsClient); verifyNoInteractions(this.favouriteProductsClient, this.productReviewsClient); }
// Convenience overload: builds a KStream from a single topic by delegating to the
// collection-based overload with a singleton set. Kept as Collections.singleton (not
// Set.of) so a null topic fails in the delegate's validation rather than here.
public synchronized <K, V> KStream<K, V> stream(final String topic) { return stream(Collections.singleton(topic)); }
// Registering two streams over overlapping-but-unequal topic sets must be rejected
// at topology build time with a TopologyException.
@Test public void shouldNotAllowReadingFromOverlappingAndUnequalCollectionOfTopics() { builder.stream(Collections.singletonList("topic")); builder.stream(asList("topic", "anotherTopic")); assertThrows(TopologyException.class, builder::build); }
// Recursive search under workdir. When searching from directly under the account root,
// only world-readable or user-writable objects are scanned (the deeper scan of the
// "special" system folders is intentionally disabled below until incremental results are
// supported); otherwise all objects under workdir are listed, filtered by regex, and
// reported to the listener.
@Override public AttributedList<Path> search(final Path workdir, final Filter<Path> regex, final ListProgressListener listener) throws BackgroundException { final AttributedList<Path> list = new AttributedList<>(); // avoid searching the "special" folders if users search from the account root if(workdir.getParent().isRoot()) { final Predicate<MantaObject> fastSearchPredicate = o -> session.isWorldReadable(o) || session.isUserWritable(o); final List<Path> homeFolderPaths = findObjectsAsPaths(workdir, fastSearchPredicate); cleanResults(homeFolderPaths, regex); addPaths(list, workdir, listener, homeFolderPaths); /* // disable search of system directories until we can provide incremental results // slowSearchPredicate will prevent us from looking at ~~/public and ~~/stor twice final Predicate<MantaObject> slowSearchPredicate = fastSearchPredicate.negate(); final List<Path> systemFolderObjects = findObjectsAsPaths(workdir, slowSearchPredicate.and(regexPredicate)); cleanResults(systemFolderObjects, regex); addPaths(list, workdir, listener, systemFolderObjects); */ } else { final List<Path> foundPaths = findObjectsAsPaths(workdir, null); cleanResults(foundPaths, regex); addPaths(list, workdir, listener, foundPaths); } return list; }
// Searching inside a freshly created empty directory must yield an empty result list.
@Test public void testSearchFileNotFound() throws Exception { Assume.assumeTrue(session.getClient().existsAndIsAccessible(testPathPrefix.getAbsolute())); final String emptyDirectoryName = new AlphanumericRandomStringService().random(); final Path emptyDirectory = new Path(testPathPrefix, emptyDirectoryName, EnumSet.of(AbstractPath.Type.directory)); new MantaDirectoryFeature(session).mkdir(emptyDirectory, null); final MantaSearchFeature s = new MantaSearchFeature(session); final AttributedList<Path> search = s.search(emptyDirectory, new NullFilter<>(), new DisabledListProgressListener()); assertTrue(search.isEmpty()); }
// Thin delegate to the repository lookup; returns null when no user exists for the
// given username (the repository's contract, passed through unchanged).
@Override public UserInfo getByUsername(String username) { return userInfoRepository.getByUsername(username); }
// A repository miss (null) must be propagated as-is to the service caller.
@Test() public void loadByUsername_nullUser() { Mockito.when(userInfoRepository.getByUsername(adminUsername)).thenReturn(null); UserInfo user = service.getByUsername(adminUsername); assertNull(user); }
// Escapes every reserved character (and the escape character itself) in the input as
// the escape char followed by a two-digit uppercase hex code; other characters are
// copied through. Returns null for a null input.
public String encode(String s) { if (s == null) { return null; } StringBuilder sb = new StringBuilder(); int len = s.length(); for (int i = 0; i < len; i++) { char c = s.charAt(i); if (c == _encodingChar || _reservedChars.contains(c)) { sb.append(_encodingChar); int asciiVal = (int) c; if (asciiVal < 16) // single hex digit: left-pad with '0' so escapes are always two digits { sb.append('0'); } sb.append(Integer.toHexString(asciiVal).toUpperCase()); } else { sb.append(c); } } return sb.toString(); }
// Each reserved character ('.', '[', ']', '~') is replaced by '~' + its two-digit
// uppercase hex code; ordinary letters are untouched.
@Test public void testMultiCharEncode() { Assert.assertEquals(TEST_ENCODING_1.encode("a.b.c[d]~"), "a~2Eb~2Ec~5Bd~5D~7E"); }
// Stops every CoreExtensionBridge registered in the platform container, logging and
// timing each plugin shutdown via a Profiler.
@Override public void stop() { List<CoreExtensionBridge> bridges = platformContainer.getComponentsByType(CoreExtensionBridge.class); for (CoreExtensionBridge bridge : bridges) { Profiler profiler = Profiler.create(LOGGER).startInfo(format("Stopping %s", bridge.getPluginName())); bridge.stopPlugin(); profiler.stopInfo(); } }
// When no bridge is registered in the container, stop() must not interact with the
// (mocked) bridge at all.
@Test public void stop_does_not_call_stopPlugin_if_Bridge_does_not_exist_in_container() { componentContainer.startComponents(); underTest.stop(); verifyNoMoreInteractions(bridge); }
// Counts each handled request when recording is enabled. On long overflow (the
// incremented value wraps to <= 0) the counter is reset to 0 and the reset is logged;
// every request logs the running total with a timestamp.
@Override public void beforeRequest() { if (!isEnable()) { return; } final long allRequest = allRequestCount.incrementAndGet(); if (allRequest <= 0) { allRequestCount.set(0); LOGGER.info("SimpleRequestRecorder has over the max num of long, it has been reset to 0!"); } LOGGER.info(String.format(Locale.ENGLISH, "currentTime: %s request count handle by plugin is: %s", HttpConstants.currentTime(), allRequest)); }
// Exercises the counter increment, the overflow-to-zero reset (by pre-setting
// Long.MAX_VALUE), and the enabled-logging branch of beforeRequest().
@Test public void beforeRequest() { final SimpleRequestRecorder simpleRequestRecorder = new SimpleRequestRecorder(); simpleRequestRecorder.beforeRequest(); final Optional<Object> allRequestCount = ReflectUtils.getFieldValue(simpleRequestRecorder, "allRequestCount"); Assert.assertTrue(allRequestCount.isPresent() && allRequestCount.get() instanceof AtomicLong); Assert.assertTrue(((AtomicLong) allRequestCount.get()).get() > 0); // Set the maximum so that the overflow is set to 0 ((AtomicLong) allRequestCount.get()).set(Long.MAX_VALUE); simpleRequestRecorder.beforeRequest(); Assert.assertEquals(((AtomicLong) allRequestCount.get()).get(), 0); // Print the logs discoveryPluginConfig.setEnableRequestCount(true); final SimpleRequestRecorder simpleRequestRecorder1 = new SimpleRequestRecorder(); simpleRequestRecorder1.beforeRequest(); }
/**
 * Returns the subscription path held by the (possibly absent) supplier, or null when
 * no subscription has been configured.
 */
public @Nullable SubscriptionPath getSubscription() {
    if (subscription == null) {
        return null;
    }
    return subscription.get();
}
// With no subscription configured, getSubscription() stays null (even on repeated
// calls), but splitting the source creates one shared subscription: all splits are
// equal and carry a non-null subscription path.
@Test public void noSubscriptionSplitGeneratesSubscription() throws Exception { TopicPath topicPath = PubsubClient.topicPathFromName("my_project", "my_topic"); factory = PubsubTestClient.createFactoryForCreateSubscription(); PubsubUnboundedSource source = new PubsubUnboundedSource( factory, StaticValueProvider.of(PubsubClient.projectPathFromId("my_project")), StaticValueProvider.of(topicPath), null /* subscription */, null /* timestampLabel */, null /* idLabel */, false /* needsAttributes */); assertThat(source.getSubscription(), nullValue()); assertThat(source.getSubscription(), nullValue()); PipelineOptions options = PipelineOptionsFactory.create(); List<PubsubSource> splits = new PubsubSource(source).split(3, options); // We have at least one returned split assertThat(splits, hasSize(greaterThan(0))); for (PubsubSource split : splits) { // Each split is equal assertThat(split, equalTo(splits.get(0))); } assertThat(splits.get(0).subscriptionPath, not(nullValue())); }
// Asynchronously renames oldFile to newFile across every supported backend (local file,
// root, SMB, SFTP, FTP, Dropbox/Box/OneDrive/GDrive cloud, OTG, SAF document files),
// reporting the outcome through errorCallBack and rescanning media on completion.
// A case-only rename on local storage is performed via a two-step rename through a
// temporary name (localDoubleRename) to survive case-insensitive filesystems, with a
// rollback to the original name if the second step fails.
// NOTE(review): behavior on each remote branch depends on the respective client
// libraries; left byte-identical — too order-sensitive to restructure safely.
public static void rename( @NonNull final HybridFile oldFile, @NonNull final HybridFile newFile, final boolean rootMode, @NonNull final Context context, @NonNull final ErrorCallBack errorCallBack) { new AsyncTask<Void, Void, Void>() { private final DataUtils dataUtils = DataUtils.getInstance(); /** * Determines whether double rename is required based on original and new file name regardless * of the case-sensitivity of the filesystem */ private final boolean isCaseSensitiveRename = oldFile.getSimpleName().equalsIgnoreCase(newFile.getSimpleName()) && !oldFile.getSimpleName().equals(newFile.getSimpleName()); /** * random string that is appended to file to prevent name collision, max file name is 255 * bytes */ private static final String TEMP_FILE_EXT = "u0CtHRqWUnvxIaeBQ@nY2umVm9MDyR1P"; private boolean localRename(@NonNull HybridFile oldFile, @NonNull HybridFile newFile) { File file = new File(oldFile.getPath()); File file1 = new File(newFile.getPath()); boolean result = false; switch (oldFile.getMode()) { case FILE: int mode = checkFolder(file.getParentFile(), context); if (mode == 1 || mode == 0) { try { RenameOperation.renameFolder(file, file1, context); } catch (ShellNotRunningException e) { LOG.warn("failed to rename file in local filesystem", e); } result = !file.exists() && file1.exists(); if (!result && rootMode) { try { RenameFileCommand.INSTANCE.renameFile(file.getPath(), file1.getPath()); } catch (ShellNotRunningException e) { LOG.warn("failed to rename file in local filesystem", e); } oldFile.setMode(OpenMode.ROOT); newFile.setMode(OpenMode.ROOT); result = !file.exists() && file1.exists(); } } break; case ROOT: try { result = RenameFileCommand.INSTANCE.renameFile(file.getPath(), file1.getPath()); } catch (ShellNotRunningException e) { LOG.warn("failed to rename file in root", e); } newFile.setMode(OpenMode.ROOT); break; } return result; } private boolean localDoubleRename(@NonNull HybridFile oldFile, @NonNull HybridFile newFile) { HybridFile tempFile = 
new HybridFile(oldFile.mode, oldFile.getPath().concat(TEMP_FILE_EXT)); if (localRename(oldFile, tempFile)) { if (localRename(tempFile, newFile)) { return true; } else { // attempts to rollback // changes the temporary file name back to original file name LOG.warn("reverting temporary file rename"); return localRename(tempFile, oldFile); } } return false; } private Function<DocumentFile, Void> safRenameFile = input -> { boolean result = false; try { result = input.renameTo(newFile.getName(context)); } catch (Exception e) { LOG.warn(getClass().getSimpleName(), "Failed to rename", e); } errorCallBack.done(newFile, result); return null; }; @Override protected Void doInBackground(Void... params) { // check whether file names for new file are valid or recursion occurs. // If rename is on OTG, we are skipping if (!Operations.isFileNameValid(newFile.getName(context))) { errorCallBack.invalidName(newFile); return null; } if (newFile.exists() && !isCaseSensitiveRename) { errorCallBack.exists(newFile); return null; } if (oldFile.isSmb()) { try { SmbFile smbFile = oldFile.getSmbFile(); // FIXME: smbFile1 should be created from SmbUtil too so it can be mocked SmbFile smbFile1 = new SmbFile(new URL(newFile.getPath()), smbFile.getContext()); if (newFile.exists()) { errorCallBack.exists(newFile); return null; } smbFile.renameTo(smbFile1); if (!smbFile.exists() && smbFile1.exists()) errorCallBack.done(newFile, true); } catch (SmbException | MalformedURLException e) { String errmsg = context.getString( R.string.cannot_rename_file, HybridFile.parseAndFormatUriForDisplay(oldFile.getPath()), e.getMessage()); try { ArrayList<HybridFileParcelable> failedOps = new ArrayList<>(); failedOps.add(new HybridFileParcelable(oldFile.getSmbFile())); context.sendBroadcast( new Intent(TAG_INTENT_FILTER_GENERAL) .putParcelableArrayListExtra(TAG_INTENT_FILTER_FAILED_OPS, failedOps)); } catch (SmbException exceptionThrownDuringBuildParcelable) { LOG.error( "Error creating HybridFileParcelable", 
exceptionThrownDuringBuildParcelable); } LOG.error(errmsg, e); } return null; } else if (oldFile.isSftp()) { SshClientUtils.execute( new SFtpClientTemplate<Void>(oldFile.getPath(), true) { @Override public Void execute(@NonNull SFTPClient client) { try { client.rename( NetCopyClientUtils.extractRemotePathFrom(oldFile.getPath()), NetCopyClientUtils.extractRemotePathFrom(newFile.getPath())); errorCallBack.done(newFile, true); } catch (IOException e) { String errmsg = context.getString( R.string.cannot_rename_file, HybridFile.parseAndFormatUriForDisplay(oldFile.getPath()), e.getMessage()); LOG.error(errmsg); ArrayList<HybridFileParcelable> failedOps = new ArrayList<>(); // Nobody care the size or actual permission here. Put a simple "r" and zero // here failedOps.add( new HybridFileParcelable( oldFile.getPath(), "r", oldFile.lastModified(), 0, oldFile.isDirectory(context))); context.sendBroadcast( new Intent(TAG_INTENT_FILTER_GENERAL) .putParcelableArrayListExtra(TAG_INTENT_FILTER_FAILED_OPS, failedOps)); errorCallBack.done(newFile, false); } return null; } }); } else if (oldFile.isFtp()) { NetCopyClientUtils.INSTANCE.execute( new FtpClientTemplate<Boolean>(oldFile.getPath(), false) { public Boolean executeWithFtpClient(@NonNull FTPClient ftpClient) throws IOException { boolean result = ftpClient.rename( NetCopyClientUtils.extractRemotePathFrom(oldFile.getPath()), NetCopyClientUtils.extractRemotePathFrom(newFile.getPath())); errorCallBack.done(newFile, result); return result; } }); } else if (oldFile.isDropBoxFile()) { CloudStorage cloudStorageDropbox = dataUtils.getAccount(OpenMode.DROPBOX); try { cloudStorageDropbox.move( CloudUtil.stripPath(OpenMode.DROPBOX, oldFile.getPath()), CloudUtil.stripPath(OpenMode.DROPBOX, newFile.getPath())); errorCallBack.done(newFile, true); } catch (Exception e) { LOG.warn("failed to rename file in cloud connection", e); errorCallBack.done(newFile, false); } } else if (oldFile.isBoxFile()) { CloudStorage cloudStorageBox = 
dataUtils.getAccount(OpenMode.BOX); try { cloudStorageBox.move( CloudUtil.stripPath(OpenMode.BOX, oldFile.getPath()), CloudUtil.stripPath(OpenMode.BOX, newFile.getPath())); errorCallBack.done(newFile, true); } catch (Exception e) { LOG.warn("failed to rename file in cloud connection", e); errorCallBack.done(newFile, false); } } else if (oldFile.isOneDriveFile()) { CloudStorage cloudStorageOneDrive = dataUtils.getAccount(OpenMode.ONEDRIVE); try { cloudStorageOneDrive.move( CloudUtil.stripPath(OpenMode.ONEDRIVE, oldFile.getPath()), CloudUtil.stripPath(OpenMode.ONEDRIVE, newFile.getPath())); errorCallBack.done(newFile, true); } catch (Exception e) { LOG.warn("failed to rename file in cloud connection", e); errorCallBack.done(newFile, false); } } else if (oldFile.isGoogleDriveFile()) { CloudStorage cloudStorageGdrive = dataUtils.getAccount(OpenMode.GDRIVE); try { cloudStorageGdrive.move( CloudUtil.stripPath(OpenMode.GDRIVE, oldFile.getPath()), CloudUtil.stripPath(OpenMode.GDRIVE, newFile.getPath())); errorCallBack.done(newFile, true); } catch (Exception e) { LOG.warn("failed to rename file in cloud connection", e); errorCallBack.done(newFile, false); } } else if (oldFile.isOtgFile()) { if (checkOtgNewFileExists(newFile, context)) { errorCallBack.exists(newFile); return null; } safRenameFile.apply(OTGUtil.getDocumentFile(oldFile.getPath(), context, false)); return null; } else if (oldFile.isDocumentFile()) { if (checkDocumentFileNewFileExists(newFile, context)) { errorCallBack.exists(newFile); return null; } safRenameFile.apply( OTGUtil.getDocumentFile( oldFile.getPath(), SafRootHolder.getUriRoot(), context, OpenMode.DOCUMENT_FILE, false)); return null; } else { File file = new File(oldFile.getPath()); if (oldFile.getMode() == OpenMode.FILE) { int mode = checkFolder(file.getParentFile(), context); if (mode == 2) { errorCallBack.launchSAF(oldFile, newFile); } } boolean result; if (isCaseSensitiveRename) { result = localDoubleRename(oldFile, newFile); } else { result = 
localRename(oldFile, newFile); } errorCallBack.done(newFile, result); } return null; } @Override protected void onPostExecute(Void aVoid) { super.onPostExecute(aVoid); if (newFile != null && oldFile != null) { HybridFile[] hybridFiles = {newFile, oldFile}; MediaConnectionUtils.scanFile(context, hybridFiles); } } }.executeOnExecutor(executor); }
// Creates a folder via Operations.mkdir, renames it via Operations.rename, and asserts
// the old path is gone and the new one exists; latches synchronize with the async callbacks.
@Test public void testRename() throws InterruptedException { File oldFolder = new File(storageRoot, "test1"); HybridFile oldFolderHF = new HybridFile(OpenMode.FILE, oldFolder.getAbsolutePath()); File newFolder = new File(storageRoot, "test2"); HybridFile newFolderHF = new HybridFile(OpenMode.FILE, newFolder.getAbsolutePath()); CountDownLatch waiter1 = new CountDownLatch(1); Operations.mkdir( newFolderHF, oldFolderHF, ApplicationProvider.getApplicationContext(), false, new AbstractErrorCallback() { @Override public void done(HybridFile hFile, boolean b) { waiter1.countDown(); } }); waiter1.await(); assertTrue(oldFolder.exists()); CountDownLatch waiter2 = new CountDownLatch(1); Operations.rename( oldFolderHF, newFolderHF, false, ApplicationProvider.getApplicationContext(), new AbstractErrorCallback() { @Override public void done(HybridFile hFile, boolean b) { waiter2.countDown(); } }); waiter2.await(); assertFalse(oldFolder.exists()); assertTrue(newFolder.exists()); }
// Writes the value to the given ZooKeeper path as a PERSISTENT node, creating it if
// absent or updating it in place.
@Override public void createOrUpdate(final String path, final Object data) { zkClient.createOrUpdate(path, data, CreateMode.PERSISTENT); }
// A CREATE selector-change event must persist the selector under its computed real
// path exactly once, as a PERSISTENT node.
@Test public void testOnSelectorChangedCreate() { SelectorData selectorData = SelectorData.builder().id(MOCK_ID).name(MOCK_NAME).pluginName(MOCK_PLUGIN_NAME).build(); String selectorRealPath = DefaultPathConstants.buildSelectorRealPath(selectorData.getPluginName(), selectorData.getId()); zookeeperDataChangedListener.onSelectorChanged(ImmutableList.of(selectorData), DataEventTypeEnum.CREATE); verify(zkClient, times(1)).createOrUpdate(selectorRealPath, selectorData, CreateMode.PERSISTENT); }
// Identifies which module this provider belongs to: the cluster module.
@Override public Class module() { return ClusterModule.class; }
// The provider must report ClusterModule as its owning module.
@Test public void module() { assertEquals(ClusterModule.class, provider.module()); }
// Converts a CallExpression to a RexNode by trying each conversion rule in the
// (batch-mode-aware) chain and returning the first successful result; throws when no
// rule recognizes the call.
@Override public RexNode visit(CallExpression call) { boolean isBatchMode = unwrapContext(relBuilder).isBatchMode(); for (CallExpressionConvertRule rule : getFunctionConvertChain(isBatchMode)) { Optional<RexNode> converted = rule.convert(call, newFunctionContext()); if (converted.isPresent()) { return converted.get(); } } throw new RuntimeException("Unknown call expression: " + call); }
// A 3-byte literal typed as BINARY(4) NOT NULL converts to a BINARY RexLiteral of
// precision 4 whose value is zero-padded to the declared length.
@Test void testBinaryLiteral() { RexNode rex = converter.visit(valueLiteral(new byte[] {1, 2, 3}, DataTypes.BINARY(4).notNull())); assertThat(((RexLiteral) rex).getValueAs(byte[].class)).isEqualTo(new byte[] {1, 2, 3, 0}); assertThat(rex.getType().getSqlTypeName()).isEqualTo(SqlTypeName.BINARY); assertThat(rex.getType().getPrecision()).isEqualTo(4); }
// Static factory for a Distinct transform with default settings.
public static <T> Distinct<T> create() { return new Distinct<>(); }
// Distinct over an empty input collection must produce an empty output collection.
@Test @Category(NeedsRunner.class) public void testDistinctEmpty() { List<String> strings = Arrays.asList(); PCollection<String> input = p.apply(Create.of(strings).withCoder(StringUtf8Coder.of())); PCollection<String> output = input.apply(Distinct.create()); PAssert.that(output).empty(); p.run(); }
// Feeds a job-level checkpoint failure into the failure counter (which may fail the job
// via the callback); savepoint failures are intentionally excluded from counting.
void handleJobLevelCheckpointException( CheckpointProperties checkpointProperties, CheckpointException exception, long checkpointId) { if (!checkpointProperties.isSavepoint()) { checkFailureAgainstCounter(exception, checkpointId, failureCallback::failJob); } }
// With a tolerance of 2, three counted CHECKPOINT_DECLINED failures (the
// JOB_FAILOVER_REGION failure in between is not counted) must trigger exactly one
// fail-job callback.
@Test void testContinuousFailure() { TestFailJobCallback callback = new TestFailJobCallback(); CheckpointFailureManager failureManager = new CheckpointFailureManager(2, callback); CheckpointProperties checkpointProperties = forCheckpoint(NEVER_RETAIN_AFTER_TERMINATION); failureManager.handleJobLevelCheckpointException( checkpointProperties, new CheckpointException(CheckpointFailureReason.CHECKPOINT_DECLINED), 1); failureManager.handleJobLevelCheckpointException( checkpointProperties, new CheckpointException(CheckpointFailureReason.CHECKPOINT_DECLINED), 2); // ignore this failureManager.handleJobLevelCheckpointException( checkpointProperties, new CheckpointException(CheckpointFailureReason.JOB_FAILOVER_REGION), 3); failureManager.handleJobLevelCheckpointException( checkpointProperties, new CheckpointException(CheckpointFailureReason.CHECKPOINT_DECLINED), 4); assertThat(callback.getInvokeCounter()).isOne(); }
// Static factory: builds a UriTemplate for the given template and charset with
// slash-encoding enabled (the hard-coded 'true').
public static UriTemplate create(String template, Charset charset) { return new UriTemplate(template, true, charset); }
// Templates built via create(template, charset) must percent-encode '/' inside
// expanded variable values (me/you/first -> me%2Fyou%2Ffirst).
@Test void encodeSlashes() { String template = "https://www.example.com/{path}"; UriTemplate uriTemplate = UriTemplate.create(template, Util.UTF_8); Map<String, Object> variables = new LinkedHashMap<>(); variables.put("path", "me/you/first"); String encoded = uriTemplate.expand(variables); assertThat(encoded).isEqualToIgnoringCase("https://www.example.com/me%2Fyou%2Ffirst"); }
// Runs the duplication formula over the whole component tree, starting at the root,
// using a path-aware crawler wrapped around a formula-executing visitor.
@Override public void execute(ComputationStep.Context context) { new PathAwareCrawler<>( FormulaExecutorComponentVisitor.newBuilder(metricRepository, measureRepository) .buildFor(List.of(duplicationFormula))) .visit(treeRootHolder.getRoot()); }
// Without any changeset available, the step must not produce a NEW_LINES measure.
@Test public void does_not_compute_new_lines_when_no_changeset() { underTest.execute(new TestComputationStepContext()); assertNoRawMeasures(NEW_LINES_KEY); }
// Static factory for a RowCoder bound to the given schema.
public static RowCoder of(Schema schema) { return new RowCoder(schema); }
// A row with an iterable field must survive an encode/decode round trip through
// RowCoder unchanged.
@Test public void testIterables() throws Exception { Schema schema = Schema.builder().addIterableField("f_iter", FieldType.STRING).build(); Row row = Row.withSchema(schema).addIterable(ImmutableList.of("one", "two", "three", "four")).build(); CoderProperties.coderDecodeEncodeEqual(RowCoder.of(schema), row); }
// Convenience overload: wraps the implementation in a retry proxy that applies the
// given policy, using a single-target (non-failover) proxy provider.
public static <T> Object create(Class<T> iface, T implementation, RetryPolicy retryPolicy) { return RetryProxy.create(iface, new DefaultFailoverProxyProvider<T>(iface, implementation), retryPolicy); }
// With a retry-forever-with-fixed-sleep policy, methods that fail once or even ten
// times must ultimately succeed without throwing.
@Test public void testRetryForeverWithFixedSleep() throws UnreliableException { UnreliableInterface unreliable = (UnreliableInterface) RetryProxy.create( UnreliableInterface.class, unreliableImpl, retryForeverWithFixedSleep(1, TimeUnit.MILLISECONDS)); unreliable.alwaysSucceeds(); unreliable.failsOnceThenSucceeds(); unreliable.failsTenTimesThenSucceeds(); }
// Builds an Ip4Prefix from a raw 32-bit address and a prefix length; length validation
// is delegated to the Ip4Prefix constructor.
public static Ip4Prefix valueOf(int address, int prefixLength) { return new Ip4Prefix(Ip4Address.valueOf(address), prefixLength); }
// A negative prefix length must be rejected with IllegalArgumentException (thrown by
// the valueOf call; the assignment only exists to trigger it).
@Test(expected = IllegalArgumentException.class) public void testInvalidValueOfAddressNegativePrefixLengthIPv4() { Ip4Address ipAddress; Ip4Prefix ipPrefix; ipAddress = Ip4Address.valueOf("1.2.3.4"); ipPrefix = Ip4Prefix.valueOf(ipAddress, -1); }
// Builds the local artifact directory path as <absolute base dir>/<namespace>/<cluster id>
// using the platform file separator.
static String generateJarDir(Configuration configuration) { return String.join( File.separator, new File(configuration.get(ArtifactFetchOptions.BASE_DIR)).getAbsolutePath(), configuration.get(KubernetesConfigOptions.NAMESPACE), configuration.get(KubernetesConfigOptions.CLUSTER_ID)); }
// The generated jar dir must equal base-dir/namespace/cluster-id for the configured values.
@Test public void testGenerateJarDir() { String baseDir = KubernetesApplicationClusterEntrypoint.generateJarDir(configuration); String expectedDir = String.join( File.separator, new String[] {tempDir.toString(), TEST_NAMESPACE, TEST_CLUSTER_ID}); Assertions.assertEquals(expectedDir, baseDir); }
// Tries each configured metastore address in turn (shuffled first when load balancing is
// enabled) and returns the first client that connects, optionally setting the metastore
// UGI user; if all fail, throws a TException carrying the last connection failure as cause.
@Override public HiveMetastoreClient createMetastoreClient(Optional<String> token) throws TException { List<HostAndPort> metastores = new ArrayList<>(addresses); if (metastoreLoadBalancingEnabled) { Collections.shuffle(metastores); } TException lastException = null; for (HostAndPort metastore : metastores) { try { HiveMetastoreClient client = clientFactory.create(metastore, token); if (!isNullOrEmpty(metastoreUsername)) { client.setUGI(metastoreUsername); } return client; } catch (TException e) { lastException = e; } } throw new TException("Failed connecting to Hive metastore: " + addresses, lastException); }
// When the first two metastore addresses fail (null clients), the configured fallback
// client (with a hive user) must be returned.
@Test public void testFallbackHiveMetastoreWithHiveUser() throws TException { HiveCluster cluster = createHiveCluster(CONFIG_WITH_FALLBACK_WITH_USER, asList(null, null, FALLBACK_CLIENT)); assertEquals(cluster.createMetastoreClient(Optional.empty()), FALLBACK_CLIENT); }
// Opens an Admin client against the cluster's internal replication bootstrap service,
// delegates the (possible) KRaft metadata-version update to the internal overload, and
// guarantees the Admin client is closed before completing the returned future with the
// original result.
public static Future<Void> maybeUpdateMetadataVersion( Reconciliation reconciliation, Vertx vertx, TlsPemIdentity coTlsPemIdentity, AdminClientProvider adminClientProvider, String desiredMetadataVersion, KafkaStatus status ) { String bootstrapHostname = KafkaResources.bootstrapServiceName(reconciliation.name()) + "." + reconciliation.namespace() + ".svc:" + KafkaCluster.REPLICATION_PORT; LOGGER.debugCr(reconciliation, "Creating AdminClient for Kafka cluster in namespace {}", reconciliation.namespace()); Admin kafkaAdmin = adminClientProvider.createAdminClient(bootstrapHostname, coTlsPemIdentity.pemTrustSet(), coTlsPemIdentity.pemAuthIdentity()); Promise<Void> updatePromise = Promise.promise(); maybeUpdateMetadataVersion(reconciliation, vertx, kafkaAdmin, desiredMetadataVersion, status) .onComplete(res -> { // Close the Admin client and return the original result LOGGER.debugCr(reconciliation, "Closing the Kafka Admin API connection"); kafkaAdmin.close(); updatePromise.handle(res); }); return updatePromise.future(); }
// If describeFeatures fails with an unexpected RuntimeException, the returned future
// must fail with that exception, the status must stay without a metadata version, and
// updateFeatures must never be attempted.
@Test public void testUnexpectedError(VertxTestContext context) { // Mock the Admin client Admin mockAdminClient = mock(Admin.class); // Mock describing the current metadata version @SuppressWarnings(value = "unchecked") KafkaFuture<FeatureMetadata> kf = mock(KafkaFuture.class); when(kf.whenComplete(any())).thenAnswer(i -> { KafkaFuture.BiConsumer<FeatureMetadata, Throwable> action = i.getArgument(0); action.accept(null, new RuntimeException("Test error ...")); return null; }); DescribeFeaturesResult dfr = mock(DescribeFeaturesResult.class); when(dfr.featureMetadata()).thenReturn(kf); when(mockAdminClient.describeFeatures()).thenReturn(dfr); // Mock the Admin client provider AdminClientProvider mockAdminClientProvider = mockAdminClientProvider(mockAdminClient); // Dummy KafkaStatus to check the values from KafkaStatus status = new KafkaStatus(); Checkpoint checkpoint = context.checkpoint(); KRaftMetadataManager.maybeUpdateMetadataVersion(Reconciliation.DUMMY_RECONCILIATION, vertx, DUMMY_IDENTITY, mockAdminClientProvider, "3.5", status) .onComplete(context.failing(s -> { assertThat(s, instanceOf(RuntimeException.class)); assertThat(s.getMessage(), is("Test error ...")); assertThat(status.getKafkaMetadataVersion(), is(nullValue())); verify(mockAdminClient, never()).updateFeatures(any(), any()); verify(mockAdminClient, times(1)).describeFeatures(); checkpoint.flag(); })); }
// Returns an iterator over the containers owned by the given partition.
@Override protected final Iterator<C> containerIterator(int partitionId) { return new ContainerIterator(partitionId); }
// The collector's partition iterator must expose the single registered container and
// support remove(), which deletes the container from the collector.
@Test public void testContainerIterator() { TestNamedContainerCollector collector = new TestNamedContainerCollector(nodeEngine, true, true); assertEquals(1, collector.containers.size()); int partitionId = collector.getContainerPartitionId("myContainer"); Iterator<Object> iterator = collector.containerIterator(partitionId); assertInstanceOf(AbstractNamedContainerCollector.ContainerIterator.class, iterator); assertTrue("Expected next elements in iterator", iterator.hasNext()); assertNotNull("", iterator.next()); // iterator.remove() should remove the current container iterator.remove(); assertEquals(0, collector.containers.size()); }
// Static factory for an unconfigured HBase Write transform (no Configuration, empty table id).
public static Write write() { return new Write(null /* Configuration */, ""); }
// Writing a mutation with no columns must fail the pipeline with an
// IllegalArgumentException ("No columns to insert") wrapped in a PipelineExecutionException.
@Test public void testWritingFailsBadElement() throws Exception { final String table = tmpTable.getName(); final String key = "KEY"; createTable(table); p.apply(Create.of(makeBadMutation(key))) .apply(HBaseIO.write().withConfiguration(conf).withTableId(table)); thrown.expect(Pipeline.PipelineExecutionException.class); thrown.expectCause(Matchers.instanceOf(IllegalArgumentException.class)); thrown.expectMessage("No columns to insert"); p.run().waitUntilFinish(); }
/**
 * Null-safe length of a {@link CharSequence}.
 *
 * @param cs the sequence, possibly {@code null}
 * @return {@code 0} when {@code cs} is {@code null}, otherwise {@code cs.length()}
 */
public static int length(final CharSequence cs) {
    if (cs == null) {
        return 0;
    }
    return cs.length();
}
// Verifies null-safe length(): null maps to 0, non-null delegates to
// CharSequence.length().
// Fix: dropped the unnecessary `throws Exception` — nothing in the body
// throws a checked exception.
@Test
void testLength() {
    assertThat(StringUtils.length(null), equalTo(0));
    assertThat(StringUtils.length("abc"), equalTo(3));
}
// Wraps the delegate factory's cached thread pool so submitted tasks are
// measured through the shared MetricRegistry.
@Override
public ExecutorService newCachedThreadPool(ThreadFactory threadFactory) {
    return new InstrumentedExecutorService(threadPoolFactory.newCachedThreadPool(threadFactory), metricRegistry);
}
// Verifies the cached pool is wrapped in InstrumentedExecutorService and
// that the expected meters/counter/timers are registered in order.
@Test
public void testNewCacheThreadPool() {
    final ExecutorService executorService = instrumentedThreadPoolFactory.newCachedThreadPool(threadFactory);

    assertThat(executorService, is(notNullValue()));
    assertThat(executorService, is(instanceOf(InstrumentedExecutorService.class)));

    inOrder.verify(registry, times(1)).meter(anyString());
    inOrder.verify(registry, times(1)).counter(anyString());
    inOrder.verify(registry, times(1)).meter(anyString());
    inOrder.verify(registry, times(2)).timer(anyString());
}
// Entry point for the fluent builder API.
public static Builder newBuilder() {
    return new Builder();
}
// A sampler built with no rules must construct without error.
@Test
void noRulesOk() {
    MessagingRuleSampler.newBuilder().build();
}
/**
 * Creates a mutable {@link ArrayList} containing the single given element.
 *
 * @param e   the element to add (may be {@code null})
 * @param <T> element type
 * @return a new ArrayList of initial capacity 1 holding {@code e}
 */
public static <T> ArrayList<T> ofArrayList(T e) {
    // Fix: use the diamond operator instead of a raw `new ArrayList(1)`,
    // which produced an unchecked-assignment warning.
    ArrayList<T> list = new ArrayList<>(1);
    list.add(e);
    return list;
}
// Round-trips ofArrayList() output against an equivalent immutable list.
// NOTE(review): called here with two elements, so this exercises a varargs
// overload rather than the single-element variant — confirm the signature.
@Test
public void testOfArrayList() {
    assertEquals(Collections.ofArrayList(1, 2), new ArrayList<>(ImmutableList.of(1, 2)));
}
// This adapter does not support getAsync(); always signals unavailability.
@Override
@MethodNotAvailable
public CompletionStage<V> getAsync(K key) {
    throw new MethodNotAvailableException();
}
// getAsync() is unsupported on this adapter and must throw.
@Test(expected = MethodNotAvailableException.class)
public void testGetAsync() {
    adapter.getAsync(42);
}
// Shorthand for a REST setting that matches all HEAD requests.
public static RestSettingBuilder head() {
    return all(HttpMethod.HEAD);
}
// A HEAD request for an id with no configured response must return 404.
@Test
public void should_not_head_with_unknown_id() throws Exception {
    server.resource("targets",
            head("1").response(header("ETag", "Moco"))
    );

    running(server, () -> {
        HttpResponse httpResponse = helper.headForResponse(remoteUrl("/targets/2"));
        assertThat(httpResponse.getCode(), is(404));
    });
}
/**
 * Determines the hashing algorithm of a stored password hash and validates
 * its cost/iteration parameter against the configured minimum.
 *
 * @param password the stored password hash
 * @return the detected {@link HashingAlgorithm}
 * @throws HashedPasswordException if the cost/iterations are below the
 *         minimum, or the format cannot be recognised
 */
public static HashingAlgorithm getHashingAlgorithm(String password) {
    // NOTE(review): only the "$2y" bcrypt prefix is recognised here; "$2a"
    // or "$2b" hashes would fall through — confirm this is intentional.
    if (password.startsWith("$2y")) {
        if (getBCryptCost(password) < BCRYPT_MIN_COST) {
            throw new HashedPasswordException("Minimum cost of BCrypt password must be " + BCRYPT_MIN_COST);
        }
        return HashingAlgorithm.BCRYPT;
    }
    // PBKDF2 hashes are stored as colon-separated fields
    if (password.contains(":")) {
        if (getPBKDF2Iterations(password) < PBKDF2_MIN_ITERATIONS) {
            throw new HashedPasswordException("Minimum iterations of PBKDF2 password must be " + PBKDF2_MIN_ITERATIONS);
        }
        return HashingAlgorithm.PBKDF2;
    }
    throw new HashedPasswordException("Password hashing algorithm cannot be determined");
}
// A PBKDF2 hash declaring only 100 iterations must be rejected with the
// minimum-iterations message.
@Test
public void testInvalidPasswordFormatPBKDF2() {
    // PBKDF2 password with iteration count of 100
    String password = "100:5b4240333032306164:f38d165fce8ce42f59d366139ef5d9e1ca1247f0e06e503ee1a611dd9ec40876bb5edb8409f5abe5504aab6628e70cfb3d3a18e99d70357d295002c3d0a308a0";
    assertThatThrownBy(() -> getHashingAlgorithm(password))
            .isInstanceOf(HashedPasswordException.class)
            .hasMessage("Minimum iterations of PBKDF2 password must be 1000");
}
/**
 * Parses a provider member-filter expression.
 *
 * @param providerFilter {@code null} or empty → match-all filter;
 *                       "HAS_ATTRIBUTE:&lt;name&gt;" → attribute-presence filter
 * @return the parsed filter
 * @throws IllegalArgumentException for any other non-empty expression
 */
public static Filter<Member> parseMemberFilter(String providerFilter) {
    if (providerFilter == null) {
        return AlwaysApplyFilter.newInstance();
    }
    providerFilter = providerFilter.trim();
    if (providerFilter.startsWith(HAS_ATTRIBUTE_PREFIX)) {
        // Strip the prefix and pass the remaining attribute name along
        providerFilter = providerFilter.substring(HAS_ATTRIBUTE_PREFIX.length());
        providerFilter = providerFilter.trim();
        return new MemberAttributeFilter(providerFilter);
    } else if (providerFilter.isEmpty()) {
        return AlwaysApplyFilter.newInstance();
    } else {
        throw new IllegalArgumentException("Unknown provider filter: " + providerFilter);
    }
}
// An attribute filter must reject members lacking the required attribute.
@Test
public void givenMemberAttributeFilterIsUsed_whenMemberAttributeIsNotPresent_thenFilterDoesNotMatch() {
    Filter<Member> memberFilter = MemberProviderFilterParser.parseMemberFilter("HAS_ATTRIBUTE:foo");
    // Member only has "bar", not the required "foo"
    Map<String, String> attributes = Map.of(
            "bar", "other"
    );
    Member mockMember = createMockMemberWithAttributes(attributes);
    assertFalse(memberFilter.accept(mockMember));
}
// Renders the channelz page: a fixed HTML shell around the top-channel dump.
@Override
public void captureData(PrintWriter writer) {
    writer.println("<html>");
    writer.println("<h1>Channelz</h1>");
    appendTopChannels(writer);
    writer.println("</html>");
}
// With channelzShowOnlyWindmillServiceChannels disabled, every channel —
// windmill and non-windmill alike — must appear in the rendered output.
@Test
public void testRendersAllChannels() throws UnsupportedEncodingException {
    String windmill1 = "WindmillHost1";
    String windmill2 = "WindmillHost2";
    String nonWindmill1 = "NonWindmillHost1";
    String someOtherHost1 = "SomeOtherHost2";
    // Channels are created only so they register with the channelz service
    ManagedChannel[] unusedChannels = new ManagedChannel[] {
        InProcessChannelBuilder.forName(windmill1).build(),
        InProcessChannelBuilder.forName(windmill2).build(),
        InProcessChannelBuilder.forName(nonWindmill1).build(),
        InProcessChannelBuilder.forName(someOtherHost1).build()
    };
    DataflowWorkerHarnessOptions options = PipelineOptionsFactory.create().as(DataflowWorkerHarnessOptions.class);
    FakeWindmillServer fakeWindmillServer = new FakeWindmillServer(new ErrorCollector(), s -> Optional.empty());
    fakeWindmillServer.setWindmillServiceEndpoints(
        ImmutableSet.of(HostAndPort.fromHost(windmill1), HostAndPort.fromHost(windmill2)));
    options.setChannelzShowOnlyWindmillServiceChannels(false);
    ChannelzServlet channelzServlet =
        new ChannelzServlet("/channelz", options, fakeWindmillServer::getWindmillServiceEndpoints);
    StringWriter stringWriter = new StringWriter();
    PrintWriter writer = new PrintWriter(stringWriter);
    channelzServlet.captureData(writer);
    writer.flush();
    String channelzData = stringWriter.toString();
    // Filter disabled → all four hosts must be present
    assertTrue(channelzData.contains(windmill1));
    assertTrue(channelzData.contains(windmill2));
    assertTrue(channelzData.contains(nonWindmill1));
    assertTrue(channelzData.contains(someOtherHost1));
}
// Settings key used to toggle this analyzer on/off.
@Override
protected String getAnalyzerEnabledSettingKey() {
    return Settings.KEYS.ANALYZER_FALSE_POSITIVE_ENABLED;
}
// The analyzer must report the false-positive enable key.
@Test
public void testGetAnalyzerEnabledSettingKey() {
    FalsePositiveAnalyzer instance = new FalsePositiveAnalyzer();
    String expResult = Settings.KEYS.ANALYZER_FALSE_POSITIVE_ENABLED;
    String result = instance.getAnalyzerEnabledSettingKey();
    assertEquals(expResult, result);
}
/**
 * Iterative, cycle-safe traversal of a schema graph that drives the given
 * visitor and returns its final result via {@code visitor.get()}.
 *
 * @param start   schema to start from
 * @param visitor receives terminal/non-terminal visit callbacks
 * @return the visitor's accumulated result
 */
public static <T> T visit(final Schema start, final SchemaVisitor<T> visitor) {
    // Set of Visited Schemas (identity-based, so structurally-equal but
    // distinct schema objects are tracked separately)
    IdentityHashMap<Schema, Schema> visited = new IdentityHashMap<>();
    // Stack that contains the Schemas to process and afterVisitNonTerminal
    // functions.
    // Deque<Either<Schema, Supplier<SchemaVisitorAction>>>
    // Using Either<...> has a cost we want to avoid...
    Deque<Object> dq = new ArrayDeque<>();
    dq.push(start);
    Object current;
    while ((current = dq.poll()) != null) {
        if (current instanceof Supplier) {
            // We are executing a non-terminal post visit.
            SchemaVisitor.SchemaVisitorAction action = ((Supplier<SchemaVisitor.SchemaVisitorAction>) current).get();
            switch (action) {
            case CONTINUE:
                break;
            case SKIP_SIBLINGS:
                // Drop queued sibling schemas until the next post-visit marker
                while (dq.peek() instanceof Schema) {
                    dq.remove();
                }
                break;
            case TERMINATE:
                return visitor.get();
            case SKIP_SUBTREE:
            default:
                throw new UnsupportedOperationException("Invalid action " + action);
            }
        } else {
            Schema schema = (Schema) current;
            boolean terminate;
            if (visited.containsKey(schema)) {
                // Already expanded: visit as a terminal to avoid cycles
                terminate = visitTerminal(visitor, schema, dq);
            } else {
                Schema.Type type = schema.getType();
                switch (type) {
                case ARRAY:
                    terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getElementType()));
                    visited.put(schema, schema);
                    break;
                case RECORD:
                    // Fields are pushed in reverse so they pop in declaration order
                    terminate = visitNonTerminal(visitor, schema, dq, () -> schema.getFields().stream().map(Field::schema)
                            .collect(Collectors.toCollection(ArrayDeque::new)).descendingIterator());
                    visited.put(schema, schema);
                    break;
                case UNION:
                    terminate = visitNonTerminal(visitor, schema, dq, schema.getTypes());
                    visited.put(schema, schema);
                    break;
                case MAP:
                    terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getValueType()));
                    visited.put(schema, schema);
                    break;
                default:
                    // Primitive / leaf types are terminals
                    terminate = visitTerminal(visitor, schema, dq);
                    break;
                }
            }
            if (terminate) {
                return visitor.get();
            }
        }
    }
    return visitor.get();
}
// Traversal of a record nested inside a record: visit order must be
// outer record, inner record, then the leaf int field.
@Test
public void testVisit7() {
    String s7 = "{\"type\": \"record\", \"name\": \"c1\", \"fields\": ["
        + "{\"name\": \"f1\", \"type\": {\"type\": \"record\", \"name\": \"css2\", \"fields\": "
        + "[{\"name\": \"f11\", \"type\": \"int\"}]}},"
        + "{\"name\": \"f2\", \"type\": \"long\"}"
        + "]}";
    Assert.assertEquals("c1.css2.\"int\"!!", Schemas.visit(new Schema.Parser().parse(s7), new TestVisitor()));
}
// Looks up a Compressor implementation by alias.
public static Compressor getCompressor(String alias) {
    // Factory pattern: resolution is delegated to the ExtensionLoader
    return EXTENSION_LOADER.getExtension(alias);
}
// Resolving by code must yield the registered TestCompressor.
// NOTE(review): this passes a byte code while the focal method takes a
// String alias — presumably a byte-keyed overload exists; confirm.
@Test
public void getCompressor() throws Exception {
    Compressor compressor = CompressorFactory.getCompressor((byte) 113);
    Assert.assertNotNull(compressor);
    Assert.assertEquals(compressor.getClass(), TestCompressor.class);
}
// Default constructor; properties are populated via setters.
public ConfigurationPropertiesBean() {
}
// Setter/getter round-trip: every configured value must come back unchanged.
@Test
public void testConfigurationPropertiesBean() {
    // make sure the values that go in come back out unchanged
    ConfigurationPropertiesBean bean = new ConfigurationPropertiesBean();
    String iss = "http://localhost:8080/openid-connect-server/";
    String title = "OpenID Connect Server";
    String logoUrl = "/images/logo.png";

    bean.setIssuer(iss);
    bean.setTopbarTitle(title);
    bean.setLogoImageUrl(logoUrl);
    bean.setForceHttps(true);

    assertEquals(iss, bean.getIssuer());
    assertEquals(title, bean.getTopbarTitle());
    assertEquals(logoUrl, bean.getLogoImageUrl());
    assertEquals(true, bean.isForceHttps());
}
/**
 * Parses a locale string such as "en" or "ko_KR" into a {@link Locale}.
 *
 * @param s locale string, optionally containing the language/country separator
 * @return the parsed Locale
 */
public static Locale localeFromString(String s) {
    if (!s.contains(LOBAR)) {
        return new Locale(s);
    }
    String[] items = s.split(LOBAR);
    if (items.length < 2) {
        // Fix: an input like "ko_" splits into a single element; treat it as
        // language-only instead of throwing ArrayIndexOutOfBoundsException.
        return new Locale(items[0]);
    }
    return new Locale(items[0], items[1]);
}
// "ko_KR" must parse into language "ko" and country "KR".
@Test
public void localeFromStringKoKR() {
    title("localeFromStringKoKR");
    locale = LionUtils.localeFromString("ko_KR");
    checkLanguageCountry(locale, "ko", "KR");
}
// Connection idle timeout in milliseconds, falling back to the default
// when ConnIdleEvictTimeMilliSeconds is not configured.
@Override
public int getIdleTimeout() {
    return clientConfig.getPropertyAsInteger(
            IClientConfigKey.Keys.ConnIdleEvictTimeMilliSeconds, DEFAULT_IDLE_TIMEOUT);
}
// A configured ConnIdleEvictTimeMilliSeconds must override the default.
@Test
void testGetIdleTimeoutOverride() {
    clientConfig.set(IClientConfigKey.Keys.ConnIdleEvictTimeMilliSeconds, 70000);
    assertEquals(70000, connectionPoolConfig.getIdleTimeout());
}
/**
 * Scans Redis for all route keys (ROUTE_PREFIX*) and parses each into the
 * returned userId → server mapping.
 *
 * @return routes keyed by user id
 */
@Override
public Map<Long, CIMServerResVO> loadRouteRelated() {
    Map<Long, CIMServerResVO> routes = new HashMap<>(64);

    RedisConnection connection = redisTemplate.getConnectionFactory().getConnection();
    ScanOptions options = ScanOptions.scanOptions()
            .match(ROUTE_PREFIX + "*")
            .build();
    // Fix: use try-with-resources — the original leaked the cursor if an
    // exception was thrown between scan() and the explicit close().
    try (Cursor<byte[]> scan = connection.scan(options)) {
        while (scan.hasNext()) {
            byte[] next = scan.next();
            String key = new String(next, StandardCharsets.UTF_8);
            log.info("key={}", key);
            parseServerInfo(routes, key);
        }
    }
    return routes;
}
// Smoke test: repeatedly loads the route mapping and logs each result;
// relies on externally-seeded Redis data rather than asserting values.
@Test
public void loadRouteRelated() throws Exception {
    for (int i = 0; i < 100; i++) {
        Map<Long, CIMServerResVO> longCIMServerResVOMap = accountService.loadRouteRelated();
        log.info("longCIMServerResVOMap={},cun={}", JSON.toJSONString(longCIMServerResVOMap), i);
    }
    TimeUnit.SECONDS.sleep(10);
}
/**
 * Diffs two collections into [createList, updateList, deleteList], using
 * {@code sameFunc} to decide whether an old and a new element represent the
 * same entity.
 *
 * @param oldList  existing elements
 * @param newList  incoming elements
 * @param sameFunc identity predicate: (old, new) → same entity?
 * @return a list of three lists: additions, updates, deletions
 */
public static <T> List<List<T>> diffList(Collection<T> oldList, Collection<T> newList,
                                         BiFunction<T, T, Boolean> sameFunc) {
    List<T> createList = new LinkedList<>(newList); // assume all are new at first; matched ones are removed below
    List<T> updateList = new ArrayList<>();
    List<T> deleteList = new ArrayList<>();
    // Iterate oldList as the primary side to determine updateList and deleteList
    for (T oldObj : oldList) {
        // 1. Look for a matching new element
        T foundObj = null;
        for (Iterator<T> iterator = createList.iterator(); iterator.hasNext(); ) {
            T newObj = iterator.next();
            // 1.1 Not a match: skip
            if (!sameFunc.apply(oldObj, newObj)) {
                continue;
            }
            // 1.2 Match: remove from the create list and stop searching
            iterator.remove();
            foundObj = newObj;
            break;
        }
        // 2. Matched → update list; unmatched → delete list
        if (foundObj != null) {
            updateList.add(foundObj);
        } else {
            deleteList.add(oldObj);
        }
    }
    return asList(createList, updateList, deleteList);
}
// Diffing two dog lists keyed by code: one addition, one update (id carried
// over from the old entity), one deletion.
@Test
public void testDiffList() {
    // Prepare arguments
    Collection<Dog> oldList = Arrays.asList(
            new Dog(1, "花花", "hh"),
            new Dog(2, "旺财", "wc")
    );
    Collection<Dog> newList = Arrays.asList(
            new Dog(null, "花花2", "hh"),
            new Dog(null, "小白", "xb")
    );
    BiFunction<Dog, Dog, Boolean> sameFunc = (oldObj, newObj) -> {
        boolean same = oldObj.getCode().equals(newObj.getCode());
        // When matched, copy the id so a later update can find the row
        if (same) {
            newObj.setId(oldObj.getId());
        }
        return same;
    };
    // Invoke
    List<List<Dog>> result = CollectionUtils.diffList(oldList, newList, sameFunc);
    // Assert overall shape
    assertEquals(result.size(), 3);
    // Assert create
    assertEquals(result.get(0).size(), 1);
    assertEquals(result.get(0).get(0), new Dog(null, "小白", "xb"));
    // Assert update
    assertEquals(result.get(1).size(), 1);
    assertEquals(result.get(1).get(0), new Dog(1, "花花2", "hh"));
    // Assert delete
    assertEquals(result.get(2).size(), 1);
    assertEquals(result.get(2).get(0), new Dog(2, "旺财", "wc"));
}
// SQL <> operator for varchar(x); SQL NULL propagation is handled by the
// engine through @SqlNullable.
@LiteralParameters("x")
@ScalarOperator(NOT_EQUAL)
@SqlType(StandardTypes.BOOLEAN)
@SqlNullable
public static Boolean notEqual(@SqlType("varchar(x)") Slice left, @SqlType("varchar(x)") Slice right) {
    return !left.equals(right);
}
// Exhaustive truth table for varchar <> varchar.
@Test
public void testNotEqual() {
    assertFunction("'foo' <> 'foo'", BOOLEAN, false);
    assertFunction("'foo' <> 'bar'", BOOLEAN, true);
    assertFunction("'bar' <> 'foo'", BOOLEAN, true);
    assertFunction("'bar' <> 'bar'", BOOLEAN, false);
}
/**
 * Evaluates every gate condition against the measures and aggregates an
 * overall status. On "small changesets" (when the ignore flag is on),
 * failing conditions for a whitelisted set of metrics are downgraded to OK.
 */
@Override
public EvaluatedQualityGate evaluate(QualityGate gate, Measures measures, Configuration configuration) {
    EvaluatedQualityGate.Builder result = EvaluatedQualityGate.newBuilder()
            .setQualityGate(gate);

    // Property defaults to true when unset
    boolean ignoreSmallChanges = configuration.getBoolean(CoreProperties.QUALITY_GATE_IGNORE_SMALL_CHANGES).orElse(true);
    boolean isSmallChangeset = ignoreSmallChanges && isSmallChangeset(measures);

    gate.getConditions().forEach(condition -> {
        String metricKey = condition.getMetricKey();
        EvaluatedCondition evaluation = ConditionEvaluator.evaluate(condition, measures);
        if (isSmallChangeset && evaluation.getStatus() != EvaluationStatus.OK && METRICS_TO_IGNORE_ON_SMALL_CHANGESETS.contains(metricKey)) {
            // Replace the failing evaluation with OK and record the override
            result.addEvaluatedCondition(new EvaluatedCondition(evaluation.getCondition(), EvaluationStatus.OK, evaluation.getValue().orElse(null)));
            result.setIgnoredConditionsOnSmallChangeset(true);
        } else {
            result.addEvaluatedCondition(evaluation);
        }
    });
    result.setStatus(overallStatusOf(result.getEvaluatedConditions()));
    return result.build();
}
// A gate with no conditions must evaluate to OK.
@Test
public void evaluate_is_OK_for_empty_qgate() {
    QualityGate gate = mock(QualityGate.class);
    QualityGateEvaluator.Measures measures = mock(QualityGateEvaluator.Measures.class);
    EvaluatedQualityGate evaluatedQualityGate = underTest.evaluate(gate, measures, configuration);
    assertThat(evaluatedQualityGate.getStatus()).isEqualTo(Metric.Level.OK);
}
// Returns the current key from the injected supplier; the unchecked cast
// to the caller-chosen K is part of this method's contract.
@Override
@SuppressWarnings("unchecked")
public <K> K getCurrentKey() {
    return (K) currentKeySupplier.get();
}
// getCurrentKey() must return exactly the value the supplier yields.
@Test
void testGetCurrentKey() {
    final String key = "key";
    DefaultStateManager stateManager =
            new DefaultStateManager(
                    () -> key,
                    ignore -> {},
                    new MockStreamingRuntimeContext(false, 1, 0),
                    new MockOperatorStateStore());
    assertThat((String) stateManager.getCurrentKey()).isEqualTo(key);
}
// Looks up the parallelism info for a vertex; absence indicates a
// programming error and is reported as IllegalStateException.
@Override
public VertexParallelismInformation getParallelismInfo(JobVertexID vertexId) {
    return Optional.ofNullable(vertexToParallelismInfo.get(vertexId))
            .orElseThrow(
                    () -> new IllegalStateException(
                            String.format(
                                    "No parallelism information set for vertex %s", vertexId)));
}
// Looking up an unknown vertex must fail with IllegalStateException.
// Fix: withFailMessage() only customises the assertion-failure text and
// does not verify the exception message — use hasMessageContaining().
@Test
void testNotSet() {
    DefaultVertexParallelismStore store = new DefaultVertexParallelismStore();

    assertThatThrownBy(() -> store.getParallelismInfo(new JobVertexID()))
            .isInstanceOf(IllegalStateException.class)
            .hasMessageContaining("No parallelism information set for vertex");
}
/**
 * Starts the headless KSQL server: loads UDFs, prepares the processing-log
 * topic, applies RocksDB config, executes the queries file, then starts the
 * version checker. Any failure is logged with the query file and rethrown.
 */
public void startAsync() {
    try {
        udfLoader.load();
        ProcessingLogServerUtils.maybeCreateProcessingLogTopic(
            serviceContext.getTopicClient(),
            processingLogConfig,
            ksqlConfig);
        if (processingLogConfig.getBoolean(ProcessingLogConfig.STREAM_AUTO_CREATE)) {
            // Auto-create is a server-mode feature; warn instead of failing
            log.warn("processing log auto-create is enabled, but this is not supported "
                + "for headless mode.");
        }
        rocksDBConfigSetterHandler.accept(ksqlConfig);
        processesQueryFile(readQueriesFile(queriesFile));
        showWelcomeMessage();
        final Properties properties = new Properties();
        // Properties only accepts strings; skip null config values
        ksqlConfig.originals().forEach((key, value) -> {
            if (nonNull(value)) {
                properties.put(key, value.toString());
            }
        });
        versionChecker.start(KsqlModuleType.SERVER, properties);
    } catch (final Exception e) {
        log.error("Failed to start KSQL Server with query file: " + queriesFile, e);
        throw e;
    }
}
// DROP statements are not allowed in standalone mode and must fail with the
// supported-statements listing.
@Test
public void shouldFailOnDropStatement() {
    // Given:
    givenQueryFileParsesTo(
        PreparedStatement.of("DROP Test",
            new DropStream(SOME_NAME, false, false))
    );

    // When:
    final Exception e = assertThrows(
        KsqlStatementException.class,
        () -> standaloneExecutor.startAsync()
    );

    // Then:
    // NOTE(review): "CREAETE" mirrors the text the server actually emits;
    // if the production message is ever fixed, update this expectation too.
    assertThat(e.getMessage(), containsString("Unsupported statement. "
        + "Only the following statements are supporting in standalone mode:\n"
        + "CREAETE STREAM AS SELECT\n"
        + "CREATE STREAM\n"
        + "CREATE TABLE\n"
        + "CREATE TABLE AS SELECT\n"
        + "INSERT INTO\n"
        + "REGISTER TYPE\n"
        + "SET\n"
        + "UNSET"));
}
/**
 * Legacy factory retained for backwards compatibility; delegates to the
 * full factory with no CCloud API key and a default HTTP/2-capable client.
 *
 * @deprecated use the overload that accepts CCloud credentials
 */
@Deprecated
public static KsqlRestClient create(
    final String serverAddress,
    final Map<String, ?> localProps,
    final Map<String, String> clientProps,
    final Optional<BasicCredentials> creds
) {
    return create(
        serverAddress,
        localProps,
        clientProps,
        creds,
        Optional.empty(),
        (cprops, credz, lprops) -> new KsqlClient(cprops, credz, lprops,
            new HttpClientOptions(),
            Optional.of(new HttpClientOptions().setProtocolVersion(HttpVersion.HTTP_2)))
    );
}
// Backwards-compatibility: both the deprecated 4-arg create() and the new
// 5-arg overload must construct a client without a CCloud API key.
@Test
@SuppressWarnings("deprecation")
public void shouldAllowCallingCreateWithoutNeedingACCloudApiKey() {
    // This is a backwards compatibility check
    final KsqlRestClient client =
        KsqlRestClient.create(SOME_SERVER_ADDRESS, LOCAL_PROPS, CLIENT_PROPS, Optional.empty());
    assertThat(client, is(instanceOf(KsqlRestClient.class)));

    // Also with new signature
    final KsqlRestClient ccloudClient =
        KsqlRestClient.create(SOME_SERVER_ADDRESS, LOCAL_PROPS, CLIENT_PROPS, Optional.empty(), Optional.empty());
    assertThat(ccloudClient, is(instanceOf(KsqlRestClient.class)));
}
/**
 * Parses a URI-template expression such as "{var}", "{var:regex}" or a
 * path-style "{;var}" into an Expression, or returns {@code null} for
 * nested/literal input that cannot be validated as a simple expression.
 *
 * @param value the raw template text, braces included
 * @return the parsed expression, or {@code null} for a literal
 * @throws IllegalArgumentException if the expression is empty or too long
 */
public static Expression create(final String value) {
    /* remove the start and end braces */
    final String expression = stripBraces(value);
    if (expression == null || expression.isEmpty()) {
        throw new IllegalArgumentException("an expression is required.");
    }

    /* Check if the expression is too long */
    if (expression.length() > MAX_EXPRESSION_LENGTH) {
        throw new IllegalArgumentException(
            "expression is too long. Max length: " + MAX_EXPRESSION_LENGTH);
    }

    /* create a new regular expression matcher for the expression */
    String variableName = null;
    String variablePattern = null;
    String operator = null;
    Matcher matcher = EXPRESSION_PATTERN.matcher(value);
    if (matcher.matches()) {
        /* grab the operator */
        operator = matcher.group(2).trim();

        /* we have a valid variable expression, extract the name from the first group */
        variableName = matcher.group(3).trim();
        if (variableName.contains(":")) {
            /* split on the colon and ensure the size of parts array must be 2 */
            String[] parts = variableName.split(":", 2);
            variableName = parts[0];
            variablePattern = parts[1];
        }

        /* look for nested expressions */
        if (variableName.contains("{")) {
            /* nested, literal */
            return null;
        }
    }

    /* check for an operator */
    if (PATH_STYLE_OPERATOR.equalsIgnoreCase(operator)) {
        return new PathStyleExpression(variableName, variablePattern);
    }

    /* default to simple */
    return SimpleExpression.isSimpleExpression(value)
        ? new SimpleExpression(variableName, variablePattern)
        : null; // Return null if it can't be validated as a Simple Expression -- Probably a Literal
}
// Malformed colon expressions may be rejected, but must never escape as
// ArrayIndexOutOfBoundsException; any other exception type is acceptable.
@Test
void malformedExpression() {
    String[] malformedStrings = {"{:}", "{str1:}", "{str1:{:}", "{str1:{str2:}"};
    for (String malformed : malformedStrings) {
        try {
            Expressions.create(malformed);
        } catch (Exception e) {
            // Intentionally lenient: only the exception type is checked
            assertThatObject(e).isNotInstanceOf(ArrayIndexOutOfBoundsException.class);
        }
    }
}
// Hidden constructor: instances are obtained through the static factory.
private DeviceKeyId() {
    super();
}
// The factory must reject a null id with NullPointerException.
@Test(expected = NullPointerException.class)
public void testConstructionUsingNullId() {
    DeviceKeyId.deviceKeyId(null);
}
/**
 * Validates and persists a brand-new client registration: rejects already
 * persisted clients and blacklisted redirect URIs, fills in a generated
 * client id when missing, enforces grant/scope/key consistency (including
 * HEART-mode rules), timestamps the entity, then saves and resets stats.
 *
 * @param client the new client to save (must not have an id yet)
 * @return the persisted entity
 * @throws IllegalArgumentException on any validation failure
 */
@Override
public ClientDetailsEntity saveNewClient(ClientDetailsEntity client) {
    if (client.getId() != null) { // if it's not null, it's already been saved, this is an error
        throw new IllegalArgumentException("Tried to save a new client with an existing ID: " + client.getId());
    }

    if (client.getRegisteredRedirectUri() != null) {
        for (String uri : client.getRegisteredRedirectUri()) {
            if (blacklistedSiteService.isBlacklisted(uri)) {
                throw new IllegalArgumentException("Client URI is blacklisted: " + uri);
            }
        }
    }

    // assign a random clientid if it's empty
    // NOTE: don't assign a random client secret without asking, since public clients have no secret
    if (Strings.isNullOrEmpty(client.getClientId())) {
        client = generateClientId(client);
    }

    // make sure that clients with the "refresh_token" grant type have the "offline_access" scope, and vice versa
    ensureRefreshTokenConsistency(client);

    // make sure we don't have both a JWKS and a JWKS URI
    ensureKeyConsistency(client);

    // check consistency when using HEART mode
    checkHeartMode(client);

    // timestamp this to right now
    client.setCreatedAt(new Date());

    // check the sector URI
    checkSectorIdentifierUri(client);

    ensureNoReservedScopes(client);

    ClientDetailsEntity c = clientRepository.saveClient(client);

    statsService.resetCache();

    return c;
}
// In HEART mode, a client_credentials client must not combine with
// authorization_code/implicit grants; saving such a client must fail.
@Test(expected = IllegalArgumentException.class)
public void heartMode_clientcreds_invalidGrants() {
    Mockito.when(config.isHeartMode()).thenReturn(true);

    ClientDetailsEntity client = new ClientDetailsEntity();
    Set<String> grantTypes = new LinkedHashSet<>();
    grantTypes.add("client_credentials");
    grantTypes.add("authorization_code");
    grantTypes.add("implicit");
    client.setGrantTypes(grantTypes);

    client.setTokenEndpointAuthMethod(AuthMethod.PRIVATE_KEY);
    client.setJwksUri("https://foo.bar/jwks");

    service.saveNewClient(client);
}
// Runs size() inside its own begin/commit transaction bracket.
@Override
public int size() {
    begin();
    int size = transactionalMap.size();
    commit();
    return size;
}
// size() must reflect entries written directly to the underlying map.
@Test
public void testSize() {
    map.put(23, "foo");
    map.put(42, "bar");
    assertEquals(2, adapter.size());
}
/**
 * Writes {@code content} to {@code file}, optionally appending.
 *
 * @param file    target file
 * @param content bytes to write
 * @param append  true to append, false to truncate
 * @return true on success, false when an IOException occurred; exits the
 *         JVM when the disk is full or the quota is exceeded
 */
public static boolean writeFile(File file, byte[] content, boolean append) {
    try (FileChannel fileChannel = new FileOutputStream(file, append).getChannel()) {
        ByteBuffer buffer = ByteBuffer.wrap(content);
        // Fix: FileChannel.write may perform a partial write; loop until the
        // buffer is fully drained instead of writing once.
        while (buffer.hasRemaining()) {
            fileChannel.write(buffer);
        }
        return true;
    } catch (IOException ioe) {
        if (ioe.getMessage() != null) {
            String errMsg = ioe.getMessage();
            // Disk full / quota exceeded: log and terminate the process
            if (NO_SPACE_CN.equals(errMsg) || NO_SPACE_EN.equals(errMsg) || errMsg.contains(DISK_QUOTA_CN) || errMsg
                    .contains(DISK_QUOTA_EN)) {
                LOGGER.warn("磁盘满,自杀退出");
                System.exit(0);
            }
        }
    }
    return false;
}
// Round-trip: a successful write must be readable back verbatim.
@Test
void writeFile() {
    assertTrue(DiskUtils.writeFile(testFile, "unit test".getBytes(StandardCharsets.UTF_8), false));
    assertEquals("unit test", DiskUtils.readFile(testFile));
}
// Setter with explicit null validation. Throws IllegalArgumentException
// (not NPE), matching this class's argument-validation convention.
public void setSecurityContextFactory(SecurityContextFactory securityContextFactory) {
    if (securityContextFactory == null) {
        throw new IllegalArgumentException("SecurityContextFactory argument cannot be null.");
    }
    this.securityContextFactory = securityContextFactory;
}
// A null factory must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void setNullSecurityContextFactory() {
    filter.setSecurityContextFactory(null);
}
// Returns the precomputed, sorted filter set for the given type.
@Override
public SortedSet<ZuulFilter<?, ?>> getFiltersByType(FilterType filterType) {
    return filtersByType.get(filterType);
}
// Inbound filters must come back sorted regardless of registration order.
@Test
void getFiltersByType() {
    StaticFilterLoader filterLoader = new StaticFilterLoader(
            factory, ImmutableSet.of(DummyFilter2.class, DummyFilter1.class, DummyFilter22.class));
    SortedSet<ZuulFilter<?, ?>> filters = filterLoader.getFiltersByType(FilterType.INBOUND);
    Truth.assertThat(filters).hasSize(3);
    List<ZuulFilter<?, ?>> filterList = new ArrayList<>(filters);
    Truth.assertThat(filterList.get(0)).isInstanceOf(DummyFilter1.class);
    Truth.assertThat(filterList.get(1)).isInstanceOf(DummyFilter2.class);
    Truth.assertThat(filterList.get(2)).isInstanceOf(DummyFilter22.class);
}
/**
 * Prunes scan output columns down to those actually required downstream
 * (plus predicate columns). If no materialized column remains, the cheapest
 * scalar column is added back; the "can use any column" optimization is
 * enabled only when safe (not all-partition-column tables, count(*) opt on).
 */
@Override
public List<OptExpression> transform(OptExpression input, OptimizerContext context) {
    LogicalScanOperator scanOperator = (LogicalScanOperator) input.getOp();
    ColumnRefSet requiredOutputColumns = context.getTaskContext().getRequiredColumns();

    // Keep only columns the parent needs, plus any referenced by the predicate
    Set<ColumnRefOperator> scanColumns =
            scanOperator.getColRefToColumnMetaMap().keySet().stream().filter(requiredOutputColumns::contains)
                    .collect(Collectors.toSet());
    scanColumns.addAll(Utils.extractColumnRef(scanOperator.getPredicate()));

    checkPartitionColumnType(scanOperator, scanColumns, context);

    // make sure there is at least one materialized column in new output columns.
    // if not, we have to choose one materialized column from scan operator output columns
    // with the minimal cost.
    boolean canUseAnyColumn = false;
    // if this scan operator columns are all partitions columns(like iceberg table)
    // we have to take partition columns are materialized columns and read them from files.
    // And we can not use `canUseAnyColumn` optimization either.
    boolean allPartitionColumns = scanOperator.getPartitionColumns()
            .containsAll(scanOperator.getColRefToColumnMetaMap().values().stream().map(x -> x.getName()).collect(
                    Collectors.toList()));
    if (!containsMaterializedColumn(scanOperator, scanColumns)) {
        List<ColumnRefOperator> preOutputColumns = new ArrayList<>(scanOperator.getColRefToColumnMetaMap().keySet());
        List<ColumnRefOperator> outputColumns = preOutputColumns.stream()
                .filter(column -> !column.getType().getPrimitiveType().equals(PrimitiveType.UNKNOWN_TYPE))
                .collect(Collectors.toList());

        // Pick the narrowest supported scalar column as the forced output
        int smallestIndex = -1;
        int smallestColumnLength = Integer.MAX_VALUE;
        for (int index = 0; index < outputColumns.size(); ++index) {
            // Skip partition columns unless the table has nothing else
            if (!allPartitionColumns && isPartitionColumn(scanOperator, outputColumns.get(index).getName())) {
                continue;
            }
            if (smallestIndex == -1) {
                smallestIndex = index;
            }
            Type columnType = outputColumns.get(index).getType();
            if (columnType.isScalarType() && columnType.isSupported()) {
                int columnLength = columnType.getTypeSize();
                if (columnLength < smallestColumnLength) {
                    smallestIndex = index;
                    smallestColumnLength = columnLength;
                }
            }
        }
        Preconditions.checkArgument(smallestIndex != -1);
        scanColumns.add(outputColumns.get(smallestIndex));
        canUseAnyColumn = true;
    }
    // All-partition-column tables (and disabled count(*) optimization)
    // must not use the any-column shortcut
    if (allPartitionColumns || !context.getSessionVariable().isEnableCountStarOptimization()) {
        canUseAnyColumn = false;
    }

    if (scanOperator.getOutputColumns().equals(new ArrayList<>(scanColumns))) {
        // Nothing pruned: just record the flag and keep the original node
        scanOperator.getScanOptimzeOption().setCanUseAnyColumn(canUseAnyColumn);
        return Collections.emptyList();
    } else {
        try {
            // Rebuild the concrete scan-operator subclass reflectively with
            // the pruned column map
            Class<? extends LogicalScanOperator> classType = scanOperator.getClass();
            Map<ColumnRefOperator, Column> newColumnRefMap = scanColumns.stream()
                    .collect(Collectors.toMap(identity(), scanOperator.getColRefToColumnMetaMap()::get));

            LogicalScanOperator newScanOperator = classType.getConstructor(Table.class, Map.class, Map.class,
                    long.class, ScalarOperator.class).newInstance(
                    scanOperator.getTable(),
                    newColumnRefMap,
                    scanOperator.getColumnMetaToColRefMap(),
                    scanOperator.getLimit(),
                    scanOperator.getPredicate());
            newScanOperator.getScanOptimzeOption().setCanUseAnyColumn(canUseAnyColumn);

            newScanOperator.setScanOperatorPredicates(scanOperator.getScanOperatorPredicates());
            newScanOperator.setTableVersionRange(scanOperator.getTableVersionRange());
            return Lists.newArrayList(new OptExpression(newScanOperator));
        } catch (Exception e) {
            throw new StarRocksPlannerException(e.getMessage(), ErrorType.INTERNAL_ERROR);
        }
    }
}
// When every scan column is a partition column (iceberg), the pruning rule
// must disable the can-use-any-column optimization.
@Test
public void transformIcebergWithAllPartitionColumns(@Mocked IcebergTable table,
                                                    @Mocked OptimizerContext context,
                                                    @Mocked TaskContext taskContext) {
    OptExpression scan = new OptExpression(
            new LogicalIcebergScanOperator(table, scanColumnMap, Maps.newHashMap(), -1, null,
                    TableVersionRange.empty()));
    List<TaskContext> taskContextList = new ArrayList<>();
    taskContextList.add(taskContext);

    ColumnRefSet requiredOutputColumns = new ColumnRefSet(new ArrayList<>());

    new Expectations() {
        {
            context.getTaskContext();
            minTimes = 0;
            result = taskContextList;

            taskContext.getRequiredColumns();
            minTimes = 0;
            result = requiredOutputColumns;

            // Declare all scan columns as partition columns
            table.getPartitionColumnNames();
            result = scanColumnMap.values().stream().map(x -> x.getName()).collect(Collectors.toList());
        }
    };

    List<OptExpression> list = icebergRule.transform(scan, context);
    LogicalIcebergScanOperator op = ((LogicalIcebergScanOperator) list.get(0).getOp());
    Assert.assertEquals(op.getScanOptimzeOption().getCanUseAnyColumn(), false);
}
/**
 * Ensures the given argument is not {@code null}.
 *
 * @param argument the value to validate
 * @param name     the argument name used in the error message
 * @param <T>      the argument type
 * @return {@code argument} unchanged
 * @throws NullPointerException if {@code argument} is null, with the
 *         message {@code "<name> can't be null"}
 */
public static <T> T checkNotNull(T argument, String name) {
    if (argument != null) {
        return argument;
    }
    throw new NullPointerException(name + " can't be null");
}
// NOTE(review): this invokes a single-argument checkNotNull overload, not
// the two-argument (value, name) variant shown as the focal method.
@Test(expected = NullPointerException.class)
public void test_checkNotNull1_whenNull() {
    checkNotNull(null);
}
// Deserializes the rpcExt JSON blob into a DubboParam via Gson.
protected DubboParam parserToDubboParam(final String rpcExt) {
    return GsonUtils.getInstance().fromJson(rpcExt, DubboParam.class);
}
// Every field of the JSON blob must survive deserialization unchanged.
@Test
public void parserToDubboParamTest() {
    DubboConfigCache dubboConfigCache = new DubboConfigCache();
    String dubboJsonStr = "{\"group\":\"Group\",\"version\":\"2.6.5\",\"loadbalance\":\"random\",\"retries\":\"1\",\"timeout\":\"3000\",\"url\":\"http://192.168.55.113/dubbo\",\"sent\":\"true\",\"cluster\":\"failover\"}";
    DubboParam dubboParam = dubboConfigCache.parserToDubboParam(dubboJsonStr);
    assertNotNull(dubboParam);
    assertEquals(dubboParam.getCluster(), "failover");
    assertEquals(dubboParam.getTimeout(), 3000);
    assertEquals(dubboParam.getRetries(), 1);
    assertEquals(dubboParam.getUrl(), "http://192.168.55.113/dubbo");
    assertEquals(dubboParam.getVersion(), "2.6.5");
    assertEquals(dubboParam.getGroup(), "Group");
    assertEquals(dubboParam.getLoadbalance(), "random");
    assertEquals(dubboParam.getSent(), true);
}
// Resolves a resource location (e.g. "classpath:...") to a File.
// NOTE(review): URL.getFile() keeps %-escapes (e.g. spaces become %20);
// consider Paths.get(url.toURI()) if such paths must be supported — confirm.
public static File getResourceAsFile(String resource) throws IOException {
    return new File(getResourceUrl(resource).getFile());
}
// A classpath: location must resolve to a non-null File.
@Test
void testGetResourceAsFile() throws IOException {
    File file = ResourceUtils.getResourceAsFile("classpath:resource_utils_test.properties");
    assertNotNull(file);
}
/**
 * Starts a multipart upload for the given file and returns a stream that
 * chunks written bytes through a memory-segmenting proxy. close() completes
 * the upload exactly once; I/O failures cancel the server-side upload.
 *
 * @throws InteroperabilityException when the server response lacks the
 *         upload URL or token
 */
@Override
public HttpResponseOutputStream<Node> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final CreateFileUploadResponse uploadResponse = upload.start(file, status);
    final String uploadUrl = uploadResponse.getUploadUrl();
    if(StringUtils.isBlank(uploadUrl)) {
        throw new InteroperabilityException("Missing upload URL in server response");
    }
    final String uploadToken = uploadResponse.getToken();
    if(StringUtils.isBlank(uploadToken)) {
        throw new InteroperabilityException("Missing upload token in server response");
    }
    final MultipartUploadTokenOutputStream proxy = new MultipartUploadTokenOutputStream(session, nodeid, file, status, uploadUrl);
    return new HttpResponseOutputStream<Node>(new MemorySegementingOutputStream(proxy,
            new HostPreferences(session.getHost()).getInteger("sds.upload.multipart.chunksize")),
            new SDSAttributesAdapter(session), status) {
        // Guards against completing the upload twice on double close()
        private final AtomicBoolean close = new AtomicBoolean();
        private final AtomicReference<Node> node = new AtomicReference<>();

        @Override
        public Node getStatus() {
            return node.get();
        }

        @Override
        public void close() throws IOException {
            try {
                if(close.get()) {
                    log.warn(String.format("Skip double close of stream %s", this));
                    return;
                }
                super.close();
                // Finalize the multipart upload and capture the created node
                node.set(upload.complete(file, uploadToken, status));
            }
            catch(BackgroundException e) {
                throw new IOException(e);
            }
            finally {
                close.set(true);
            }
        }

        @Override
        protected void handleIOException(final IOException e) throws IOException {
            // Cancel upload on error reply
            try {
                upload.cancel(file, uploadToken);
            }
            catch(BackgroundException f) {
                log.warn(String.format("Failure %s cancelling upload for file %s with upload token %s after failure %s", f, file, uploadToken, e));
            }
            throw e;
        }
    };
}
// Uploading with an unknown (-1) content length must still stream, report
// the real size on completion, and read back byte-identical content.
@Test
public void testWriteUnknownContentLength() throws Exception {
    final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
    final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(
            new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final byte[] content = RandomUtils.nextBytes(32769);
    final TransferStatus status = new TransferStatus();
    // Unknown length: forces chunked multipart behaviour
    status.setLength(-1L);
    status.setMime("text/plain");
    final Path test = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final SDSMultipartWriteFeature writer = new SDSMultipartWriteFeature(session, nodeid);
    final StatusOutputStream<Node> out = writer.write(test, status, new DisabledConnectionCallback());
    assertNotNull(out);
    new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
    assertNotNull(test.attributes().getVersionId());
    assertEquals(content.length, out.getStatus().getSize(), 0L);
    assertTrue(new DefaultFindFeature(session).find(test));
    final byte[] compare = new byte[content.length];
    final InputStream stream = new SDSReadFeature(session, nodeid).read(test, new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
    IOUtils.readFully(stream, compare);
    stream.close();
    assertArrayEquals(content, compare);
    new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Builds the error response for a failed StopReplica request: the error derived
 * from {@code e} is set both at the top level and on every partition named in
 * the request, so the controller sees a uniform failure.
 *
 * @param throttleTimeMs ignored; StopReplica responses carry no throttle time
 * @param e              the failure to translate into a protocol error code
 */
@Override
public StopReplicaResponse getErrorResponse(int throttleTimeMs, Throwable e) {
    final Errors topLevelError = Errors.forException(e);
    // Echo every requested partition back with the same error code.
    final List<StopReplicaPartitionError> partitionErrors = new ArrayList<>();
    topicStates().forEach(topicState ->
        topicState.partitionStates().forEach(partitionState ->
            partitionErrors.add(new StopReplicaPartitionError()
                .setTopicName(topicState.topicName())
                .setPartitionIndex(partitionState.partitionIndex())
                .setErrorCode(topLevelError.code()))));
    final StopReplicaResponseData responseData = new StopReplicaResponseData();
    responseData.setErrorCode(topLevelError.code());
    responseData.setPartitionErrors(partitionErrors);
    return new StopReplicaResponse(responseData);
}
/**
 * Verifies that getErrorResponse() reports the exception-derived error code for
 * every partition in the request, across all STOP_REPLICA protocol versions.
 */
@Test
public void testGetErrorResponse() {
    List<StopReplicaTopicState> topicStates = topicStates(true);
    // Expected: one error entry per requested partition, all carrying
    // CLUSTER_AUTHORIZATION_FAILED.
    Set<StopReplicaPartitionError> expectedPartitions = new HashSet<>();
    for (StopReplicaTopicState topicState : topicStates) {
        for (StopReplicaPartitionState partitionState: topicState.partitionStates()) {
            expectedPartitions.add(new StopReplicaPartitionError()
                .setTopicName(topicState.topicName())
                .setPartitionIndex(partitionState.partitionIndex())
                .setErrorCode(Errors.CLUSTER_AUTHORIZATION_FAILED.code()));
        }
    }
    // The contract must hold for every supported wire version.
    for (short version : STOP_REPLICA.allVersions()) {
        StopReplicaRequest.Builder builder = new StopReplicaRequest.Builder(version, 0, 0, 0L, false, topicStates);
        StopReplicaRequest request = builder.build();
        StopReplicaResponse response = request.getErrorResponse(0, new ClusterAuthorizationException("Not authorized"));
        assertEquals(Errors.CLUSTER_AUTHORIZATION_FAILED, response.error());
        // Compare as sets: partition error ordering is not part of the contract.
        assertEquals(expectedPartitions, new HashSet<>(response.partitionErrors()));
    }
}
/**
 * Analyzes profiled thread snapshots for the given segment queries and builds
 * the resulting profile stack trees.
 *
 * @param queries segment/time-range queries selecting which snapshots to analyze
 * @return analyzation holding the stack trees, or only a tip message when no
 *         data is found; a tip is also set when the snapshot count exceeds the
 *         configured limit (analysis then covers the first snapshots only)
 * @throws IOException on storage query failure while resolving sequence ranges
 */
public ProfileAnalyzation analyze(final List<SegmentProfileAnalyzeQuery> queries) throws IOException {
    ProfileAnalyzation analyzation = new ProfileAnalyzation();

    // query sequence range list
    SequenceSearch sequenceSearch = getAllSequenceRange(queries);
    if (sequenceSearch == null) {
        analyzation.setTip("Data not found");
        return analyzation;
    }
    if (sequenceSearch.getTotalSequenceCount() > analyzeSnapshotMaxSize) {
        analyzation.setTip("Out of snapshot analyze limit, " + sequenceSearch.getTotalSequenceCount() + " snapshots found, but analysis first " + analyzeSnapshotMaxSize + " snapshots only.");
    }

    // query snapshots
    // Ranges are fetched in parallel; a failed range is logged and skipped
    // (best effort) rather than failing the whole analysis. distinct() drops
    // duplicate stacks so each unique call stack is counted once per snapshot set.
    List<ProfileStack> stacks = sequenceSearch.getRanges().parallelStream().map(r -> {
        try {
            return getProfileThreadSnapshotQueryDAO().queryRecords(r.getSegmentId(), r.getMinSequence(), r.getMaxSequence());
        } catch (IOException e) {
            LOGGER.warn(e.getMessage(), e);
            return Collections.<ProfileThreadSnapshotRecord>emptyList();
        }
    }).flatMap(Collection::stream).map(ProfileStack::deserialize).distinct().collect(Collectors.toList());

    // analyze
    final List<ProfileStackTree> trees = analyzeByStack(stacks);
    if (trees != null) {
        analyzation.getTrees().addAll(trees);
    }

    return analyzation;
}
/**
 * Runs every analyze scenario declared in thread-snapshot.yml and asserts the
 * expected stack trees, bounded by MAX_ANALYZE_COUNT.
 */
@Test
public void testAnalyze() throws IOException {
    final ProfileStackAnalyzeHolder scenarios = loadYaml("thread-snapshot.yml", ProfileStackAnalyzeHolder.class);
    // Each YAML entry carries both the input snapshots and the expected result.
    for (final ProfileStackAnalyze scenario : scenarios.getList()) {
        scenario.analyzeAndAssert(MAX_ANALYZE_COUNT);
    }
}
/**
 * Logs events that reached the event bus without any subscriber handling them.
 * Guava wraps such events in a {@link DeadEvent} and delivers them here.
 *
 * @param event the dead event wrapping the original unhandled event
 */
@Subscribe
public void handleDeadEvent(DeadEvent event) {
    // Guard with isDebugEnabled(): the original eagerly evaluated
    // getCanonicalName() and getSource().toString() for every dead event even
    // when DEBUG logging was disabled. Passing the source object directly also
    // lets SLF4J render it lazily and null-safely instead of an explicit
    // toString() call.
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("Received unhandled event of type <{}> from event bus <{}>",
                event.getEvent().getClass().getCanonicalName(), event.getSource());
        LOGGER.debug("Dead event contents: {}", event.getEvent());
    }
}
/**
 * Verifies the listener receives a DeadEvent when an event is posted that no
 * other subscriber handles.
 */
@Test
public void testEventListenerWithEventBus() {
    final EventBus eventBus = new EventBus("test");
    final SimpleEvent event = new SimpleEvent("test");
    // Spy the real listener so delivery can be verified without changing behavior.
    final DeadEventLoggingListener listener = spy(new DeadEventLoggingListener());
    eventBus.register(listener);

    // Nothing subscribes to SimpleEvent, so the bus wraps it in a DeadEvent
    // which must be delivered to the listener exactly once.
    eventBus.post(event);

    verify(listener, times(1)).handleDeadEvent(any(DeadEvent.class));
}
/**
 * Deserializes a DescribeAclsRequest from its wire representation.
 *
 * @param buffer  buffer positioned at the start of the serialized request body
 * @param version protocol version the buffer was encoded with
 * @return the parsed request
 */
public static DescribeAclsRequest parse(ByteBuffer buffer, short version) {
    final ByteBufferAccessor accessor = new ByteBufferAccessor(buffer);
    final DescribeAclsRequestData data = new DescribeAclsRequestData(accessor, version);
    return new DescribeAclsRequest(data, version);
}
/**
 * Round-trip check: serializing a literal-filter request at version 1 and
 * parsing it back must yield an equal request.
 */
@Test
public void shouldRoundTripLiteralV1() {
    final DescribeAclsRequest request = new DescribeAclsRequest.Builder(LITERAL_FILTER).build(V1);
    final DescribeAclsRequest deserialized = DescribeAclsRequest.parse(request.serialize(), V1);
    assertRequestEquals(request, deserialized);
}
/**
 * Returns resource group runtime info for the node this service runs on,
 * served through the internal cache.
 *
 * @throws ResourceManagerInconsistentException if the cache cannot produce a
 *         consistent view from the resource manager
 */
@Override
public List<ResourceGroupRuntimeInfo> getResourceGroupInfo() throws ResourceManagerInconsistentException {
    // The cache is keyed by node; look up using the current node directly.
    return cache.apply(internalNodeManager.getCurrentNode());
}
/**
 * Verifies that resource group info is served from the cache: the first call
 * hits the resource manager client, and a later call (after the periodic
 * refresh) hits it again with the updated data.
 */
@Test
public void testGetResourceGroupInfo() throws Exception {
    TestingResourceManagerClient resourceManagerClient = new TestingResourceManagerClient();
    InMemoryNodeManager nodeManager = new InMemoryNodeManager();
    ResourceManagerConfig resourceManagerConfig = new ResourceManagerConfig();
    ResourceManagerResourceGroupService service = new ResourceManagerResourceGroupService((addressSelectionContext, headers) -> resourceManagerClient, resourceManagerConfig, nodeManager);
    // Initially the client reports no resource groups.
    List<ResourceGroupRuntimeInfo> resourceGroupInfos = service.getResourceGroupInfo();
    assertNotNull(resourceGroupInfos);
    assertTrue(resourceGroupInfos.isEmpty());
    assertEquals(resourceManagerClient.getResourceGroupInfoCalls("local"), 1);

    resourceManagerClient.setResourceGroupRuntimeInfos(ImmutableList.of(new ResourceGroupRuntimeInfo(new ResourceGroupId("global"), 1, 2, 3, 0, 1, Optional.empty())));
    // NOTE(review): fixed 2s sleep waits for the service's periodic cache
    // refresh; this is timing-based and potentially flaky under load —
    // consider an injectable clock or a latch-based refresh hook.
    Thread.sleep(SECONDS.toMillis(2));
    resourceGroupInfos = service.getResourceGroupInfo();
    assertNotNull(resourceGroupInfos);
    assertEquals(resourceGroupInfos.size(), 1);
    assertEquals(resourceManagerClient.getResourceGroupInfoCalls("local"), 2);
}
public static String convert15To18(String idCard) { StringBuilder idCard18; if (idCard.length() != CHINA_ID_MIN_LENGTH) { return null; } if (ReUtil.isMatch(PatternPool.NUMBERS, idCard)) { // 获取出生年月日 String birthday = idCard.substring(6, 12); Date birthDate = DateUtil.parse(birthday, "yyMMdd"); // 获取出生年(完全表现形式,如:2010) int sYear = DateUtil.year(birthDate); if (sYear > 2000) { // 2000年之后不存在15位身份证号,此处用于修复此问题的判断 sYear -= 100; } idCard18 = StrUtil.builder().append(idCard, 0, 6).append(sYear).append(idCard.substring(8)); // 获取校验位 char sVal = getCheckCode18(idCard18.toString()); idCard18.append(sVal); } else { return null; } return idCard18.toString(); }
/**
 * Checks 15-to-18 digit conversion, including the century rollback for
 * two-digit birth years that would otherwise parse past 2000.
 */
@Test
public void convert15To18Test() {
    assertEquals("150102198807303035", IdcardUtil.convert15To18(ID_15));
    // Birth year "20" must resolve to 1920, not 2020, for a 15-digit ID.
    assertEquals("33010219200403064X", IdcardUtil.convert15To18("330102200403064"));
}
/**
 * Reports whether the given customer id matches any of the accepted Confluent
 * customer id formats (legacy pattern, or the new case-insensitive and
 * case-sensitive patterns).
 *
 * @param customerId the id to validate; {@code null} is never a customer id
 */
public static boolean isConfluentCustomer(final String customerId) {
    if (customerId == null) {
        return false;
    }
    // The legacy pattern is matched case-insensitively; Locale.ROOT keeps the
    // lowering locale-independent.
    final String lowered = customerId.toLowerCase(Locale.ROOT);
    return CUSTOMER_PATTERN.matcher(lowered).matches()
            || NEW_CUSTOMER_CASE_INSENSISTIVE_PATTERN.matcher(customerId).matches()
            || NEW_CUSTOMER_CASE_SENSISTIVE_PATTERN.matcher(customerId).matches();
}
/**
 * Every example id from both new-customer lists (case-sensitive and
 * case-insensitive) must be accepted as a Confluent customer.
 */
@Test
public void testValidNewCustomer() {
    // Validate directly from the concatenated stream; no need to materialize
    // an intermediate array.
    Stream.concat(
            CustomerIdExamples.VALID_CASE_SENSISTIVE_NEW_CUSTOMER_IDS.stream(),
            CustomerIdExamples.VALID_CASE_INSENSISTIVE_NEW_CUSTOMER_IDS.stream())
        .forEach(validId ->
            assertTrue(validId + " is an invalid new customer identifier",
                BaseSupportConfig.isConfluentCustomer(validId)));
}
/**
 * Checks whether data written with the {@code writer} schema can be decoded
 * using the {@code reader} schema.
 *
 * @param reader schema used to decode
 * @param writer schema the data was encoded with
 * @return the pair compatibility result together with a human-readable message
 * @throws AvroRuntimeException if the checker reports an unknown compatibility type
 */
public static SchemaPairCompatibility checkReaderWriterCompatibility(final Schema reader, final Schema writer) {
    final SchemaCompatibilityResult compatibility = new ReaderWriterCompatibilityChecker().getCompatibility(reader, writer);

    final String message;
    switch (compatibility.getCompatibility()) {
        case INCOMPATIBLE: {
            // Include both full schemas in the message to make diagnosis possible
            // from the error text alone.
            message = String.format(
                "Data encoded using writer schema:%n%s%n" + "will or may fail to decode using reader schema:%n%s%n",
                writer.toString(true), reader.toString(true));
            break;
        }
        case COMPATIBLE: {
            message = READER_WRITER_COMPATIBLE_MESSAGE;
            break;
        }
        default:
            // Defensive: the enum only has the two cases above today.
            throw new AvroRuntimeException("Unknown compatibility: " + compatibility);
    }

    return new SchemaPairCompatibility(compatibility, reader, writer, message);
}
/**
 * A reader that adds a new field WITH a default value stays compatible with
 * the old writer schema: missing values are filled from the default (42).
 */
@Test
void validateSchemaNewFieldWithDefault() {
    // Reader = writer's field plus "newfield1" defaulting to 42.
    final List<Schema.Field> readerFields = list(new Schema.Field("oldfield1", INT_SCHEMA, null, null),
        new Schema.Field("newfield1", INT_SCHEMA, null, 42));
    final Schema reader = Schema.createRecord(null, null, null, false, readerFields);
    final SchemaCompatibility.SchemaPairCompatibility expectedResult = new SchemaCompatibility.SchemaPairCompatibility(
        SchemaCompatibility.SchemaCompatibilityResult.compatible(), reader, WRITER_SCHEMA,
        SchemaCompatibility.READER_WRITER_COMPATIBLE_MESSAGE);

    // Test new field with default value.
    assertEquals(expectedResult, checkReaderWriterCompatibility(reader, WRITER_SCHEMA));
}