focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Opens an output stream, first attempting to obtain a lock for the file (NSURL suggests a
// macOS security-scoped resource, presumably — confirm against Local#lock). If no lock is
// returned or locking is denied, falls back to the plain stream; otherwise wraps the stream
// so the lock is released when the stream is closed (LockReleaseProxyOutputStream).
@Override public OutputStream getOutputStream(boolean append) throws AccessDeniedException { final NSURL resolved; try { resolved = this.lock(this.exists()); if(null == resolved) { return super.getOutputStream(append); } } catch(LocalAccessDeniedException e) { log.warn(String.format("Failure obtaining lock for %s. %s", this, e)); return super.getOutputStream(append); } return new LockReleaseProxyOutputStream(super.getOutputStream(resolved.path(), append), resolved, append); }
// Smoke test: a brand-new temp file can be opened for writing (non-append) and closed cleanly.
@Test public void testWriteNewFile() throws Exception { final FinderLocal file = new FinderLocal(System.getProperty("java.io.tmpdir"), new AlphanumericRandomStringService().random()); final OutputStream out = file.getOutputStream(false); out.close(); file.delete(); }
/**
 * Parses the client IP and port into the span.
 *
 * <p>The {@code X-Forwarded-For} header takes precedence; when it does not yield a client IP,
 * we fall back to the socket-level remote address and port of the delegate request.
 */
@Override
public boolean parseClientIpAndPort(Span span) {
    return parseClientIpFromXForwardedFor(span)
        || span.remoteIpAndPort(delegate.getRemoteAddr(), delegate.getRemotePort());
}
// Verifies the X-Forwarded-For header short-circuits: only the header-derived IP is recorded,
// and the delegate's socket address is never consulted (verifyNoMoreInteractions).
@Test void parseClientIpAndPort_prefersXForwardedFor() { when(span.remoteIpAndPort("1.2.3.4", 0)).thenReturn(true); when(request.getHeader("X-Forwarded-For")).thenReturn("1.2.3.4"); wrapper.parseClientIpAndPort(span); verify(span).remoteIpAndPort("1.2.3.4", 0); verifyNoMoreInteractions(span); }
// Adds an item by storing its serialized (encoded) form in the backing set.
// Returns false when the encoded form is already present; null items are rejected up front.
@Override public boolean add(E item) { checkNotNull(item, "Item to be added cannot be null."); return items.add(serializer.encode(item)); }
// Set semantics: first add of each element returns true and grows the set; a repeated add
// returns false and leaves the size unchanged.
@Test public void testAdd() throws Exception { //Test of add for (int i = 0; i < 10; i++) { assertEquals("The size of the set is wrong.", i, set.size()); assertTrue("The first add of an element should be true.", set.add(i)); assertFalse("The second add of an element should be false.", set.add(i)); } }
/**
 * Formats one logging event as "timestamp [thread] LEVEL logger - message\n", followed by the
 * converted stack trace when the event carries a throwable.
 *
 * <p>Returns the empty string until the layout has been started, per the layout lifecycle.
 */
public String doLayout(ILoggingEvent event) {
    if (!isStarted()) {
        return CoreConstants.EMPTY_STRING;
    }
    final StringBuilder buf = new StringBuilder();
    buf.append(cachingDateFormatter.format(event.getTimeStamp()))
        .append(" [")
        .append(event.getThreadName())
        .append("] ")
        .append(event.getLevel().toString())
        .append(" ")
        .append(event.getLoggerName())
        .append(" - ")
        .append(event.getFormattedMessage())
        .append(CoreConstants.LINE_SEPARATOR);
    final IThrowableProxy throwableProxy = event.getThrowableProxy();
    if (throwableProxy != null) {
        // tpc renders the proxy attached to the event into a printable stack trace.
        buf.append(tpc.convert(event));
    }
    return buf.toString();
}
// A null message must render literally as "null" after the " - " separator, not throw.
@Test public void nullMessage() { LoggingEvent event = new LoggingEvent("", logger, Level.INFO, null, null, null); event.setTimeStamp(0); String result = layout.doLayout(event); String firstLine = result.split(CoreConstants.LINE_SEPARATOR)[0]; assertThat(firstLine, matchesPattern(TTLL_PREFIX_PATTERN + " - null")); }
// Repeatedly removes suggestions until the list size is within maxSuggestions.
// NOTE(review): "trip" looks like a typo for "trim", but the name is part of the public API
// (callers invoke IMEUtil.tripSuggestions) so it is kept as-is.
// Relies on removeSuggestion() shrinking the list on every call; otherwise this loops forever.
public static void tripSuggestions( List<CharSequence> suggestions, final int maxSuggestions, List<CharSequence> stringsPool) { while (suggestions.size() > maxSuggestions) { removeSuggestion(suggestions, maxSuggestions, stringsPool); } }
// Trimming a five-element list (with duplicates) to two keeps the first two distinct entries.
@Test public void testTrimSuggestionsWhenThreeNeeded() { ArrayList<CharSequence> list = new ArrayList<>( Arrays.<CharSequence>asList("typed", "something", "duped", "duped", "something")); IMEUtil.tripSuggestions(list, 2, mStringPool); Assert.assertEquals(2, list.size()); Assert.assertEquals("typed", list.get(0)); Assert.assertEquals("something", list.get(1)); }
// Registers JMX beans for the load balancer state and keeps them current across dual-read
// mode changes: each update callback installs/refreshes a watcher (via _watcherManager) that
// re-registers the bean under a mode-specific JMX name when the dual-read mode flips, and then
// performs an immediate registration with a null (current) mode. Removal callbacks drop both
// the watcher and the JMX registration. onClientAdded/onClientRemoved are deliberate no-ops
// (see the retained inline comments). The private doRegister*/get*JmxName helpers at the end
// build the mode-qualified JMX object names.
public void setSimpleLoadBalancerState(SimpleLoadBalancerState state) { _watcherManager.updateWatcher(state, this::doRegisterLoadBalancerState); doRegisterLoadBalancerState(state, null); state.register(new SimpleLoadBalancerStateListener() { @Override public void onStrategyAdded(String serviceName, String scheme, LoadBalancerStrategy strategy) { _watcherManager.updateWatcher(serviceName, scheme, strategy, (item, mode) -> doRegisterLoadBalancerStrategy(serviceName, scheme, item, mode)); doRegisterLoadBalancerStrategy(serviceName, scheme, strategy, null); } @Override public void onStrategyRemoved(String serviceName, String scheme, LoadBalancerStrategy strategy) { _watcherManager.removeWatcherForLoadBalancerStrategy(serviceName, scheme); _jmxManager.unregister(getLoadBalancerStrategyJmxName(serviceName, scheme, null)); } @Override public void onClientAdded(String clusterName, TrackerClient client) { // We currently think we can make this no-op as the info provided is not helpful // _jmxManager.checkReg(new DegraderControl((DegraderImpl) client.getDegrader(DefaultPartitionAccessor.DEFAULT_PARTITION_ID)), // _prefix + "-" + clusterName + "-" + client.getUri().toString().replace("://", "-") + "-TrackerClient-Degrader"); } @Override public void onClientRemoved(String clusterName, TrackerClient client) { // We currently think we can make this no-op as the info provided is not helpful // _jmxManager.unregister(_prefix + "-" + clusterName + "-" + client.getUri().toString().replace("://", "-") + "-TrackerClient-Degrader"); } @Override public void onClusterInfoUpdate(ClusterInfoItem clusterInfoItem) { if (clusterInfoItem != null && clusterInfoItem.getClusterPropertiesItem() != null && clusterInfoItem.getClusterPropertiesItem().getProperty() != null) { String clusterName = clusterInfoItem.getClusterPropertiesItem().getProperty().getClusterName(); _watcherManager.updateWatcher(clusterName, clusterInfoItem, (item, mode) -> doRegisterClusterInfo(clusterName, item, mode)); 
doRegisterClusterInfo(clusterName, clusterInfoItem, null); } } @Override public void onClusterInfoRemoval(ClusterInfoItem clusterInfoItem) { if (clusterInfoItem != null && clusterInfoItem.getClusterPropertiesItem() != null && clusterInfoItem.getClusterPropertiesItem().getProperty() != null) { String clusterName = clusterInfoItem.getClusterPropertiesItem().getProperty().getClusterName(); _watcherManager.removeWatcherForClusterInfoItem(clusterName); _jmxManager.unregister(getClusterInfoJmxName(clusterName, null)); } } @Override public void onServicePropertiesUpdate(LoadBalancerStateItem<ServiceProperties> serviceProperties) { if (serviceProperties != null && serviceProperties.getProperty() != null) { String serviceName = serviceProperties.getProperty().getServiceName(); _watcherManager.updateWatcher(serviceName, serviceProperties, (item, mode) -> doRegisterServiceProperties(serviceName, item, mode)); doRegisterServiceProperties(serviceName, serviceProperties, null); } } @Override public void onServicePropertiesRemoval(LoadBalancerStateItem<ServiceProperties> serviceProperties) { if (serviceProperties != null && serviceProperties.getProperty() != null) { String serviceName = serviceProperties.getProperty().getServiceName(); _watcherManager.removeWatcherForServiceProperties(serviceName); _jmxManager.unregister(getServicePropertiesJmxName(serviceName, null)); } } private void doRegisterLoadBalancerStrategy(String serviceName, String scheme, LoadBalancerStrategy strategy, @Nullable DualReadModeProvider.DualReadMode mode) { String jmxName = getLoadBalancerStrategyJmxName(serviceName, scheme, mode); _jmxManager.registerLoadBalancerStrategy(jmxName, strategy); } private void doRegisterClusterInfo(String clusterName, ClusterInfoItem clusterInfoItem, @Nullable DualReadModeProvider.DualReadMode mode) { String jmxName = getClusterInfoJmxName(clusterName, mode); _jmxManager.registerClusterInfo(jmxName, clusterInfoItem); } private void doRegisterServiceProperties(String 
serviceName, LoadBalancerStateItem<ServiceProperties> serviceProperties, @Nullable DualReadModeProvider.DualReadMode mode) { _jmxManager.registerServiceProperties(getServicePropertiesJmxName(serviceName, mode), serviceProperties); } private String getClusterInfoJmxName(String clusterName, @Nullable DualReadModeProvider.DualReadMode mode) { return String.format("%s%s-ClusterInfo", getClusterPrefixForLBPropertyJmxNames(clusterName, mode), clusterName); } private String getServicePropertiesJmxName(String serviceName, @Nullable DualReadModeProvider.DualReadMode mode) { return String.format("%s%s-ServiceProperties", getServicePrefixForLBPropertyJmxNames(serviceName, mode), serviceName); } private String getLoadBalancerStrategyJmxName(String serviceName, String scheme, @Nullable DualReadModeProvider.DualReadMode mode) { return String.format("%s%s-%s-LoadBalancerStrategy", getServicePrefixForLBPropertyJmxNames(serviceName, mode), serviceName, scheme); } }); }
// End-to-end check of the service-properties watcher lifecycle: the first update adds exactly
// one watcher (carrying the latest properties), a second update reuses that watcher and only
// refreshes its payload, and removal unregisters the same watcher instance.
@SuppressWarnings({"unchecked", "rawtypes"}) @Test public void testAddAndRemoveWatcherAtServicePropertiesUpdate() { D2ClientJmxManagerFixture fixture = new D2ClientJmxManagerFixture(); D2ClientJmxManager d2ClientJmxManager = fixture.getD2ClientJmxManager("Foo", D2ClientJmxManager.DiscoverySourceType.XDS, true); // Initial dual read mode is ZK only. DualReadStateManager dualReadStateManager = fixture._dualReadStateManager; dualReadStateManager.updateGlobal(DualReadModeProvider.DualReadMode.OLD_LB_ONLY); Mockito.doReturn(DualReadModeProvider.DualReadMode.OLD_LB_ONLY).when(dualReadStateManager).getGlobalDualReadMode(); Mockito.doReturn(DualReadModeProvider.DualReadMode.OLD_LB_ONLY).when(dualReadStateManager).getServiceDualReadMode(any()); Mockito.doReturn(DualReadModeProvider.DualReadMode.OLD_LB_ONLY).when(dualReadStateManager).getClusterDualReadMode(any()); d2ClientJmxManager.setSimpleLoadBalancerState(fixture._simpleLoadBalancerState); SimpleLoadBalancerState.SimpleLoadBalancerStateListener lbStateListener = fixture._simpleLoadBalancerStateListenerCaptor.getValue(); ArgumentCaptor<D2ClientJmxDualReadModeWatcherManager.D2ClientJmxDualReadModeWatcher> addWatcherCaptor = fixture._addWatcherCaptor; lbStateListener.onServicePropertiesUpdate(SERVICE_PROPERTIES_LOAD_BALANCER_STATE_ITEM); // Verify watcher is added with properties inside verify(dualReadStateManager).addServiceWatcher(eq("S_Foo"), addWatcherCaptor.capture()); D2ClientJmxDualReadModeWatcherManager.D2ClientJmxDualReadModeWatcher<LoadBalancerStateItem<ServiceProperties>> watcher = addWatcherCaptor.getValue(); Assert.assertEquals(watcher.getLatestJmxProperty(), SERVICE_PROPERTIES_LOAD_BALANCER_STATE_ITEM); lbStateListener.onServicePropertiesUpdate(UPDATED_SERVICE_PROPERTIES_LB_STATE_ITEM); // Verify watcher is not added again, and properties in the watcher is updated verify(dualReadStateManager, times(1)).addServiceWatcher(any(), any()); Assert.assertEquals(watcher.getLatestJmxProperty(), 
UPDATED_SERVICE_PROPERTIES_LB_STATE_ITEM); // Verify watch is removed lbStateListener.onServicePropertiesRemoval(UPDATED_SERVICE_PROPERTIES_LB_STATE_ITEM); verify(dualReadStateManager).removeServiceWatcher(eq("S_Foo"), eq(watcher)); }
// Fetches every page of /groups from GitLab and returns the deserialized groups as an
// immutable, de-duplicated set (Set.copyOf rejects null elements and drops duplicates).
public Set<GsonGroup> getGroups(String gitlabUrl, String token) { return Set.copyOf(executePaginatedQuery(gitlabUrl, token, "/groups", resp -> GSON.fromJson(resp, GITLAB_GROUP))); }
// Captures the deserializer lambda passed to the paginated client and checks both the returned
// set and the deserialization of a canned JSON payload against the expected groups.
@Test public void getGroups_whenCallIsSuccessful_deserializesAndReturnsCorrectlyGroups() throws IOException { ArgumentCaptor<Function<String, List<GsonGroup>>> deserializerCaptor = ArgumentCaptor.forClass(Function.class); String token = "token-toto"; GitlabToken gitlabToken = new GitlabToken(token); List<GsonGroup> expectedGroups = expectedGroups(); when(gitlabPaginatedHttpClient.get(eq(gitlabUrl), eq(gitlabToken), eq("/groups"), deserializerCaptor.capture())).thenReturn(expectedGroups); Set<GsonGroup> groups = underTest.getGroups(gitlabUrl, token); assertThat(groups).containsExactlyInAnyOrderElementsOf(expectedGroups); String responseContent = getResponseContent("groups-full-response.json"); List<GsonGroup> deserializedGroups = deserializerCaptor.getValue().apply(responseContent); assertThat(deserializedGroups).usingRecursiveComparison().isEqualTo(expectedGroups); }
/** Diagnostic representation listing this event's fields in declaration order. */
@Override
public String toString() {
    final StringBuilder sb = new StringBuilder("LocalCacheWideEventData{");
    sb.append("eventType=").append(eventType);
    sb.append(", source='").append(source).append('\'');
    sb.append(", numberOfEntriesAffected=").append(numberOfEntriesAffected);
    sb.append(", mapName=").append(mapName);
    sb.append('}');
    return sb.toString();
}
// toString() must at least identify the class; only the prefix is asserted.
@Test public void testToString() { assertContains(localCacheWideEventData.toString(), "LocalCacheWideEventData"); }
// Returns the authorization info (client details + previously approved scopes) for the OAuth2
// authorize page. Comments translated to English; annotation string literals are runtime
// values surfaced in API docs and are kept unchanged.
@GetMapping("/authorize")
@Operation(summary = "获得授权信息", description = "适合 code 授权码模式,或者 implicit 简化模式;在 sso.vue 单点登录界面被【获取】调用")
@Parameter(name = "clientId", required = true, description = "客户端编号", example = "tudou")
public CommonResult<OAuth2OpenAuthorizeInfoRespVO> authorize(@RequestParam("clientId") String clientId) {
    // 0. Validate that the user is already logged in. Enforced via Spring Security.
    // 1. Load the OAuth2 client's information.
    OAuth2ClientDO client = oauth2ClientService.validOAuthClientFromCache(clientId);
    // 2. Load the scopes the user has already approved for this client.
    List<OAuth2ApproveDO> approves = oauth2ApproveService.getApproveList(getLoginUserId(), getUserType(), clientId);
    // Assemble and return the response.
    return success(OAuth2OpenConvert.INSTANCE.convert(client, approves));
}
// Verifies the authorize endpoint merges client scopes with the user's prior approvals:
// approved scopes come back true, rejected/unseen scopes false. Comments translated to English.
@Test
public void testAuthorize() {
    // Prepare parameters
    String clientId = randomString();
    // Mock the client lookup
    OAuth2ClientDO client = randomPojo(OAuth2ClientDO.class).setClientId("demo_client_id").setScopes(ListUtil.toList("read", "write", "all"));
    when(oauth2ClientService.validOAuthClientFromCache(eq(clientId))).thenReturn(client);
    // Mock the approval lookup
    List<OAuth2ApproveDO> approves = asList( randomPojo(OAuth2ApproveDO.class).setScope("read").setApproved(true), randomPojo(OAuth2ApproveDO.class).setScope("write").setApproved(false));
    when(oauth2ApproveService.getApproveList(isNull(), eq(UserTypeEnum.ADMIN.getValue()), eq(clientId))).thenReturn(approves);
    // Invoke
    CommonResult<OAuth2OpenAuthorizeInfoRespVO> result = oauth2OpenController.authorize(clientId);
    // Assert
    assertEquals(0, result.getCode());
    assertPojoEquals(client, result.getData().getClient());
    assertEquals(new KeyValue<>("read", true), result.getData().getScopes().get(0));
    assertEquals(new KeyValue<>("write", false), result.getData().getScopes().get(1));
    assertEquals(new KeyValue<>("all", false), result.getData().getScopes().get(2));
}
// Buffers the incoming split and schedules asynchronous processing of pending splits.
@Override public void processElement(StreamRecord<FlinkInputSplit> element) { splits.add(element.getValue()); enqueueProcessSplits(); }
@TestTemplate public void testTriggerCheckpoint() throws Exception { // Received emitted splits: split1, split2, split3, checkpoint request is triggered when reading // records from // split1. List<List<Record>> expectedRecords = generateRecordsAndCommitTxn(3); List<FlinkInputSplit> splits = generateSplits(); assertThat(splits).hasSize(3); long timestamp = 0; try (OneInputStreamOperatorTestHarness<FlinkInputSplit, RowData> harness = createReader()) { harness.setup(); harness.open(); SteppingMailboxProcessor processor = createLocalMailbox(harness); harness.processElement(splits.get(0), ++timestamp); harness.processElement(splits.get(1), ++timestamp); harness.processElement(splits.get(2), ++timestamp); // Trigger snapshot state, it will start to work once all records from split0 are read. processor.getMainMailboxExecutor().execute(() -> harness.snapshot(1, 3), "Trigger snapshot"); assertThat(processor.runMailboxStep()).as("Should have processed the split0").isTrue(); assertThat(processor.runMailboxStep()) .as("Should have processed the snapshot state action") .isTrue(); TestHelpers.assertRecords(readOutputValues(harness), expectedRecords.get(0), SCHEMA); // Read records from split1. assertThat(processor.runMailboxStep()).as("Should have processed the split1").isTrue(); // Read records from split2. assertThat(processor.runMailboxStep()).as("Should have processed the split2").isTrue(); TestHelpers.assertRecords( readOutputValues(harness), Lists.newArrayList(Iterables.concat(expectedRecords)), SCHEMA); } }
// Idempotent migration: converts every legacy dashboard into a (View, Search) pair, persists
// the new views, and records which dashboard/widget ids were migrated so a rerun is a no-op.
@Override public void upgrade() { if (hasBeenRunSuccessfully()) { LOG.debug("Migration already completed."); return; } final Set<String> dashboardIdToViewId = new HashSet<>(); final Consumer<String> recordMigratedDashboardIds = dashboardIdToViewId::add; final Map<String, Set<String>> widgetIdMigrationMapping = new HashMap<>(); final Consumer<Map<String, Set<String>>> recordMigratedWidgetIds = widgetIdMigrationMapping::putAll; final Map<View, Search> newViews = this.dashboardsService.streamAll() .sorted(Comparator.comparing(Dashboard::id)) .map(dashboard -> migrateDashboard(dashboard, recordMigratedDashboardIds, recordMigratedWidgetIds)) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); writeViews(newViews); final MigrationCompleted migrationCompleted = MigrationCompleted.create(dashboardIdToViewId, widgetIdMigrationMapping); writeMigrationCompleted(migrationCompleted); }
// The migration must complete without throwing when there are no dashboards to migrate.
@Test public void runsIfNoDashboardsArePresent() { this.migration.upgrade(); }
// Translates a Flink CallExpression into the equivalent Iceberg filter Expression, when one
// exists. For comparisons, the second converter argument is the operator with reversed
// operands (used when the literal appears on the left). EQ/NOT_EQ map NaN literals to
// isNaN/notNaN since equality with NaN is never true. Unsupported calls yield empty.
public static Optional<Expression> convert( org.apache.flink.table.expressions.Expression flinkExpression) { if (!(flinkExpression instanceof CallExpression)) { return Optional.empty(); } CallExpression call = (CallExpression) flinkExpression; Operation op = FILTERS.get(call.getFunctionDefinition()); if (op != null) { switch (op) { case IS_NULL: return onlyChildAs(call, FieldReferenceExpression.class) .map(FieldReferenceExpression::getName) .map(Expressions::isNull); case NOT_NULL: return onlyChildAs(call, FieldReferenceExpression.class) .map(FieldReferenceExpression::getName) .map(Expressions::notNull); case LT: return convertFieldAndLiteral(Expressions::lessThan, Expressions::greaterThan, call); case LT_EQ: return convertFieldAndLiteral( Expressions::lessThanOrEqual, Expressions::greaterThanOrEqual, call); case GT: return convertFieldAndLiteral(Expressions::greaterThan, Expressions::lessThan, call); case GT_EQ: return convertFieldAndLiteral( Expressions::greaterThanOrEqual, Expressions::lessThanOrEqual, call); case EQ: return convertFieldAndLiteral( (ref, lit) -> { if (NaNUtil.isNaN(lit)) { return Expressions.isNaN(ref); } else { return Expressions.equal(ref, lit); } }, call); case NOT_EQ: return convertFieldAndLiteral( (ref, lit) -> { if (NaNUtil.isNaN(lit)) { return Expressions.notNaN(ref); } else { return Expressions.notEqual(ref, lit); } }, call); case NOT: return onlyChildAs(call, CallExpression.class) .flatMap(FlinkFilters::convert) .map(Expressions::not); case AND: return convertLogicExpression(Expressions::and, call); case OR: return convertLogicExpression(Expressions::or, call); case STARTS_WITH: return convertLike(call); } } return Optional.empty(); }
// NOT over an equality predicate converts to Iceberg's Not wrapping the equal predicate.
@Test public void testNot() { Expression expr = resolve( ApiExpressionUtils.unresolvedCall( BuiltInFunctionDefinitions.NOT, Expressions.$("field1").isEqual(Expressions.lit(1)))); Optional<org.apache.iceberg.expressions.Expression> actual = FlinkFilters.convert(expr); assertThat(actual).isPresent(); Not not = (Not) actual.get(); Not expected = (Not) org.apache.iceberg.expressions.Expressions.not( org.apache.iceberg.expressions.Expressions.equal("field1", 1)); assertThat(not.op()).as("Predicate operation should match").isEqualTo(expected.op()); assertPredicatesMatch(expected.child(), not.child()); }
// Pure delegation: initialization is forwarded to the wrapped resolver unchanged.
public void init(final CountersReader countersReader, final CounterProvider counterProvider) { delegateResolver.init(countersReader, counterProvider); }
// init() must only forward to the delegate; clock and duty-cycle tracker stay untouched.
@Test void initShouldCallActualMethod() { final NameResolver delegateResolver = mock(NameResolver.class); final NanoClock clock = mock(NanoClock.class); final DutyCycleTracker maxTime = mock(DutyCycleTracker.class); final TimeTrackingNameResolver resolver = new TimeTrackingNameResolver(delegateResolver, clock, maxTime); final CountersReader countersReader = mock(CountersReader.class); final CounterProvider factory = mock(CounterProvider.class); resolver.init(countersReader, factory); verify(delegateResolver).init(countersReader, factory); verifyNoMoreInteractions(delegateResolver); verifyNoInteractions(clock, maxTime); }
/** Returns the URI scheme matching the TLS setting: "https" when TLS is enabled, else "http". */
public String getUriScheme() {
    if (isHttpEnableTls()) {
        return "https";
    }
    return "http";
}
// Scheme follows http_enable_tls: false -> "http"; true (with key/cert files) -> "https".
@Test public void testGetUriScheme() throws RepositoryException, ValidationException, IOException { final HttpConfiguration configWithoutTls = new HttpConfiguration(); new JadConfig(new InMemoryRepository(ImmutableMap.of("http_enable_tls", "false")), configWithoutTls) .addConverterFactory(new GuavaConverterFactory()) .process(); assertThat(configWithoutTls.getUriScheme()).isEqualTo("http"); final Map<String, String> properties = ImmutableMap.of( "http_bind_address", "127.0.0.1:9000", "http_enable_tls", "true", "http_tls_key_file", temporaryFolder.newFile("graylog.key").getAbsolutePath(), "http_tls_cert_file", temporaryFolder.newFile("graylog.crt").getAbsolutePath()); final HttpConfiguration configWithTls = new HttpConfiguration(); new JadConfig(new InMemoryRepository(properties), configWithTls) .addConverterFactory(new GuavaConverterFactory()) .process(); assertThat(configWithTls.getUriScheme()).isEqualTo("https"); }
// Initial read position for the reliable topic listener adapter; the sentinel -1 means
// "start from the next published message" rather than replaying history.
@Override public long retrieveInitialSequence() { // -1 indicates start from next message. return -1; }
// The adapter of a plain MessageListener must report the "next message" sentinel (-1).
@Test public void retrieveInitialSequence() { MessageListener<String> listener = createMessageListenerMock(); ReliableMessageListenerAdapter<String> adapter = new ReliableMessageListenerAdapter<>(listener); assertEquals(-1, adapter.retrieveInitialSequence()); }
/**
 * Inserts {@code modelToInsert} immediately before {@code modelToInsertBefore}.
 *
 * @throws IllegalStateException if {@code modelToInsertBefore} has not been added to this adapter
 */
protected void insertModelBefore(EpoxyModel<?> modelToInsert, EpoxyModel<?> modelToInsertBefore) {
    final int anchorPosition = getModelPosition(modelToInsertBefore);
    if (anchorPosition == -1) {
        throw new IllegalStateException("Model is not added: " + modelToInsertBefore);
    }
    // Suppress the model list's own change notifications so exactly one insert event is
    // dispatched, by us, after the list mutation.
    pauseModelListNotifications();
    models.add(anchorPosition, modelToInsert);
    resumeModelListNotifications();
    notifyItemInserted(anchorPosition);
}
// Inserting before the only model places the new model at index 0 and shifts the anchor to 1.
@Test() public void testInsertModelBefore() { TestModel firstModel = new TestModel(); testAdapter.addModels(firstModel); testAdapter.insertModelBefore(new TestModel(), firstModel); verify(observer, times(2)).onItemRangeInserted(0, 1); assertEquals(2, testAdapter.models.size()); assertEquals(firstModel, testAdapter.models.get(1)); checkDifferState(); }
// Convenience overload: fits an RBF network with output normalization disabled.
public static <T> RBFNetwork<T> fit(T[] x, int[] y, RBF<T>[] rbf) { return fit(x, y, rbf, false); }
// Regression test on Pen Digits with a fixed seed: 10-fold CV accuracy is pinned for both the
// plain and the normalized RBF network.
@Test public void testPenDigits() { System.out.println("Pen Digits"); MathEx.setSeed(19650218); // to get repeatable results. ClassificationValidations<RBFNetwork<double[]>> result = CrossValidation.classification(10, PenDigits.x, PenDigits.y, (x, y) -> RBFNetwork.fit(x, y, RBF.fit(x, 50))); System.out.println("RBF Network: " + result); assertEquals(0.9162, result.avg.accuracy, 1E-4); result = CrossValidation.classification(10, PenDigits.x, PenDigits.y, (x, y) -> RBFNetwork.fit(x, y, RBF.fit(x, 50), true)); System.out.println("Normalized RBF Network: " + result); assertEquals(0.9190, result.avg.accuracy, 1E-4); }
/** Materializes the whole remote set via the blocking read, then converts it to an array. */
@Override
@SuppressWarnings("unchecked")
public Object[] toArray() {
    final Set<Object> snapshot = (Set<Object>) get(readAllAsync());
    return snapshot.toArray();
}
// Both toArray() overloads return every element, with no ordering guarantee (containsOnly).
@Test public void testToArray() { Set<String> set = redisson.getSet("set"); set.add("1"); set.add("4"); set.add("2"); set.add("5"); set.add("3"); assertThat(set.toArray()).containsOnly("1", "2", "4", "5", "3"); String[] strs = set.toArray(new String[0]); assertThat(strs).containsOnly("1", "2", "4", "5", "3"); }
// Annotates the field with @Size(min/max) from the schema's minItems/maxItems, but only when
// JSR-303 annotations are enabled and the field's type is applicable. Chooses the Jakarta or
// javax variant of @Size based on configuration.
@Override public JFieldVar apply(String nodeName, JsonNode node, JsonNode parent, JFieldVar field, Schema currentSchema) { if (ruleFactory.getGenerationConfig().isIncludeJsr303Annotations() && (node.has("minItems") || node.has("maxItems")) && isApplicableType(field)) { final Class<? extends Annotation> sizeClass = ruleFactory.getGenerationConfig().isUseJakartaValidation() ? Size.class : javax.validation.constraints.Size.class; JAnnotationUse annotation = field.annotate(sizeClass); if (node.has("minItems")) { annotation.param("min", node.get("minItems").asInt()); } if (node.has("maxItems")) { annotation.param("max", node.get("maxItems").asInt()); } } return field; }
// When JSR-303 generation is disabled, the field passes through unannotated and unchanged.
@Test public void jsrDisable() { when(config.isIncludeJsr303Annotations()).thenReturn(false); JFieldVar result = rule.apply("node", node, null, fieldVar, null); assertSame(fieldVar, result); verify(fieldVar, never()).annotate(sizeClass); verify(annotation, never()).param(anyString(), anyInt()); }
// Validates Bitbucket Cloud credentials: the token must carry the "pullrequest" scope, and the
// workspace must be reachable. Lookup failures are surfaced as IllegalArgumentException so the
// caller can report them as configuration errors.
public void validate(String clientId, String clientSecret, String workspace) { Token token = validateAccessToken(clientId, clientSecret); if (token.getScopes() == null || !token.getScopes().contains("pullrequest")) { LOG.info(MISSING_PULL_REQUEST_READ_PERMISSION + String.format(SCOPE, token.getScopes())); throw new IllegalArgumentException(ERROR_BBC_SERVERS + ": " + MISSING_PULL_REQUEST_READ_PERMISSION); } try { doGet(token.getAccessToken(), buildUrl("/repositories/" + workspace), r -> null); } catch(NotFoundException | IllegalStateException e) { throw new IllegalArgumentException(e.getMessage()); } }
// A 500 response whose JSON body lacks an "error" field must still fail cleanly with the
// generic message, and the raw body must be logged at INFO.
@Test public void responseBodyWithoutErrorFieldIsSupported() { String body = "{\"foo\": \"bar\"}"; server.enqueue(new MockResponse().setResponseCode(500) .setHeader("content-type", "application/json; charset=utf-8") .setBody(body)); assertThatIllegalArgumentException() .isThrownBy(() -> underTest.validate("clientId", "clientSecret", "workspace")) .withMessage(UNABLE_TO_CONTACT_BBC_SERVERS); assertThat(logTester.logs(Level.INFO)).containsExactly(String.format(BBC_FAIL_WITH_RESPONSE, serverURL, "500", body)); }
// Parses game chat messages for kill counts, personal bests, duel-arena streaks, Hallowed
// Sepulchre times, collection-log pets and Guardians of the Rift counts, persisting each via
// setKc/setPb. PB messages can arrive before or after the kill message, hence the
// lastPb/lastBossKill/lastBossTime bookkeeping across events.
// NOTE(review): `boss != renamedBoss` is a reference (identity) comparison on Strings. It
// works only because Map.getOrDefault returns the key's own value instance when absent and
// String.replace returns `this` when nothing matches — an implementation-dependent idiom;
// `!boss.equals(renamedBoss)` would be the robust form. Flagged, not changed here.
@Subscribe public void onChatMessage(ChatMessage chatMessage) { if (chatMessage.getType() != ChatMessageType.TRADE && chatMessage.getType() != ChatMessageType.GAMEMESSAGE && chatMessage.getType() != ChatMessageType.SPAM && chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION) { return; } String message = chatMessage.getMessage(); Matcher matcher = KILLCOUNT_PATTERN.matcher(message); if (matcher.find()) { final String boss = matcher.group("boss"); final int kc = Integer.parseInt(matcher.group("kc")); final String pre = matcher.group("pre"); final String post = matcher.group("post"); if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post)) { unsetKc(boss); return; } String renamedBoss = KILLCOUNT_RENAMES .getOrDefault(boss, boss) // The config service doesn't support keys with colons in them .replace(":", ""); if (boss != renamedBoss) { // Unset old TOB kc unsetKc(boss); unsetPb(boss); unsetKc(boss.replace(":", ".")); unsetPb(boss.replace(":", ".")); // Unset old story mode unsetKc("Theatre of Blood Story Mode"); unsetPb("Theatre of Blood Story Mode"); } setKc(renamedBoss, kc); // We either already have the pb, or need to remember the boss for the upcoming pb if (lastPb > -1) { log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb); if (renamedBoss.contains("Theatre of Blood")) { // TOB team size isn't sent in the kill message, but can be computed from varbits int tobTeamSize = tobTeamSize(); lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players"); } else if (renamedBoss.contains("Tombs of Amascut")) { // TOA team size isn't sent in the kill message, but can be computed from varbits int toaTeamSize = toaTeamSize(); lastTeamSize = toaTeamSize == 1 ? 
"Solo" : (toaTeamSize + " players"); } final double pb = getPb(renamedBoss); // If a raid with a team size, only update the pb if it is lower than the existing pb // so that the pb is the overall lowest of any team size if (lastTeamSize == null || pb == 0 || lastPb < pb) { log.debug("Setting overall pb (old: {})", pb); setPb(renamedBoss, lastPb); } if (lastTeamSize != null) { log.debug("Setting team size pb: {}", lastTeamSize); setPb(renamedBoss + " " + lastTeamSize, lastPb); } lastPb = -1; lastTeamSize = null; } else { lastBossKill = renamedBoss; lastBossTime = client.getTickCount(); } return; } matcher = DUEL_ARENA_WINS_PATTERN.matcher(message); if (matcher.find()) { final int oldWins = getKc("Duel Arena Wins"); final int wins = matcher.group(2).equals("one") ? 1 : Integer.parseInt(matcher.group(2).replace(",", "")); final String result = matcher.group(1); int winningStreak = getKc("Duel Arena Win Streak"); int losingStreak = getKc("Duel Arena Lose Streak"); if (result.equals("won") && wins > oldWins) { losingStreak = 0; winningStreak += 1; } else if (result.equals("were defeated")) { losingStreak += 1; winningStreak = 0; } else { log.warn("unrecognized duel streak chat message: {}", message); } setKc("Duel Arena Wins", wins); setKc("Duel Arena Win Streak", winningStreak); setKc("Duel Arena Lose Streak", losingStreak); } matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message); if (matcher.find()) { int losses = matcher.group(1).equals("one") ? 
1 : Integer.parseInt(matcher.group(1).replace(",", "")); setKc("Duel Arena Losses", losses); } matcher = KILL_DURATION_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = NEW_PB_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = RAIDS_PB_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = RAIDS_DURATION_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = HS_PB_PATTERN.matcher(message); if (matcher.find()) { int floor = Integer.parseInt(matcher.group("floor")); String floortime = matcher.group("floortime"); String floorpb = matcher.group("floorpb"); String otime = matcher.group("otime"); String opb = matcher.group("opb"); String pb = MoreObjects.firstNonNull(floorpb, floortime); setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb)); if (otime != null) { pb = MoreObjects.firstNonNull(opb, otime); setPb("Hallowed Sepulchre", timeStringToSeconds(pb)); } } matcher = HS_KC_FLOOR_PATTERN.matcher(message); if (matcher.find()) { int floor = Integer.parseInt(matcher.group(1)); int kc = Integer.parseInt(matcher.group(2).replaceAll(",", "")); setKc("Hallowed Sepulchre Floor " + floor, kc); } matcher = HS_KC_GHC_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1).replaceAll(",", "")); setKc("Hallowed Sepulchre", kc); } matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1).replaceAll(",", "")); setKc("Hunter Rumours", kc); } if (lastBossKill != null && lastBossTime != client.getTickCount()) { lastBossKill = null; lastBossTime = -1; } matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message); if (matcher.find()) { String item = matcher.group(1); int petId = findPet(item); if (petId != -1) { final List<Integer> petList = new ArrayList<>(getPetList()); if (!petList.contains(petId)) { log.debug("New pet added: {}/{}", item, petId); petList.add(petId); 
setPetList(petList); } } } matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1)); setKc("Guardians of the Rift", kc); } }
// Kill-count message followed by a lap-duration PB (plain and precise formats) must persist
// both the kc and the converted PB seconds under the lowercased course key.
@Test public void testShayzienBasicAgilityLap() { // This sets lastBoss ChatMessage chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Your Shayzien Basic Agility Course lap count is: <col=ff0000>2</col>.", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Lap duration: <col=ff0000>1:01</col> (new personal best).", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); verify(configManager).setRSProfileConfiguration("personalbest", "shayzien basic agility course", 61.0); verify(configManager).setRSProfileConfiguration("killcount", "shayzien basic agility course", 2); // Precise times chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Lap duration: <col=ff0000>1:01.20</col> (new personal best).", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); verify(configManager).setRSProfileConfiguration("personalbest", "shayzien basic agility course", 61.2); }
// Shuts down both executors (no new tasks accepted afterwards), stops the RPC proxy if one
// exists, and unregisters this channel's metrics source.
@Override public void close() { // No more tasks may be submitted after this point. singleThreadExecutor.shutdown(); parallelExecutor.shutdown(); if (proxy != null) { // TODO: this can hang for quite some time if the client // is currently in the middle of a call to a downed JN. // We should instead do this asynchronously, and just stop // making any more calls after this point (eg clear the queue) RPC.stopProxy(proxy); } metrics.unregister(); }
// The per-channel metrics source must exist before close() and be gone afterwards.
@Test public void testMetricsRemovedOnClose() { MetricsSystem metricsSystem = DefaultMetricsSystem.instance(); String sourceName = "IPCLoggerChannel-" + FAKE_ADDR.getAddress().getHostAddress() + "-" + FAKE_ADDR.getPort(); // Ensure the metrics exist MetricsSource source = metricsSystem.getSource(sourceName); assertNotNull(source); ch.close(); // ensure the metrics are removed. source = metricsSystem.getSource(sourceName); assertNull(source); }
@Override public synchronized void write(int b) throws IOException { if (MBYTES[match] == b) { // another byte matched. Good. Keep going... match++; if (match == MBYTES.length) { // don't send MARK to the output, but instead notify the callback onMarkFound(); match = 0; } } else { if (match > 0) { // only matched partially. send the partial match that we held off down the pipe base.write(MBYTES, 0, match); match = 0; // this might match the first byte in MARK, so retry. write(b); } else { base.write(b); } } }
// A partial marker match held back by the first write must be flushed to the
// output once a later write proves it was not the marker after all.
@Test
public void partialMatchTurnsOutToBeWrongIn2ndWrite() throws IOException {
    write("bar" + markHead);
    assertOutput("bar");
    // at this point we should just see 'bar'
    write("foo");
    // this needs to write what was held off during the first write
    assertCount(0);
    assertOutput("bar" + markHead + "foo");
}
@Override
public Optional<ReadError> read(DbFileSources.Line.Builder lineBuilder) {
    // Once a read error has been recorded, every subsequent line is skipped
    // and the same error is reported back.
    if (readError != null) {
        return Optional.of(readError);
    }
    try {
        processSymbols(lineBuilder);
    } catch (RangeOffsetConverter.RangeOffsetConverterException e) {
        // Remember the failure so symbols are dropped for the whole file.
        readError = new ReadError(Data.SYMBOLS, lineBuilder.getLine());
        LOG.warn(format("Inconsistency detected in Symbols data. Symbols will be ignored for file '%s'", file.getKey()), e);
    }
    return Optional.ofNullable(readError);
}
// A symbol declared across a whole line (range LINE_1..LINE_2 at offset 0)
// yields its declaration label on line 1, nothing on the empty continuation
// line 2, and the reference label on line 3; other lines stay untouched.
@Test
public void read_symbols_declared_on_a_whole_line() {
    TextRange declaration = newTextRange(LINE_1, LINE_2, OFFSET_0, OFFSET_0);
    when(rangeOffsetConverter.offsetToString(declaration, LINE_1, DEFAULT_LINE_LENGTH)).thenReturn(RANGE_LABEL_1);
    when(rangeOffsetConverter.offsetToString(declaration, LINE_2, DEFAULT_LINE_LENGTH)).thenReturn("");
    TextRange reference = newSingleLineTextRangeWithExpectedLabel(LINE_3, OFFSET_1, OFFSET_3, RANGE_LABEL_2);
    SymbolsLineReader symbolsLineReader = newReader(newSymbol(declaration, reference));
    assertThat(symbolsLineReader.read(line1)).isEmpty();
    assertThat(symbolsLineReader.read(line2)).isEmpty();
    assertThat(symbolsLineReader.read(line3)).isEmpty();
    assertThat(symbolsLineReader.read(line4)).isEmpty();
    assertThat(line1.getSymbols()).isEqualTo(RANGE_LABEL_1 + ",1");
    assertThat(line2.getSymbols()).isEmpty();
    assertThat(line3.getSymbols()).isEqualTo(RANGE_LABEL_2 + ",1");
    assertThat(line4.getSymbols()).isEmpty();
}
/**
 * Maps a JSON Schema "type" declaration to a {@link JType}.
 * <p>
 * Resolution order: explicit object schemas ("type":"object" or any schema
 * with non-empty "properties"), then "existingJavaType" overrides (primitive
 * or resolvable class name), then the JSON primitive types, then arrays;
 * anything unrecognized falls back to {@code Object}. Finally, a "format" or
 * (for strings) "media" declaration may refine the chosen type, but only when
 * no explicit Java type override was given.
 *
 * @param nodeName name of the schema node being processed
 * @param node the schema node holding the type declaration
 * @param parent parent of {@code node}
 * @param jClassContainer container used to resolve and generate types
 * @param schema the enclosing schema
 * @return the Java type generated for this schema node
 */
@Override
public JType apply(String nodeName, JsonNode node, JsonNode parent, JClassContainer jClassContainer, Schema schema) {
    String propertyTypeName = getTypeName(node);
    JType type;
    if (propertyTypeName.equals("object") || node.has("properties") && node.path("properties").size() > 0) {
        type = ruleFactory.getObjectRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema);
    } else if (node.has("existingJavaType")) {
        String typeName = node.path("existingJavaType").asText();
        if (isPrimitive(typeName, jClassContainer.owner())) {
            type = primitiveType(typeName, jClassContainer.owner());
        } else {
            type = resolveType(jClassContainer, typeName);
        }
    } else if (propertyTypeName.equals("string")) {
        type = jClassContainer.owner().ref(String.class);
    } else if (propertyTypeName.equals("number")) {
        type = getNumberType(jClassContainer.owner(), ruleFactory.getGenerationConfig());
    } else if (propertyTypeName.equals("integer")) {
        type = getIntegerType(jClassContainer.owner(), node, ruleFactory.getGenerationConfig());
    } else if (propertyTypeName.equals("boolean")) {
        type = unboxIfNecessary(jClassContainer.owner().ref(Boolean.class), ruleFactory.getGenerationConfig());
    } else if (propertyTypeName.equals("array")) {
        type = ruleFactory.getArrayRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema);
    } else {
        type = jClassContainer.owner().ref(Object.class);
    }
    // "format"/"media" refinements are skipped when the user pinned the type
    // explicitly via "javaType"/"existingJavaType".
    if (!node.has("javaType") && !node.has("existingJavaType") && node.has("format")) {
        type = ruleFactory.getFormatRule().apply(nodeName, node.get("format"), node, type, schema);
    } else if (!node.has("javaType") && !node.has("existingJavaType") && propertyTypeName.equals("string") && node.has("media")) {
        type = ruleFactory.getMediaRule().apply(nodeName, node.get("media"), node, type, schema);
    }
    return type;
}
// A "minimum" below Integer.MIN_VALUE cannot fit in an int, so the generated
// property must use Long (boxed here, since primitives are disabled).
@Test
public void applyGeneratesIntegerUsingJavaTypeLongWhenMinimumLessThanIntegerMin() {
    JPackage jpackage = new JCodeModel()._package(getClass().getPackage().getName());
    ObjectNode objectNode = new ObjectMapper().createObjectNode();
    objectNode.put("type", "integer");
    objectNode.put("minimum", Integer.MIN_VALUE - 1L);
    when(config.isUsePrimitives()).thenReturn(false);
    JType result = rule.apply("fooBar", objectNode, null, jpackage, null);
    assertThat(result.fullName(), is(Long.class.getName()));
}
@Override
public WebServer getWebServer(ServletContextInitializer... initializers) {
    // Run every initializer against the handler's servlet context; this
    // factory doubles as the (serverless) web server, so it returns itself.
    for (ServletContextInitializer initializer : initializers) {
        if (handler.getServletContext() == null) {
            throw new WebServerException("Attempting to initialize ServletEmbeddedWebServer without ServletContext in Handler", null);
        }
        try {
            initializer.onStartup(handler.getServletContext());
        } catch (ServletException e) {
            throw new WebServerException("Could not initialize Servlets", e);
        }
    }
    return this;
}
// getWebServer must invoke each supplied initializer with a non-null
// ServletContext.
@Test
void getWebServer_callsInitializers() {
    ServerlessServletEmbeddedServerFactory factory = new ServerlessServletEmbeddedServerFactory();
    factory.getWebServer(new ServletContextInitializer() {
        @Override
        public void onStartup(ServletContext servletContext) throws ServletException {
            if (servletContext == null) {
                fail("Null servlet context");
            }
        }
    });
}
/**
 * Publishes (creates or updates) a configuration entry.
 * <p>
 * Validates the form, encrypts the content when no encrypted data key was
 * supplied, normalizes the namespace, fills in defaults for the source user
 * and config type, then delegates persistence to the operation service.
 *
 * @param configForm the config to publish (dataId/group/content required)
 * @param request used to extract the caller's identity, IP, app name and
 *                the "betaIps"/"casMd5" headers
 * @return a successful {@code Result} wrapping whether the publish succeeded
 * @throws NacosException if validation or persistence fails
 */
@PostMapping()
@Secured(action = ActionTypes.WRITE, signType = SignType.CONFIG)
public Result<Boolean> publishConfig(ConfigForm configForm, HttpServletRequest request) throws NacosException {
    // check required field
    configForm.validate();
    String encryptedDataKeyFinal = configForm.getEncryptedDataKey();
    if (StringUtils.isBlank(encryptedDataKeyFinal)) {
        // encrypted
        Pair<String, String> pair = EncryptionHandler.encryptHandler(configForm.getDataId(), configForm.getContent());
        configForm.setContent(pair.getSecond());
        encryptedDataKeyFinal = pair.getFirst();
    }
    //fix issue #9783
    configForm.setNamespaceId(NamespaceUtil.processNamespaceParameter(configForm.getNamespaceId()));
    // check param
    ParamUtils.checkTenantV2(configForm.getNamespaceId());
    ParamUtils.checkParam(configForm.getDataId(), configForm.getGroup(), "datumId", configForm.getContent());
    ParamUtils.checkParamV2(configForm.getTag());
    if (StringUtils.isBlank(configForm.getSrcUser())) {
        configForm.setSrcUser(RequestUtil.getSrcUserName(request));
    }
    if (!ConfigType.isValidType(configForm.getType())) {
        configForm.setType(ConfigType.getDefaultType().getType());
    }
    ConfigRequestInfo configRequestInfo = new ConfigRequestInfo();
    configRequestInfo.setSrcIp(RequestUtil.getRemoteIp(request));
    configRequestInfo.setRequestIpApp(RequestUtil.getAppName(request));
    configRequestInfo.setBetaIps(request.getHeader("betaIps"));
    configRequestInfo.setCasMd5(request.getHeader("casMd5"));
    return Result.success(configOperationService.publishConfig(configForm, configRequestInfo, encryptedDataKeyFinal));
}
// Happy-path publish: the controller must delegate to the operation service
// and wrap its boolean result in a successful Result.
@Test
void testPublishConfig() throws Exception {
    ConfigForm configForm = new ConfigForm();
    configForm.setDataId(TEST_DATA_ID);
    configForm.setGroup(TEST_GROUP);
    configForm.setNamespaceId(TEST_NAMESPACE_ID);
    configForm.setContent(TEST_CONTENT);
    MockHttpServletRequest request = new MockHttpServletRequest();
    when(configOperationService.publishConfig(any(ConfigForm.class), any(ConfigRequestInfo.class), anyString())).thenReturn(true);
    Result<Boolean> booleanResult = configControllerV2.publishConfig(configForm, request);
    verify(configOperationService).publishConfig(any(ConfigForm.class), any(ConfigRequestInfo.class), anyString());
    assertEquals(ErrorCode.SUCCESS.getCode(), booleanResult.getCode());
    assertTrue(booleanResult.getData());
}
@Override
protected void notifyFirstSampleAfterLoopRestart() {
    log.debug("notifyFirstSampleAfterLoopRestart called with config(httpclient.reset_state_on_thread_group_iteration={})",
            RESET_STATE_ON_THREAD_GROUP_ITERATION);
    JMeterVariables variables = JMeterContextService.getContext().getVariables();
    boolean returningVisitor = variables.isSameUserOnNextIteration();
    if (returningVisitor) {
        log.debug("Thread Group is configured to simulate a returning visitor on each iteration, ignoring property value {}",
                RESET_STATE_ON_THREAD_GROUP_ITERATION);
    } else {
        log.debug("Thread Group is configured to simulate a new visitor on each iteration, using property value {}",
                RESET_STATE_ON_THREAD_GROUP_ITERATION);
    }
    // Returning visitors keep their HTTP state; new visitors defer to the
    // configured property.
    resetStateOnThreadGroupIteration.set(!returningVisitor && RESET_STATE_ON_THREAD_GROUP_ITERATION);
    log.debug("Thread state will be reset ?: {}", RESET_STATE_ON_THREAD_GROUP_ITERATION);
}
// When the thread group simulates a NEW user per iteration, the HTTP client
// state must be flagged for reset on the first sample after a loop restart.
@Test
public void testNotifyFirstSampleAfterLoopRestartWhenThreadIterationIsANewUser() {
    jmvars.putObject(SAME_USER, false);
    jmctx.setVariables(jmvars);
    HTTPSamplerBase sampler = (HTTPSamplerBase) new HttpTestSampleGui().createTestElement();
    sampler.setThreadContext(jmctx);
    HTTPHC4Impl hc = new HTTPHC4Impl(sampler);
    hc.notifyFirstSampleAfterLoopRestart();
    Assertions.assertTrue(HTTPHC4Impl.resetStateOnThreadGroupIteration.get(), "Users are different, the state should be reset");
}
public byte[] data() { if (buf.hasArray()) { byte[] array = buf.array(); int offset = buf.arrayOffset(); if (offset == 0 && array.length == size) { // If the backing array is exactly what we need, return it without copy. return array; } else { // Else use it to make an efficient copy. return Arrays.copyOfRange(array, offset, offset + size); } } else { // No backing array -> use ByteBuffer#get(). byte[] array = new byte[size]; ByteBuffer dup = buf.duplicate(); dup.position(0); dup.get(array); return array; } }
// data() must honour the buffer's array offset: a sliced buffer over the tail
// of an array yields only the sliced bytes, not the full backing array.
@Test
public void testDataNonZeroOffset() {
    byte[] data = new byte[]{10, 11, 12};
    ByteBuffer buffer = ByteBuffer.wrap(data, 1, 2).slice();
    Msg msg = new Msg(buffer);
    assertThat(msg.data(), is(new byte[]{11, 12}));
}
/**
 * Covariant override narrowing the return type for local-transport channels.
 * The cast assumes the underlying channel is bound to a {@link LocalAddress},
 * as local channels are — NOTE(review): confirm no other address type can
 * reach this channel implementation.
 */
@Override
public LocalAddress localAddress() {
    return (LocalAddress) super.localAddress();
}
// Regression test: a server handler closing the channel from inside
// channelRead must not deadlock when client and server share the same
// event loop group.
@Test
public void testServerCloseChannelSameEventLoop() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    ServerBootstrap sb = new ServerBootstrap()
        .group(group2)
        .channel(LocalServerChannel.class)
        .childHandler(new SimpleChannelInboundHandler<Object>() {
            @Override
            protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception {
                ctx.close();
                latch.countDown();
            }
        });
    Channel sc = null;
    Channel cc = null;
    try {
        sc = sb.bind(TEST_ADDRESS).sync().channel();
        Bootstrap b = new Bootstrap()
            .group(group2)
            .channel(LocalChannel.class)
            .handler(new SimpleChannelInboundHandler<Object>() {
                @Override
                protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception {
                    // discard
                }
            });
        cc = b.connect(sc.localAddress()).sync().channel();
        cc.writeAndFlush(new Object());
        assertTrue(latch.await(5, SECONDS));
    } finally {
        closeChannel(cc);
        closeChannel(sc);
    }
}
/**
 * State-machine step for the EMPTY slot state: no container is running and
 * nothing is assigned. Verifies the EMPTY invariants, transitions to a new
 * assignment if one appeared, drops any stale profile actions and blob-change
 * notifications, and otherwise sleeps briefly before the next poll.
 *
 * @param dynamicState current mutable slot state
 * @param staticState immutable slot collaborators/configuration
 * @return the next dynamic state
 * @throws IOException if an EMPTY-state invariant is violated
 * @throws InterruptedException if interrupted while sleeping
 */
static DynamicState handleEmpty(DynamicState dynamicState, StaticState staticState) throws InterruptedException, IOException {
    // Invariant checks: EMPTY means no container, no assignment, and no
    // pending localization/download/blob-change work in flight.
    if (dynamicState.container != null) {
        throw new IOException("dynamicState.container is not null");
    }
    if (dynamicState.currentAssignment != null) {
        throw new IOException("dynamicState.currentAssignment is not null");
    }
    if (!dynamicState.pendingChangingBlobs.isEmpty()) {
        throw new IOException("dynamicState.pendingChangingBlobs is not empty");
    }
    if (dynamicState.pendingChangingBlobsAssignment != null) {
        throw new IOException("dynamicState.pendingChangingBlobsAssignment is not null");
    }
    if (dynamicState.pendingLocalization != null) {
        throw new IOException("dynamicState.pendingLocalization is not null");
    }
    if (dynamicState.pendingDownload != null) {
        throw new IOException("dynamicState.pendingDownload is not null");
    }
    if (!EquivalenceUtils.areLocalAssignmentsEquivalent(dynamicState.newAssignment, dynamicState.currentAssignment)) {
        return prepareForNewAssignmentNoWorkersRunning(dynamicState, staticState);
    }
    dynamicState = updateAssignmentIfNeeded(dynamicState);
    //Both assignments are null, just wait
    if (dynamicState.profileActions != null && !dynamicState.profileActions.isEmpty()) {
        //Nothing is scheduled here so throw away all of the profileActions
        LOG.warn("Dropping {} no topology is running", dynamicState.profileActions);
        dynamicState = dynamicState.withProfileActions(Collections.emptySet(), Collections.emptySet());
    }
    //Drop the change notifications we are not running anything right now
    dynamicState = drainAllChangingBlobs(dynamicState);
    Time.sleep(1000);
    return dynamicState;
}
// With no assignment and nothing pending, handleEmpty must stay in the EMPTY
// state and advance (simulated) time while it waits.
@Test
public void testEmptyToEmpty() throws Exception {
    try (SimulatedTime ignored = new SimulatedTime(1010)) {
        AsyncLocalizer localizer = mock(AsyncLocalizer.class);
        LocalState state = mock(LocalState.class);
        BlobChangingCallback cb = mock(BlobChangingCallback.class);
        ContainerLauncher containerLauncher = mock(ContainerLauncher.class);
        ISupervisor iSuper = mock(ISupervisor.class);
        SlotMetrics slotMetrics = new SlotMetrics(new StormMetricsRegistry());
        StaticState staticState = new StaticState(localizer, 1000, 1000, 1000, 1000, containerLauncher, "localhost", 8080, iSuper, state, cb, null, null, slotMetrics);
        DynamicState dynamicState = new DynamicState(null, null, null, slotMetrics);
        DynamicState nextState = Slot.handleEmpty(dynamicState, staticState);
        assertEquals(MachineState.EMPTY, nextState.state);
        assertTrue(Time.currentTimeMillis() > 1000);
    }
}
/**
 * Reverses the line order of the file referenced by {@code this.from}.
 * <p>
 * The source is first copied out of internal storage to a local temp file,
 * then read backwards with {@link ReversedLinesFileReader} while each line
 * (re-terminated with the rendered separator) is appended to the output temp
 * file, which is finally stored back.
 *
 * @param runContext the task run context (rendering, working dir, storage)
 * @return the storage URI of the reversed file
 * @throws Exception if rendering, I/O, or storage access fails
 */
@Override
public Reverse.Output run(RunContext runContext) throws Exception {
    URI from = new URI(runContext.render(this.from));
    String extension = FileUtils.getExtension(from);
    String separator = runContext.render(this.separator);
    Charset charset = Charsets.toCharset(runContext.render(this.charset));
    File tempFile = runContext.workingDir().createTempFile(extension).toFile();
    File originalFile = runContext.workingDir().createTempFile(extension).toFile();
    // Copy the source out of storage so it can be read backwards from disk.
    // Fix: the storage InputStream was previously never closed.
    try (InputStream inputStream = runContext.storage().getFile(from);
         OutputStream outputStream = new FileOutputStream(originalFile)) {
        IOUtils.copyLarge(inputStream, outputStream);
    }
    // Fix: the ReversedLinesFileReader was previously never closed, leaking a
    // file handle on every run; both resources are now closed deterministically.
    try (
        ReversedLinesFileReader reversedLinesFileReader = ReversedLinesFileReader.builder()
            .setPath(originalFile.toPath())
            .setCharset(charset)
            .get();
        BufferedOutputStream output = new BufferedOutputStream(new FileOutputStream(tempFile))
    ) {
        String line;
        while ((line = reversedLinesFileReader.readLine()) != null) {
            output.write((line + separator).getBytes(charset));
        }
    }
    return Reverse.Output.builder()
        .uri(runContext.storage().putFile(tempFile))
        .build();
}
// End-to-end: a stored file of "1\n2\n3\n" must come back as "3\n2\n1\n" and
// the output URI must keep the source file's extension.
@Test
void run() throws Exception {
    RunContext runContext = runContextFactory.of();
    URI put = storageInterface.put(
        null,
        new URI("/file/storage/get.yml"),
        new ByteArrayInputStream("1\n2\n3\n".getBytes())
    );
    Reverse result = Reverse.builder()
        .from(put.toString())
        .build();
    Reverse.Output run = result.run(runContext);
    assertThat(run.getUri().getPath(), endsWith(".yml"));
    assertThat(CharStreams.toString(new InputStreamReader(storageInterface.get(null, run.getUri()))), is("3\n2\n1\n"));
}
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter, MetricsRecorder metricsRecorder, BufferSupplier bufferSupplier) { if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) { // check the magic value if (!records.hasMatchingMagic(toMagic)) return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder); else // Do in-place validation, offset assignment and maybe set timestamp return assignOffsetsNonCompressed(offsetCounter, metricsRecorder); } else return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier); }
// V2 gzip-compressed batches start at relative offset 0; after validation the
// batch must carry the assigned absolute base offset instead.
@Test
public void testRelativeOffsetAssignmentCompressedV2() {
    long now = System.currentTimeMillis();
    Compression compression = Compression.gzip().build();
    MemoryRecords records = createRecords(RecordBatch.MAGIC_VALUE_V2, now, compression);
    long offset = 1234567;
    checkOffsets(records, 0);
    MemoryRecords compressedMessagesWithOffset = new LogValidator(
        records,
        topicPartition,
        time,
        CompressionType.GZIP,
        compression,
        false,
        RecordBatch.MAGIC_VALUE_V2,
        TimestampType.CREATE_TIME,
        5000L,
        5000L,
        RecordBatch.NO_PARTITION_LEADER_EPOCH,
        AppendOrigin.CLIENT,
        MetadataVersion.latestTesting()
    ).validateMessagesAndAssignOffsets(
        PrimitiveRef.ofLong(offset),
        metricsRecorder,
        RequestLocal.withThreadConfinedCaching().bufferSupplier()
    ).validatedRecords;
    checkOffsets(compressedMessagesWithOffset, offset);
}
/**
 * Splits {@code text} into tokens using {@code regexExpression}: every match
 * of the pattern becomes its own token, and non-empty stretches of text
 * between matches are emitted as tokens as well, preserving order.
 *
 * @param text the underscore-delimited sequence to split
 * @return matched and in-between tokens in order of appearance
 */
public List<String> tokenize(String text) {
    List<String> tokens = new ArrayList<>();
    Matcher regexMatcher = regexExpression.matcher(text);
    int lastIndexOfPrevMatch = 0;
    while (regexMatcher.find(lastIndexOfPrevMatch))
    // this is where the magic happens:
    // the regexp is used to find a matching pattern for substitution
    {
        int beginIndexOfNextMatch = regexMatcher.start();
        String prevToken = text.substring(lastIndexOfPrevMatch, beginIndexOfNextMatch);
        if (!prevToken.isEmpty()) {
            tokens.add(prevToken);
        }
        String currentMatch = regexMatcher.group();
        tokens.add(currentMatch);
        lastIndexOfPrevMatch = regexMatcher.end();
        if (lastIndexOfPrevMatch < text.length() && text.charAt(lastIndexOfPrevMatch) != '_') {
            // because the match end is sometimes positioned after the "_", but it
            // should be positioned before the "_": backing up one character lets the
            // trailing "_" of this match double as the leading "_" of the next one
            --lastIndexOfPrevMatch;
        }
    }
    // Anything after the last match is the tail token.
    String tail = text.substring(lastIndexOfPrevMatch);
    if (!tail.isEmpty()) {
        tokens.add(tail);
    }
    return tokens;
}
// The leading known compound "_67_112_96_" is split off as its own token and
// the remainder of the sequence becomes the second token.
@Test
void testTokenize_happyPath_3() {
    // given
    CompoundCharacterTokenizer tokenizer = new CompoundCharacterTokenizer(
        new HashSet<>(Arrays.asList(new String[] { "_67_112_96_", "_74_112_76_" })));
    String text = "_67_112_96_103_93_108_93_";
    // when
    List<String> tokens = tokenizer.tokenize(text);
    // then
    assertEquals(Arrays.asList("_67_112_96_", "_103_93_108_93_"), tokens);
}
@VisibleForTesting static SwitchGenerationCase checkSwitchGenerationCase(Type type, List<RowExpression> values) { if (values.size() > 32) { // 32 is chosen because // * SET_CONTAINS performs worst when smaller than but close to power of 2 // * Benchmark shows performance of SET_CONTAINS is better at 50, but similar at 25. return SwitchGenerationCase.SET_CONTAINS; } if (!(type instanceof IntegerType || type instanceof BigintType || type instanceof DateType)) { return SwitchGenerationCase.HASH_SWITCH; } for (RowExpression expression : values) { // For non-constant expressions, they will be added to the default case in the generated switch code. They do not affect any of // the cases other than the default one. Therefore, it's okay to skip them when choosing between DIRECT_SWITCH and HASH_SWITCH. // Same argument applies for nulls. if (!(expression instanceof ConstantExpression)) { continue; } Object constant = ((ConstantExpression) expression).getValue(); if (constant == null) { continue; } long longConstant = ((Number) constant).longValue(); if (longConstant < Integer.MIN_VALUE || longConstant > Integer.MAX_VALUE) { return SwitchGenerationCase.HASH_SWITCH; } } return SwitchGenerationCase.DIRECT_SWITCH; }
// DOUBLE is not integer-like, so any small list (with or without nulls) must
// use HASH_SWITCH; only exceeding 32 entries flips it to SET_CONTAINS.
@Test
public void testDouble() {
    List<RowExpression> values = new ArrayList<>();
    values.add(constant(1.5, DOUBLE));
    values.add(constant(2.5, DOUBLE));
    values.add(constant(3.5, DOUBLE));
    assertEquals(checkSwitchGenerationCase(DOUBLE, values), HASH_SWITCH);
    values.add(constant(null, DOUBLE));
    assertEquals(checkSwitchGenerationCase(DOUBLE, values), HASH_SWITCH);
    for (int i = 5; i <= 32; ++i) {
        values.add(constant(i + 0.5, DOUBLE));
    }
    assertEquals(checkSwitchGenerationCase(DOUBLE, values), HASH_SWITCH);
    values.add(constant(33.5, DOUBLE));
    assertEquals(checkSwitchGenerationCase(DOUBLE, values), SET_CONTAINS);
}
/**
 * Imports a batch of video albums and videos into Google Photos.
 * <p>
 * Reuses a per-job {@code PhotosLibraryClient} (cached in {@code clientsMap}),
 * creates each album idempotently, then uploads videos in batches, returning
 * an OK result annotated with the total number of bytes uploaded.
 *
 * @param jobId the transfer job identifier (keys the cached client)
 * @param executor idempotent executor that de-duplicates and records errors
 * @param authData OAuth tokens used to build a client on first use
 * @param data albums and videos to import; may be null (no-op)
 * @return OK, carrying the byte count actually uploaded
 */
@Override
public ImportResult importItem(
    UUID jobId,
    IdempotentImportExecutor executor,
    TokensAndUrlAuthData authData,
    VideosContainerResource data)
    throws Exception {
    if (data == null) {
        // Nothing to do
        return ImportResult.OK;
    }
    PhotosLibraryClient client;
    if (clientsMap.containsKey(jobId)) {
        client = clientsMap.get(jobId);
    } else {
        client = buildPhotosLibraryClient(appCredentials, authData);
        clientsMap.put(jobId, client);
    }
    for (VideoAlbum album : data.getAlbums()) {
        executor.importAndSwallowIOExceptions(album, (a) -> {
            String title = GooglePhotosImportUtils.cleanAlbumTitle(a.getName());
            return ItemImportResult.success(client.createAlbum(title).getId());
        });
    }
    long bytes = 0L;
    // Uploads videos
    final Collection<VideoModel> videos = data.getVideos();
    if (videos != null && videos.size() > 0) {
        Stream<VideoModel> stream =
            videos.stream()
                .filter(video -> shouldImport(video, executor))
                .map(this::transformVideoName);
        // We partition into groups of 49 as 50 is the maximum number of items that can be created in
        // one call. (We use 49 to avoid potential off by one errors)
        // https://developers.google.com/photos/library/guides/upload-media#creating-media-item
        final UnmodifiableIterator<List<VideoModel>> batches =
            Iterators.partition(stream.iterator(), 49);
        while (batches.hasNext()) {
            long batchBytes =
                uploadBatchOfVideos(
                    jobId,
                    batches.next(),
                    dataStore,
                    client,
                    executor,
                    connectionProvider,
                    monitor);
            bytes += batchBytes;
        }
    }
    final ImportResult result = ImportResult.OK;
    return result.copyWithBytes(bytes);
}
// The same underlying video placed in two different albums must be uploaded
// and created once per album (two uploads, two creations, 2 x 32 bytes) with
// no errors recorded by the idempotent executor.
@Test
public void importSameVideoInTwoDifferentAlbums() throws Exception {
    VideoModel videoModel1 = new VideoModel(
        VIDEO_TITLE, VIDEO_URI, VIDEO_DESCRIPTION, MP4_MEDIA_TYPE, "dataId", "album1", false, null);
    VideoModel videoModel2 = new VideoModel(
        VIDEO_TITLE, VIDEO_URI, VIDEO_DESCRIPTION, MP4_MEDIA_TYPE, "dataId", "album2", false, null);
    Album album1 = Album.newBuilder().setId("album1").setTitle("albumName").build();
    Album album2 = Album.newBuilder().setId("album2").setTitle("albumName2").build();
    when(client.createAlbum("albumName")).thenReturn(album1);
    when(client.createAlbum("albumName2")).thenReturn(album2);
    // Mock uploads
    when(client.uploadMediaItem(any()))
        .thenReturn(
            UploadMediaItemResponse.newBuilder().setUploadToken("token1").build(),
            UploadMediaItemResponse.newBuilder().setUploadToken("token2").build());
    // Mock creation response
    final NewMediaItemResult newMediaItemResult =
        NewMediaItemResult.newBuilder()
            .setStatus(Status.newBuilder().setCode(Code.OK_VALUE).build())
            .setUploadToken("token1")
            .build();
    final NewMediaItemResult newMediaItemResult2 =
        NewMediaItemResult.newBuilder()
            .setStatus(Status.newBuilder().setCode(Code.OK_VALUE).build())
            .setUploadToken("token2")
            .build();
    BatchCreateMediaItemsResponse response =
        BatchCreateMediaItemsResponse.newBuilder()
            .addNewMediaItemResults(newMediaItemResult)
            .build();
    BatchCreateMediaItemsResponse response2 =
        BatchCreateMediaItemsResponse.newBuilder()
            .addNewMediaItemResults(newMediaItemResult2)
            .build();
    NewMediaItem mediaItem = NewMediaItemFactory.createNewMediaItem("token1", VIDEO_DESCRIPTION);
    NewMediaItem mediaItem2 = NewMediaItemFactory.createNewMediaItem("token2", VIDEO_DESCRIPTION);
    when(client.batchCreateMediaItems(eq("album1"), eq(List.of(mediaItem))))
        .thenReturn(response);
    when(client.batchCreateMediaItems(eq("album2"), eq(List.of(mediaItem2))))
        .thenReturn(response2);
    InMemoryIdempotentImportExecutor executor = new InMemoryIdempotentImportExecutor(mock(Monitor.class));
    Long bytes =
        googleVideosImporter
            .importItem(
                jobId,
                executor,
                mock(TokensAndUrlAuthData.class),
                new VideosContainerResource(
                    List.of(new VideoAlbum("album1", "albumName", null), new VideoAlbum("album2", "albumName2", null)),
                    List.of(videoModel1, videoModel2)))
            .getBytes().get();
    assertEquals(64L, bytes, "Expected the number of bytes to be the two files of 32L.");
    assertEquals(0, executor.getErrors().size(), "Expected executor to have no errors.");
}
/**
 * Writes SCM-reported changed lines into the scanner report, but only when a
 * target branch exists (i.e. this is a branch/PR analysis); otherwise it is
 * a no-op.
 */
@Override
public void publish(ScannerReportWriter writer) {
    Optional<String> targetBranch = getTargetBranch();
    if (!targetBranch.isPresent()) {
        // No target branch to diff against: nothing to publish.
        return;
    }
    Profiler profiler = Profiler.create(LOG).startInfo(LOG_MSG);
    int count = writeChangedLines(scmConfiguration.provider(), writer, targetBranch.get());
    LOG.debug("SCM reported changed lines for {} {} in the branch", count, ScannerUtils.pluralize("file", count));
    profiler.stopInfo();
}
// Files the SCM reports with changed lines get those lines published; files
// the SCM omits or reports empty fall back to an empty changed-line set.
// NOTE(review): the final check uses assumeThat, which skips silently instead
// of failing when the log is missing — assertThat may be what was intended.
@Test
public void write_changed_files() {
    DefaultInputFile fileWithChangedLines = createInputFile("path1", "l1\nl2\nl3\n");
    DefaultInputFile fileNotReturned = createInputFile("path2", "l1\nl2\nl3\n");
    DefaultInputFile fileWithoutChangedLines = createInputFile("path3", "l1\nl2\nl3\n");
    Set<Path> paths = new HashSet<>(Arrays.asList(BASE_DIR.resolve("path1"), BASE_DIR.resolve("path2"), BASE_DIR.resolve("path3")));
    Set<Integer> lines = new HashSet<>(Arrays.asList(1, 10));
    when(provider.branchChangedLines(TARGET_BRANCH, BASE_DIR, paths))
        .thenReturn(ImmutableMap.of(BASE_DIR.resolve("path1"), lines, BASE_DIR.resolve("path3"), Collections.emptySet()));
    when(inputComponentStore.allChangedFilesToPublish()).thenReturn(Arrays.asList(fileWithChangedLines, fileNotReturned, fileWithoutChangedLines));
    publisher.publish(writer);
    assertPublished(fileWithChangedLines, new HashSet<>(Arrays.asList(1, 10)));
    assertPublished(fileWithoutChangedLines, Collections.emptySet());
    assertPublished(fileNotReturned, Collections.emptySet());
    assumeThat(logTester.logs()).contains("File '/root/path2' was detected as changed but without having changed lines");
}
/**
 * Builds descriptors for every pipeline option declared by the given
 * interfaces.
 * <p>
 * Validates each interface, collects its option getters, groups them per
 * declaring interface (sorted by class then property name for deterministic
 * output), de-duplicates getters shared between interfaces, maps the Java
 * return type to a proto option type (integer/number/boolean/array, default
 * string), converts property names to lower_underscore, and attaches any
 * {@code @Default}/{@code @Description} metadata.
 *
 * @param ifaces the PipelineOptions sub-interfaces to describe (non-null)
 * @return descriptors for all distinct options, grouped by declaring interface
 */
public static List<PipelineOptionDescriptor> describe(
    Set<Class<? extends PipelineOptions>> ifaces) {
    checkNotNull(ifaces);
    List<PipelineOptionDescriptor> result = new ArrayList<>();
    // Getters can be inherited by several interfaces; only describe each once.
    Set<Method> seenMethods = Sets.newHashSet();
    for (Class<? extends PipelineOptions> iface : ifaces) {
        CACHE.get().validateWellFormed(iface);
        Set<PipelineOptionSpec> properties = PipelineOptionsReflector.getOptionSpecs(iface, false);
        RowSortedTable<Class<?>, String, Method> ifacePropGetterTable =
            TreeBasedTable.create(ClassNameComparator.INSTANCE, Ordering.natural());
        for (PipelineOptionSpec prop : properties) {
            ifacePropGetterTable.put(
                prop.getDefiningInterface(), prop.getName(), prop.getGetterMethod());
        }
        for (Map.Entry<Class<?>, Map<String, Method>> ifaceToPropertyMap :
            ifacePropGetterTable.rowMap().entrySet()) {
            Class<?> currentIface = ifaceToPropertyMap.getKey();
            Map<String, Method> propertyNamesToGetters = ifaceToPropertyMap.getValue();
            List<@KeyFor("propertyNamesToGetters") String> lists =
                Lists.newArrayList(propertyNamesToGetters.keySet());
            lists.sort(String.CASE_INSENSITIVE_ORDER);
            for (String propertyName : lists) {
                Method method = propertyNamesToGetters.get(propertyName);
                if (!seenMethods.add(method)) {
                    continue;
                }
                Class<?> returnType = method.getReturnType();
                // Map the Java return type onto the proto enum; STRING is the
                // catch-all default.
                PipelineOptionType.Enum optionType = PipelineOptionType.Enum.STRING;
                if (JSON_INTEGER_TYPES.contains(returnType)) {
                    optionType = PipelineOptionType.Enum.INTEGER;
                } else if (JSON_NUMBER_TYPES.contains(returnType)) {
                    optionType = PipelineOptionType.Enum.NUMBER;
                } else if (returnType == boolean.class || returnType == Boolean.class) {
                    optionType = PipelineOptionType.Enum.BOOLEAN;
                } else if (List.class.isAssignableFrom(returnType)) {
                    optionType = PipelineOptionType.Enum.ARRAY;
                }
                String optionName =
                    CaseFormat.LOWER_CAMEL.to(CaseFormat.LOWER_UNDERSCORE, propertyName);
                Description description = method.getAnnotation(Description.class);
                PipelineOptionDescriptor.Builder builder =
                    PipelineOptionDescriptor.newBuilder()
                        .setName(optionName)
                        .setType(optionType)
                        .setGroup(currentIface.getName());
                Optional<String> defaultValue = getDefaultValueFromAnnotation(method);
                if (defaultValue.isPresent()) {
                    builder.setDefaultValue(defaultValue.get());
                }
                if (description != null) {
                    builder.setDescription(description.value());
                }
                result.add(builder.build());
            }
        }
    }
    return result;
}
// Exercises describe() end to end: no duplicate options across interfaces,
// six options contributed by TestDescribeOptions, and each option carrying
// the expected proto type, description and default value.
@Test
public void testDescribe() {
    List<PipelineOptionDescriptor> described =
        PipelineOptionsFactory.describe(
            Sets.newHashSet(PipelineOptions.class, TestDescribeOptions.class));
    Map<String, PipelineOptionDescriptor> mapped = uniqueIndex(described, input -> input.getName());
    assertEquals("no duplicates", described.size(), mapped.size());
    Collection<PipelineOptionDescriptor> filtered =
        Collections2.filter(
            described, input -> input.getGroup().equals(TestDescribeOptions.class.getName()));
    assertEquals(6, filtered.size());
    mapped = uniqueIndex(filtered, input -> input.getName());
    PipelineOptionDescriptor listDesc = mapped.get("list");
    assertThat(listDesc, notNullValue());
    assertThat(listDesc.getDescription(), isEmptyString());
    assertEquals(PipelineOptionType.Enum.ARRAY, listDesc.getType());
    assertThat(listDesc.getDefaultValue(), isEmptyString());
    PipelineOptionDescriptor stringDesc = mapped.get("string");
    assertThat(stringDesc, notNullValue());
    assertThat(stringDesc.getDescription(), isEmptyString());
    assertEquals(PipelineOptionType.Enum.STRING, stringDesc.getType());
    assertThat(stringDesc.getDefaultValue(), isEmptyString());
    PipelineOptionDescriptor integerDesc = mapped.get("integer");
    assertThat(integerDesc, notNullValue());
    assertEquals("integer property", integerDesc.getDescription());
    assertEquals(PipelineOptionType.Enum.INTEGER, integerDesc.getType());
    assertThat(integerDesc.getDefaultValue(), isEmptyString());
    PipelineOptionDescriptor floatDesc = mapped.get("float");
    assertThat(integerDesc, notNullValue());
    assertEquals("float number property", floatDesc.getDescription());
    assertEquals(PipelineOptionType.Enum.NUMBER, floatDesc.getType());
    assertThat(floatDesc.getDefaultValue(), isEmptyString());
    PipelineOptionDescriptor booleanSimpleDesc = mapped.get("boolean_simple");
    assertThat(booleanSimpleDesc, notNullValue());
    assertEquals("simple boolean property", booleanSimpleDesc.getDescription());
    assertEquals(PipelineOptionType.Enum.BOOLEAN, booleanSimpleDesc.getType());
    assertThat(booleanSimpleDesc.getDefaultValue(), equalTo("true"));
    PipelineOptionDescriptor booleanWrapperDesc = mapped.get("boolean_wrapper");
    assertThat(booleanWrapperDesc, notNullValue());
    assertThat(booleanWrapperDesc.getDescription(), isEmptyString());
    assertEquals(PipelineOptionType.Enum.BOOLEAN, booleanWrapperDesc.getType());
    assertThat(booleanWrapperDesc.getDefaultValue(), equalTo("false"));
}
@SuppressWarnings("fallthrough") @SuppressFBWarnings(justification = "As this is an encoding method the fallthrough is intentional", value = {"SF_SWITCH_NO_DEFAULT"}) public static void appendEscapedLuceneQuery(StringBuilder buf, final CharSequence text) { if (text == null || buf == null) { return; } for (int i = 0; i < text.length(); i++) { final char c = text.charAt(i); switch (c) { case '+': case '-': case '&': case '|': case '!': case '(': case ')': case '{': case '}': case '[': case ']': case '^': case '"': case '~': case '*': case '?': case ':': case '/': case '\\': //it is supposed to fall through here buf.append('\\'); default: buf.append(c); break; } } }
// Every Lucene query metacharacter in the input must come out prefixed with a
// single backslash; ordinary characters pass through untouched.
@Test
public void testAppendEscapedLuceneQuery() {
    StringBuilder buf = new StringBuilder();
    CharSequence text = "test encoding + - & | ! ( ) { } [ ] ^ \" ~ * ? : \\";
    String expResult = "test encoding \\+ \\- \\& \\| \\! \\( \\) \\{ \\} \\[ \\] \\^ \\\" \\~ \\* \\? \\: \\\\";
    LuceneUtils.appendEscapedLuceneQuery(buf, text);
    assertEquals(expResult, buf.toString());
}
/**
 * Waits for a new config generation from the backing file.
 * <p>
 * Returns true immediately when a reload was forced by the user or the
 * file's modification timestamp changed since the last read; otherwise
 * sleeps for {@code timeout} ms and returns false.
 *
 * @param timeout milliseconds to wait when nothing has changed
 * @return true if a (possibly unchanged) config was reloaded, false on timeout
 * @throws ConfigInterruptedException if the waiting sleep is interrupted
 */
@Override
public boolean nextConfig(long timeout) {
    file.validateFile();
    if (checkReloaded()) {
        log.log(FINE, () -> "User forced config reload at " + System.currentTimeMillis());
        // User forced reload
        setConfigIfChanged(updateConfig());
        ConfigState<T> configState = getConfigState();
        log.log(FINE, () -> "Config updated at " + System.currentTimeMillis() + ", changed: " + configState.isConfigChanged());
        log.log(FINE, () -> "Config: " + configState.getConfig().toString());
        return true;
    }
    // A timestamp change means the file was rewritten: bump the generation.
    if (file.getLastModified() != ts) {
        setConfigIncGen(updateConfig());
        return true;
    }
    try {
        Thread.sleep(timeout);
    } catch (InterruptedException e) {
        throw new ConfigInterruptedException(e);
    }
    return false;
}
// Rewriting the config file must be picked up by the next nextConfig() poll.
// The 1s sleep lets the file's modification timestamp advance — NOTE(review):
// timestamp-granularity dependent, a known source of flakiness on fast runs.
@Test
public void require_that_new_config_is_detected_in_time() throws IOException, InterruptedException {
    writeConfig("intval", "23");
    ConfigSubscription<SimpletypesConfig> sub = new FileConfigSubscription<>(
        new ConfigKey<>(SimpletypesConfig.class, ""),
        new FileSource(TEST_TYPES_FILE));
    assertTrue(sub.nextConfig(1000));
    assertEquals(23, sub.getConfigState().getConfig().intval());
    Thread.sleep(1000);
    writeConfig("intval", "33");
    assertTrue(sub.nextConfig(1000));
    assertEquals(33, sub.getConfigState().getConfig().intval());
}
/**
 * Converts a scalar operator into a predicate via the visitor protocol;
 * a null operator converts to a null predicate.
 */
public Predicate convert(ScalarOperator operator) {
    return operator == null ? null : operator.accept(this, null);
}
// An OR of two binary comparisons must convert to a CompoundPredicate with an
// Or function whose children are the GE(44) and LE(22) leaf predicates.
@Test
public void testOr() {
    BinaryPredicateOperator op1 = new BinaryPredicateOperator(
        BinaryType.GE, F0, ConstantOperator.createInt(44));
    BinaryPredicateOperator op2 = new BinaryPredicateOperator(
        BinaryType.LE, F0, ConstantOperator.createInt(22));
    ScalarOperator op = new CompoundPredicateOperator(CompoundPredicateOperator.CompoundType.OR, op1, op2);
    Predicate result = CONVERTER.convert(op);
    Assert.assertTrue(result instanceof CompoundPredicate);
    CompoundPredicate compoundPredicate = (CompoundPredicate) result;
    Assert.assertTrue(compoundPredicate.function() instanceof Or);
    Assert.assertEquals(2, compoundPredicate.children().size());
    Assert.assertTrue(compoundPredicate.children().get(0) instanceof LeafPredicate);
    LeafPredicate p1 = (LeafPredicate) compoundPredicate.children().get(0);
    Assert.assertTrue(p1.function() instanceof GreaterOrEqual);
    Assert.assertEquals(44, p1.literals().get(0));
    Assert.assertTrue(compoundPredicate.children().get(1) instanceof LeafPredicate);
    LeafPredicate p2 = (LeafPredicate) compoundPredicate.children().get(1);
    Assert.assertTrue(p2.function() instanceof LessOrEqual);
    Assert.assertEquals(22, p2.literals().get(0));
}
static <T> T getWildcardMappedObject(final Map<String, T> mapping, final String query) { T value = mapping.get(query); if (value == null) { for (String key : mapping.keySet()) { // Turn the search key into a regex, using all characters but the * as a literal. String regex = Arrays.stream(key.split("\\*")) // split in parts that do not have a wildcard in them .map(Pattern::quote) // each part should be used as a literal (not as a regex or partial regex) .collect(Collectors.joining(".*")); // join all literal parts with a regex representation on the wildcard. if (key.endsWith("*")) { // the 'split' will have removed any trailing wildcard characters. Correct for that. regex += ".*"; } if (query.matches(regex)) { value = mapping.get(key); break; } } } return value; }
// An exact key match must resolve without any wildcard processing.
@Test public void testExact() throws Exception { // Setup test fixture. final Map<String, Object> haystack = Map.of("myplugin/foo", new Object()); // Execute system under test. final Object result = PluginServlet.getWildcardMappedObject(haystack, "myplugin/foo"); // Verify results. assertNotNull(result); }
/**
 * Returns the count recorded {@code prevStep} rotations before the current slot.
 * The offset is normalized into {@code [0, slotCount)} and the ring index is
 * computed by walking backwards from the current position.
 */
public int getCount(int prevStep) {
    final int offset = prevStep % this.slotCount;
    final int slot = (this.index - offset + this.slotCount) % this.slotCount;
    return this.data[slot].intValue();
}
// After two rotations, looking 2 slots back must return the count (2) added in the first slot.
@Test void testGetCount() { SimpleFlowData simpleFlowData = new SimpleFlowData(5, 10000); simpleFlowData.addAndGet(2); simpleFlowData.rotateSlot(); simpleFlowData.addAndGet(3); simpleFlowData.rotateSlot(); simpleFlowData.incrementAndGet(); assertEquals("0 0 2 3 1", simpleFlowData.getSlotInfo()); assertEquals(2, simpleFlowData.getCount(2)); }
// Merges the non-null fields of the given RoutineLoadDesc (column/row separators,
// columns mapping, where predicate, partitions) into the load desc parsed from the
// current persisted origin statement, then regenerates origStmt as a CREATE ROUTINE
// LOAD string. The PROPERTIES/FROM KAFKA clauses in the regenerated SQL are fixed
// placeholders — only the load desc portion carries real persisted state.
// Falls back to table name "unknown" when resolution fails; no-op when origStmt is null.
public void mergeLoadDescToOriginStatement(RoutineLoadDesc routineLoadDesc) { if (origStmt == null) { return; } RoutineLoadDesc originLoadDesc = CreateRoutineLoadStmt.getLoadDesc(origStmt, sessionVariables); if (originLoadDesc == null) { originLoadDesc = new RoutineLoadDesc(); } if (routineLoadDesc.getColumnSeparator() != null) { originLoadDesc.setColumnSeparator(routineLoadDesc.getColumnSeparator()); } if (routineLoadDesc.getRowDelimiter() != null) { originLoadDesc.setRowDelimiter(routineLoadDesc.getRowDelimiter()); } if (routineLoadDesc.getColumnsInfo() != null) { originLoadDesc.setColumnsInfo(routineLoadDesc.getColumnsInfo()); } if (routineLoadDesc.getWherePredicate() != null) { originLoadDesc.setWherePredicate(routineLoadDesc.getWherePredicate()); } if (routineLoadDesc.getPartitionNames() != null) { originLoadDesc.setPartitionNames(routineLoadDesc.getPartitionNames()); } String tableName = null; try { tableName = getTableName(); } catch (Exception e) { LOG.warn("get table name failed", e); tableName = "unknown"; } // we use sql to persist the load properties, so we just put the load properties to sql. String sql = String.format("CREATE ROUTINE LOAD %s ON %s %s" + " PROPERTIES (\"desired_concurrent_number\"=\"1\")" + " FROM KAFKA (\"kafka_topic\" = \"my_topic\")", name, tableName, originLoadDesc.toSql()); LOG.debug("merge result: {}", sql); origStmt = new OriginStatement(sql, 0); }
// Applies a sequence of ALTER ROUTINE LOAD descs (separators, columns, partitions,
// where — each both set and later overridden) and verifies the regenerated origin
// statement after every merge, including a multi-clause WHERE expression.
@Test public void testMergeLoadDescToOriginStatement() throws Exception { KafkaRoutineLoadJob routineLoadJob = new KafkaRoutineLoadJob(1L, "job", 2L, 3L, "192.168.1.2:10000", "topic"); String originStmt = "CREATE ROUTINE LOAD job ON unknown " + "PROPERTIES (\"desired_concurrent_number\"=\"1\") " + "FROM KAFKA (\"kafka_topic\" = \"my_topic\")"; routineLoadJob.setOrigStmt(new OriginStatement(originStmt, 0)); // alter columns terminator RoutineLoadDesc loadDesc = CreateRoutineLoadStmt.getLoadDesc(new OriginStatement( "ALTER ROUTINE LOAD FOR job " + "COLUMNS TERMINATED BY ';'", 0), null); routineLoadJob.mergeLoadDescToOriginStatement(loadDesc); Assert.assertEquals("CREATE ROUTINE LOAD job ON unknown " + "COLUMNS TERMINATED BY ';' " + "PROPERTIES (\"desired_concurrent_number\"=\"1\") " + "FROM KAFKA (\"kafka_topic\" = \"my_topic\")", routineLoadJob.getOrigStmt().originStmt); // alter rows terminator loadDesc = CreateRoutineLoadStmt.getLoadDesc(new OriginStatement( "ALTER ROUTINE LOAD FOR job " + "ROWS TERMINATED BY '\n'", 0), null); routineLoadJob.mergeLoadDescToOriginStatement(loadDesc); Assert.assertEquals("CREATE ROUTINE LOAD job ON unknown " + "COLUMNS TERMINATED BY ';', " + "ROWS TERMINATED BY '\n' " + "PROPERTIES (\"desired_concurrent_number\"=\"1\") " + "FROM KAFKA (\"kafka_topic\" = \"my_topic\")", routineLoadJob.getOrigStmt().originStmt); // alter columns loadDesc = CreateRoutineLoadStmt.getLoadDesc(new OriginStatement( "ALTER ROUTINE LOAD FOR job " + "COLUMNS(`a`, `b`, `c`=1)", 0), null); routineLoadJob.mergeLoadDescToOriginStatement(loadDesc); Assert.assertEquals("CREATE ROUTINE LOAD job ON unknown " + "COLUMNS TERMINATED BY ';', " + "ROWS TERMINATED BY '\n', " + "COLUMNS(`a`, `b`, `c` = 1) " + "PROPERTIES (\"desired_concurrent_number\"=\"1\") " + "FROM KAFKA (\"kafka_topic\" = \"my_topic\")", routineLoadJob.getOrigStmt().originStmt); // alter partition loadDesc = CreateRoutineLoadStmt.getLoadDesc(new OriginStatement( "ALTER ROUTINE LOAD FOR job " + 
"TEMPORARY PARTITION(`p1`, `p2`)", 0), null); routineLoadJob.mergeLoadDescToOriginStatement(loadDesc); Assert.assertEquals("CREATE ROUTINE LOAD job ON unknown " + "COLUMNS TERMINATED BY ';', " + "ROWS TERMINATED BY '\n', " + "COLUMNS(`a`, `b`, `c` = 1), " + "TEMPORARY PARTITION(`p1`, `p2`) " + "PROPERTIES (\"desired_concurrent_number\"=\"1\") " + "FROM KAFKA (\"kafka_topic\" = \"my_topic\")", routineLoadJob.getOrigStmt().originStmt); // alter where loadDesc = CreateRoutineLoadStmt.getLoadDesc(new OriginStatement( "ALTER ROUTINE LOAD FOR job " + "WHERE a = 1", 0), null); routineLoadJob.mergeLoadDescToOriginStatement(loadDesc); Assert.assertEquals("CREATE ROUTINE LOAD job ON unknown " + "COLUMNS TERMINATED BY ';', " + "ROWS TERMINATED BY '\n', " + "COLUMNS(`a`, `b`, `c` = 1), " + "TEMPORARY PARTITION(`p1`, `p2`), " + "WHERE `a` = 1 " + "PROPERTIES (\"desired_concurrent_number\"=\"1\") " + "FROM KAFKA (\"kafka_topic\" = \"my_topic\")", routineLoadJob.getOrigStmt().originStmt); // alter columns terminator again loadDesc = CreateRoutineLoadStmt.getLoadDesc(new OriginStatement( "ALTER ROUTINE LOAD FOR job " + "COLUMNS TERMINATED BY '\t'", 0), null); routineLoadJob.mergeLoadDescToOriginStatement(loadDesc); Assert.assertEquals("CREATE ROUTINE LOAD job ON unknown " + "COLUMNS TERMINATED BY '\t', " + "ROWS TERMINATED BY '\n', " + "COLUMNS(`a`, `b`, `c` = 1), " + "TEMPORARY PARTITION(`p1`, `p2`), " + "WHERE `a` = 1 " + "PROPERTIES (\"desired_concurrent_number\"=\"1\") " + "FROM KAFKA (\"kafka_topic\" = \"my_topic\")", routineLoadJob.getOrigStmt().originStmt); // alter rows terminator again loadDesc = CreateRoutineLoadStmt.getLoadDesc(new OriginStatement( "ALTER ROUTINE LOAD FOR job " + "ROWS TERMINATED BY 'a'", 0), null); routineLoadJob.mergeLoadDescToOriginStatement(loadDesc); Assert.assertEquals("CREATE ROUTINE LOAD job ON unknown " + "COLUMNS TERMINATED BY '\t', " + "ROWS TERMINATED BY 'a', " + "COLUMNS(`a`, `b`, `c` = 1), " + "TEMPORARY PARTITION(`p1`, `p2`), " + "WHERE 
`a` = 1 " + "PROPERTIES (\"desired_concurrent_number\"=\"1\") " + "FROM KAFKA (\"kafka_topic\" = \"my_topic\")", routineLoadJob.getOrigStmt().originStmt); // alter columns again loadDesc = CreateRoutineLoadStmt.getLoadDesc(new OriginStatement( "ALTER ROUTINE LOAD FOR job " + "COLUMNS(`a`)", 0), null); routineLoadJob.mergeLoadDescToOriginStatement(loadDesc); Assert.assertEquals("CREATE ROUTINE LOAD job ON unknown " + "COLUMNS TERMINATED BY '\t', " + "ROWS TERMINATED BY 'a', " + "COLUMNS(`a`), " + "TEMPORARY PARTITION(`p1`, `p2`), " + "WHERE `a` = 1 " + "PROPERTIES (\"desired_concurrent_number\"=\"1\") " + "FROM KAFKA (\"kafka_topic\" = \"my_topic\")", routineLoadJob.getOrigStmt().originStmt); // alter partition again loadDesc = CreateRoutineLoadStmt.getLoadDesc(new OriginStatement( "ALTER ROUTINE LOAD FOR job " + " PARTITION(`p1`, `p2`)", 0), null); routineLoadJob.mergeLoadDescToOriginStatement(loadDesc); Assert.assertEquals("CREATE ROUTINE LOAD job ON unknown " + "COLUMNS TERMINATED BY '\t', " + "ROWS TERMINATED BY 'a', " + "COLUMNS(`a`), " + "PARTITION(`p1`, `p2`), " + "WHERE `a` = 1 " + "PROPERTIES (\"desired_concurrent_number\"=\"1\") " + "FROM KAFKA (\"kafka_topic\" = \"my_topic\")", routineLoadJob.getOrigStmt().originStmt); // alter where again loadDesc = CreateRoutineLoadStmt.getLoadDesc(new OriginStatement( "ALTER ROUTINE LOAD FOR job " + "WHERE a = 5", 0), null); routineLoadJob.mergeLoadDescToOriginStatement(loadDesc); Assert.assertEquals("CREATE ROUTINE LOAD job ON unknown " + "COLUMNS TERMINATED BY '\t', " + "ROWS TERMINATED BY 'a', " + "COLUMNS(`a`), " + "PARTITION(`p1`, `p2`), " + "WHERE `a` = 5 " + "PROPERTIES (\"desired_concurrent_number\"=\"1\") " + "FROM KAFKA (\"kafka_topic\" = \"my_topic\")", routineLoadJob.getOrigStmt().originStmt); // alter where again loadDesc = CreateRoutineLoadStmt.getLoadDesc(new OriginStatement( "ALTER ROUTINE LOAD FOR job " + "WHERE a = 5 and b like 'c1%' and c between 1 and 100 and substring(d,1,5) = 'cefd' ", 0), null); 
routineLoadJob.mergeLoadDescToOriginStatement(loadDesc); Assert.assertEquals("CREATE ROUTINE LOAD job ON unknown " + "COLUMNS TERMINATED BY '\t', " + "ROWS TERMINATED BY 'a', " + "COLUMNS(`a`), " + "PARTITION(`p1`, `p2`), " + "WHERE (((`a` = 5) " + "AND (`b` LIKE 'c1%')) " + "AND (`c` BETWEEN 1 AND 100)) " + "AND (substring(`d`, 1, 5) = 'cefd') " + "PROPERTIES (\"desired_concurrent_number\"=\"1\") " + "FROM KAFKA (\"kafka_topic\" = \"my_topic\")", routineLoadJob.getOrigStmt().originStmt); }
// Public executor entry point: submits the task by delegating to the internal execute0.
@Override public void execute(Runnable task) { execute0(task); }
// Regression test for netty#1614: a scheduled task must still fire even while the
// task queue is kept permanently non-empty by a self-resubmitting runnable.
@Test @Timeout(value = 5000, unit = TimeUnit.MILLISECONDS) public void testTakeTaskAlwaysHasTask() throws Exception { //for https://github.com/netty/netty/issues/1614 //add scheduled task TestRunnable t = new TestRunnable(0); final ScheduledFuture<?> f = e.schedule(t, 1500, TimeUnit.MILLISECONDS); //ensure always has at least one task in taskQueue //check if scheduled tasks are triggered e.execute(new Runnable() { @Override public void run() { if (!f.isDone()) { e.execute(this); } } }); f.sync(); assertThat(t.ran.get(), is(true)); }
// Builds one PushImageStep per target tag for pushing the manifest list.
// Returns an empty list when the build targets exactly one platform (no list needed)
// or when jib.skipExistingImages is set and the list already exists in the registry.
// The in-code TODO notes that the manifest-list digest is reused as the "image id"
// placeholder pending a proper return type.
static ImmutableList<PushImageStep> makeListForManifestList( BuildContext buildContext, ProgressEventDispatcher.Factory progressEventDispatcherFactory, RegistryClient registryClient, ManifestTemplate manifestList, boolean manifestListAlreadyExists) throws IOException { Set<String> tags = buildContext.getAllTargetImageTags(); EventHandlers eventHandlers = buildContext.getEventHandlers(); try (TimerEventDispatcher ignored = new TimerEventDispatcher(eventHandlers, "Preparing manifest list pushers"); ProgressEventDispatcher progressEventDispatcher = progressEventDispatcherFactory.create("launching manifest list pushers", tags.size())) { boolean singlePlatform = buildContext.getContainerConfiguration().getPlatforms().size() == 1; if (singlePlatform) { return ImmutableList.of(); // single image; no need to push a manifest list } if (JibSystemProperties.skipExistingImages() && manifestListAlreadyExists) { eventHandlers.dispatch(LogEvent.info("Skipping pushing manifest list; already exists.")); return ImmutableList.of(); } DescriptorDigest manifestListDigest = Digests.computeJsonDigest(manifestList); return tags.stream() .map( tag -> new PushImageStep( buildContext, progressEventDispatcher.newChildProducer(), registryClient, manifestList, tag, manifestListDigest, // TODO: a manifest list digest isn't an "image id". Figure out the right // return value and type. manifestListDigest)) .collect(ImmutableList.toImmutableList()); } }
// A single-platform build must produce no manifest-list push steps.
@Test public void testMakeListForManifestList_singlePlatform() throws IOException { when(containerConfig.getPlatforms()) .thenReturn(ImmutableSet.of(new Platform("amd64", "linux"))); List<PushImageStep> pushImageStepList = PushImageStep.makeListForManifestList( buildContext, progressDispatcherFactory, registryClient, manifestList, false); assertThat(pushImageStepList).isEmpty(); }
// Assembles the base Connect connector config for a source->target replication flow.
// Layering order (later wins): retained top-level MM2 props, config.providers.* and
// replication.policy.* pass-throughs, source-cluster then target-cluster configs
// (both bare and producer|consumer|admin-prefixed client configs), defaulted
// name/connector.class/cluster aliases, flow-level "source->target." overrides,
// and finally enabled=false unless set. Values are intentionally NOT resolved via
// config providers here — the worker transforms connector/task configs itself.
public Map<String, String> connectorBaseConfig(SourceAndTarget sourceAndTarget, Class<?> connectorClass) { Map<String, String> props = new HashMap<>(); props.putAll(rawProperties); props.keySet().retainAll(allConfigNames()); props.putAll(stringsWithPrefix(CONFIG_PROVIDERS_CONFIG)); props.putAll(stringsWithPrefix("replication.policy")); Map<String, String> sourceClusterProps = clusterProps(sourceAndTarget.source()); // attrs non prefixed with producer|consumer|admin props.putAll(clusterConfigsWithPrefix(SOURCE_CLUSTER_PREFIX, sourceClusterProps)); // attrs prefixed with producer|consumer|admin props.putAll(clientConfigsWithPrefix(SOURCE_PREFIX, sourceClusterProps)); Map<String, String> targetClusterProps = clusterProps(sourceAndTarget.target()); props.putAll(clusterConfigsWithPrefix(TARGET_CLUSTER_PREFIX, targetClusterProps)); props.putAll(clientConfigsWithPrefix(TARGET_PREFIX, targetClusterProps)); props.putIfAbsent(NAME, connectorClass.getSimpleName()); props.putIfAbsent(CONNECTOR_CLASS, connectorClass.getName()); props.putIfAbsent(SOURCE_CLUSTER_ALIAS, sourceAndTarget.source()); props.putIfAbsent(TARGET_CLUSTER_ALIAS, sourceAndTarget.target()); // override with connector-level properties props.putAll(stringsWithPrefixStripped(sourceAndTarget.source() + "->" + sourceAndTarget.target() + ".")); // disabled by default props.putIfAbsent(MirrorConnectorConfig.ENABLED, "false"); // don't transform -- the worker will handle transformation of Connector and Task configs return props; }
// Config-provider placeholders (${fake:secret:password}) must survive unresolved in
// the connector base config — resolution is deferred to the Connect worker.
@Test public void testLazyConfigResolution() { MirrorMakerConfig mirrorConfig = new MirrorMakerConfig(makeProps( "clusters", "a, b", "config.providers", "fake", "config.providers.fake.class", FakeConfigProvider.class.getName(), "replication.policy.separator", "__", "offset.storage.replication.factor", "123", "b.status.storage.replication.factor", "456", "b.producer.client.id", "client-one", "b.security.protocol", "PLAINTEXT", "b.producer.security.protocol", "SASL", "ssl.truststore.password", "secret1", "ssl.key.password", "${fake:secret:password}", // should not be resolved "b.xxx", "yyy", "b->a.topics", "${fake:secret:password}")); // should not be resolved SourceAndTarget a = new SourceAndTarget("b", "a"); Map<String, String> props = mirrorConfig.connectorBaseConfig(a, MirrorSourceConnector.class); assertEquals("${fake:secret:password}", props.get("ssl.key.password"), "connector properties should not be transformed"); assertEquals("${fake:secret:password}", props.get("topics"), "connector properties should not be transformed"); }
// Convenience overload that delegates to the four-argument variant with the last
// flag set to false.
// NOTE(review): the semantics of that fourth boolean are not visible here — confirm
// against the full overload before relying on this default.
public void selectInputStreams(Collection<EditLogInputStream> streams, long fromTxnId, boolean inProgressOk) throws IOException { selectInputStreams(streams, fromTxnId, inProgressOk, false); }
// Selecting streams from a txid (25) that falls inside a finalized segment must still
// yield edits covering 25..50 across the remaining segments.
@Test public void testSelectInputStreamsNotOnBoundary() throws Exception { final int txIdsPerSegment = 10; for (int txid = 1; txid <= 5 * txIdsPerSegment; txid += txIdsPerSegment) { writeSegment(cluster, qjm, txid, txIdsPerSegment, true); } File curDir = cluster.getCurrentDir(0, JID); GenericTestUtils.assertGlobEquals(curDir, "edits_.*", NNStorage.getFinalizedEditsFileName(1, 10), NNStorage.getFinalizedEditsFileName(11, 20), NNStorage.getFinalizedEditsFileName(21, 30), NNStorage.getFinalizedEditsFileName(31, 40), NNStorage.getFinalizedEditsFileName(41, 50)); ArrayList<EditLogInputStream> streams = new ArrayList<EditLogInputStream>(); qjm.selectInputStreams(streams, 25, false); verifyEdits(streams, 25, 50); }
// Creates or updates the znode at the given path with PERSISTENT mode, so the data
// survives the ZooKeeper session that wrote it.
@Override public void createOrUpdate(final String path, final Object data) { zkClient.createOrUpdate(path, data, CreateMode.PERSISTENT); }
// A plugin UPDATE event must write the plugin data to its ZooKeeper path exactly once,
// with PERSISTENT create mode.
@Test public void testOnPluginChangedUpdate() { PluginData pluginData = PluginData.builder().id(MOCK_ID).name(MOCK_NAME).config(MOCK_CONFIG).build(); String pluginPath = DefaultPathConstants.buildPluginPath(pluginData.getName()); zookeeperDataChangedListener.onPluginChanged(ImmutableList.of(pluginData), DataEventTypeEnum.UPDATE); verify(zkClient, times(1)).createOrUpdate(pluginPath, pluginData, CreateMode.PERSISTENT); }
// Allocates a meter cell id for the given device and scope. Returns null when
// user-defined-index mode is on, when the device advertises no meters, or when the
// id space is exhausted. Freed (reusable) ids are handed out first; otherwise ids
// come from a monotonic per-table generator bounded by the MeterFeatures start/end
// index. When the device provides no MeterFeatures (start/end == -1), falls back to
// queryMaxMeters — only meaningful for OpenFlow, whose meter indices start at 1.
// Global scope returns a plain MeterId for backward compatibility; any other scope
// returns a PiMeterCellId for the scope's meter table.
@Override public MeterCellId allocateMeterId(DeviceId deviceId, MeterScope meterScope) { if (userDefinedIndexMode) { log.warn("Unable to allocate meter id when user defined index mode is enabled"); return null; } MeterTableKey meterTableKey = MeterTableKey.key(deviceId, meterScope); MeterCellId meterCellId; long id; // First, search for reusable key meterCellId = firstReusableMeterId(meterTableKey); if (meterCellId != null) { return meterCellId; } // If there was no reusable meter id we have to generate a new value // using start and end index as lower and upper bound respectively. long startIndex = getStartIndex(meterTableKey); long endIndex = getEndIndex(meterTableKey); // If the device does not give us MeterFeatures fallback to queryMeters if (startIndex == -1L || endIndex == -1L) { // Only meaningful for OpenFlow today long maxMeters = queryMaxMeters(deviceId); if (maxMeters == 0L) { return null; } else { // OpenFlow meter index starts from 1, ends with max startIndex = 1L; endIndex = maxMeters; } } do { id = meterIdGenerators.getAndIncrement(meterTableKey); } while (id < startIndex); if (id > endIndex) { return null; } // For backward compatibility if we are using global scope, // return a MeterId, otherwise we create a PiMeterCellId if (meterScope.isGlobal()) { return MeterId.meterId(id); } else { return PiMeterCellId.ofIndirect(PiMeterId.of(meterScope.id()), id); } }
// Consecutive global-scope allocations on the same device must yield the expected
// sequential meter ids.
@Test public void testAllocateId() { initMeterStore(false); assertThat(mid1, is(meterStore.allocateMeterId(did1, MeterScope.globalScope()))); assertThat(mid2, is(meterStore.allocateMeterId(did1, MeterScope.globalScope()))); }
/**
 * Returns all beans of {@code requiredType} from the factory, including those
 * declared in ancestor factories.
 *
 * @throws RuntimeException if the factory is not a {@link ListableBeanFactory}
 */
public static <T> List<T> getBeans(BeanFactory beanFactory, Class<T> requiredType) {
    if (beanFactory instanceof ListableBeanFactory) {
        Map<String, T> beansByName =
                beansOfTypeIncludingAncestors((ListableBeanFactory) beanFactory, requiredType);
        return new ArrayList<>(beansByName.values());
    }
    throw new RuntimeException("bean factory not support get list bean. factory type = " + beanFactory.getClass()
            .getName());
}
// Beans registered in a parent factory must be visible through getBeans on the child;
// a non-listable factory must be rejected with a RuntimeException.
@Test public void testGetBeansIncludingAncestors() { DefaultListableBeanFactory parentBeanFactory = new DefaultListableBeanFactory(); parentBeanFactory.registerBeanDefinition("foo", new RootBeanDefinition(Foo.class)); DefaultListableBeanFactory childBeanFactory = new DefaultListableBeanFactory(parentBeanFactory); assertThat(childBeanFactory.getBeansOfType(Foo.class)).isEmpty(); assertThat(BeanFactoryUtils.getBeans(childBeanFactory, Foo.class).size()).isEqualTo(1); assertThat(BeanFactoryUtils.getBeans(childBeanFactory, Bar.class)).isEmpty(); MockBeanFactory mockBeanFactory = new MockBeanFactory(); assertThatThrownBy(() -> BeanFactoryUtils.getBeans(mockBeanFactory, Bar.class)) .isExactlyInstanceOf(RuntimeException.class) .hasMessageContaining("bean factory not support get list bean."); }
/**
 * Builds producer {@link Properties} from an optional config file plus a list of
 * {@code key=value} overrides, forcing byte-array serializers and defaulting the
 * client id to {@code perf-producer-client}.
 *
 * @param producerProps  command-line {@code key=value} overrides, may be null
 * @param producerConfig path to a properties file, may be null
 * @throws IllegalArgumentException if an override has no '=' or an empty key
 */
static Properties readProps(List<String> producerProps, String producerConfig) throws IOException {
    Properties props = new Properties();
    if (producerConfig != null) {
        props.putAll(Utils.loadProps(producerConfig));
    }
    if (producerProps != null) {
        for (String prop : producerProps) {
            // Split on the first '=' only, so values may themselves contain '='
            // (e.g. sasl.jaas.config). The old split("=") rejected such inputs.
            String[] pieces = prop.split("=", 2);
            if (pieces.length != 2 || pieces[0].isEmpty()) {
                throw new IllegalArgumentException("Invalid property: " + prop);
            }
            props.put(pieces[0], pieces[1]);
        }
    }
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
    if (props.getProperty(ProducerConfig.CLIENT_ID_CONFIG) == null) {
        props.put(ProducerConfig.CLIENT_ID_CONFIG, "perf-producer-client");
    }
    return props;
}
// When no client.id is supplied, readProps must default it to "perf-producer-client".
@Test public void testDefaultClientId() throws Exception { List<String> producerProps = Collections.singletonList("acks=1"); Properties prop = ProducerPerformance.readProps(producerProps, null); assertNotNull(prop); assertEquals("perf-producer-client", prop.getProperty("client.id")); }
// Returns the pre-computed value at the given bar index from the backing list.
@Override public Num getValue(int index) { return values.get(index); }
// A short position on a doubling series wipes out the cash flow: ratio stays 1 before
// entry and drops to 0 from the losing exit bar onwards.
@Test public void cashFlowSell() { BarSeries sampleBarSeries = new MockBarSeries(numFunction, 1, 2, 4, 8, 16, 32); TradingRecord tradingRecord = new BaseTradingRecord(Trade.sellAt(2, sampleBarSeries), Trade.buyAt(3, sampleBarSeries)); CashFlow cashFlow = new CashFlow(sampleBarSeries, tradingRecord); assertNumEquals(1, cashFlow.getValue(0)); assertNumEquals(1, cashFlow.getValue(1)); assertNumEquals(1, cashFlow.getValue(2)); assertNumEquals(0, cashFlow.getValue(3)); assertNumEquals(0, cashFlow.getValue(4)); assertNumEquals(0, cashFlow.getValue(5)); }
/**
 * Counts the bars spanned by a closed position; an open position contributes zero.
 * Entry and exit bars are both counted, hence the {@code + 1}.
 */
@Override
public Num calculate(BarSeries series, Position position) {
    if (!position.isClosed()) {
        return series.zero();
    }
    final int entryIndex = position.getEntry().getIndex();
    final int exitIndex = position.getExit().getIndex();
    return series.numOf(exitIndex - entryIndex + 1);
}
// A position entered at bar 2 and exited at bar 5 spans 4 bars (both ends inclusive).
@Test public void calculateWithOnePosition() { MockBarSeries series = new MockBarSeries(numFunction, 100, 95, 100, 80, 85, 70); Position t = new Position(Trade.buyAt(2, series), Trade.sellAt(5, series)); AnalysisCriterion numberOfBars = getCriterion(); assertNumEquals(4, numberOfBars.calculate(series, t)); }
/**
 * Decodes a MySQL DATETIME2 binlog value. A raw value of zero maps to the sentinel
 * {@code DATETIME_OF_ZERO}; anything else is decoded (including any fractional-second
 * bytes consumed from the payload) via {@code readDatetime}.
 */
@Override
public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) {
    final long rawDatetime = readDatetimeV2FromPayload(payload);
    if (0L == rawDatetime) {
        return MySQLTimeValueUtils.DATETIME_OF_ZERO;
    }
    return readDatetime(columnDef, rawDatetime, payload);
}
// A zero raw datetime must decode to the DATETIME_OF_ZERO sentinel.
@Test void assertReadNullTime() { assertThat(new MySQLDatetime2BinlogProtocolValue().read(columnDef, payload), is(MySQLTimeValueUtils.DATETIME_OF_ZERO)); }
/**
 * Packs a semantic version into a single int as {@code (major << 16) | (minor << 8) | patch}.
 * Each part must be in [0, 255] and at least one part must be non-zero
 * (0.0.0 is reserved/invalid).
 *
 * @throws IllegalArgumentException if a part is out of range or all parts are zero
 */
public static int compose(final int major, final int minor, final int patch) {
    checkVersionPart("major", major);
    checkVersionPart("minor", minor);
    checkVersionPart("patch", patch);
    if (major + minor + patch == 0) {
        throw new IllegalArgumentException("all parts cannot be zero");
    }
    return (major << 16) | (minor << 8) | patch;
}

// Validates a single version component against the unsigned-byte range.
private static void checkVersionPart(final String name, final int value) {
    if (value < 0 || value > 255) {
        throw new IllegalArgumentException(name + " must be 0-255: " + value);
    }
}
// A major component above 255 must be rejected.
@Test void shouldDetectExcessiveMajor() { assertThrows(IllegalArgumentException.class, () -> SemanticVersion.compose(256, 1, 1)); }
public static Optional<PfxOptions> getPfxKeyStoreOptions(final Map<String, String> props) { // PFX key stores do not have a Private key password final String location = getKeyStoreLocation(props); final String password = getKeyStorePassword(props); if (!Strings.isNullOrEmpty(location)) { return Optional.of(buildPfxOptions(location, password)); } return Optional.empty(); }
// Given both location and password, the resulting PfxOptions must carry them through.
@Test public void shouldBuildKeyStorePfxOptionsWithPathAndPassword() { // When final Optional<PfxOptions> pfxOptions = VertxSslOptionsFactory.getPfxKeyStoreOptions( ImmutableMap.of( SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "path", SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "password" ) ); // Then assertThat(pfxOptions.get().getPath(), is("path")); assertThat(pfxOptions.get().getPassword(), is("password")); }
// Returns (creating and caching on first use) the proxy implementing the requested
// PipelineOptions sub-interface. Uses double-checked locking around an immutable
// computedProperties snapshot: the unsynchronized fast path serves cached proxies,
// and the synchronized slow path re-checks before validating the interface, building
// the proxy reflectively, and publishing an updated snapshot. Do not reorder these
// statements — correctness under concurrency depends on the re-check and the single
// atomic replacement of computedProperties.
<T extends PipelineOptions> T as(Class<T> iface) { checkNotNull(iface); checkArgument(iface.isInterface(), "Not an interface: %s", iface); T existingOption = computedProperties.interfaceToProxyCache.getInstance(iface); if (existingOption == null) { synchronized (this) { // double check existingOption = computedProperties.interfaceToProxyCache.getInstance(iface); if (existingOption == null) { Registration<T> registration = PipelineOptionsFactory.CACHE .get() .validateWellFormed(iface, computedProperties.knownInterfaces); List<PropertyDescriptor> propertyDescriptors = registration.getPropertyDescriptors(); Class<T> proxyClass = registration.getProxyClass(); existingOption = InstanceBuilder.ofType(proxyClass) .fromClass(proxyClass) .withArg(InvocationHandler.class, this) .build(); computedProperties = computedProperties.updated(iface, existingOption, propertyDescriptors); } } } return existingOption; }
// A custom-serializable option value must survive a JSON serialize/deserialize round trip.
@Test public void testJsonConversionOfSerializableWithMetadataProperty() throws Exception { SerializableWithMetadataProperty options = PipelineOptionsFactory.as(SerializableWithMetadataProperty.class); options.setValue(new SerializableWithMetadata("TestString")); SerializableWithMetadataProperty options2 = serializeDeserialize(SerializableWithMetadataProperty.class, options); assertEquals("TestString", options2.getValue().getValue()); }
// Local-only presence check: consults the in-memory displacement-listener map for this
// server instance rather than the shared (Redis-backed) presence state.
public boolean isLocallyPresent(final UUID accountUuid, final byte deviceId) { return displacementListenersByPresenceKey.containsKey(getPresenceKey(accountUuid, deviceId)); }
// Local presence must be tracked in memory: it survives a full Redis flush because
// isLocallyPresent never consults the cluster.
@Test void testIsLocallyPresent() { final UUID accountUuid = UUID.randomUUID(); final byte deviceId = 1; assertFalse(clientPresenceManager.isLocallyPresent(accountUuid, deviceId)); clientPresenceManager.setPresent(accountUuid, deviceId, NO_OP); REDIS_CLUSTER_EXTENSION.getRedisCluster().useCluster(connection -> connection.sync().flushall()); assertTrue(clientPresenceManager.isLocallyPresent(accountUuid, deviceId)); }
// Builds the paged fuzzy-search (LIKE) row-fetch query over config_info left-joined
// with config_tags_relation. tenant_id always contributes a LIKE clause; dataId,
// group and content add LIKE clauses when non-blank, appName an equality clause, and
// tag/type arrays IN clauses. Paging is applied last via the builder's offset
// (Derby OFFSET/FETCH syntax). Clause order here determines parameter order.
@Override public MapperResult findConfigInfoLike4PageFetchRows(MapperContext context) { final String appName = (String) context.getWhereParameter(FieldConstant.APP_NAME); final String dataId = (String) context.getWhereParameter(FieldConstant.DATA_ID); final String group = (String) context.getWhereParameter(FieldConstant.GROUP_ID); final String content = (String) context.getWhereParameter(FieldConstant.CONTENT); final String tenantId = (String) context.getWhereParameter(FieldConstant.TENANT_ID); final String[] tagArr = (String[]) context.getWhereParameter(FieldConstant.TAG_ARR); final String[] types = (String[]) context.getWhereParameter(FieldConstant.TYPE); WhereBuilder where = new WhereBuilder( "SELECT a.ID,a.data_id,a.group_id,a.tenant_id,a.app_name,a.content,a.type FROM config_info a LEFT JOIN " + "config_tags_relation b ON a.id=b.id"); where.like("a.tenant_id", tenantId); if (StringUtils.isNotBlank(dataId)) { where.and().like("a.data_id", dataId); } if (StringUtils.isNotBlank(group)) { where.and().like("a.group_id", group); } if (StringUtils.isNotBlank(appName)) { where.and().eq("a.app_name", appName); } if (StringUtils.isNotBlank(content)) { where.and().like("a.content", content); } if (!ArrayUtils.isEmpty(tagArr)) { where.and().in("b.tag_name", tagArr); } if (!ArrayUtils.isEmpty(types)) { where.and().in("a.type", types); } where.offset(context.getStartRow(), context.getPageSize()); return where.build(); }
// With only tenant and tags set, the generated Derby SQL must contain exactly the
// tenant LIKE plus the tag IN clause, and the parameters must line up in that order.
// NOTE(review): method name has a typo ("tset" for "test") — harmless under JUnit 5's
// @Test discovery, but worth renaming upstream.
@Test void tsetFindConfigInfoLike4PageFetchRows() { MapperResult mapperResult = configInfoTagsRelationMapperByDerby.findConfigInfoLike4PageFetchRows(context); assertEquals(mapperResult.getSql(), "SELECT a.ID,a.data_id,a.group_id,a.tenant_id,a.app_name,a.content,a.type FROM config_info a " + "LEFT JOIN config_tags_relation b ON a.id=b.id WHERE a.tenant_id LIKE ? AND b.tag_name " + "IN (?, ?, ?, ?, ?) OFFSET 0 ROWS FETCH NEXT 5 ROWS ONLY"); List<Object> list = CollectionUtils.list(tenantId); list.addAll(Arrays.asList(tagArr)); assertArrayEquals(mapperResult.getParamList().toArray(), list.toArray()); }
@Override public Closeable enter() { // Only update status from tracked thread to avoid race condition and inconsistent state updates if (executionContext.getExecutionStateTracker().getTrackedThread() != Thread.currentThread()) { return () -> {}; } updateCurrentStateIfOutdated(); return executionContext.getExecutionStateTracker().enterState(currentExecutionState); }
// When the tracker's tracked thread differs from the calling thread, enter() must be
// a no-op: enterState is never invoked.
@Test public void testEnterDoesntEnterStateIfCalledFromDifferentThread() { DataflowExecutionContext mockedExecutionContext = mock(DataflowExecutionContext.class); DataflowOperationContext mockedOperationContext = mock(DataflowOperationContext.class); final int siIndexId = 3; ExecutionStateTracker mockedExecutionStateTracker = mock(ExecutionStateTracker.class); when(mockedExecutionContext.getExecutionStateTracker()).thenReturn(mockedExecutionStateTracker); Thread mockedThreadObject = mock(Thread.class); when(mockedExecutionStateTracker.getTrackedThread()).thenReturn(mockedThreadObject); DataflowExecutionState mockedExecutionState = mock(DataflowExecutionState.class); when(mockedExecutionStateTracker.getCurrentState()).thenReturn(mockedExecutionState); NameContext mockedNameContext = mock(NameContext.class); when(mockedExecutionState.getStepName()).thenReturn(mockedNameContext); when(mockedNameContext.originalName()).thenReturn("DummyName"); NameContext mockedDeclaringNameContext = mock(NameContext.class); when(mockedOperationContext.nameContext()).thenReturn(mockedDeclaringNameContext); when(mockedDeclaringNameContext.originalName()).thenReturn("DummyDeclaringName"); CounterFactory mockedCounterFactory = mock(CounterFactory.class); when(mockedExecutionContext.getCounterFactory()).thenReturn(mockedCounterFactory); Counter<Long, Long> mockedCounter = mock(Counter.class); when(mockedCounterFactory.longSum(any())).thenReturn(mockedCounter); DataflowExecutionStateRegistry mockedExecutionStateRegistry = mock(DataflowExecutionStateRegistry.class); when(mockedExecutionContext.getExecutionStateRegistry()) .thenReturn(mockedExecutionStateRegistry); DataflowExecutionState mockedCounterExecutionState = mock(DataflowExecutionState.class); when(mockedExecutionStateRegistry.getIOState(any(), any(), any(), any(), any(), any())) .thenReturn(mockedCounterExecutionState); DataflowSideInputReadCounter testObject = new DataflowSideInputReadCounter(mockedExecutionContext, 
mockedOperationContext, siIndexId); testObject.enter(); verify(mockedExecutionStateTracker, never()).enterState(any()); }
/**
 * Sets the visible-area ratio used by exposure detection.
 *
 * @param areaRate the required visible fraction; no range validation is performed here
 */
public void setAreaRate(float areaRate) { this.areaRate = areaRate; }
// Setter round trip: the area rate set must be readable back via the getter.
@Test public void setAreaRate() { SAExposureConfig saExposureConfig = new SAExposureConfig(1,1,true); saExposureConfig.setAreaRate(2); assertEquals(2, saExposureConfig.getAreaRate(), 0.5); }
// Allocates OPPORTUNISTIC containers: merges the blacklist into the context, registers
// the new resource requests as outstanding, then loops over scheduler keys in
// descending order, allocating per key until a full pass makes no progress. When
// maxAllocationsPerAMHeartbeat > 0, allocation stops once the running total of
// containers (already matched + pending in this pass) reaches the cap. Per-key
// allocation maps are finally matched back into concrete containers.
@Override public List<Container> allocateContainers(ResourceBlacklistRequest blackList, List<ResourceRequest> oppResourceReqs, ApplicationAttemptId applicationAttemptId, OpportunisticContainerContext opportContext, long rmIdentifier, String appSubmitter) throws YarnException { // Update black list. updateBlacklist(blackList, opportContext); // Add OPPORTUNISTIC requests to the outstanding ones. opportContext.addToOutstandingReqs(oppResourceReqs); Set<String> nodeBlackList = new HashSet<>(opportContext.getBlacklist()); Set<String> allocatedNodes = new HashSet<>(); List<Container> allocatedContainers = new ArrayList<>(); // Satisfy the outstanding OPPORTUNISTIC requests. boolean continueLoop = true; while (continueLoop) { continueLoop = false; List<Map<Resource, List<Allocation>>> allocations = new ArrayList<>(); for (SchedulerRequestKey schedulerKey : opportContext.getOutstandingOpReqs().descendingKeySet()) { // Allocated containers : // Key = Requested Capability, // Value = List of Containers of given cap (the actual container size // might be different than what is requested, which is why // we need the requested capability (key) to match against // the outstanding reqs) int remAllocs = -1; int maxAllocationsPerAMHeartbeat = getMaxAllocationsPerAMHeartbeat(); if (maxAllocationsPerAMHeartbeat > 0) { remAllocs = maxAllocationsPerAMHeartbeat - allocatedContainers.size() - getTotalAllocations(allocations); if (remAllocs <= 0) { LOG.info("Not allocating more containers as we have reached max " + "allocations per AM heartbeat {}", maxAllocationsPerAMHeartbeat); break; } } Map<Resource, List<Allocation>> allocation = allocate( rmIdentifier, opportContext, schedulerKey, applicationAttemptId, appSubmitter, nodeBlackList, allocatedNodes, remAllocs); if (allocation.size() > 0) { allocations.add(allocation); continueLoop = true; } } matchAllocation(allocations, allocatedContainers, opportContext); } return allocatedContainers; }
// 100 requests with node-, rack- and ANY-level entries must all be satisfied across
// repeated allocate calls against a four-node (two-rack) cluster view.
@Test public void testLotsOfContainersRackLocalAllocation() throws Exception { ResourceBlacklistRequest blacklistRequest = ResourceBlacklistRequest.newInstance( new ArrayList<>(), new ArrayList<>()); List<ResourceRequest> reqs = new ArrayList<>(); for (int i = 0; i < 100; i++) { reqs.add(ResourceRequest.newBuilder().allocationRequestId(i + 1) .priority(PRIORITY_NORMAL) .resourceName("*") .capability(CAPABILITY_1GB) .relaxLocality(true) .executionType(ExecutionType.OPPORTUNISTIC).build()); reqs.add(ResourceRequest.newBuilder().allocationRequestId(i + 1) .priority(PRIORITY_NORMAL) .resourceName("h1") .capability(CAPABILITY_1GB) .relaxLocality(true) .executionType(ExecutionType.OPPORTUNISTIC).build()); reqs.add(ResourceRequest.newBuilder().allocationRequestId(i + 1) .priority(PRIORITY_NORMAL) .resourceName("/r1") .capability(CAPABILITY_1GB) .relaxLocality(true) .executionType(ExecutionType.OPPORTUNISTIC).build()); } ApplicationAttemptId appAttId = ApplicationAttemptId.newInstance( ApplicationId.newInstance(0L, 1), 1); oppCntxt.updateNodeList( Arrays.asList( RemoteNode.newInstance( NodeId.newInstance("h3", 1234), "h3:1234", "/r2"), RemoteNode.newInstance( NodeId.newInstance("h2", 1234), "h2:1234", "/r1"), RemoteNode.newInstance( NodeId.newInstance("h5", 1234), "h5:1234", "/r1"), RemoteNode.newInstance( NodeId.newInstance("h4", 1234), "h4:1234", "/r2"))); List<Container> containers = new ArrayList<>(); for (int i = 0; i < 25; i++) { containers.addAll(allocator.allocateContainers( blacklistRequest, reqs, appAttId, oppCntxt, 1L, "luser")); } Assert.assertEquals(100, containers.size()); }
public static IntStream allLinesFor(DefaultIssue issue, String componentUuid) { DbIssues.Locations locations = issue.getLocations(); if (locations == null) { return IntStream.empty(); } Stream<DbCommons.TextRange> textRanges = Stream.concat( locations.hasTextRange() ? Stream.of(locations.getTextRange()) : Stream.empty(), locations.getFlowList().stream() .flatMap(f -> f.getLocationList().stream()) .filter(l -> Objects.equals(componentIdOf(issue, l), componentUuid)) .map(DbIssues.Location::getTextRange)); return textRanges.flatMapToInt(range -> IntStream.rangeClosed(range.getStartLine(), range.getEndLine())); }
@Test public void allLinesFor_default_component_of_location_is_the_issue_component() { DbIssues.Locations.Builder locations = DbIssues.Locations.newBuilder(); locations.addFlowBuilder() .addLocation(newLocation("", 5, 5)) .addLocation(newLocation(null, 7, 7)) .addLocation(newLocation("file2", 9, 9)) .build(); DefaultIssue issue = new DefaultIssue() .setComponentUuid("file1") .setLocations(locations.build()); assertThat(IssueLocations.allLinesFor(issue, "file1")).containsExactlyInAnyOrder(5, 7); assertThat(IssueLocations.allLinesFor(issue, "file2")).containsExactlyInAnyOrder(9); assertThat(IssueLocations.allLinesFor(issue, "file3")).isEmpty(); }
public void addDatabase(final String databaseName, final DatabaseType protocolType, final ConfigurationProperties props) { ShardingSphereDatabase database = ShardingSphereDatabase.create(databaseName, protocolType, props); databases.put(database.getName(), database); globalRuleMetaData.getRules().forEach(each -> ((GlobalRule) each).refresh(databases, GlobalRuleChangedType.DATABASE_CHANGED)); }
@Test void assertAddDatabase() { GlobalRule globalRule = mock(GlobalRule.class); ShardingSphereDatabase database = mockDatabase(mock(ResourceMetaData.class, RETURNS_DEEP_STUBS), new MockedDataSource(), globalRule); DatabaseType databaseType = mock(DatabaseType.class); ConfigurationProperties configProps = new ConfigurationProperties(new Properties()); when(ShardingSphereDatabase.create("foo_db", databaseType, configProps)).thenReturn(database); Map<String, ShardingSphereDatabase> databases = new HashMap<>(Collections.singletonMap("foo_db", database)); ShardingSphereMetaData metaData = new ShardingSphereMetaData(databases, mock(ResourceMetaData.class), new RuleMetaData(Collections.singleton(globalRule)), configProps); metaData.addDatabase("foo_db", databaseType, configProps); assertThat(metaData.getDatabases(), is(databases)); verify(globalRule).refresh(databases, GlobalRuleChangedType.DATABASE_CHANGED); }
public TreeCache start() throws Exception { Preconditions.checkState(treeState.compareAndSet(TreeState.LATENT, TreeState.STARTED), "already started"); if (createParentNodes) { client.createContainers(root.path); } client.getConnectionStateListenable().addListener(connectionStateListener); if (client.getZookeeperClient().isConnected()) { root.wasCreated(); } return this; }
@Test public void testDeleteThenCreate() throws Exception { client.create().forPath("/test"); client.create().forPath("/test/foo", "one".getBytes()); cache = newTreeCacheWithListeners(client, "/test"); cache.start(); assertEvent(TreeCacheEvent.Type.NODE_ADDED, "/test"); assertEvent(TreeCacheEvent.Type.NODE_ADDED, "/test/foo"); assertEvent(TreeCacheEvent.Type.INITIALIZED); client.delete().forPath("/test/foo"); assertEvent(TreeCacheEvent.Type.NODE_REMOVED, "/test/foo", "one".getBytes()); client.create().forPath("/test/foo", "two".getBytes()); assertEvent(TreeCacheEvent.Type.NODE_ADDED, "/test/foo"); client.delete().forPath("/test/foo"); assertEvent(TreeCacheEvent.Type.NODE_REMOVED, "/test/foo", "two".getBytes()); client.create().forPath("/test/foo", "two".getBytes()); assertEvent(TreeCacheEvent.Type.NODE_ADDED, "/test/foo"); assertNoMoreEvents(); }
@Override public int getTotalNumberOfRecords(Configuration conf) throws HiveJdbcDatabaseAccessException { Connection conn = null; PreparedStatement ps = null; ResultSet rs = null; try { initializeDatabaseConnection(conf); String tableName = getQualifiedTableName(conf); // Always use JDBC_QUERY if available both for correctness and performance. JDBC_QUERY can be set by the user // or the CBO including pushdown optimizations. SELECT all query should be used only when JDBC_QUERY is null. String sql = firstNonNull(conf.get(Constants.JDBC_QUERY), selectAllFromTable(tableName)); String countQuery = "SELECT COUNT(*) FROM (" + sql + ") tmptable"; LOGGER.info("Query to execute is [{}]", countQuery); conn = dbcpDataSource.getConnection(); ps = conn.prepareStatement(countQuery); rs = ps.executeQuery(); if (rs.next()) { return rs.getInt(1); } else { LOGGER.warn("The count query {} did not return any results.", countQuery); throw new HiveJdbcDatabaseAccessException("Count query did not return any results."); } } catch (HiveJdbcDatabaseAccessException he) { throw he; } catch (Exception e) { LOGGER.error("Caught exception while trying to get the number of records: " + e.getMessage(), e); throw new HiveJdbcDatabaseAccessException(e); } finally { cleanupResources(conn, ps, rs); } }
@Test public void testGetTotalNumberOfRecords_whereClause() throws HiveJdbcDatabaseAccessException { Configuration conf = buildConfiguration(); conf.set(JdbcStorageConfig.QUERY.getPropertyName(), "select * from test_strategy where strategy_id = '5'"); DatabaseAccessor accessor = DatabaseAccessorFactory.getAccessor(conf); int numRecords = accessor.getTotalNumberOfRecords(conf); assertThat(numRecords, is(equalTo(1))); }
public char parseChar(String name) { String property = getProperties().getProperty(name); if (property.length() != 1) { throw new IllegalArgumentException(name + " property is't char"); } return property.charAt(0); }
@Test public void testParseChar() { System.out.println("parseChar"); char expResult; char result; Properties props = new Properties(); props.put("value1", "b"); props.put("value2", "c"); props.put("empty", ""); props.put("str", "abc"); props.put("boolean", "true"); props.put("float", "24.98"); props.put("int", "12"); props.put("char", "a"); PropertyParser instance = new PropertyParser(props); expResult = 'b'; result = instance.parseChar("value1"); assertEquals(expResult, result); expResult = 'c'; result = instance.parseChar("value2"); assertEquals(expResult, result); try { instance.parseChar("empty"); } catch (IllegalArgumentException e) { } try { instance.parseChar("str"); } catch (IllegalArgumentException e) { } try { instance.parseChar("boolean"); } catch (IllegalArgumentException e) { } try { instance.parseChar("float"); } catch (IllegalArgumentException e) { } try { instance.parseChar("int"); } catch (IllegalArgumentException e) { } expResult = 'a'; result = instance.parseChar("char"); assertEquals(expResult, result); try { instance.parseChar("nonexistent"); fail("no exception"); assertEquals(expResult, result); } catch (NullPointerException e) { } }
public long getCompressedLen() { return compressed_len; }
@Test public void testGetCompressedLen() { assertEquals(TestParameters.VP_RES_TBL_COMPR_LENGTH, chmLzxcResetTable.getCompressedLen()); }
@Override public List<Node> sniff(List<Node> nodes) { if (attribute == null || value == null) { return nodes; } return nodes.stream() .filter(node -> nodeMatchesFilter(node, attribute, value)) .collect(Collectors.toList()); }
@Test void returnsNoNodesIfFilterDoesNotMatch() throws Exception { final List<Node> nodes = mockNodes(); final NodesSniffer nodesSniffer = new FilteredElasticsearchNodesSniffer("location", "alaska"); assertThat(nodesSniffer.sniff(nodes)).isEmpty(); }
protected void handle(com.drew.metadata.Metadata metadataExtractor) throws MetadataException { handle(metadataExtractor.getDirectories().iterator()); }
@Test public void testCopyUnknownFieldsHandler() throws MetadataException { Directory d = Mockito.mock(Directory.class); Tag t1 = Mockito.mock(Tag.class); Mockito.when(t1.getTagName()).thenReturn("Image Description"); Mockito.when(t1.getDescription()).thenReturn("t1"); Tag t2 = Mockito.mock(Tag.class); Mockito.when(t2.getTagName()).thenReturn(TikaCoreProperties.SUBJECT.toString()); Mockito.when(t2.getDescription()).thenReturn("known"); Tag t3 = Mockito.mock(Tag.class); Mockito.when(t3.getTagName()).thenReturn(TikaCoreProperties.DESCRIPTION.getName()); Mockito.when(t3.getDescription()).thenReturn("known"); List<Tag> tags = Arrays.asList(t1, t2, t3); Mockito.when(d.getTags()).thenReturn(tags); Metadata metadata = new Metadata(); new ImageMetadataExtractor.CopyUnknownFieldsHandler().handle(d, metadata); assertEquals("t1", metadata.get("Image Description")); assertNull(metadata.get(TikaCoreProperties.SUBJECT), "keywords should be excluded from bulk copy because it is a defined field"); assertNull(metadata.get(TikaCoreProperties.DESCRIPTION)); }
public static Application mergeApplication(Application first, Application second) { if (!first.getName().equals(second.getName())) { throw new IllegalArgumentException("Cannot merge applications with different names"); } Application merged = copyApplication(first); for (InstanceInfo instance : second.getInstances()) { switch (instance.getActionType()) { case ADDED: case MODIFIED: merged.addInstance(instance); break; case DELETED: merged.removeInstance(instance); } } return merged; }
@Test public void testMergeApplicationIfActionTypeDeletedReturnApplication() { Application application = createSingleInstanceApp("foo", "foo", InstanceInfo.ActionType.DELETED); Assert.assertNotEquals(application.getInstances(), EurekaEntityFunctions.mergeApplication( application, application).getInstances()); }
@Override public ExecuteContext after(ExecuteContext context) { // Reactive programming cannot be deleted in the after method, otherwise thread variables cannot be // transparently transmitted to the load balancer thread // Will be deleted in HttpServerHandleInterceptor, ReactiveTypeHandlerInterceptor return context; }
@Test public void testAfter() { ThreadLocalUtils.addRequestTag(Collections.singletonMap("bar", Collections.singletonList("foo"))); Assert.assertNotNull(ThreadLocalUtils.getRequestTag()); // Test the after method without releasing thread variables interceptor.after(context); Assert.assertNotNull(ThreadLocalUtils.getRequestTag()); }
@Override public long getCounter(String key) { ensureStateEnabled(); return defaultStateStore.getCounter(key); }
@Test(expectedExceptions = IllegalStateException.class) public void testGetCounterStateDisabled() { context.getCounter("test-key"); }
@Override public void onKey( int primaryCode, Keyboard.Key key, int multiTapIndex, int[] nearByKeyCodes, boolean fromUI) { mParentListener.listener().onKey(primaryCode, key, multiTapIndex, nearByKeyCodes, fromUI); if ((mInOneShot && primaryCode != KeyCodes.DELETE) || primaryCode == KeyCodes.ENTER) { mKeyboardDismissAction.run(); } }
@Test public void testOnKeyOnEnter() { final AnyKeyboard.AnyKey key = Mockito.mock(AnyKeyboard.AnyKey.class); final int[] nearByKeyCodes = {3}; mUnderTest.onKey(KeyCodes.ENTER, key, 2, nearByKeyCodes, true); final InOrder inOrder = Mockito.inOrder(mMockParentListener, mMockKeyboardDismissAction); inOrder .verify(mMockParentListener) .onKey( Mockito.eq(KeyCodes.ENTER), Mockito.same(key), Mockito.eq(2), Mockito.same(nearByKeyCodes), Mockito.eq(true)); inOrder.verify(mMockKeyboardDismissAction).run(); inOrder.verifyNoMoreInteractions(); }
@Override public String render(String template, StatementContext ctx) { final ExtensionMethod extensionMethod = ctx.getExtensionMethod(); final String originalResult = originalEngine.render(template, ctx); if (extensionMethod == null) { return originalResult; } final StringBuilder query = new StringBuilder(originalResult.length() + 100); query.append("/* "); final String className = extensionMethod.getType().getSimpleName(); if (!className.isEmpty()) { query.append(className).append('.'); } query.append(extensionMethod.getMethod().getName()); query.append(" */ "); query.append(originalResult); return query.toString(); }
@Test void testNoExtensionMethodShouldReturnOriginal() { when(ctx.getExtensionMethod()).thenReturn(null); final String result = sut.render(TEMPLATE, ctx); assertThat(result).isEqualTo(ORIGINAL_RENDERED); }
public String dump() { final ProtocolVersion protocolVersion = AllProtocolVersions.LATEST_PROTOCOL_VERSION; final DataMap pathKeysMap = new DataMap(URIParamUtils.encodePathKeysForUri(_pathKeys, protocolVersion)); final DataMap queryParamsMap = QueryParamsUtil.convertToDataMap(_queryParams, _queryParamClasses, protocolVersion); final ToStringBuilder builder = new ToStringBuilder(null, ToStringStyle.SHORT_PREFIX_STYLE) .append("baseUriTemplate", _baseUriTemplate) .append("pathKeys", Data.dump("", pathKeysMap, "")) .append("id", _id) .append("queryParams", Data.dump("", queryParamsMap, "")); return builder.toString(); }
@Test public void testDump() { final Request<?> request = Mockito.mock(Request.class); Mockito.when(request.getBaseUriTemplate()).thenReturn(BASE_URI_TEMPLATE); Mockito.when(request.getPathKeys()).thenReturn(PATH_KEYS); Mockito.when(request.getQueryParamsObjects()).thenReturn(QUERY_PARAMS_OBJECTS); final RestliRequestUriSignature signature = new RestliRequestUriSignature(request, RestliRequestUriSignature.ALL_FIELDS); final String dump = signature.dump(); Assert.assertNotNull(dump); }
static ViewVersion fromJson(String json) { Preconditions.checkArgument(json != null, "Cannot parse view version from null string"); return JsonUtil.parse(json, ViewVersionParser::fromJson); }
@Test public void testParseViewVersion() { SQLViewRepresentation firstRepresentation = ImmutableSQLViewRepresentation.builder() .sql("select * from foo") .dialect("spark-sql") .build(); SQLViewRepresentation secondRepresentation = ImmutableSQLViewRepresentation.builder() .sql("select a, b, c from foo") .dialect("some-sql") .build(); ViewVersion expectedViewVersion = ImmutableViewVersion.builder() .versionId(1) .timestampMillis(12345) .defaultNamespace(Namespace.of("one", "two")) .addRepresentations(firstRepresentation, secondRepresentation) .summary(ImmutableMap.of("user", "some-user")) .schemaId(1) .build(); String serializedRepresentations = "[{\"type\":\"sql\", \"sql\":\"select * from foo\", \"dialect\":\"spark-sql\"}, " + "{\"type\":\"sql\", \"sql\":\"select a, b, c from foo\", \"dialect\":\"some-sql\"}]"; String serializedViewVersion = String.format( "{\"version-id\":1, \"timestamp-ms\":12345, \"schema-id\":1, \"summary\":{\"user\":\"some-user\"}, \"representations\":%s, \"default-namespace\":[\"one\",\"two\"]}", serializedRepresentations); assertThat(ViewVersionParser.fromJson(serializedViewVersion)) .as("Should be able to parse valid view version") .isEqualTo(expectedViewVersion); }
@Override public long computeLocalQuota(long confUsage, long myUsage, long[] allUsages) throws PulsarAdminException { // ToDo: work out the initial conditions: we may allow a small number of "first few iterations" to go // unchecked as we get some history of usage, or follow some other "TBD" method. if (confUsage < 0) { // This can happen if the RG is not configured with this particular limit (message or byte count) yet. val retVal = -1; if (log.isDebugEnabled()) { log.debug("Configured usage ({}) is not set; returning a special value ({}) for calculated quota", confUsage, retVal); } return retVal; } long totalUsage = 0; for (long usage : allUsages) { totalUsage += usage; } if (myUsage < 0 || totalUsage < 0) { String errMesg = String.format("Local usage (%d) or total usage (%d) is negative", myUsage, totalUsage); log.error(errMesg); throw new PulsarAdminException(errMesg); } // If the total usage is zero (which may happen during initial transients), just return the configured value. // The caller is expected to check the value returned, or not call here with a zero global usage. // [This avoids a division by zero when calculating the local share.] if (totalUsage == 0) { if (log.isDebugEnabled()) { log.debug("computeLocalQuota: totalUsage is zero; " + "returning the configured usage ({}) as new local quota", confUsage); } return confUsage; } if (myUsage > totalUsage) { String errMesg = String.format("Local usage (%d) is greater than total usage (%d)", myUsage, totalUsage); // Log as a warning [in case this can happen transiently (?)]. log.warn(errMesg); } // How much unused capacity is left over? float residual = confUsage - totalUsage; // New quota is the old usage incremented by any residual as a ratio of the local usage to the total usage. // This should result in the calculatedQuota increasing proportionately if total usage is less than the // configured usage, and reducing proportionately if the total usage is greater than the configured usage. 
// Capped to 1, to prevent negative or zero setting of quota. // the rate limiter code assumes that rate value of 0 or less to mean that no rate limit should be applied float myUsageFraction = (float) myUsage / totalUsage; float calculatedQuota = max(myUsage + residual * myUsageFraction, 1); val longCalculatedQuota = (long) calculatedQuota; if (log.isDebugEnabled()) { log.debug("computeLocalQuota: myUsage={}, totalUsage={}, myFraction={}; newQuota returned={} [long: {}]", myUsage, totalUsage, myUsageFraction, calculatedQuota, longCalculatedQuota); } return longCalculatedQuota; }
@Test public void testRQCalcNegativeAllUsageTest() { final long[] allUsage = { -1 }; Assert.assertThrows(PulsarAdminException.class, () -> this.rqCalc.computeLocalQuota(0, 0, allUsage)); }
@Override public boolean contains(CharSequence name) { return get(name) != null; }
@Test public void testContainsName() { Http2Headers headers = newClientHeaders(); assertTrue(headers.contains("Name1")); assertTrue(headers.contains(Http2Headers.PseudoHeaderName.PATH.value())); assertFalse(headers.contains(Http2Headers.PseudoHeaderName.STATUS.value())); assertFalse(headers.contains("a missing header")); }
@Override public int getRowCount() { return _resultsArray.size(); }
@Test public void testGetRowCount() { // Run the test final int result = _selectionResultSetUnderTest.getRowCount(); // Verify the results assertEquals(1, result); }
@Override public void setConf(Configuration conf) { if (conf != null) { conf = addSecurityConfiguration(conf); } super.setConf(conf); }
@Test public void testFailoverWithAutoHa() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); // Turn on auto-HA in the config HdfsConfiguration conf = getHAConf(); conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand()); tool.setConf(conf); assertEquals(0, runTool("-failover", "nn1", "nn2")); Mockito.verify(mockZkfcProtocol).gracefulFailover(); }
@PutMapping("/modify-password/{id}") public ShenyuAdminResult modifyPassword(@PathVariable("id") @Existed(provider = DashboardUserMapper.class, message = "user is not found") final String id, @Valid @RequestBody final DashboardUserModifyPasswordDTO dashboardUserModifyPasswordDTO) { UserInfo userInfo = (UserInfo) SecurityUtils.getSubject().getPrincipal(); if (Objects.isNull(userInfo)) { return ShenyuAdminResult.error(ShenyuResultMessage.DASHBOARD_USER_LOGIN_ERROR); } dashboardUserModifyPasswordDTO.setId(id); if (!userInfo.getUserId().equals(id) && !userInfo.getUserName().equals(dashboardUserModifyPasswordDTO.getUserName())) { return ShenyuAdminResult.error(ShenyuResultMessage.DASHBOARD_MODIFY_PASSWORD_ERROR); } dashboardUserModifyPasswordDTO.setPassword(DigestUtils.sha512Hex(dashboardUserModifyPasswordDTO.getPassword())); dashboardUserModifyPasswordDTO.setOldPassword(DigestUtils.sha512Hex(dashboardUserModifyPasswordDTO.getOldPassword())); return ShenyuAdminResult.success(ShenyuResultMessage.UPDATE_SUCCESS, dashboardUserService.modifyPassword(dashboardUserModifyPasswordDTO)); }
@Test public void modifyPassword() throws Exception { final String url = "/dashboardUser/modify-password/2"; UserInfo userInfo = UserInfo.builder().userId("2").userName("admin").build(); SimplePrincipalMap principalMap = new SimplePrincipalMap(); principalMap.put("real", userInfo); ThreadContext.bind(new DefaultSecurityManager()); ThreadContext.bind(new Subject.Builder().principals(principalMap).buildSubject()); given(dashboardUserService.modifyPassword(any())).willReturn(1); mockMvc.perform(put(url, modifyPasswordDTO) .content(GsonUtils.getInstance().toJson(modifyPasswordDTO)) .contentType(MediaType.APPLICATION_JSON)) .andExpect(status().isOk()) .andDo(print()) .andExpect(jsonPath("$.message", is(ShenyuResultMessage.UPDATE_SUCCESS))) .andExpect(jsonPath("$.data", is(1))); ThreadContext.bind(new Subject.Builder().principals(new SimplePrincipalMap()).buildSubject()); mockMvc.perform(put(url, modifyPasswordDTO) .content(GsonUtils.getInstance().toJson(modifyPasswordDTO)) .contentType(MediaType.APPLICATION_JSON)) .andExpect(status().isOk()) .andDo(print()) .andExpect(jsonPath("$.message", is(ShenyuResultMessage.DASHBOARD_USER_LOGIN_ERROR))); }
@Override public void onStateElection(Job job, JobState newState) { if (isNotFailed(newState) || isJobNotFoundException(newState) || isProblematicExceptionAndMustNotRetry(newState) || maxAmountOfRetriesReached(job)) return; job.scheduleAt(now().plusSeconds(getSecondsToAdd(job)), String.format("Retry %d of %d", getFailureCount(job), getMaxNumberOfRetries(job))); }
@Test void retryFilterDoesNotScheduleJobAgainIfMaxNumberOfRetriesIsReached() { final Job job = aJob() .<TestService>withJobDetails(ts -> ts.doWorkThatFails()) .withState(new FailedState("a message", new RuntimeException("boom"))) .withState(new FailedState("firstRetry", new RuntimeException("boom"))) .build(); applyDefaultJobFilter(job); int beforeVersion = job.getJobStates().size(); retryFilter.onStateElection(job, job.getJobState()); int afterVersion = job.getJobStates().size(); assertThat(afterVersion).isEqualTo(beforeVersion); assertThat(job.getState()).isEqualTo(FAILED); }
@Override public Set<DisjointPath> getDisjointPaths(ElementId src, ElementId dst, LinkWeigher weigher) { checkNotNull(src, ELEMENT_ID_NULL); checkNotNull(dst, ELEMENT_ID_NULL); LinkWeigher internalWeigher = weigher != null ? weigher : DEFAULT_WEIGHER; // Get the source and destination edge locations EdgeLink srcEdge = getEdgeLink(src, true); EdgeLink dstEdge = getEdgeLink(dst, false); // If either edge is null, bail with no paths. if (srcEdge == null || dstEdge == null) { return ImmutableSet.of(); } DeviceId srcDevice = srcEdge != NOT_HOST ? srcEdge.dst().deviceId() : (DeviceId) src; DeviceId dstDevice = dstEdge != NOT_HOST ? dstEdge.src().deviceId() : (DeviceId) dst; // If the source and destination are on the same edge device, there // is just one path, so build it and return it. if (srcDevice.equals(dstDevice)) { return edgeToEdgePathsDisjoint(srcEdge, dstEdge, internalWeigher); } // Otherwise get all paths between the source and destination edge // devices. Topology topology = topologyService.currentTopology(); Set<DisjointPath> paths = topologyService.getDisjointPaths(topology, srcDevice, dstDevice, internalWeigher); return edgeToEdgePathsDisjoint(srcEdge, dstEdge, paths, internalWeigher); }
@Test public void testDisjointPathsWithRiskProfile() { topoMgr.definePaths(ImmutableSet.of(path1)); Map<Link, Object> riskProfile = ImmutableMap.of(); Set<DisjointPath> paths = service.getDisjointPaths(did("A"), did("C"), new TestWeigher(), riskProfile); checkDisjointPaths(paths); }
@Override public List<SourceRecord> poll() { if (!consumerAccess.tryAcquire()) { return null; } if (stopping) { return null; } try { ConsumerRecords<byte[], byte[]> records = consumer.poll(pollTimeout); List<SourceRecord> sourceRecords = new ArrayList<>(records.count()); for (ConsumerRecord<byte[], byte[]> record : records) { SourceRecord converted = convertRecord(record); sourceRecords.add(converted); TopicPartition topicPartition = new TopicPartition(converted.topic(), converted.kafkaPartition()); metrics.recordAge(topicPartition, System.currentTimeMillis() - record.timestamp()); metrics.recordBytes(topicPartition, byteSize(record.value())); } if (sourceRecords.isEmpty()) { // WorkerSourceTasks expects non-zero batch size return null; } else { log.trace("Polled {} records from {}.", sourceRecords.size(), records.partitions()); return sourceRecords; } } catch (WakeupException e) { return null; } catch (KafkaException e) { log.warn("Failure during poll.", e); return null; } catch (Throwable e) { log.error("Failure during poll.", e); // allow Connect to deal with the exception throw e; } finally { consumerAccess.release(); } }
@Test public void testPoll() { // Create a consumer mock byte[] key1 = "abc".getBytes(); byte[] value1 = "fgh".getBytes(); byte[] key2 = "123".getBytes(); byte[] value2 = "456".getBytes(); List<ConsumerRecord<byte[], byte[]>> consumerRecordsList = new ArrayList<>(); String topicName = "test"; String headerKey = "key"; RecordHeaders headers = new RecordHeaders(new Header[] { new RecordHeader(headerKey, "value".getBytes()), }); consumerRecordsList.add(new ConsumerRecord<>(topicName, 0, 0, System.currentTimeMillis(), TimestampType.CREATE_TIME, key1.length, value1.length, key1, value1, headers, Optional.empty())); consumerRecordsList.add(new ConsumerRecord<>(topicName, 1, 1, System.currentTimeMillis(), TimestampType.CREATE_TIME, key2.length, value2.length, key2, value2, headers, Optional.empty())); ConsumerRecords<byte[], byte[]> consumerRecords = new ConsumerRecords<>(Collections.singletonMap(new TopicPartition(topicName, 0), consumerRecordsList)); @SuppressWarnings("unchecked") KafkaConsumer<byte[], byte[]> consumer = mock(KafkaConsumer.class); when(consumer.poll(any())).thenReturn(consumerRecords); MirrorSourceMetrics metrics = mock(MirrorSourceMetrics.class); String sourceClusterName = "cluster1"; ReplicationPolicy replicationPolicy = new DefaultReplicationPolicy(); MirrorSourceTask mirrorSourceTask = new MirrorSourceTask(consumer, metrics, sourceClusterName, replicationPolicy, null); List<SourceRecord> sourceRecords = mirrorSourceTask.poll(); assertEquals(2, sourceRecords.size()); for (int i = 0; i < sourceRecords.size(); i++) { SourceRecord sourceRecord = sourceRecords.get(i); ConsumerRecord<byte[], byte[]> consumerRecord = consumerRecordsList.get(i); assertEquals(consumerRecord.key(), sourceRecord.key(), "consumerRecord key does not equal sourceRecord key"); assertEquals(consumerRecord.value(), sourceRecord.value(), "consumerRecord value does not equal sourceRecord value"); // We expect that the topicname will be based on the replication policy currently used 
assertEquals(replicationPolicy.formatRemoteTopic(sourceClusterName, topicName), sourceRecord.topic(), "topicName not the same as the current replicationPolicy"); // We expect that MirrorMaker will keep the same partition assignment assertEquals(consumerRecord.partition(), sourceRecord.kafkaPartition().intValue(), "partition assignment not the same as the current replicationPolicy"); // Check header values List<Header> expectedHeaders = new ArrayList<>(); consumerRecord.headers().forEach(expectedHeaders::add); List<org.apache.kafka.connect.header.Header> taskHeaders = new ArrayList<>(); sourceRecord.headers().forEach(taskHeaders::add); compareHeaders(expectedHeaders, taskHeaders); } }
@Override public void remove(NamedNode master) { connection.sync(RedisCommands.SENTINEL_REMOVE, master.getName()); }
@Test public void testRemove() { Collection<RedisServer> masters = connection.masters(); connection.remove(masters.iterator().next()); }
public void validate(String clientId, String clientSecret, String workspace) { Token token = validateAccessToken(clientId, clientSecret); if (token.getScopes() == null || !token.getScopes().contains("pullrequest")) { LOG.info(MISSING_PULL_REQUEST_READ_PERMISSION + String.format(SCOPE, token.getScopes())); throw new IllegalArgumentException(ERROR_BBC_SERVERS + ": " + MISSING_PULL_REQUEST_READ_PERMISSION); } try { doGet(token.getAccessToken(), buildUrl("/repositories/" + workspace), r -> null); } catch (NotFoundException | IllegalStateException e) { throw new IllegalArgumentException(e.getMessage()); } }
@Test public void validate_fails_when_ssl_verification_failed() throws IOException { //GIVEN OkHttpClient okHttpClient = mock(OkHttpClient.class); Call call = mock(Call.class); underTest = new BitbucketCloudRestClient(okHttpClient, serverURL, serverURL); when(okHttpClient.newCall(any())).thenReturn(call); when(call.execute()).thenThrow(new SSLHandshakeException("SSL verification failed")); //WHEN //THEN assertThatIllegalArgumentException() .isThrownBy(() -> underTest.validate("clientId", "clientSecret", "workspace")) .withMessage(UNABLE_TO_CONTACT_BBC_SERVERS); assertThat(logTester.logs(Level.INFO)).containsExactly(String.format(BBC_FAIL_WITH_ERROR, serverURL, "SSL verification failed")); }
public static String buildUrl(boolean isHttps, String serverAddr, String... subPaths) { StringBuilder sb = new StringBuilder(); if (isHttps) { sb.append(HTTPS_PREFIX); } else { sb.append(HTTP_PREFIX); } sb.append(serverAddr); String pre = null; for (String subPath : subPaths) { if (StringUtils.isBlank(subPath)) { continue; } Matcher matcher = CONTEXT_PATH_MATCH.matcher(subPath); if (matcher.find()) { throw new IllegalArgumentException("Illegal url path expression : " + subPath); } if (pre == null || !pre.endsWith("/")) { if (subPath.startsWith("/")) { sb.append(subPath); } else { sb.append('/').append(subPath); } } else { if (subPath.startsWith("/")) { sb.append(subPath.replaceFirst("\\/", "")); } else { sb.append(subPath); } } pre = subPath; } return sb.toString(); }
@Test void testBuildHttpUrl3() { assertThrows(IllegalArgumentException.class, () -> { String targetUrl = HttpUtils.buildUrl(false, "127.0.0.1:8080", "/v1", "/api//", "test"); assertNotEquals(exceptUrl, targetUrl); }); }
@Override public void finishSuccess() { for (SQLExecutionHook each : sqlExecutionHooks) { each.finishSuccess(); } }
@Test void assertFinishSuccess() { spiSQLExecutionHook.finishSuccess(); assertTrue(SQLExecutionHookFixture.containsAction("finishSuccess")); }
public static String toString(long unixTime, String pattern) { return Instant.ofEpochSecond(unixTime).atZone(ZoneId.systemDefault()).format(DateTimeFormatter.ofPattern(pattern)); }
@Test public void testToString2() { String dateStr = DateKit.toString(time, "yyyy-MM-dd"); Assert.assertEquals("2017-09-20", dateStr); }
public TimeRange parseTimeRange(final String timerangeKeyword) { try { if (StringUtils.isBlank(timerangeKeyword)) { return null; } final Optional<TimeRange> shortTimeRange = shortTimerangeFormatParser.parse(timerangeKeyword); return shortTimeRange.orElseGet(() -> KeywordRange.create(timerangeKeyword, "UTC")); } catch (Exception e) { throw new IllegalArgumentException("Could not parse timerange " + timerangeKeyword + ". It should have a short format (i.e. '2h') or natural date format (i.e. 'last 2 hours')"); } }
@Test void returnsShortTimerangeParserResponseIfItIsNotEmpty() { doReturn(Optional.of(KeywordRange.create("last 1 year", "UTC"))).when(shortTimerangeFormatParser).parse("1y"); final TimeRange result = toTest.parseTimeRange("1y"); assertEquals(KeywordRange.create("last 1 year", "UTC"), result); }
@Override public <T extends State> T state(StateNamespace namespace, StateTag<T> address) { return workItemState.get(namespace, address, StateContexts.nullContext()); }
@Test public void testNewOrderedListNoFetch() throws Exception { StateTag<OrderedListState<String>> addr = StateTags.orderedList("orderedList", StringUtf8Coder.of()); OrderedListState<String> orderedList = underTestNewKey.state(NAMESPACE, addr); assertThat(orderedList.read(), Matchers.emptyIterable()); // Shouldn't need to read from windmill for this. Mockito.verifyZeroInteractions(mockReader); }
@Override public String getCreateTableSql(Table table) { String tableName = table.getName(); String schema = table.getSchema(); List<Column> columns = table.getColumns(); String columnDefinitions = columns.stream().map(this::getColumnDefinition).collect(Collectors.joining(",\n")); // comment table:COMMENT ON TABLE "schemaName"."tableName" IS 'comment'; String comment = String.format("COMMENT ON TABLE \"%s\".\"%s\" IS '%s';\n", schema, tableName, table.getComment()); // get primaryKeys List<String> columnKeys = table.getColumns().stream() .filter(Column::isKeyFlag) .map(Column::getName) .map(t -> String.format("\"%s\"", t)) .collect(Collectors.toList()); // add primaryKey String primaryKeyStr = columnKeys.isEmpty() ? "" : columnKeys.stream().collect(Collectors.joining(",", ", \n\tPRIMARY KEY (", ")\n")); // CREATE TABLE "schemaName"."tableName" ( columnDefinitions ); comment String ddl = String.format( "CREATE TABLE \"%s\".\"%s\" (\n%s%s);\n%s", schema, tableName, columnDefinitions, primaryKeyStr, comment); ddl += columns.stream() // COMMENT ON COLUMN "schemaName"."tableName"."columnName" IS 'comment' .map(c -> String.format( "COMMENT ON COLUMN \"%s\".\"%s\".\"%s\" IS '%s';\n", schema, tableName, c.getName(), c.getComment())) .collect(Collectors.joining()); return ddl; }
/**
 * Verifies the complete DDL emitted by {@code PostgreSqlDriver.getCreateTableSql}
 * for the shared "user" table fixture: column definitions, the PRIMARY KEY
 * clause, the COMMENT ON TABLE statement, and one COMMENT ON COLUMN statement
 * per column (comments are Chinese text).
 */
@Test
void getCreateTableSql() {
    PostgreSqlDriver postgreSqlDriver = new PostgreSqlDriver();
    String tableDDL = postgreSqlDriver.getCreateTableSql(table);
    // Expected DDL, built fragment by fragment to mirror the generator's exact output.
    String expect = "CREATE TABLE \"public\".\"user\" (\n"
            + "\t\"id\" BIGINT NOT NULL,\n"
            + "\t\"username\" VARCHAR(255) NOT NULL,\n"
            + "\t\"password\" VARCHAR(255) NOT NULL,\n"
            + "\t\"email\" VARCHAR(255) NOT NULL,\n"
            + "\t\"phone\" VARCHAR(20) NOT NULL,\n"
            + "\t\"age\" INT,\n"
            + "\t\"gender\" BOOLEAN,\n"
            + "\t\"height\" DECIMAL(5,2),\n"
            + "\t\"birthday\" DATE,\n"
            + "\t\"register_time\" TIMESTAMP NOT NULL, \n"
            + "\tPRIMARY KEY (\"id\")\n"
            + ");\n"
            + "COMMENT ON TABLE \"public\".\"user\" IS '用户表';\n"
            + "COMMENT ON COLUMN \"public\".\"user\".\"id\" IS '用户id';\n"
            + "COMMENT ON COLUMN \"public\".\"user\".\"username\" IS '用户名';\n"
            + "COMMENT ON COLUMN \"public\".\"user\".\"password\" IS '密码';\n"
            + "COMMENT ON COLUMN \"public\".\"user\".\"email\" IS '邮箱';\n"
            + "COMMENT ON COLUMN \"public\".\"user\".\"phone\" IS '电话号码';\n"
            + "COMMENT ON COLUMN \"public\".\"user\".\"age\" IS '年龄';\n"
            + "COMMENT ON COLUMN \"public\".\"user\".\"gender\" IS '性别';\n"
            + "COMMENT ON COLUMN \"public\".\"user\".\"height\" IS '身高';\n"
            + "COMMENT ON COLUMN \"public\".\"user\".\"birthday\" IS '生日';\n"
            + "COMMENT ON COLUMN \"public\".\"user\".\"register_time\" IS '注册时间';\n";
    assertEquals(expect, tableDDL);
}
/**
 * Parses the given SQL text into one {@link SQLRecognizer} per statement by
 * delegating to the SPI-loaded recognizer factory.
 *
 * @param sql    raw SQL text; may contain multiple ';'-separated statements
 * @param dbType the JDBC database type key (e.g. "mysql", "oracle")
 * @return recognizers for the statements; the tests show this may be null for
 *         plain (non-locking) SELECTs — confirm against the factory contract
 */
public static List<SQLRecognizer> get(String sql, String dbType) {
    return SQL_RECOGNIZER_FACTORY.create(sql, dbType);
}
/**
 * End-to-end coverage of {@code SQLVisitorFactory.get} across database types.
 *
 * <p>Single-statement SQL: INSERT/DELETE/UPDATE/SELECT-FOR-UPDATE map to the
 * db-specific recognizer class; plain SELECT yields null; an unsupported db
 * type (DB2) throws {@code EnhancedServiceNotFoundException}; empty SQL throws
 * {@code UnsupportedOperationException}.
 *
 * <p>Multi-statement SQL: homogeneous DELETE or UPDATE batches are supported
 * and return one recognizer per statement; any mixed batch (insert+insert,
 * insert+update, insert+delete, update+delete, multiple selects) throws
 * {@code UnsupportedOperationException}.
 */
@Test
public void testSqlRecognizing() {
    // test for ast was null
    Assertions.assertThrows(UnsupportedOperationException.class, () -> SQLVisitorFactory.get("", JdbcConstants.MYSQL));
    // test for mysql/mariadb/polardb-x insert
    String sql = "insert into t(id) values (1)";
    List<SQLRecognizer> recognizer = SQLVisitorFactory.get(sql, JdbcConstants.MYSQL);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), MySQLInsertRecognizer.class.getName());
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.MARIADB);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), MariadbInsertRecognizer.class.getName());
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.POLARDBX);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), PolarDBXInsertRecognizer.class.getName());
    // test for oracle insert
    sql = "insert into t(id) values (1)";
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.ORACLE);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), OracleInsertRecognizer.class.getName());
    // test for mysql/mariadb/polardb-x delete
    sql = "delete from t";
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.MYSQL);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), MySQLDeleteRecognizer.class.getName());
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.MARIADB);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), MariadbDeleteRecognizer.class.getName());
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.POLARDBX);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), PolarDBXDeleteRecognizer.class.getName());
    // test for mysql/mariadb/polardb-x update
    sql = "update t set a = a";
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.MYSQL);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), MySQLUpdateRecognizer.class.getName());
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.MARIADB);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), MariadbUpdateRecognizer.class.getName());
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.POLARDBX);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), PolarDBXUpdateRecognizer.class.getName());
    // test for mysql/mariadb/polardb-x select (plain SELECT is not recognized)
    sql = "select * from t";
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.MYSQL);
    Assertions.assertNull(recognizer);
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.MARIADB);
    Assertions.assertNull(recognizer);
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.POLARDBX);
    Assertions.assertNull(recognizer);
    // test for mysql/mariadb/polardb-x select for update
    sql = "select * from t for update";
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.MYSQL);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), MySQLSelectForUpdateRecognizer.class.getName());
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.MARIADB);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), MariadbSelectForUpdateRecognizer.class.getName());
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.POLARDBX);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), PolarDBXSelectForUpdateRecognizer.class.getName());
    // test for sqlserver insert
    sql = "insert into t(id) values (1)";
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.SQLSERVER);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), SqlServerInsertRecognizer.class.getName());
    // test for sqlserver delete
    sql = "delete from t";
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.SQLSERVER);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), SqlServerDeleteRecognizer.class.getName());
    // test for sqlserver update
    sql = "update t set a = a";
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.SQLSERVER);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), SqlServerUpdateRecognizer.class.getName());
    // test for sqlserver select
    sql = "select * from t";
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.SQLSERVER);
    Assertions.assertNull(recognizer);
    // test for oracle delete
    sql = "delete from t";
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.ORACLE);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), OracleDeleteRecognizer.class.getName());
    // test for oracle update
    sql = "update t set a = a";
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.ORACLE);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), OracleUpdateRecognizer.class.getName());
    // test for oracle select
    sql = "select * from t";
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.ORACLE);
    Assertions.assertNull(recognizer);
    // test for oracle select for update
    sql = "select * from t for update";
    recognizer = SQLVisitorFactory.get(sql, JdbcConstants.ORACLE);
    Assertions.assertEquals(recognizer.get(0).getClass().getName(), OracleSelectForUpdateRecognizer.class.getName());
    // test for do not support db
    Assertions.assertThrows(EnhancedServiceNotFoundException.class, () -> {
        SQLVisitorFactory.get("select * from t", JdbcConstants.DB2);
    });
    // TEST FOR Multi-SQL
    List<SQLRecognizer> sqlRecognizers;
    // test for mysql/mariadb/polardb-x multi insert (unsupported)
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("insert into t(id) values (1);insert into t(id) values (2)", JdbcConstants.MYSQL);
    });
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("insert into t(id) values (1);insert into t(id) values (2)", JdbcConstants.MARIADB);
    });
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("insert into t(id) values (1);insert into t(id) values (2)", JdbcConstants.POLARDBX);
    });
    // test for mysql/mariadb/polardb-x insert and update (unsupported mix)
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("insert into t(id) values (1);update t set a = t;", JdbcConstants.MYSQL);
    });
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("insert into t(id) values (1);update t set a = t;", JdbcConstants.MARIADB);
    });
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("insert into t(id) values (1);update t set a = t;", JdbcConstants.POLARDBX);
    });
    // test for mysql/mariadb/polardb-x insert and delete (unsupported mix)
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("insert into t(id) values (1);delete from t where id = 1", JdbcConstants.MYSQL);
    });
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("insert into t(id) values (1);delete from t where id = 1", JdbcConstants.MARIADB);
    });
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("insert into t(id) values (1);delete from t where id = 1", JdbcConstants.POLARDBX);
    });
    // test for mysql/mariadb/polardb-x homogeneous multi delete (supported)
    sql = "delete from t where id =1 ; delete from t where id = 2";
    sqlRecognizers = SQLVisitorFactory.get(sql, JdbcConstants.MYSQL);
    for (SQLRecognizer sqlRecognizer : sqlRecognizers) {
        Assertions.assertEquals(sqlRecognizer.getClass().getName(), MySQLDeleteRecognizer.class.getName());
    }
    sqlRecognizers = SQLVisitorFactory.get(sql, JdbcConstants.MARIADB);
    for (SQLRecognizer sqlRecognizer : sqlRecognizers) {
        Assertions.assertEquals(sqlRecognizer.getClass().getName(), MariadbDeleteRecognizer.class.getName());
    }
    sqlRecognizers = SQLVisitorFactory.get(sql, JdbcConstants.POLARDBX);
    for (SQLRecognizer sqlRecognizer : sqlRecognizers) {
        Assertions.assertEquals(sqlRecognizer.getClass().getName(), PolarDBXDeleteRecognizer.class.getName());
    }
    // test for mysql/mariadb/polardb-x homogeneous multi update (supported)
    sql = "update t set a = a;update t set a = c;";
    sqlRecognizers = SQLVisitorFactory.get(sql, JdbcConstants.MYSQL);
    for (SQLRecognizer sqlRecognizer : sqlRecognizers) {
        Assertions.assertEquals(sqlRecognizer.getClass().getName(), MySQLUpdateRecognizer.class.getName());
    }
    sqlRecognizers = SQLVisitorFactory.get(sql, JdbcConstants.MARIADB);
    for (SQLRecognizer sqlRecognizer : sqlRecognizers) {
        Assertions.assertEquals(sqlRecognizer.getClass().getName(), MariadbUpdateRecognizer.class.getName());
    }
    sqlRecognizers = SQLVisitorFactory.get(sql, JdbcConstants.POLARDBX);
    for (SQLRecognizer sqlRecognizer : sqlRecognizers) {
        Assertions.assertEquals(sqlRecognizer.getClass().getName(), PolarDBXUpdateRecognizer.class.getName());
    }
    // test for mysql/mariadb/polardb-x update and delete (unsupported mix)
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("update t set a = a where id =1;update t set a = c where id = 1;delete from t where id =1", JdbcConstants.MYSQL);
    });
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("update t set a = a where id =1;update t set a = c where id = 1;delete from t where id =1", JdbcConstants.MARIADB);
    });
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("update t set a = a where id =1;update t set a = c where id = 1;delete from t where id =1", JdbcConstants.POLARDBX);
    });
    // test for mysql/mariadb/polardb-x multi select (unsupported)
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("select * from d where id = 1; select * from t where id = 2", JdbcConstants.MYSQL);
    });
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("select * from d where id = 1; select * from t where id = 2", JdbcConstants.MARIADB);
    });
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("select * from d where id = 1; select * from t where id = 2", JdbcConstants.POLARDBX);
    });
    // test for mysql/mariadb/polardb-x select for update mixed with select (unsupported)
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("select * from t for update; select * from t where id = 2", JdbcConstants.MYSQL);
    });
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("select * from t for update; select * from t where id = 2", JdbcConstants.MARIADB);
    });
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("select * from t for update; select * from t where id = 2", JdbcConstants.POLARDBX);
    });
    // test for oracle multi insert (unsupported)
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("insert into t(id) values (1);insert into t(id) values (2)", JdbcConstants.ORACLE);
    });
    // test for oracle homogeneous multi delete (supported)
    sql = "delete from t where id =1 ; delete from t where id = 2";
    sqlRecognizers = SQLVisitorFactory.get(sql, JdbcConstants.ORACLE);
    for (SQLRecognizer sqlRecognizer : sqlRecognizers) {
        Assertions.assertEquals(sqlRecognizer.getClass().getName(), OracleDeleteRecognizer.class.getName());
    }
    // test for oracle homogeneous multi update (supported)
    sql = "update t set a = b where id =1 ;update t set a = c where id = 1;";
    sqlRecognizers = SQLVisitorFactory.get(sql, JdbcConstants.ORACLE);
    for (SQLRecognizer sqlRecognizer : sqlRecognizers) {
        Assertions.assertEquals(sqlRecognizer.getClass().getName(), OracleUpdateRecognizer.class.getName());
    }
    // test for oracle multi select (unsupported)
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("select * from b ; select * from t where id = 2", JdbcConstants.ORACLE);
    });
    // test for oracle select for update mixed with select (unsupported)
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("select * from t for update; select * from t where id = 2", JdbcConstants.ORACLE);
    });
    // test for oracle insert and update (unsupported mix)
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("insert into t(id) values (1);update t set a = t;", JdbcConstants.ORACLE);
    });
    // test for oracle insert and delete (unsupported mix)
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("insert into t(id) values (1);delete from t where id = 1", JdbcConstants.ORACLE);
    });
    // test for sqlserver multi select (unsupported)
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("select * from d where id = 1; select * from t where id = 2", JdbcConstants.SQLSERVER);
    });
    // test for sqlserver locking select (WITH(UPDLOCK)) mixed with select (unsupported)
    Assertions.assertThrows(UnsupportedOperationException.class, () -> {
        SQLVisitorFactory.get("select * from t WITH(UPDLOCK); select * from t where id = 2", JdbcConstants.SQLSERVER);
    });
}
public final void isFinite() {
    // A finite double is non-null, not NaN, and not positive/negative infinity.
    final boolean finite = actual != null && !actual.isNaN() && !actual.isInfinite();
    if (!finite) {
        failWithActual(simpleFact("expected to be finite"));
    }
}
@Test
public void isFinite() {
    // Ordinary and extreme-but-finite values must pass.
    assertThat(1.23).isFinite();
    assertThat(Double.MAX_VALUE).isFinite();
    assertThat(-1.0 * Double.MIN_VALUE).isFinite();

    // Infinities, NaN, and a null subject must all fail.
    assertThatIsFiniteFails(Double.POSITIVE_INFINITY);
    assertThatIsFiniteFails(Double.NEGATIVE_INFINITY);
    assertThatIsFiniteFails(Double.NaN);
    assertThatIsFiniteFails(null);
}
@Override
protected void doProcess(Exchange exchange, MetricsEndpoint endpoint, MetricRegistry registry, String metricsName)
        throws Exception {
    // NOTE: the call order below (getIn, counter, getIncrement, getDecrement,
    // then the two header lookups) is observable and must not be reordered.
    final Message message = exchange.getIn();
    final Counter counter = registry.counter(metricsName);

    final Long configuredIncrement = endpoint.getIncrement();
    final Long configuredDecrement = endpoint.getDecrement();

    // Message headers override the endpoint-configured values.
    final Long effectiveIncrement = getLongHeader(message, HEADER_COUNTER_INCREMENT, configuredIncrement);
    final Long effectiveDecrement = getLongHeader(message, HEADER_COUNTER_DECREMENT, configuredDecrement);

    // Increment wins over decrement; with neither set, bump the counter by one.
    if (effectiveIncrement != null) {
        counter.inc(effectiveIncrement);
    }
    else if (effectiveDecrement != null) {
        counter.dec(effectiveDecrement);
    }
    else {
        counter.inc();
    }
}
/**
 * When both the endpoint's configured decrement and the
 * HEADER_COUNTER_DECREMENT message header are present, the header value must
 * win and be passed to {@code Counter.dec()}. Also pins the exact interaction
 * order via the InOrder verifier.
 */
@Test
public void testProcessOverridingDecrement() throws Exception {
    // Stand-in for the absent increment header default (endpoint increment is null).
    Object action = null;
    when(endpoint.getIncrement()).thenReturn(null);
    when(endpoint.getDecrement()).thenReturn(DECREMENT);
    // Header overrides the configured decrement with DECREMENT - 1.
    when(in.getHeader(HEADER_COUNTER_DECREMENT, DECREMENT, Long.class)).thenReturn(DECREMENT - 1);
    producer.doProcess(exchange, endpoint, registry, METRICS_NAME);
    inOrder.verify(exchange, times(1)).getIn();
    inOrder.verify(registry, times(1)).counter(METRICS_NAME);
    inOrder.verify(endpoint, times(1)).getIncrement();
    inOrder.verify(endpoint, times(1)).getDecrement();
    inOrder.verify(in, times(1)).getHeader(HEADER_COUNTER_INCREMENT, action, Long.class);
    inOrder.verify(in, times(1)).getHeader(HEADER_COUNTER_DECREMENT, DECREMENT, Long.class);
    // The header-supplied value, not the configured one, reaches the counter.
    inOrder.verify(counter, times(1)).dec(DECREMENT - 1);
    inOrder.verifyNoMoreInteractions();
}
public static String getTemporaryObjectNameWithEntropy(
        GSBlobIdentifier finalBlobIdentifier, UUID temporaryObjectId) {
    // The temporary id appears both as a prefix (entropy injection — see the
    // filesink-entropy option in the tests) and as a suffix, so the name is
    // unique per upload: "<uuid><partial-name><uuid>".
    final String id = temporaryObjectId.toString();
    return id + getTemporaryObjectPartialName(finalBlobIdentifier) + id;
}
@Test
public void shouldProperlyConstructTemporaryObjectNameWithEntropy() {
    // Enable entropy injection for the file sink.
    Configuration flinkConfig = new Configuration();
    flinkConfig.set(GSFileSystemOptions.ENABLE_FILESINK_ENTROPY, Boolean.TRUE);

    GSBlobIdentifier identifier = new GSBlobIdentifier("foo", "bar");
    UUID temporaryObjectId = UUID.fromString("f09c43e5-ea49-4537-a406-0586f8f09d47");

    String objectName = BlobUtils.getTemporaryObjectNameWithEntropy(identifier, temporaryObjectId);

    // The UUID must appear both as the entropy prefix and as the suffix.
    assertEquals(
            "f09c43e5-ea49-4537-a406-0586f8f09d47.inprogress/foo/bar/f09c43e5-ea49-4537-a406-0586f8f09d47",
            objectName);
}
@Override
public String named() {
    // This plugin is registered under the Sentinel entry of the plugin enum.
    final PluginEnum plugin = PluginEnum.SENTINEL;
    return plugin.getName();
}
@Test
public void testNamed() {
    // The plugin must report the canonical Sentinel plugin name.
    assertEquals(PluginEnum.SENTINEL.getName(), sentinelPlugin.named());
}
/**
 * Creates a decoder that converts a value from the engine's internal stack
 * representation for {@code type} (Long/Double/Boolean/Slice/Block) into the
 * Java/Hive object form used downstream.
 *
 * <p>Scalar types map via casts or narrowing; DECIMAL maps to
 * {@code HiveDecimal} (short decimals from a packed long, long decimals from
 * an unscaled {@code Slice}); ROW/ARRAY/MAP delegate to their dedicated
 * decoders. Any unhandled type — including a DECIMAL that is neither short
 * nor long — falls through to {@code unsupportedType}.
 *
 * @throws RuntimeException via {@code unsupportedType(type)} for unhandled types
 */
public static ObjectInputDecoder createDecoder(Type type, TypeManager typeManager) {
    String base = type.getTypeSignature().getBase();
    switch (base) {
        case UnknownType.NAME:
            // Unknown values pass through untouched.
            return o -> o;
        case BIGINT:
            return o -> (Long) o;
        case INTEGER:
            // Integral types arrive as Long; narrow to the declared width.
            return o -> ((Long) o).intValue();
        case SMALLINT:
            return o -> ((Long) o).shortValue();
        case TINYINT:
            return o -> ((Long) o).byteValue();
        case BOOLEAN:
            return o -> (Boolean) o;
        case DATE:
            return DateTimeUtils::createDate;
        case DECIMAL:
            if (Decimals.isShortDecimal(type)) {
                // Short decimal: unscaled value packed in a long.
                final int scale = ((DecimalType) type).getScale();
                return o -> HiveDecimal.create(BigInteger.valueOf((long) o), scale);
            }
            else if (Decimals.isLongDecimal(type)) {
                // Long decimal: unscaled value encoded in a Slice.
                final int scale = ((DecimalType) type).getScale();
                return o -> HiveDecimal.create(Decimals.decodeUnscaledValue((Slice) o), scale);
            }
            // Neither short nor long: fall through to unsupportedType below.
            break;
        case REAL:
            // REAL is stored as the float's raw bits in a long/int.
            return o -> intBitsToFloat(((Number) o).intValue());
        case DOUBLE:
            return o -> ((Double) o);
        case TIMESTAMP:
            return o -> new Timestamp(((long) o));
        case VARBINARY:
            return o -> ((Slice) o).getBytes();
        case VARCHAR:
            return o -> ((Slice) o).toStringUtf8();
        case CHAR:
            return o -> ((Slice) o).toStringUtf8();
        case ROW:
            return RowObjectInputDecoder.create(((RowType) type), typeManager);
        case ARRAY:
            return ArrayObjectInputDecoder.create(((ArrayType) type), typeManager);
        case MAP:
            return MapObjectInputDecoder.create(((MapType) type), typeManager);
    }
    throw unsupportedType(type);
}
@Test
public void testBlockObjectDecoders() {
    // ARRAY(BIGINT) must produce the array decoder; the fixture block's first
    // element decodes to 2L.
    ObjectInputDecoder arrayDecoder = createDecoder(new ArrayType(BIGINT), typeManager);
    assertTrue(arrayDecoder instanceof ObjectInputDecoders.ArrayObjectInputDecoder);
    assertEquals(((ArrayList) arrayDecoder.decode(createLongArrayBlock())).get(0), 2L);

    // MAP(BIGINT, BIGINT) must produce the map decoder; the same fixture block
    // decodes to a map containing the entry 2L -> 1L.
    ObjectInputDecoder mapDecoder = createDecoder(
            new MapType(
                    BIGINT,
                    BIGINT,
                    methodHandle(TestRowType.class, "throwUnsupportedOperation"),
                    methodHandle(TestRowType.class, "throwUnsupportedOperation")),
            typeManager);
    assertTrue(mapDecoder instanceof ObjectInputDecoders.MapObjectInputDecoder);
    HashMap decoded = (HashMap) mapDecoder.decode(createLongArrayBlock());
    assertEquals(decoded.get(2L), 1L);
}